
Task: Question Answering
Model: MiniLM
Lang: IT

--------------------------------------------------------------------------------------------------

## Model description

This is a MiniLMv2 [1] model for the Italian language, fine-tuned for Extractive Question Answering on the [SQuAD-IT](https://huggingface.co/datasets/squad_it) dataset [2].

## Training and Performance

The model is trained to perform question answering, given a context and a question (under the assumption that the context contains the answer to the question). It has been fine-tuned for Extractive Question Answering on the SQuAD-IT dataset for 2 epochs, with a linearly decaying learning rate starting from 3e-5, a maximum sequence length of 384, and a document stride of 128.

The dataset includes 54,159 training instances and 7,609 test instances.

**Update: version 2.0**

Version 2.0 further improves performance by exploiting a two-phase fine-tuning strategy: the model is first fine-tuned on the English SQuAD v2 (1 epoch, 20% warmup ratio, and a maximum learning rate of 3e-5), then further fine-tuned on the Italian SQuAD-IT (2 epochs, no warmup, initial learning rate of 3e-5).

In order to maximize the benefits of the multilingual procedure, [L6xH384 mMiniLMv2](https://github.com/microsoft/unilm/tree/master/minilm) is used as the pre-trained model. Once the double fine-tuning is completed, the embedding layer is compressed as in [minilm-l6-h384-italian-cased](https://huggingface.co/osiria/minilm-l6-h384-italian-cased) to obtain a monolingual model size.

The performance on the test set is reported in the following table:

(version 2.0 performance)

| EM | F1 |
| ------ | ------ |
| 60.28 | 72.04 |

Testing notebook: https://huggingface.co/osiria/minilm-italian-l6-h384-question-answering/blob/main/osiria_minilm_l6_h384_italian_qa_evaluation.ipynb
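For illustration, a minimal sketch of how such a two-phase schedule could be reproduced with the `transformers` Trainer. This is an assumption-laden reconstruction, not the authors' actual training script: `model`, `squad_v2_features`, and `squad_it_features` are placeholders assumed to be prepared beforehand, and the dataset preprocessing into start/end positions is omitted.

```python
from transformers import TrainingArguments, Trainer

# `model`, `squad_v2_features` and `squad_it_features` are assumed to exist:
# a question-answering model plus tokenized SQuAD v2 / SQuAD-IT training sets.

# Phase 1: English SQuAD v2 -- 1 epoch, 20% warmup ratio, peak LR 3e-5.
# The default scheduler ("linear") gives the linearly decaying learning rate.
phase1_args = TrainingArguments(
    output_dir="phase1-squad-v2",
    num_train_epochs=1,
    warmup_ratio=0.2,
    learning_rate=3e-5,
)
Trainer(model=model, args=phase1_args, train_dataset=squad_v2_features).train()

# Phase 2: Italian SQuAD-IT -- 2 epochs, no warmup, initial LR 3e-5
phase2_args = TrainingArguments(
    output_dir="phase2-squad-it",
    num_train_epochs=2,
    warmup_ratio=0.0,
    learning_rate=3e-5,
)
Trainer(model=model, args=phase2_args, train_dataset=squad_it_features).train()
```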

## Quick usage

In order to get the best possible outputs from the model, it is recommended to use the following pipeline:

```python
import re
from transformers import AutoTokenizer, AutoModelForQuestionAnswering
from transformers.pipelines import QuestionAnsweringPipeline

tokenizer = AutoTokenizer.from_pretrained("osiria/minilm-italian-l6-h384-question-answering")
model = AutoModelForQuestionAnswering.from_pretrained("osiria/minilm-italian-l6-h384-question-answering")

class OsiriaQA(QuestionAnsweringPipeline):

    def __init__(self, punctuation=',;.:!?()[\\]{}', **kwargs):
        QuestionAnsweringPipeline.__init__(self, **kwargs)
        # Patterns matching leading/trailing whitespace and punctuation
        self.post_regex_left = "^[\\s" + punctuation + "]+"
        self.post_regex_right = "[\\s" + punctuation + "]+$"

    def postprocess(self, output):
        output = QuestionAnsweringPipeline.postprocess(self, model_outputs=output)
        # Strip leading punctuation/whitespace, shifting the start offset accordingly
        output_length = len(output["answer"])
        output["answer"] = re.sub(self.post_regex_left, "", output["answer"])
        output["start"] = output["start"] + (output_length - len(output["answer"]))
        # Strip trailing punctuation/whitespace, shifting the end offset accordingly
        output_length = len(output["answer"])
        output["answer"] = re.sub(self.post_regex_right, "", output["answer"])
        output["end"] = output["end"] - (output_length - len(output["answer"]))
        return output

pipeline_qa = OsiriaQA(model=model, tokenizer=tokenizer)
pipeline_qa(context="Alessandro Manzoni è nato a Milano nel 1785",
            question="Dove è nato Manzoni?")

# {'score': 0.9492858052253723, 'start': 28, 'end': 34, 'answer': 'Milano'}
```

You can also try the model online using this web app: https://huggingface.co/spaces/osiria/minilm-l6-h384-italian-question-answering

## References

[1] https://arxiv.org/abs/2012.15828

[2] https://link.springer.com/chapter/10.1007/978-3-030-03840-3_29

## Limitations

This model was trained on the English SQuAD v2 and on SQuAD-IT, which is mainly a machine-translated version of the original SQuAD v1.1. This means that the quality of the training set is limited by the machine translation.
Moreover, the model is meant to answer questions under the assumption that the required information is actually contained in the given context (which is the underlying assumption of SQuAD v1.1).
If this assumption is violated, the model will still try to return an answer, which is then going to be incorrect.
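One practical mitigation is to reject low-confidence answers, reusing `pipeline_qa` from the Quick usage section above. This is only a sketch: the 0.3 threshold is an arbitrary, uncalibrated assumption that should be tuned per application.

```python
result = pipeline_qa(context="Alessandro Manzoni è nato a Milano nel 1785",
                     question="Chi ha vinto il campionato?")  # answer not in the context

# Reject answers whose confidence falls below a tuned threshold
if result["score"] < 0.3:  # 0.3 is an illustrative value, not a calibrated one
    print("No reliable answer found in the given context.")
else:
    print(result["answer"])
```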

## License

The model is released under the MIT license.

--------------------------------------------------------------------------------------------------

# SpanMarker with bert-base-uncased on Acronym Identification

This is a [SpanMarker](https://github.com/tomaarsen/SpanMarkerNER) model trained on the [Acronym Identification](https://huggingface.co/datasets/acronym_identification) dataset that can be used for Named Entity Recognition. This SpanMarker model uses [bert-base-uncased](https://huggingface.co/bert-base-uncased) as the underlying encoder. See [train.py](train.py) for the training script.

Is your data always capitalized correctly? Then consider using the cased variant of this model instead for better performance: [tomaarsen/span-marker-bert-base-acronyms](https://huggingface.co/tomaarsen/span-marker-bert-base-acronyms).

## Model Details

### Model Description

- **Model Type:** SpanMarker
- **Encoder:** [bert-base-uncased](https://huggingface.co/bert-base-uncased)
- **Maximum Sequence Length:** 256 tokens
- **Maximum Entity Length:** 8 words
- **Training Dataset:** [Acronym Identification](https://huggingface.co/datasets/acronym_identification)
- **Language:** en
- **License:** apache-2.0

### Model Sources

- **Repository:** [SpanMarker on GitHub](https://github.com/tomaarsen/SpanMarkerNER)
- **Thesis:** [SpanMarker For Named Entity Recognition](https://raw.githubusercontent.com/tomaarsen/SpanMarkerNER/main/thesis.pdf)
### Model Labels

| Label | Examples |
|:------|:---------|
| long  | "successive convex approximation", "controlled natural language", "Conversational Question Answering" |
| short | "SODA", "CNL", "CoQA" |

## Evaluation

### Metrics

| Label   | Precision | Recall | F1     |
|:--------|:----------|:-------|:-------|
| **all** | 0.9339    | 0.9063 | 0.9199 |
| long    | 0.9314    | 0.8845 | 0.9074 |
| short   | 0.9352    | 0.9174 | 0.9262 |

## Uses

### Direct Use for Inference

```python
from span_marker import SpanMarkerModel

# Download from the 🤗 Hub
model = SpanMarkerModel.from_pretrained("tomaarsen/span-marker-bert-base-uncased-acronyms")
# Run inference
entities = model.predict("compression algorithms like principal component analysis (pca) can reduce noise and complexity.")
```
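For orientation, a sketch of consuming the predictions. The exact keys below are an assumption based on SpanMarker's documentation rather than something stated in this card:

```python
# Hypothetical inspection of the predictions: each entity is expected to be a
# dict carrying the span text, its label ("long"/"short") and a confidence score.
for entity in entities:
    print(entity["span"], entity["label"], round(entity["score"], 3))
```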
### Downstream Use

You can finetune this model on your own dataset.

```python
from datasets import load_dataset
from span_marker import SpanMarkerModel, Trainer

# Download from the 🤗 Hub
model = SpanMarkerModel.from_pretrained("tomaarsen/span-marker-bert-base-uncased-acronyms")

# Specify a Dataset with "tokens" and "ner_tags" columns
dataset = load_dataset("conll2003")  # For example CoNLL2003

# Initialize a Trainer using the pretrained model & dataset
trainer = Trainer(
    model=model,
    train_dataset=dataset["train"],
    eval_dataset=dataset["validation"],
)
trainer.train()
trainer.save_model("tomaarsen/span-marker-bert-base-uncased-acronyms-finetuned")
```
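After training, the saved checkpoint can be reloaded for inference in the same way as the original model. A minimal sketch, reusing the output path above as an assumption:

```python
# Reload the finetuned checkpoint and run inference with it
finetuned = SpanMarkerModel.from_pretrained("tomaarsen/span-marker-bert-base-uncased-acronyms-finetuned")
finetuned.predict("the model was evaluated with the f1 score (f1) on the validation split.")
```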
## Training Details

### Training Set Metrics

| Training set          | Min | Median  | Max |
|:----------------------|:----|:--------|:----|
| Sentence length       | 4   | 32.3372 | 170 |
| Entities per sentence | 0   | 2.6775  | 24  |

### Training Hyperparameters

- learning_rate: 5e-05
- train_batch_size: 32
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 2

### Training Results

| Epoch  | Step | Validation Loss | Validation Precision | Validation Recall | Validation F1 | Validation Accuracy |
|:------:|:----:|:---------------:|:--------------------:|:-----------------:|:-------------:|:-------------------:|
| 0.3120 | 200  | 0.0097          | 0.8999               | 0.8731            | 0.8863        | 0.9718              |
| 0.6240 | 400  | 0.0075          | 0.9163               | 0.8995            | 0.9078        | 0.9769              |
| 0.9360 | 600  | 0.0076          | 0.9079               | 0.9153            | 0.9116        | 0.9773              |
| 1.2480 | 800  | 0.0069          | 0.9267               | 0.9006            | 0.9135        | 0.9778              |
| 1.5601 | 1000 | 0.0065          | 0.9268               | 0.9044            | 0.9154        | 0.9782              |
| 1.8721 | 1200 | 0.0065          | 0.9279               | 0.9061            | 0.9168        | 0.9787              |

### Environmental Impact

Carbon emissions were measured using [CodeCarbon](https://github.com/mlco2/codecarbon).

- **Carbon Emitted**: 0.031 kg of CO2
- **Hours Used**: 0.272 hours

### Training Hardware

- **On Cloud**: No
- **GPU Model**: 1 x NVIDIA GeForce RTX 3090
- **CPU Model**: 13th Gen Intel(R) Core(TM) i7-13700K
- **RAM Size**: 31.78 GB

### Framework Versions

- Python: 3.9.16
- SpanMarker: 1.3.1.dev
- Transformers: 4.30.0
- PyTorch: 2.0.1+cu118
- Datasets: 2.14.0
- Tokenizers: 0.13.2

## Citation

### BibTeX

```
@software{Aarsen_SpanMarker,
    author = {Aarsen, Tom},
    license = {Apache-2.0},
    title = {{SpanMarker for Named Entity Recognition}},
    url = {https://github.com/tomaarsen/SpanMarkerNER}
}
```
--------------------------------------------------------------------------------------------------

# GIT (GenerativeImage2Text), large-sized, fine-tuned on COCO, R*

R = re-trained by removing some offensive captions in the cc12m dataset

GIT (short for GenerativeImage2Text) model, large-sized version, fine-tuned on COCO. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text).

Disclaimer: The team releasing GIT did not write a model card for this model, so this model card has been written by the Hugging Face team.

## Model description

GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on a lot of (image, text) pairs.

The goal for the model is simply to predict the next text token, given the image tokens and the previous text tokens.

The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token.

![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg)

This allows the model to be used for tasks like:

- image and video captioning
- visual question answering (VQA) on images and videos
- even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text).

## Intended uses & limitations

You can use the raw model for image captioning. See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you.

### How to use

For code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/main/model_doc/git#transformers.GitForCausalLM.forward.example).
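Complementing the documentation pointer, a minimal captioning sketch using the generic `transformers` image-to-text pipeline; the image path is a placeholder, and this is not an official example from the GIT authors:

```python
from transformers import pipeline

# The image-to-text pipeline wraps the GIT processor and generation loop
captioner = pipeline("image-to-text", model="microsoft/git-large-r-coco")
print(captioner("path/to/image.jpg"))  # placeholder path or URL
# e.g. [{'generated_text': '...'}]
```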
## Training data

From the paper:

> We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a).

Note, however, that this describes the model referred to as "GIT" in the paper, which is not open-sourced.

This checkpoint is "GIT-large", which is a smaller variant of GIT trained on 20 million image-text pairs.

Next, the model was fine-tuned on COCO.

See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details.

### Preprocessing

We refer to the original repo regarding details for preprocessing during training.

During validation, one resizes the shorter edge of each image, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation.
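A sketch of that validation-time preprocessing using standard torchvision transforms; the 224-pixel sizes are assumptions, and the processor shipped with the checkpoint remains the authoritative source:

```python
from torchvision import transforms

# Validation preprocessing as described above (sizes are assumptions)
preprocess = transforms.Compose([
    transforms.Resize(224),        # resize the shorter edge
    transforms.CenterCrop(224),    # center crop to a fixed-size resolution
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],  # ImageNet mean
                         std=[0.229, 0.224, 0.225]),  # ImageNet std
])
```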
## Evaluation results

For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
--------------------------------------------------------------------------------------------------

# neibla_distilbert-base-uncased-finetuned-emotion-finetuned-lora-ag_news

This model is a fine-tuned version of [neibla/distilbert-base-uncased-finetuned-emotion](https://huggingface.co/neibla/distilbert-base-uncased-finetuned-emotion) on the ag_news dataset.
It achieves the following results on the evaluation set:
- accuracy: 0.9391

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0004
- train_batch_size: 24
- eval_batch_size: 24
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 4

### Training results

| accuracy | train_loss | epoch |
|:--------:|:----------:|:-----:|
| 0.2512   | None       | 0     |
| 0.9261   | 0.2630     | 0     |
| 0.9305   | 0.1988     | 1     |
| 0.9357   | 0.1769     | 2     |
| 0.9391   | 0.1612     | 3     |

### Framework versions

- PEFT 0.8.2
- Transformers 4.37.2
- Pytorch 2.2.0
- Datasets 2.16.1
- Tokenizers 0.15.2
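Since this is a PEFT (LoRA) adapter rather than a full model, loading it requires the `peft` library. A minimal sketch; the `num_labels` handling is an assumption (ag_news has four classes), and the example sentence is illustrative:

```python
from peft import AutoPeftModelForSequenceClassification
from transformers import AutoTokenizer

# Load the LoRA adapter together with its distilbert base model
model = AutoPeftModelForSequenceClassification.from_pretrained(
    "TransferGraph/neibla_distilbert-base-uncased-finetuned-emotion-finetuned-lora-ag_news",
    num_labels=4,  # ag_news classes: World, Sports, Business, Sci/Tech
)
tokenizer = AutoTokenizer.from_pretrained("neibla/distilbert-base-uncased-finetuned-emotion")

inputs = tokenizer("Stocks rallied after the earnings report.", return_tensors="pt")
print(model(**inputs).logits.argmax(-1))
```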
--------------------------------------------------------------------------------------------------

# Description

This is the single-dataset adapter for the NewsQA partition of the MRQA 2019 Shared Task Dataset. The adapter was created by Friedman et al. (2021) and should be used with the `roberta-base` encoder.

The UKP-SQuARE team created this model repository to simplify the deployment of this model on the UKP-SQuARE platform. The GitHub repository of the original authors is https://github.com/princeton-nlp/MADE

# Usage

This model contains the same weights as https://huggingface.co/princeton-nlp/MADE/resolve/main/single_dataset_adapters/NewsQA/model.pt. The only difference is that our repository follows the standard format of AdapterHub. Therefore, you can load this model as follows:

```python
from transformers import RobertaForQuestionAnswering, RobertaTokenizerFast, pipeline

model = RobertaForQuestionAnswering.from_pretrained("roberta-base")
model.load_adapter("UKP-SQuARE/NewsQA_Adapter_RoBERTa", source="hf")
model.set_active_adapters("NewsQA")

tokenizer = RobertaTokenizerFast.from_pretrained("roberta-base")

pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)
pipe({"question": "What is the capital of Germany?", "context": "The capital of Germany is Berlin."})
```

Note that you need the adapter-transformers library (https://adapterhub.ml).

# Evaluation

Friedman et al. report an F1 score of **70.9 on NewsQA**.

Please refer to the original publication for more information.

# Citation

Single-dataset Experts for Multi-dataset Question Answering (Friedman et al., EMNLP 2021)
--------------------------------------------------------------------------------------------------

# my_awesome_model

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset.
It achieves the following results on the evaluation set:
- Loss: 0.1955
- Accuracy: 0.9269

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 32
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| 0.2164        | 1.0   | 1563 | 0.1955          | 0.9269   |

### Framework versions

- Transformers 4.36.0.dev0
- Pytorch 2.1.0+cu118
- Datasets 2.15.0
- Tokenizers 0.15.0
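A minimal inference sketch using the generic text-classification pipeline; the label names are whatever the checkpoint config defines, so the LABEL_0/LABEL_1 mapping shown here is an assumption:

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="TheOriginalMarcelo/my_awesome_model")
print(classifier("This movie was an absolute delight from start to finish."))
# e.g. [{'label': 'LABEL_1', 'score': 0.99}]  # label mapping is an assumption
```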
value: 0.5916230366492147\n name: accuracy\n---\n\n\n\n# socialmediaie_TRAC2020_IBEN_B_bert-base-multilingual-uncased-finetuned-lora-tweet_eval_irony\n\nThis model is a fine-tuned version of [socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased](https://huggingface.co/socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased) on the tweet_eval dataset.\nIt achieves the following results on the evaluation set:\n- accuracy: 0.5916\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0005\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 8\n\n### Training results\n\n| accuracy | train_loss | epoch |\n|:--------:|:----------:|:-----:|\n| 0.4963 | None | 0 |\n| 0.5204 | 0.7997 | 0 |\n| 0.5497 | 0.6917 | 1 |\n| 0.5696 | 0.6823 | 2 |\n| 0.5770 | 0.6693 | 3 |\n| 0.5791 | 0.6611 | 4 |\n| 0.5948 | 0.6538 | 5 |\n| 0.5874 | 0.6487 | 6 |\n| 0.5916 | 0.6428 | 7 |\n\n\n### Framework versions\n\n- PEFT 0.8.2\n- Transformers 4.37.2\n- Pytorch 2.2.0\n- Datasets 2.16.1\n- Tokenizers 0.15.2"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# socialmediaie_TRAC2020_IBEN_B_bert-base-multilingual-uncased-finetuned-lora-tweet_eval_irony\n\nThis model is a fine-tuned version of [socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased](https://huggingface.co/socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased) on the tweet_eval dataset.\nIt achieves the following results on the evaluation set:\n- accuracy: 0.5916\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0005\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 8\n\n### Training results\n\n| accuracy | train_loss | epoch |\n|:--------:|:----------:|:-----:|\n| 0.4963 | None | 0 |\n| 0.5204 | 0.7997 | 0 |\n| 0.5497 | 0.6917 | 1 |\n| 0.5696 | 0.6823 | 2 |\n| 0.5770 | 0.6693 | 3 |\n| 0.5791 | 0.6611 | 4 |\n| 0.5948 | 0.6538 | 5 |\n| 0.5874 | 0.6487 | 6 |\n| 0.5916 | 0.6428 | 7 |\n\n\n### Framework versions\n\n- PEFT 0.8.2\n- Transformers 4.37.2\n- Pytorch 2.2.0\n- Datasets 2.16.1\n- Tokenizers 0.15.2"},"metadata":{"kind":"string","value":"{\"base_model\": \"socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased\", \"datasets\": [\"tweet_eval\"], \"library_name\": \"peft\", \"metrics\": [\"accuracy\"], \"tags\": [\"parquet\", \"text-classification\"], \"model-index\": [{\"name\": \"socialmediaie_TRAC2020_IBEN_B_bert-base-multilingual-uncased-finetuned-lora-tweet_eval_irony\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"tweet_eval\", \"type\": \"tweet_eval\", \"config\": \"irony\", \"split\": \"validation\", \"args\": \"irony\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.5916230366492147, \"name\": 
\"accuracy\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45515,"string":"45,515"}}},{"rowIdx":43739,"cells":{"id":{"kind":"string","value":"magicsword/wy-mt-en-zh-2"},"author":{"kind":"string","value":"magicsword"},"task_category":{"kind":"string","value":"translation"},"tags":{"kind":"list like","value":["transformers","pytorch","safetensors","marian","text2text-generation","autotrain","translation","unk","dataset:magicsword/autotrain-data-wy-mt-en-zh","co2_eq_emissions","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"safetensors\",\n \"marian\",\n \"text2text-generation\",\n \"autotrain\",\n \"translation\",\n \"unk\",\n \"dataset:magicsword/autotrain-data-wy-mt-en-zh\",\n \"co2_eq_emissions\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-07-16T15:15:50Z","string":"2023-07-16T15:15:50Z"},"last_modified":{"kind":"string","value":"2023-07-16T17:27:39+00:00"},"downloads":{"kind":"number","value":12,"string":"12"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- magicsword/autotrain-data-wy-mt-en-zh\nlanguage:\n- unk\n- unk\ntags:\n- autotrain\n- translation\nco2_eq_emissions:\n emissions: 71.14399741050826\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Translation\n- Model ID: 74981139786\n- CO2 Emissions (in grams): 71.1440\n\n## Validation Metrics\n\n- Loss: 2.220\n- SacreBLEU: 12.949\n- Gen len: 16.386"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# Model Trained Using AutoTrain\n\n- Problem type: Translation\n- Model ID: 74981139786\n- CO2 Emissions (in grams): 71.1440\n\n## Validation Metrics\n\n- Loss: 2.220\n- SacreBLEU: 12.949\n- Gen len: 16.386"},"metadata":{"kind":"string","value":"{\"datasets\": [\"magicsword/autotrain-data-wy-mt-en-zh\"], \"language\": [\"unk\", \"unk\"], \"tags\": [\"autotrain\", \"translation\"], \"co2_eq_emissions\": {\"emissions\": 71.14399741050826}}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45516,"string":"45,516"}}},{"rowIdx":43740,"cells":{"id":{"kind":"string","value":"Lots-of-LoRAs/Mistral-7B-Instruct-v0.2-4b-r16-task1652"},"author":{"kind":"string","value":"Lots-of-LoRAs"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["pytorch","safetensors","en","arxiv:1910.09700","arxiv:2407.00066","base_model:mistralai/Mistral-7B-Instruct-v0.2","base_model:finetune:mistralai/Mistral-7B-Instruct-v0.2","license:mit","region:us"],"string":"[\n \"pytorch\",\n \"safetensors\",\n \"en\",\n \"arxiv:1910.09700\",\n \"arxiv:2407.00066\",\n \"base_model:mistralai/Mistral-7B-Instruct-v0.2\",\n \"base_model:finetune:mistralai/Mistral-7B-Instruct-v0.2\",\n \"license:mit\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2025-01-02T14:24:05Z","string":"2025-01-02T14:24:05Z"},"last_modified":{"kind":"string","value":"2025-01-02T14:24:11+00:00"},"downloads":{"kind":"number","value":0,"string":"0"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: mistralai/Mistral-7B-Instruct-v0.2\nlanguage: 
en\nlibrary_name: pytorch\nlicense: mit\n---\n\n# Model Card for Mistral-7B-Instruct-v0.2-4b-r16-task1652\n\n\n\n\n\n## Model Details\n\n### Model Description\n\n\n\nLoRA trained on task1652_opus_books_ca-en_translation\n\n- **Developed by:** bruel\n- **Funded by [optional]:** [More Information Needed]\n- **Shared by [optional]:** [More Information Needed]\n- **Model type:** LoRA\n- **Language(s) (NLP):** en\n- **License:** mit\n- **Finetuned from model [optional]:** mistralai/Mistral-7B-Instruct-v0.2\n\n### Model Sources [optional]\n\n\n\n- **Repository:** https://github.com/bruel-gabrielsson\n- **Paper [optional]:** \"Compress then Serve: Serving Thousands of LoRA Adapters with Little Overhead\" (2024), Rickard Brüel Gabrielsson, Jiacheng Zhu, Onkar Bhardwaj, Leshem Choshen, Kristjan Greenewald, Mikhail Yurochkin and Justin Solomon\n- **Demo [optional]:** [More Information Needed]\n\n## Uses\n\n\n\n### Direct Use\n\n\n\n[More Information Needed]\n\n### Downstream Use [optional]\n\n\n\n[More Information Needed]\n\n### Out-of-Scope Use\n\n\n\n[More Information Needed]\n\n## Bias, Risks, and Limitations\n\n\n\n[More Information Needed]\n\n### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.\n\n## How to Get Started with the Model\n\nUse the code below to get started with the model.\n\n[More Information Needed]\n\n## Training Details\n\n### Training Data\n\n\n\nhttps://huggingface.co/datasets/Lots-of-LoRAs/task1652_opus_books_ca-en_translation sourced from https://github.com/allenai/natural-instructions\n\n### Training Procedure\n\n\n\n#### Preprocessing [optional]\n\n[More Information Needed]\n\n\n#### Training Hyperparameters\n\n- **Training regime:** [More Information Needed] \n\n#### Speeds, Sizes, Times [optional]\n\n\n\n[More Information Needed]\n\n## Evaluation\n\n\n\n### Testing Data, Factors & Metrics\n\n#### Testing Data\n\n\n\n[More Information Needed]\n\n#### Factors\n\n\n\n[More Information Needed]\n\n#### Metrics\n\n\n\n[More Information Needed]\n\n### Results\n\n[More Information Needed]\n\n#### Summary\n\n\n\n## Model Examination [optional]\n\n\n\n[More Information Needed]\n\n## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. 
(2019)](https://arxiv.org/abs/1910.09700).\n\n- **Hardware Type:** [More Information Needed]\n- **Hours used:** [More Information Needed]\n- **Cloud Provider:** [More Information Needed]\n- **Compute Region:** [More Information Needed]\n- **Carbon Emitted:** [More Information Needed]\n\n## Technical Specifications [optional]\n\n### Model Architecture and Objective\n\n[More Information Needed]\n\n### Compute Infrastructure\n\n[More Information Needed]\n\n#### Hardware\n\n[More Information Needed]\n\n#### Software\n\n[More Information Needed]\n\n## Citation [optional]\n\n\n\n**BibTeX:**\n\n@misc{brüelgabrielsson2024compressserveservingthousands,\n title={Compress then Serve: Serving Thousands of LoRA Adapters with Little Overhead}, \n author={Rickard Brüel-Gabrielsson and Jiacheng Zhu and Onkar Bhardwaj and Leshem Choshen and Kristjan Greenewald and Mikhail Yurochkin and Justin Solomon},\n year={2024},\n eprint={2407.00066},\n archivePrefix={arXiv},\n primaryClass={cs.DC},\n url={https://arxiv.org/abs/2407.00066}, \n}\n\n**APA:**\n\n[More Information Needed]\n\n## Glossary [optional]\n\n\n\n[More Information Needed]\n\n## More Information [optional]\n\n[More Information Needed]\n\n## Model Card Authors [optional]\n\n[More Information Needed]\n\n## Model Card Contact\n\n[More Information Needed]"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# Model Card for Mistral-7B-Instruct-v0.2-4b-r16-task1652\n\n\n\n\n\n## Model Details\n\n### Model Description\n\n\n\nLoRA trained on task1652_opus_books_ca-en_translation\n\n- **Developed by:** bruel\n- **Funded by [optional]:** [More Information Needed]\n- **Shared by [optional]:** [More Information Needed]\n- **Model type:** LoRA\n- **Language(s) (NLP):** en\n- **License:** mit\n- **Finetuned from model [optional]:** mistralai/Mistral-7B-Instruct-v0.2\n\n### Model Sources [optional]\n\n\n\n- **Repository:** https://github.com/bruel-gabrielsson\n- **Paper [optional]:** \"Compress then Serve: Serving Thousands of LoRA Adapters with Little Overhead\" (2024), Rickard Brüel Gabrielsson, Jiacheng Zhu, Onkar Bhardwaj, Leshem Choshen, Kristjan Greenewald, Mikhail Yurochkin and Justin Solomon\n- **Demo [optional]:** [More Information Needed]\n\n## Uses\n\n\n\n### Direct Use\n\n\n\n[More Information Needed]\n\n### Downstream Use [optional]\n\n\n\n[More Information Needed]\n\n### Out-of-Scope Use\n\n\n\n[More Information Needed]\n\n## Bias, Risks, and Limitations\n\n\n\n[More Information Needed]\n\n### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. 
More information needed for further recommendations.\n\n## How to Get Started with the Model\n\nUse the code below to get started with the model.\n\n[More Information Needed]\n\n## Training Details\n\n### Training Data\n\n\n\nhttps://huggingface.co/datasets/Lots-of-LoRAs/task1652_opus_books_ca-en_translation sourced from https://github.com/allenai/natural-instructions\n\n### Training Procedure\n\n\n\n#### Preprocessing [optional]\n\n[More Information Needed]\n\n\n#### Training Hyperparameters\n\n- **Training regime:** [More Information Needed] \n\n#### Speeds, Sizes, Times [optional]\n\n\n\n[More Information Needed]\n\n## Evaluation\n\n\n\n### Testing Data, Factors & Metrics\n\n#### Testing Data\n\n\n\n[More Information Needed]\n\n#### Factors\n\n\n\n[More Information Needed]\n\n#### Metrics\n\n\n\n[More Information Needed]\n\n### Results\n\n[More Information Needed]\n\n#### Summary\n\n\n\n## Model Examination [optional]\n\n\n\n[More Information Needed]\n\n## Environmental Impact\n\n\n\nCarbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).\n\n- **Hardware Type:** [More Information Needed]\n- **Hours used:** [More Information Needed]\n- **Cloud Provider:** [More Information Needed]\n- **Compute Region:** [More Information Needed]\n- **Carbon Emitted:** [More Information Needed]\n\n## Technical Specifications [optional]\n\n### Model Architecture and Objective\n\n[More Information Needed]\n\n### Compute Infrastructure\n\n[More Information Needed]\n\n#### Hardware\n\n[More Information Needed]\n\n#### Software\n\n[More Information Needed]\n\n## Citation [optional]\n\n\n\n**BibTeX:**\n\n@misc{brüelgabrielsson2024compressserveservingthousands,\n title={Compress then Serve: Serving Thousands of LoRA Adapters with Little Overhead}, \n author={Rickard Brüel-Gabrielsson and Jiacheng Zhu and Onkar Bhardwaj and Leshem Choshen and Kristjan Greenewald and Mikhail Yurochkin and Justin Solomon},\n year={2024},\n eprint={2407.00066},\n archivePrefix={arXiv},\n primaryClass={cs.DC},\n url={https://arxiv.org/abs/2407.00066}, \n}\n\n**APA:**\n\n[More Information Needed]\n\n## Glossary [optional]\n\n\n\n[More Information Needed]\n\n## More Information [optional]\n\n[More Information Needed]\n\n## Model Card Authors [optional]\n\n[More Information Needed]\n\n## Model Card Contact\n\n[More Information Needed]"},"metadata":{"kind":"string","value":"{\"base_model\": \"mistralai/Mistral-7B-Instruct-v0.2\", \"language\": \"en\", \"library_name\": \"pytorch\", \"license\": \"mit\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45517,"string":"45,517"}}},{"rowIdx":43741,"cells":{"id":{"kind":"string","value":"yannryanhelsinki/setfit-language-guess"},"author":{"kind":"string","value":"yannryanhelsinki"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["setfit","safetensors","mpnet","sentence-transformers","text-classification","generated_from_setfit_trainer","arxiv:2209.11055","base_model:sentence-transformers/paraphrase-mpnet-base-v2","base_model:finetune:sentence-transformers/paraphrase-mpnet-base-v2","model-index","region:us"],"string":"[\n \"setfit\",\n \"safetensors\",\n \"mpnet\",\n \"sentence-transformers\",\n \"text-classification\",\n \"generated_from_setfit_trainer\",\n 
\"arxiv:2209.11055\",\n \"base_model:sentence-transformers/paraphrase-mpnet-base-v2\",\n \"base_model:finetune:sentence-transformers/paraphrase-mpnet-base-v2\",\n \"model-index\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-06-27T08:17:52Z","string":"2024-06-27T08:17:52Z"},"last_modified":{"kind":"string","value":"2024-06-27T08:18:21+00:00"},"downloads":{"kind":"number","value":4,"string":"4"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: sentence-transformers/paraphrase-mpnet-base-v2\nlibrary_name: setfit\nmetrics:\n- accuracy\npipeline_tag: text-classification\ntags:\n- setfit\n- sentence-transformers\n- text-classification\n- generated_from_setfit_trainer\nwidget:\n- text: 'Colloqujdi Gio: Lodovico Vives latini, e volgari/Colloqui'\n- text: Ioannis Lodovici Vivis Von Underweÿsung ayner christlichen Frauwen drey Bücher\n ...erklärt unnd verteütscht. Durch Christophorum Brunonem .../Von Underweysung\n ayner christlichen Frauwen drey Bücher\n- text: Absolvtissimae in Hebraicam lingvam institvtiones accvratissime in vsvm studiosæ\n juuentutis conscriptæ ...Avtore Iohanne Isaaco Leuita Germano/Absolutissimae in\n Hebraicam linguam institutiones accuratissime in usum studiosæ juventutis conscriptæ\n ... Autore Iohanne Isaaco Levita Germano\n- text: In tertiam partem D. Thomæ Aqvinatis commentaria Ioannis Wiggers ... a quæstione\n I. vsque ad quæstionem XXVI. de verbo incarnatoIn tertiam partem D. Thomae Aquinatis\n commentaria Ioannis Wiggers ... a quaestione I. usque ad quaestionem XXVI. de\n verbo incarnato\n- text: Tabvla in grammaticen Hebræam,authore Nicolao Clenardo. A Iohanne Quinquarboreo\n Aurilacensi à mendis quibus scatebat repurgata, & annotationibus illustrata./Tabula\n in grammaticen Hebraeam, authore Nicolao Clenardo. A Johanne Quinquarboreo Aurilacensi\n à mendis quibus scatebat repurgata, & annotationibus illustrata\ninference: true\nmodel-index:\n- name: SetFit with sentence-transformers/paraphrase-mpnet-base-v2\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: Unknown\n type: unknown\n split: test\n metrics:\n - type: accuracy\n value: 0.735\n name: Accuracy\n---\n\n# SetFit with sentence-transformers/paraphrase-mpnet-base-v2\n\nThis is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.\n\nThe model has been trained using an efficient few-shot learning technique that involves:\n\n1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.\n2. 
Training a classification head with features from the fine-tuned Sentence Transformer.\n\n## Model Details\n\n### Model Description\n- **Model Type:** SetFit\n- **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2)\n- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance\n- **Maximum Sequence Length:** 512 tokens\n- **Number of Classes:** 2 classes\n\n\n\n\n### Model Sources\n\n- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)\n- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)\n- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)\n\n### Model Labels\n| Label | Examples |\n|:------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| no |
  • 'Exomologesis sive Modus confitendi,per Erasmum Roterodamũ .../Exomologesis sive modus confitendi per Erasmum Roterodamum'
  • 'Aen-wysinge van de macht en de eer die aen Jesus-Christus toe-komt. En van de eerbiedinghe die-men schuldigh is aen sijn aldersuyverste moeder Maria, en andere heyligen.'
  • 'Staatkundige vermaningen en voorbeelden, die de deughden en zonden der vorsten betreffen.Nieuwelijks door I.H. Glazemaker vertaalt.'
|\n| yes |
  • 'Reclamations des trois états du duché de Brabant sur les atteintes portées a leurs droits et loix constitutionnelles au nom de S.M. Joseph II.'
  • 'Brief van het Magistraet van Brugge van date 16 February 1788 aen de ordinaire Gedeputeerde der Staeten van Vlaenderen tenderende om staets gewyze te doen naedere Representatie tegen de opregtinge van een Seminarie Generael tot Loven ...'
  • \"Bericht voor d'Universiteyt &c. van Leuven, over de wijtloopige memorie, en andere schriften en documenten daer by, overgegeven aen haer Ho. Mog. door de vicarissen van Doornik\"
|\n\n## Evaluation\n\n### Metrics\n| Label | Accuracy |\n|:--------|:---------|\n| **all** | 0.735 |\n\n## Uses\n\n### Direct Use for Inference\n\nFirst install the SetFit library:\n\n```bash\npip install setfit\n```\n\nThen you can load this model and run inference.\n\n```python\nfrom setfit import SetFitModel\n\n# Download from the 🤗 Hub\nmodel = SetFitModel.from_pretrained(\"yannryanhelsinki/setfit-language-guess\")\n# Run inference\npreds = model(\"Colloqujdi Gio: Lodovico Vives latini, e volgari/Colloqui\")\n```\n\n\n\n\n\n\n\n\n\n## Training Details\n\n### Training Set Metrics\n| Training set | Min | Median | Max |\n|:-------------|:----|:--------|:----|\n| Word count | 5 | 29.2759 | 92 |\n\n| Label | Training Sample Count |\n|:------|:----------------------|\n| no | 44 |\n| yes | 72 |\n\n### Training Hyperparameters\n- batch_size: (16, 16)\n- num_epochs: (1, 1)\n- max_steps: -1\n- sampling_strategy: oversampling\n- num_iterations: 20\n- body_learning_rate: (2e-05, 2e-05)\n- head_learning_rate: 2e-05\n- loss: CosineSimilarityLoss\n- distance_metric: cosine_distance\n- margin: 0.25\n- end_to_end: False\n- use_amp: False\n- warmup_proportion: 0.1\n- seed: 42\n- eval_max_steps: -1\n- load_best_model_at_end: False\n\n### Training Results\n| Epoch | Step | Training Loss | Validation Loss |\n|:------:|:----:|:-------------:|:---------------:|\n| 0.0034 | 1 | 0.2242 | - |\n| 0.1724 | 50 | 0.1951 | - |\n| 0.3448 | 100 | 0.0342 | - |\n| 0.5172 | 150 | 0.0008 | - |\n| 0.6897 | 200 | 0.0006 | - |\n| 0.8621 | 250 | 0.0003 | - |\n\n### Framework Versions\n- Python: 3.10.12\n- SetFit: 1.0.3\n- Sentence Transformers: 3.0.1\n- Transformers: 4.39.0\n- PyTorch: 2.3.0+cu121\n- Datasets: 2.20.0\n- Tokenizers: 0.15.2\n\n## Citation\n\n### BibTeX\n```bibtex\n@article{https://doi.org/10.48550/arxiv.2209.11055,\n doi = {10.48550/ARXIV.2209.11055},\n url = {https://arxiv.org/abs/2209.11055},\n author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},\n keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},\n title = {Efficient Few-Shot Learning Without Prompts},\n publisher = {arXiv},\n year = {2022},\n copyright = {Creative Commons Attribution 4.0 International}\n}\n```\n\n\n\n\n\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# SetFit with sentence-transformers/paraphrase-mpnet-base-v2\n\nThis is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification.\n\nThe model has been trained using an efficient few-shot learning technique that involves:\n\n1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.\n2. 
Training a classification head with features from the fine-tuned Sentence Transformer.\n\n## Model Details\n\n### Model Description\n- **Model Type:** SetFit\n- **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2)\n- **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance\n- **Maximum Sequence Length:** 512 tokens\n- **Number of Classes:** 2 classes\n\n\n\n\n### Model Sources\n\n- **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit)\n- **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055)\n- **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit)\n\n### Model Labels\n| Label | Examples |\n|:------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|\n| no |
  • 'Exomologesis sive Modus confitendi,per Erasmum Roterodamũ .../Exomologesis sive modus confitendi per Erasmum Roterodamum'
  • 'Aen-wysinge van de macht en de eer die aen Jesus-Christus toe-komt. En van de eerbiedinghe die-men schuldigh is aen sijn aldersuyverste moeder Maria, en andere heyligen.'
  • 'Staatkundige vermaningen en voorbeelden, die de deughden en zonden der vorsten betreffen.Nieuwelijks door I.H. Glazemaker vertaalt.'
|\n| yes |
  • 'Reclamations des trois états du duché de Brabant sur les atteintes portées a leurs droits et loix constitutionnelles au nom de S.M. Joseph II.'
  • 'Brief van het Magistraet van Brugge van date 16 February 1788 aen de ordinaire Gedeputeerde der Staeten van Vlaenderen tenderende om staets gewyze te doen naedere Representatie tegen de opregtinge van een Seminarie Generael tot Loven ...'
  • \"Bericht voor d'Universiteyt &c. van Leuven, over de wijtloopige memorie, en andere schriften en documenten daer by, overgegeven aen haer Ho. Mog. door de vicarissen van Doornik\"
|\n\n## Evaluation\n\n### Metrics\n| Label | Accuracy |\n|:--------|:---------|\n| **all** | 0.735 |\n\n## Uses\n\n### Direct Use for Inference\n\nFirst install the SetFit library:\n\n```bash\npip install setfit\n```\n\nThen you can load this model and run inference.\n\n```python\nfrom setfit import SetFitModel\n\n# Download from the 🤗 Hub\nmodel = SetFitModel.from_pretrained(\"yannryanhelsinki/setfit-language-guess\")\n# Run inference\npreds = model(\"Colloqujdi Gio: Lodovico Vives latini, e volgari/Colloqui\")\n```\n\n\n\n\n\n\n\n\n\n## Training Details\n\n### Training Set Metrics\n| Training set | Min | Median | Max |\n|:-------------|:----|:--------|:----|\n| Word count | 5 | 29.2759 | 92 |\n\n| Label | Training Sample Count |\n|:------|:----------------------|\n| no | 44 |\n| yes | 72 |\n\n### Training Hyperparameters\n- batch_size: (16, 16)\n- num_epochs: (1, 1)\n- max_steps: -1\n- sampling_strategy: oversampling\n- num_iterations: 20\n- body_learning_rate: (2e-05, 2e-05)\n- head_learning_rate: 2e-05\n- loss: CosineSimilarityLoss\n- distance_metric: cosine_distance\n- margin: 0.25\n- end_to_end: False\n- use_amp: False\n- warmup_proportion: 0.1\n- seed: 42\n- eval_max_steps: -1\n- load_best_model_at_end: False\n\n### Training Results\n| Epoch | Step | Training Loss | Validation Loss |\n|:------:|:----:|:-------------:|:---------------:|\n| 0.0034 | 1 | 0.2242 | - |\n| 0.1724 | 50 | 0.1951 | - |\n| 0.3448 | 100 | 0.0342 | - |\n| 0.5172 | 150 | 0.0008 | - |\n| 0.6897 | 200 | 0.0006 | - |\n| 0.8621 | 250 | 0.0003 | - |\n\n### Framework Versions\n- Python: 3.10.12\n- SetFit: 1.0.3\n- Sentence Transformers: 3.0.1\n- Transformers: 4.39.0\n- PyTorch: 2.3.0+cu121\n- Datasets: 2.20.0\n- Tokenizers: 0.15.2\n\n## Citation\n\n### BibTeX\n```bibtex\n@article{https://doi.org/10.48550/arxiv.2209.11055,\n doi = {10.48550/ARXIV.2209.11055},\n url = {https://arxiv.org/abs/2209.11055},\n author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},\n keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},\n title = {Efficient Few-Shot Learning Without Prompts},\n publisher = {arXiv},\n year = {2022},\n copyright = {Creative Commons Attribution 4.0 International}\n}\n```\n\n\n\n\n\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"sentence-transformers/paraphrase-mpnet-base-v2\", \"library_name\": \"setfit\", \"metrics\": [\"accuracy\"], \"pipeline_tag\": \"text-classification\", \"tags\": [\"setfit\", \"sentence-transformers\", \"text-classification\", \"generated_from_setfit_trainer\"], \"widget\": [{\"text\": \"Colloqujdi Gio: Lodovico Vives latini, e volgari/Colloqui\"}, {\"text\": \"Ioannis Lodovici Vivis Von Underweÿsung ayner christlichen Frauwen drey Bücher ...erklärt unnd verteütscht. Durch Christophorum Brunonem .../Von Underweysung ayner christlichen Frauwen drey Bücher\"}, {\"text\": \"Absolvtissimae in Hebraicam lingvam institvtiones accvratissime in vsvm studiosæ juuentutis conscriptæ ...Avtore Iohanne Isaaco Leuita Germano/Absolutissimae in Hebraicam linguam institutiones accuratissime in usum studiosæ juventutis conscriptæ ... Autore Iohanne Isaaco Levita Germano\"}, {\"text\": \"In tertiam partem D. Thomæ Aqvinatis commentaria Ioannis Wiggers ... a quæstione I. vsque ad quæstionem XXVI. de verbo incarnatoIn tertiam partem D. Thomae Aquinatis commentaria Ioannis Wiggers ... a quaestione I. usque ad quaestionem XXVI. 
de verbo incarnato\"}, {\"text\": \"Tabvla in grammaticen Hebræam,authore Nicolao Clenardo. A Iohanne Quinquarboreo Aurilacensi à mendis quibus scatebat repurgata, & annotationibus illustrata./Tabula in grammaticen Hebraeam, authore Nicolao Clenardo. A Johanne Quinquarboreo Aurilacensi à mendis quibus scatebat repurgata, & annotationibus illustrata\"}], \"inference\": true, \"model-index\": [{\"name\": \"SetFit with sentence-transformers/paraphrase-mpnet-base-v2\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"Unknown\", \"type\": \"unknown\", \"split\": \"test\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.735, \"name\": \"Accuracy\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45518,"string":"45,518"}}},{"rowIdx":43742,"cells":{"id":{"kind":"string","value":"mrm8488/t5-base-finetuned-emotion"},"author":{"kind":"string","value":"mrm8488"},"task_category":{"kind":"string","value":"text2text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","jax","t5","text2text-generation","en","dataset:emotion","arxiv:1910.10683","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"jax\",\n \"t5\",\n \"text2text-generation\",\n \"en\",\n \"dataset:emotion\",\n \"arxiv:1910.10683\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-03-02T23:29:05Z","string":"2022-03-02T23:29:05Z"},"last_modified":{"kind":"string","value":"2021-06-23T12:46:24+00:00"},"downloads":{"kind":"number","value":8106,"string":"8,106"},"likes":{"kind":"number","value":52,"string":"52"},"README":{"kind":"string","value":"---\ndatasets:\n- emotion\nlanguage: en\nwidget:\n- text: I wish you were here but it is impossible\n---\n\n# T5-base fine-tuned for Emotion Recognition 😂😢😡😃😯\n\n\n[Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) base fine-tuned on [emotion recognition](https://github.com/dair-ai/emotion_dataset) dataset for **Emotion Recognition** downstream task.\n\n## Details of T5\n\nThe **T5** model was presented in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/pdf/1910.10683.pdf) by *Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu* in Here the abstract:\n\nTransfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. 
By combining the insights from our exploration with scale and our new “Colossal Clean Crawled Corpus”, we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code.\n\n![model image](https://i.imgur.com/jVFMMWR.png)\n\n## Details of the downstream task (Sentiment Recognition) - Dataset 📚\n\n[Elvis Saravia](https://twitter.com/omarsar0) has gathered a great [dataset](https://github.com/dair-ai/emotion_dataset) for emotion recognition. It allows to classifiy the text into one of the following **6** emotions:\n\n - sadness 😢\n - joy 😃\n - love 🥰\n - anger 😡\n - fear 😱\n - surprise 😯\n\n## Model fine-tuning 🏋️‍\n\nThe training script is a slightly modified version of [this Colab Notebook](https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) created by [Suraj Patil](https://github.com/patil-suraj), so all credits to him!\n\n## Test set metrics 🧾\n\n| |precision | recall | f1-score |support|\n|----------|----------|---------|----------|-------|\n|anger | 0.93| 0.92| 0.93| 275|\n|fear | 0.91| 0.87| 0.89| 224|\n|joy | 0.97| 0.94| 0.95| 695|\n|love | 0.80| 0.91| 0.85| 159|\n|sadness | 0.97| 0.97| 0.97| 521|\n|surpirse | 0.73| 0.89| 0.80| 66|\n| |\n|accuracy| | | 0.93| 2000|\n|macro avg| 0.89| 0.92| 0.90| 2000|\n|weighted avg| 0.94| 0.93| 0.93| 2000|\n \n \n \n \n\n\n\n## Model in Action 🚀\n\n```python\nfrom transformers import AutoTokenizer, AutoModelWithLMHead\n\ntokenizer = AutoTokenizer.from_pretrained(\"mrm8488/t5-base-finetuned-emotion\")\n\nmodel = AutoModelWithLMHead.from_pretrained(\"mrm8488/t5-base-finetuned-emotion\")\n\ndef get_emotion(text):\n input_ids = tokenizer.encode(text + '', return_tensors='pt')\n\n output = model.generate(input_ids=input_ids,\n max_length=2)\n \n dec = [tokenizer.decode(ids) for ids in output]\n label = dec[0]\n return label\n \n get_emotion(\"i feel as if i havent blogged in ages are at least truly blogged i am doing an update cute\") # Output: 'joy'\n \n get_emotion(\"i have a feeling i kinda lost my best friend\") # Output: 'sadness'\n```\n\n> Created by [Manuel Romero/@mrm8488](https://twitter.com/mrm8488) | [LinkedIn](https://www.linkedin.com/in/manuel-romero-cs/)\n\n> Made with ♥ in Spain\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# T5-base fine-tuned for Emotion Recognition 😂😢😡😃😯\n\n\n[Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) base fine-tuned on [emotion recognition](https://github.com/dair-ai/emotion_dataset) dataset for **Emotion Recognition** downstream task.\n\n## Details of T5\n\nThe **T5** model was presented in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/pdf/1910.10683.pdf) by *Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu* in Here the abstract:\n\nTransfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. 
In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration with scale and our new “Colossal Clean Crawled Corpus”, we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code.\n\n![model image](https://i.imgur.com/jVFMMWR.png)\n\n## Details of the downstream task (Sentiment Recognition) - Dataset 📚\n\n[Elvis Saravia](https://twitter.com/omarsar0) has gathered a great [dataset](https://github.com/dair-ai/emotion_dataset) for emotion recognition. It allows to classifiy the text into one of the following **6** emotions:\n\n - sadness 😢\n - joy 😃\n - love 🥰\n - anger 😡\n - fear 😱\n - surprise 😯\n\n## Model fine-tuning 🏋️‍\n\nThe training script is a slightly modified version of [this Colab Notebook](https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) created by [Suraj Patil](https://github.com/patil-suraj), so all credits to him!\n\n## Test set metrics 🧾\n\n| |precision | recall | f1-score |support|\n|----------|----------|---------|----------|-------|\n|anger | 0.93| 0.92| 0.93| 275|\n|fear | 0.91| 0.87| 0.89| 224|\n|joy | 0.97| 0.94| 0.95| 695|\n|love | 0.80| 0.91| 0.85| 159|\n|sadness | 0.97| 0.97| 0.97| 521|\n|surpirse | 0.73| 0.89| 0.80| 66|\n| |\n|accuracy| | | 0.93| 2000|\n|macro avg| 0.89| 0.92| 0.90| 2000|\n|weighted avg| 0.94| 0.93| 0.93| 2000|\n \n \n \n \n\n\n\n## Model in Action 🚀\n\n```python\nfrom transformers import AutoTokenizer, AutoModelWithLMHead\n\ntokenizer = AutoTokenizer.from_pretrained(\"mrm8488/t5-base-finetuned-emotion\")\n\nmodel = AutoModelWithLMHead.from_pretrained(\"mrm8488/t5-base-finetuned-emotion\")\n\ndef get_emotion(text):\n input_ids = tokenizer.encode(text + '', return_tensors='pt')\n\n output = model.generate(input_ids=input_ids,\n max_length=2)\n \n dec = [tokenizer.decode(ids) for ids in output]\n label = dec[0]\n return label\n \n get_emotion(\"i feel as if i havent blogged in ages are at least truly blogged i am doing an update cute\") # Output: 'joy'\n \n get_emotion(\"i have a feeling i kinda lost my best friend\") # Output: 'sadness'\n```\n\n> Created by [Manuel Romero/@mrm8488](https://twitter.com/mrm8488) | [LinkedIn](https://www.linkedin.com/in/manuel-romero-cs/)\n\n> Made with ♥ in Spain\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"emotion\"], \"language\": \"en\", \"widget\": [{\"text\": \"I wish you were here but it is impossible\"}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION","QUESTION_ANSWERING","SUMMARIZATION"],"string":"[\n \"TEXT_CLASSIFICATION\",\n \"QUESTION_ANSWERING\",\n \"SUMMARIZATION\"\n]"},"__index_level_0__":{"kind":"number","value":45519,"string":"45,519"}}},{"rowIdx":43743,"cells":{"id":{"kind":"string","value":"KYUNGHYUN9/ko-sroberta-itos-training-example_v0.02"},"author":{"kind":"string","value":"KYUNGHYUN9"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list 
like","value":["sentence-transformers","safetensors","roberta","sentence-similarity","feature-extraction","generated_from_trainer","dataset_size:574417","loss:MultipleNegativesRankingLoss","loss:CosineSimilarityLoss","arxiv:1908.10084","arxiv:1705.00652","base_model:klue/roberta-base","base_model:finetune:klue/roberta-base","model-index","autotrain_compatible","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"safetensors\",\n \"roberta\",\n \"sentence-similarity\",\n \"feature-extraction\",\n \"generated_from_trainer\",\n \"dataset_size:574417\",\n \"loss:MultipleNegativesRankingLoss\",\n \"loss:CosineSimilarityLoss\",\n \"arxiv:1908.10084\",\n \"arxiv:1705.00652\",\n \"base_model:klue/roberta-base\",\n \"base_model:finetune:klue/roberta-base\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-06-27T08:34:33Z","string":"2024-06-27T08:34:33Z"},"last_modified":{"kind":"string","value":"2024-06-27T08:35:29+00:00"},"downloads":{"kind":"number","value":7,"string":"7"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\r\nbase_model: klue/roberta-base\r\ndatasets: []\r\nlanguage: []\r\nlibrary_name: sentence-transformers\r\nmetrics:\r\n- pearson_cosine\r\n- spearman_cosine\r\n- pearson_manhattan\r\n- spearman_manhattan\r\n- pearson_euclidean\r\n- spearman_euclidean\r\n- pearson_dot\r\n- spearman_dot\r\n- pearson_max\r\n- spearman_max\r\npipeline_tag: sentence-similarity\r\ntags:\r\n- sentence-transformers\r\n- sentence-similarity\r\n- feature-extraction\r\n- generated_from_trainer\r\n- dataset_size:574417\r\n- loss:MultipleNegativesRankingLoss\r\n- loss:CosineSimilarityLoss\r\nwidget:\r\n- source_sentence: 이집트 대통령 선거에서 가까운 여론조사\r\n sentences:\r\n - 알 카에다 충돌, 폭발로 예멘에서 35명의 군인이 사망\r\n - '보도자료 : 예멘 대통령 선거'\r\n - 반 파이프에 스케이트보드를 신은 남자\r\n- source_sentence: 한 소년이 팽창식 슬라이드를 내려간다.\r\n sentences:\r\n - 빨간 옷을 입은 소년이 부풀릴 수 있는 놀이기구를 타고 내려간다.\r\n - 새들이 물속에서 헤엄치고 있다.\r\n - 여자는 녹색 후추를 썰었다.\r\n- source_sentence: 비상 차량들이 현장에 있다.\r\n sentences:\r\n - 구급차와 소방차가 현장에서 도움을 주려고 한다.\r\n - 유물을 보는 사람들이 있다.\r\n - 구급차와 소방차에 불이 붙었다.\r\n- source_sentence: 그들은 서로 가까이 있지 않다.\r\n sentences:\r\n - 그 품질은 레이저에 가깝다.\r\n - 그들은 샤토와 매우 가깝다.\r\n - 그들은 샤토와 서로 어느 정도 떨어져 있다.\r\n- source_sentence: 딱딱한 모자를 쓴 남자가 건물 프레임 앞에 주차된 빨간 트럭의 침대를 쳐다본다.\r\n sentences:\r\n - 남자가 자고 있다.\r\n - 사람들이 말하고 있다.\r\n - 한 남자가 트럭을 보고 있다.\r\nmodel-index:\r\n- name: SentenceTransformer based on klue/roberta-base\r\n results:\r\n - task:\r\n type: semantic-similarity\r\n name: Semantic Similarity\r\n dataset:\r\n name: sts dev\r\n type: sts-dev\r\n metrics:\r\n - type: pearson_cosine\r\n value: 0.8650328554572645\r\n name: Pearson Cosine\r\n - type: spearman_cosine\r\n value: 0.8667952293243948\r\n name: Spearman Cosine\r\n - type: pearson_manhattan\r\n value: 0.8558437246473041\r\n name: Pearson Manhattan\r\n - type: spearman_manhattan\r\n value: 0.860673936504169\r\n name: Spearman Manhattan\r\n - type: pearson_euclidean\r\n value: 0.8562228685196989\r\n name: Pearson Euclidean\r\n - type: spearman_euclidean\r\n value: 0.8612884653822855\r\n name: Spearman Euclidean\r\n - type: pearson_dot\r\n value: 0.830160661850442\r\n name: Pearson Dot\r\n - type: spearman_dot\r\n value: 0.8275972106510755\r\n name: Spearman Dot\r\n - type: pearson_max\r\n value: 0.8650328554572645\r\n name: Pearson Max\r\n - type: spearman_max\r\n value: 
0.8667952293243948\r\n name: Spearman Max\r\n---\r\n\r\n# SentenceTransformer based on klue/roberta-base\r\n\r\nThis is a [sentence-transformers](https://www.SBERT.net) model finetuned from [klue/roberta-base](https://huggingface.co/klue/roberta-base). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.\r\n\r\n## Model Details\r\n\r\n### Model Description\r\n- **Model Type:** Sentence Transformer\r\n- **Base model:** [klue/roberta-base](https://huggingface.co/klue/roberta-base) \r\n- **Maximum Sequence Length:** 128 tokens\r\n- **Output Dimensionality:** 768 tokens\r\n- **Similarity Function:** Cosine Similarity\r\n\r\n\r\n\r\n\r\n### Model Sources\r\n\r\n- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)\r\n- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)\r\n- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)\r\n\r\n### Full Model Architecture\r\n\r\n```\r\nSentenceTransformer(\r\n (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: RobertaModel \r\n (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})\r\n)\r\n```\r\n\r\n## Usage\r\n\r\n### Direct Usage (Sentence Transformers)\r\n\r\nFirst install the Sentence Transformers library:\r\n\r\n```bash\r\npip install -U sentence-transformers\r\n```\r\n\r\nThen you can load this model and run inference.\r\n```python\r\nfrom sentence_transformers import SentenceTransformer\r\n\r\n# Download from the 🤗 Hub\r\nmodel = SentenceTransformer(\"sentence_transformers_model_id\")\r\n# Run inference\r\nsentences = [\r\n '딱딱한 모자를 쓴 남자가 건물 프레임 앞에 주차된 빨간 트럭의 침대를 쳐다본다.',\r\n '한 남자가 트럭을 보고 있다.',\r\n '남자가 자고 있다.',\r\n]\r\nembeddings = model.encode(sentences)\r\nprint(embeddings.shape)\r\n# [3, 768]\r\n\r\n# Get the similarity scores for the embeddings\r\nsimilarities = model.similarity(embeddings, embeddings)\r\nprint(similarities.shape)\r\n# [3, 3]\r\n```\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n## Evaluation\r\n\r\n### Metrics\r\n\r\n#### Semantic Similarity\r\n* Dataset: `sts-dev`\r\n* Evaluated with [EmbeddingSimilarityEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)\r\n\r\n| Metric | Value |\r\n|:-------------------|:-----------|\r\n| pearson_cosine | 0.865 |\r\n| spearman_cosine | 0.8668 |\r\n| pearson_manhattan | 0.8558 |\r\n| spearman_manhattan | 0.8607 |\r\n| pearson_euclidean | 0.8562 |\r\n| spearman_euclidean | 0.8613 |\r\n| pearson_dot | 0.8302 |\r\n| spearman_dot | 0.8276 |\r\n| pearson_max | 0.865 |\r\n| **spearman_max** | **0.8668** |\r\n\r\n\r\n\r\n\r\n\r\n## Training Details\r\n\r\n### Training Datasets\r\n\r\n#### Unnamed Dataset\r\n\r\n\r\n* Size: 568,640 training samples\r\n* Columns: sentence_0, sentence_1, and sentence_2\r\n* Approximate statistics based on the first 1000 samples:\r\n | | sentence_0 | sentence_1 | sentence_2 |\r\n 
|:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|\r\n | type | string | string | string |\r\n | details |
  • min: 4 tokens
  • mean: 19.21 tokens
  • max: 128 tokens
|
  • min: 3 tokens
  • mean: 18.29 tokens
  • max: 93 tokens
|
  • min: 4 tokens
  • mean: 14.61 tokens
  • max: 57 tokens
|\r\n* Samples:\r\n | sentence_0 | sentence_1 | sentence_2 |\r\n |:----------------------------------------|:-------------------------------------------------|:--------------------------------------|\r\n | 발생 부하가 함께 5% 적습니다. | 발생 부하의 5% 감소와 함께 11. | 발생 부하가 5% 증가합니다. |\r\n | 어떤 행사를 위해 음식과 옷을 배급하는 여성들. | 여성들은 음식과 옷을 나눠줌으로써 난민들을 돕고 있다. | 여자들이 사막에서 오토바이를 운전하고 있다. |\r\n | 어린 아이들은 그 지식을 얻을 필요가 있다. | 응, 우리 젊은이들 중 많은 사람들이 그걸 배워야 할 것 같아. | 젊은 사람들은 배울 필요가 없다. |\r\n* Loss: [MultipleNegativesRankingLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:\r\n ```json\r\n {\r\n \"scale\": 20.0,\r\n \"similarity_fct\": \"cos_sim\"\r\n }\r\n ```\r\n\r\n#### Unnamed Dataset\r\n\r\n\r\n* Size: 5,777 training samples\r\n* Columns: sentence_0, sentence_1, and label\r\n* Approximate statistics based on the first 1000 samples:\r\n | | sentence_0 | sentence_1 | label |\r\n |:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:---------------------------------------------------------------|\r\n | type | string | string | float |\r\n | details |
  • min: 3 tokens
  • mean: 17.61 tokens
  • max: 65 tokens
|
  • min: 3 tokens
  • mean: 17.66 tokens
  • max: 76 tokens
|
  • min: 0.0
  • mean: 0.54
  • max: 1.0
|\r\n* Samples:\r\n | sentence_0 | sentence_1 | label |\r\n |:-----------------------------------------|:------------------------------------------|:--------------------------------|\r\n | 몰디브 대통령이 경찰의 반란 이후 사임하고, 시위 | 몰디브 대통령이 몇 주 동안의 시위 끝에 그만두다. | 0.6799999999999999 |\r\n | 사자가 밀폐된 지역을 걷고 있다. | 사자가 주위를 돌아다니고 있다. | 0.52 |\r\n | 한 소년이 노래를 부르고 피아노를 치고 있다. | 한 소년이 피아노를 치고 있다. | 0.6 |\r\n* Loss: [CosineSimilarityLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters:\r\n ```json\r\n {\r\n \"loss_fct\": \"torch.nn.modules.loss.MSELoss\"\r\n }\r\n ```\r\n\r\n### Training Hyperparameters\r\n#### Non-Default Hyperparameters\r\n\r\n- `eval_strategy`: steps\r\n- `num_train_epochs`: 5\r\n- `batch_sampler`: no_duplicates\r\n- `multi_dataset_batch_sampler`: round_robin\r\n\r\n#### All Hyperparameters\r\n
<details><summary>Click to expand</summary>\r\n\r\n- `overwrite_output_dir`: False\r\n- `do_predict`: False\r\n- `eval_strategy`: steps\r\n- `prediction_loss_only`: True\r\n- `per_device_train_batch_size`: 8\r\n- `per_device_eval_batch_size`: 8\r\n- `per_gpu_train_batch_size`: None\r\n- `per_gpu_eval_batch_size`: None\r\n- `gradient_accumulation_steps`: 1\r\n- `eval_accumulation_steps`: None\r\n- `learning_rate`: 5e-05\r\n- `weight_decay`: 0.0\r\n- `adam_beta1`: 0.9\r\n- `adam_beta2`: 0.999\r\n- `adam_epsilon`: 1e-08\r\n- `max_grad_norm`: 1\r\n- `num_train_epochs`: 5\r\n- `max_steps`: -1\r\n- `lr_scheduler_type`: linear\r\n- `lr_scheduler_kwargs`: {}\r\n- `warmup_ratio`: 0.0\r\n- `warmup_steps`: 0\r\n- `log_level`: passive\r\n- `log_level_replica`: warning\r\n- `log_on_each_node`: True\r\n- `logging_nan_inf_filter`: True\r\n- `save_safetensors`: True\r\n- `save_on_each_node`: False\r\n- `save_only_model`: False\r\n- `restore_callback_states_from_checkpoint`: False\r\n- `no_cuda`: False\r\n- `use_cpu`: False\r\n- `use_mps_device`: False\r\n- `seed`: 42\r\n- `data_seed`: None\r\n- `jit_mode_eval`: False\r\n- `use_ipex`: False\r\n- `bf16`: False\r\n- `fp16`: False\r\n- `fp16_opt_level`: O1\r\n- `half_precision_backend`: auto\r\n- `bf16_full_eval`: False\r\n- `fp16_full_eval`: False\r\n- `tf32`: None\r\n- `local_rank`: 0\r\n- `ddp_backend`: None\r\n- `tpu_num_cores`: None\r\n- `tpu_metrics_debug`: False\r\n- `debug`: []\r\n- `dataloader_drop_last`: False\r\n- `dataloader_num_workers`: 0\r\n- `dataloader_prefetch_factor`: None\r\n- `past_index`: -1\r\n- `disable_tqdm`: False\r\n- `remove_unused_columns`: True\r\n- `label_names`: None\r\n- `load_best_model_at_end`: False\r\n- `ignore_data_skip`: False\r\n- `fsdp`: []\r\n- `fsdp_min_num_params`: 0\r\n- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}\r\n- `fsdp_transformer_layer_cls_to_wrap`: None\r\n- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}\r\n- `deepspeed`: None\r\n- `label_smoothing_factor`: 0.0\r\n- `optim`: adamw_torch\r\n- `optim_args`: None\r\n- `adafactor`: False\r\n- `group_by_length`: False\r\n- `length_column_name`: length\r\n- `ddp_find_unused_parameters`: None\r\n- `ddp_bucket_cap_mb`: None\r\n- `ddp_broadcast_buffers`: False\r\n- `dataloader_pin_memory`: True\r\n- `dataloader_persistent_workers`: False\r\n- `skip_memory_metrics`: True\r\n- `use_legacy_prediction_loop`: False\r\n- `push_to_hub`: False\r\n- `resume_from_checkpoint`: None\r\n- `hub_model_id`: None\r\n- `hub_strategy`: every_save\r\n- `hub_private_repo`: False\r\n- `hub_always_push`: False\r\n- `gradient_checkpointing`: False\r\n- `gradient_checkpointing_kwargs`: None\r\n- `include_inputs_for_metrics`: False\r\n- `eval_do_concat_batches`: True\r\n- `fp16_backend`: auto\r\n- `push_to_hub_model_id`: None\r\n- `push_to_hub_organization`: None\r\n- `mp_parameters`: \r\n- `auto_find_batch_size`: False\r\n- `full_determinism`: False\r\n- `torchdynamo`: None\r\n- `ray_scope`: last\r\n- `ddp_timeout`: 1800\r\n- `torch_compile`: False\r\n- `torch_compile_backend`: None\r\n- `torch_compile_mode`: None\r\n- `dispatch_batches`: None\r\n- `split_batches`: None\r\n- `include_tokens_per_second`: False\r\n- `include_num_input_tokens_seen`: False\r\n- `neftune_noise_alpha`: None\r\n- `optim_target_modules`: None\r\n- `batch_eval_metrics`: False\r\n- `batch_sampler`: no_duplicates\r\n- `multi_dataset_batch_sampler`: 
round_robin\r\n\r\n</details>
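For reference, the sketch below shows how a comparable two-dataset run could be set up with the Sentence Transformers v3 trainer, using the non-default hyperparameters listed above. It is a minimal sketch, not the exact training script: the dataset names (`triplets`, `scored_pairs`), the toy rows (borrowed from the sample tables above), and `output_dir` are illustrative assumptions.

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import CosineSimilarityLoss, MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers, MultiDatasetBatchSamplers

# Toy stand-ins for the two unnamed training datasets
# (574,417 triplets and 5,777 scored pairs in the actual run).
triplets = Dataset.from_dict({
    "sentence_0": ["발생 부하가 함께 5% 적습니다."],
    "sentence_1": ["발생 부하의 5% 감소와 함께 11."],
    "sentence_2": ["발생 부하가 5% 증가합니다."],
})
scored_pairs = Dataset.from_dict({
    "sentence_0": ["사자가 밀폐된 지역을 걷고 있다."],
    "sentence_1": ["사자가 주위를 돌아다니고 있다."],
    "label": [0.52],
})

model = SentenceTransformer("klue/roberta-base")

# The original run also set eval_strategy="steps" with an STS dev evaluator;
# both are omitted here to keep the sketch self-contained.
args = SentenceTransformerTrainingArguments(
    output_dir="ko-sroberta-sketch",  # assumed name
    num_train_epochs=5,
    per_device_train_batch_size=8,
    batch_sampler=BatchSamplers.NO_DUPLICATES,
    multi_dataset_batch_sampler=MultiDatasetBatchSamplers.ROUND_ROBIN,
)

# Each named dataset is paired with its own loss, as in the card above.
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset={"triplets": triplets, "scored_pairs": scored_pairs},
    loss={
        "triplets": MultipleNegativesRankingLoss(model),
        "scored_pairs": CosineSimilarityLoss(model),
    },
)
trainer.train()
```

With round-robin sampling the trainer alternates batches between the two datasets, so each optimizer step applies only the loss registered for the dataset that produced the batch.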
\r\n\r\n### Training Logs\r\n| Epoch | Step | Training Loss | sts-dev_spearman_max |\r\n|:------:|:----:|:-------------:|:--------------------:|\r\n| 0.3458 | 500 | 0.4123 | - |\r\n| 0.6916 | 1000 | 0.3009 | 0.8365 |\r\n| 1.0007 | 1447 | - | 0.8610 |\r\n| 1.0367 | 1500 | 0.259 | - |\r\n| 1.3824 | 2000 | 0.1301 | 0.8580 |\r\n| 1.7282 | 2500 | 0.0898 | - |\r\n| 2.0007 | 2894 | - | 0.8668 |\r\n\r\n\r\n### Framework Versions\r\n- Python: 3.11.9\r\n- Sentence Transformers: 3.0.1\r\n- Transformers: 4.41.2\r\n- PyTorch: 2.2.2+cu121\r\n- Accelerate: 0.31.0\r\n- Datasets: 2.20.0\r\n- Tokenizers: 0.19.1\r\n\r\n## Citation\r\n\r\n### BibTeX\r\n\r\n#### Sentence Transformers\r\n```bibtex\r\n@inproceedings{reimers-2019-sentence-bert,\r\n title = \"Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\",\r\n author = \"Reimers, Nils and Gurevych, Iryna\",\r\n booktitle = \"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing\",\r\n month = \"11\",\r\n year = \"2019\",\r\n publisher = \"Association for Computational Linguistics\",\r\n url = \"https://arxiv.org/abs/1908.10084\",\r\n}\r\n```\r\n\r\n#### MultipleNegativesRankingLoss\r\n```bibtex\r\n@misc{henderson2017efficient,\r\n title={Efficient Natural Language Response Suggestion for Smart Reply}, \r\n author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},\r\n year={2017},\r\n eprint={1705.00652},\r\n archivePrefix={arXiv},\r\n primaryClass={cs.CL}\r\n}\r\n```\r\n\r\n\r\n\r\n\r\n\r\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\r\n# SentenceTransformer based on klue/roberta-base\r\n\r\nThis is a [sentence-transformers](https://www.SBERT.net) model finetuned from [klue/roberta-base](https://huggingface.co/klue/roberta-base). 
It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.\r\n\r\n## Model Details\r\n\r\n### Model Description\r\n- **Model Type:** Sentence Transformer\r\n- **Base model:** [klue/roberta-base](https://huggingface.co/klue/roberta-base) \r\n- **Maximum Sequence Length:** 128 tokens\r\n- **Output Dimensionality:** 768 tokens\r\n- **Similarity Function:** Cosine Similarity\r\n\r\n\r\n\r\n\r\n### Model Sources\r\n\r\n- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)\r\n- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)\r\n- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)\r\n\r\n### Full Model Architecture\r\n\r\n```\r\nSentenceTransformer(\r\n (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: RobertaModel \r\n (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})\r\n)\r\n```\r\n\r\n## Usage\r\n\r\n### Direct Usage (Sentence Transformers)\r\n\r\nFirst install the Sentence Transformers library:\r\n\r\n```bash\r\npip install -U sentence-transformers\r\n```\r\n\r\nThen you can load this model and run inference.\r\n```python\r\nfrom sentence_transformers import SentenceTransformer\r\n\r\n# Download from the 🤗 Hub\r\nmodel = SentenceTransformer(\"sentence_transformers_model_id\")\r\n# Run inference\r\nsentences = [\r\n '딱딱한 모자를 쓴 남자가 건물 프레임 앞에 주차된 빨간 트럭의 침대를 쳐다본다.',\r\n '한 남자가 트럭을 보고 있다.',\r\n '남자가 자고 있다.',\r\n]\r\nembeddings = model.encode(sentences)\r\nprint(embeddings.shape)\r\n# [3, 768]\r\n\r\n# Get the similarity scores for the embeddings\r\nsimilarities = model.similarity(embeddings, embeddings)\r\nprint(similarities.shape)\r\n# [3, 3]\r\n```\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n## Evaluation\r\n\r\n### Metrics\r\n\r\n#### Semantic Similarity\r\n* Dataset: `sts-dev`\r\n* Evaluated with [EmbeddingSimilarityEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)\r\n\r\n| Metric | Value |\r\n|:-------------------|:-----------|\r\n| pearson_cosine | 0.865 |\r\n| spearman_cosine | 0.8668 |\r\n| pearson_manhattan | 0.8558 |\r\n| spearman_manhattan | 0.8607 |\r\n| pearson_euclidean | 0.8562 |\r\n| spearman_euclidean | 0.8613 |\r\n| pearson_dot | 0.8302 |\r\n| spearman_dot | 0.8276 |\r\n| pearson_max | 0.865 |\r\n| **spearman_max** | **0.8668** |\r\n\r\n\r\n\r\n\r\n\r\n## Training Details\r\n\r\n### Training Datasets\r\n\r\n#### Unnamed Dataset\r\n\r\n\r\n* Size: 568,640 training samples\r\n* Columns: sentence_0, sentence_1, and sentence_2\r\n* Approximate statistics based on the first 1000 samples:\r\n | | sentence_0 | sentence_1 | sentence_2 |\r\n |:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|\r\n | type | string | string | string |\r\n | details |
min: 4 tokens, mean: 19.21 tokens, max: 128 tokens | min: 3 tokens, mean: 18.29 tokens, max: 93 tokens | min: 4 tokens, mean: 14.61 tokens, max: 57 tokens
|\r\n* Samples:\r\n | sentence_0 | sentence_1 | sentence_2 |\r\n |:----------------------------------------|:-------------------------------------------------|:--------------------------------------|\r\n | 발생 부하가 함께 5% 적습니다. | 발생 부하의 5% 감소와 함께 11. | 발생 부하가 5% 증가합니다. |\r\n | 어떤 행사를 위해 음식과 옷을 배급하는 여성들. | 여성들은 음식과 옷을 나눠줌으로써 난민들을 돕고 있다. | 여자들이 사막에서 오토바이를 운전하고 있다. |\r\n | 어린 아이들은 그 지식을 얻을 필요가 있다. | 응, 우리 젊은이들 중 많은 사람들이 그걸 배워야 할 것 같아. | 젊은 사람들은 배울 필요가 없다. |\r\n* Loss: [MultipleNegativesRankingLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:\r\n ```json\r\n {\r\n \"scale\": 20.0,\r\n \"similarity_fct\": \"cos_sim\"\r\n }\r\n ```\r\n\r\n#### Unnamed Dataset\r\n\r\n\r\n* Size: 5,777 training samples\r\n* Columns: sentence_0, sentence_1, and label\r\n* Approximate statistics based on the first 1000 samples:\r\n | | sentence_0 | sentence_1 | label |\r\n |:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:---------------------------------------------------------------|\r\n | type | string | string | float |\r\n | details |
min: 3 tokens, mean: 17.61 tokens, max: 65 tokens | min: 3 tokens, mean: 17.66 tokens, max: 76 tokens | min: 0.0, mean: 0.54, max: 1.0
|\r\n* Samples:\r\n | sentence_0 | sentence_1 | label |\r\n |:-----------------------------------------|:------------------------------------------|:--------------------------------|\r\n | 몰디브 대통령이 경찰의 반란 이후 사임하고, 시위 | 몰디브 대통령이 몇 주 동안의 시위 끝에 그만두다. | 0.6799999999999999 |\r\n | 사자가 밀폐된 지역을 걷고 있다. | 사자가 주위를 돌아다니고 있다. | 0.52 |\r\n | 한 소년이 노래를 부르고 피아노를 치고 있다. | 한 소년이 피아노를 치고 있다. | 0.6 |\r\n* Loss: [CosineSimilarityLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters:\r\n ```json\r\n {\r\n \"loss_fct\": \"torch.nn.modules.loss.MSELoss\"\r\n }\r\n ```\r\n\r\n### Training Hyperparameters\r\n#### Non-Default Hyperparameters\r\n\r\n- `eval_strategy`: steps\r\n- `num_train_epochs`: 5\r\n- `batch_sampler`: no_duplicates\r\n- `multi_dataset_batch_sampler`: round_robin\r\n\r\n#### All Hyperparameters\r\n
Click to expand\r\n\r\n- `overwrite_output_dir`: False\r\n- `do_predict`: False\r\n- `eval_strategy`: steps\r\n- `prediction_loss_only`: True\r\n- `per_device_train_batch_size`: 8\r\n- `per_device_eval_batch_size`: 8\r\n- `per_gpu_train_batch_size`: None\r\n- `per_gpu_eval_batch_size`: None\r\n- `gradient_accumulation_steps`: 1\r\n- `eval_accumulation_steps`: None\r\n- `learning_rate`: 5e-05\r\n- `weight_decay`: 0.0\r\n- `adam_beta1`: 0.9\r\n- `adam_beta2`: 0.999\r\n- `adam_epsilon`: 1e-08\r\n- `max_grad_norm`: 1\r\n- `num_train_epochs`: 5\r\n- `max_steps`: -1\r\n- `lr_scheduler_type`: linear\r\n- `lr_scheduler_kwargs`: {}\r\n- `warmup_ratio`: 0.0\r\n- `warmup_steps`: 0\r\n- `log_level`: passive\r\n- `log_level_replica`: warning\r\n- `log_on_each_node`: True\r\n- `logging_nan_inf_filter`: True\r\n- `save_safetensors`: True\r\n- `save_on_each_node`: False\r\n- `save_only_model`: False\r\n- `restore_callback_states_from_checkpoint`: False\r\n- `no_cuda`: False\r\n- `use_cpu`: False\r\n- `use_mps_device`: False\r\n- `seed`: 42\r\n- `data_seed`: None\r\n- `jit_mode_eval`: False\r\n- `use_ipex`: False\r\n- `bf16`: False\r\n- `fp16`: False\r\n- `fp16_opt_level`: O1\r\n- `half_precision_backend`: auto\r\n- `bf16_full_eval`: False\r\n- `fp16_full_eval`: False\r\n- `tf32`: None\r\n- `local_rank`: 0\r\n- `ddp_backend`: None\r\n- `tpu_num_cores`: None\r\n- `tpu_metrics_debug`: False\r\n- `debug`: []\r\n- `dataloader_drop_last`: False\r\n- `dataloader_num_workers`: 0\r\n- `dataloader_prefetch_factor`: None\r\n- `past_index`: -1\r\n- `disable_tqdm`: False\r\n- `remove_unused_columns`: True\r\n- `label_names`: None\r\n- `load_best_model_at_end`: False\r\n- `ignore_data_skip`: False\r\n- `fsdp`: []\r\n- `fsdp_min_num_params`: 0\r\n- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}\r\n- `fsdp_transformer_layer_cls_to_wrap`: None\r\n- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}\r\n- `deepspeed`: None\r\n- `label_smoothing_factor`: 0.0\r\n- `optim`: adamw_torch\r\n- `optim_args`: None\r\n- `adafactor`: False\r\n- `group_by_length`: False\r\n- `length_column_name`: length\r\n- `ddp_find_unused_parameters`: None\r\n- `ddp_bucket_cap_mb`: None\r\n- `ddp_broadcast_buffers`: False\r\n- `dataloader_pin_memory`: True\r\n- `dataloader_persistent_workers`: False\r\n- `skip_memory_metrics`: True\r\n- `use_legacy_prediction_loop`: False\r\n- `push_to_hub`: False\r\n- `resume_from_checkpoint`: None\r\n- `hub_model_id`: None\r\n- `hub_strategy`: every_save\r\n- `hub_private_repo`: False\r\n- `hub_always_push`: False\r\n- `gradient_checkpointing`: False\r\n- `gradient_checkpointing_kwargs`: None\r\n- `include_inputs_for_metrics`: False\r\n- `eval_do_concat_batches`: True\r\n- `fp16_backend`: auto\r\n- `push_to_hub_model_id`: None\r\n- `push_to_hub_organization`: None\r\n- `mp_parameters`: \r\n- `auto_find_batch_size`: False\r\n- `full_determinism`: False\r\n- `torchdynamo`: None\r\n- `ray_scope`: last\r\n- `ddp_timeout`: 1800\r\n- `torch_compile`: False\r\n- `torch_compile_backend`: None\r\n- `torch_compile_mode`: None\r\n- `dispatch_batches`: None\r\n- `split_batches`: None\r\n- `include_tokens_per_second`: False\r\n- `include_num_input_tokens_seen`: False\r\n- `neftune_noise_alpha`: None\r\n- `optim_target_modules`: None\r\n- `batch_eval_metrics`: False\r\n- `batch_sampler`: no_duplicates\r\n- `multi_dataset_batch_sampler`: 
round_robin\r\n\r\n
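For reference, below is a minimal sketch of how the non-default settings listed above map onto the Sentence Transformers v3 training API. The two dataset objects are tiny placeholders built from the sample rows shown earlier, not the full 568,640-pair and 5,777-pair training sets, and the output path is hypothetical:

```python
from datasets import Dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses
from sentence_transformers.training_args import (
    SentenceTransformerTrainingArguments,
    BatchSamplers,
    MultiDatasetBatchSamplers,
)

model = SentenceTransformer("klue/roberta-base")

# Placeholder datasets standing in for the two training sets described above.
triplets = Dataset.from_dict({
    "sentence_0": ["어린 아이들은 그 지식을 얻을 필요가 있다."],
    "sentence_1": ["응, 우리 젊은이들 중 많은 사람들이 그걸 배워야 할 것 같아."],
    "sentence_2": ["젊은 사람들은 배울 필요가 없다."],
})
pairs = Dataset.from_dict({
    "sentence_0": ["사자가 밀폐된 지역을 걷고 있다."],
    "sentence_1": ["사자가 주위를 돌아다니고 있다."],
    "label": [0.52],
})

args = SentenceTransformerTrainingArguments(
    output_dir="output",  # hypothetical path
    num_train_epochs=5,
    eval_strategy="steps",
    batch_sampler=BatchSamplers.NO_DUPLICATES,
    multi_dataset_batch_sampler=MultiDatasetBatchSamplers.ROUND_ROBIN,
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    # One loss per dataset, matching the losses documented above.
    train_dataset={"triplets": triplets, "pairs": pairs},
    loss={
        "triplets": losses.MultipleNegativesRankingLoss(model),
        "pairs": losses.CosineSimilarityLoss(model),
    },
)
trainer.train()
```

The round-robin multi-dataset sampler alternates batches between the two datasets, which is why the triplet set (MultipleNegativesRankingLoss) and the scored-pair set (CosineSimilarityLoss) can be trained jointly in a single run.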
\r\n\r\n### Training Logs\r\n| Epoch | Step | Training Loss | sts-dev_spearman_max |\r\n|:------:|:----:|:-------------:|:--------------------:|\r\n| 0.3458 | 500 | 0.4123 | - |\r\n| 0.6916 | 1000 | 0.3009 | 0.8365 |\r\n| 1.0007 | 1447 | - | 0.8610 |\r\n| 1.0367 | 1500 | 0.259 | - |\r\n| 1.3824 | 2000 | 0.1301 | 0.8580 |\r\n| 1.7282 | 2500 | 0.0898 | - |\r\n| 2.0007 | 2894 | - | 0.8668 |\r\n\r\n\r\n### Framework Versions\r\n- Python: 3.11.9\r\n- Sentence Transformers: 3.0.1\r\n- Transformers: 4.41.2\r\n- PyTorch: 2.2.2+cu121\r\n- Accelerate: 0.31.0\r\n- Datasets: 2.20.0\r\n- Tokenizers: 0.19.1\r\n\r\n## Citation\r\n\r\n### BibTeX\r\n\r\n#### Sentence Transformers\r\n```bibtex\r\n@inproceedings{reimers-2019-sentence-bert,\r\n title = \"Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\",\r\n author = \"Reimers, Nils and Gurevych, Iryna\",\r\n booktitle = \"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing\",\r\n month = \"11\",\r\n year = \"2019\",\r\n publisher = \"Association for Computational Linguistics\",\r\n url = \"https://arxiv.org/abs/1908.10084\",\r\n}\r\n```\r\n\r\n#### MultipleNegativesRankingLoss\r\n```bibtex\r\n@misc{henderson2017efficient,\r\n title={Efficient Natural Language Response Suggestion for Smart Reply}, \r\n author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},\r\n year={2017},\r\n eprint={1705.00652},\r\n archivePrefix={arXiv},\r\n primaryClass={cs.CL}\r\n}\r\n```\r\n\r\n\r\n\r\n\r\n\r\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"klue/roberta-base\", \"datasets\": [], \"language\": [], \"library_name\": \"sentence-transformers\", \"metrics\": [\"pearson_cosine\", \"spearman_cosine\", \"pearson_manhattan\", \"spearman_manhattan\", \"pearson_euclidean\", \"spearman_euclidean\", \"pearson_dot\", \"spearman_dot\", \"pearson_max\", \"spearman_max\"], \"pipeline_tag\": \"sentence-similarity\", \"tags\": [\"sentence-transformers\", \"sentence-similarity\", \"feature-extraction\", \"generated_from_trainer\", \"dataset_size:574417\", \"loss:MultipleNegativesRankingLoss\", \"loss:CosineSimilarityLoss\"], \"widget\": [{\"source_sentence\": \"이집트 대통령 선거에서 가까운 여론조사\", \"sentences\": [\"알 카에다 충돌, 폭발로 예멘에서 35명의 군인이 사망\", \"보도자료 : 예멘 대통령 선거\", \"반 파이프에 스케이트보드를 신은 남자\"]}, {\"source_sentence\": \"한 소년이 팽창식 슬라이드를 내려간다.\", \"sentences\": [\"빨간 옷을 입은 소년이 부풀릴 수 있는 놀이기구를 타고 내려간다.\", \"새들이 물속에서 헤엄치고 있다.\", \"여자는 녹색 후추를 썰었다.\"]}, {\"source_sentence\": \"비상 차량들이 현장에 있다.\", \"sentences\": [\"구급차와 소방차가 현장에서 도움을 주려고 한다.\", \"유물을 보는 사람들이 있다.\", \"구급차와 소방차에 불이 붙었다.\"]}, {\"source_sentence\": \"그들은 서로 가까이 있지 않다.\", \"sentences\": [\"그 품질은 레이저에 가깝다.\", \"그들은 샤토와 매우 가깝다.\", \"그들은 샤토와 서로 어느 정도 떨어져 있다.\"]}, {\"source_sentence\": \"딱딱한 모자를 쓴 남자가 건물 프레임 앞에 주차된 빨간 트럭의 침대를 쳐다본다.\", \"sentences\": [\"남자가 자고 있다.\", \"사람들이 말하고 있다.\", \"한 남자가 트럭을 보고 있다.\"]}], \"model-index\": [{\"name\": \"SentenceTransformer based on klue/roberta-base\", \"results\": [{\"task\": {\"type\": \"semantic-similarity\", \"name\": \"Semantic Similarity\"}, \"dataset\": {\"name\": \"sts dev\", \"type\": \"sts-dev\"}, \"metrics\": [{\"type\": \"pearson_cosine\", \"value\": 0.8650328554572645, \"name\": \"Pearson Cosine\"}, {\"type\": \"spearman_cosine\", \"value\": 0.8667952293243948, \"name\": \"Spearman Cosine\"}, {\"type\": \"pearson_manhattan\", \"value\": 0.8558437246473041, \"name\": \"Pearson Manhattan\"}, {\"type\": \"spearman_manhattan\", \"value\": 
0.860673936504169, \"name\": \"Spearman Manhattan\"}, {\"type\": \"pearson_euclidean\", \"value\": 0.8562228685196989, \"name\": \"Pearson Euclidean\"}, {\"type\": \"spearman_euclidean\", \"value\": 0.8612884653822855, \"name\": \"Spearman Euclidean\"}, {\"type\": \"pearson_dot\", \"value\": 0.830160661850442, \"name\": \"Pearson Dot\"}, {\"type\": \"spearman_dot\", \"value\": 0.8275972106510755, \"name\": \"Spearman Dot\"}, {\"type\": \"pearson_max\", \"value\": 0.8650328554572645, \"name\": \"Pearson Max\"}, {\"type\": \"spearman_max\", \"value\": 0.8667952293243948, \"name\": \"Spearman Max\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION","SEMANTIC_SIMILARITY"],"string":"[\n \"TEXT_CLASSIFICATION\",\n \"SEMANTIC_SIMILARITY\"\n]"},"__index_level_0__":{"kind":"number","value":45520,"string":"45,520"}}},{"rowIdx":43744,"cells":{"id":{"kind":"string","value":"anilguven/albert_tr_turkish_spam_email"},"author":{"kind":"string","value":"anilguven"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","safetensors","albert","text-classification","turkish","spam","ham","email","bert","tr","dataset:anilguven/turkish_spam_email","license:unknown","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"albert\",\n \"text-classification\",\n \"turkish\",\n \"spam\",\n \"ham\",\n \"email\",\n \"bert\",\n \"tr\",\n \"dataset:anilguven/turkish_spam_email\",\n \"license:unknown\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-01-25T19:34:03Z","string":"2024-01-25T19:34:03Z"},"last_modified":{"kind":"string","value":"2024-01-26T13:34:19+00:00"},"downloads":{"kind":"number","value":16,"string":"16"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\ndatasets:\n- anilguven/turkish_spam_email\nlanguage:\n- tr\nlicense: unknown\nmetrics:\n- accuracy\n- f1\n- recall\n- precision\ntags:\n- turkish\n- spam\n- ham\n- email\n- albert\n- bert\n---\n### Model Info\n\nThis model was developed/finetuned for spam detection task for Turkish Language. This model was finetuned via spam/ham email dataset. 
\n- LABEL_0: ham/normal mail\n- LABEL_1: spam mail\n\n### Model Sources\n\n\n\n- **Dataset:** https://huggingface.co/datasets/anilguven/turkish_spam_email\n- **Paper:** https://dergipark.org.tr/tr/pub/ejosat/issue/75736/1234079\n- **Demo-Coding [optional]:** https://github.com/anil1055/Turkish_spam_email_detection_with_language_models\n- **Finetuned from model [optional]:** https://huggingface.co/loodos/albert-base-turkish-uncased\n\n#### Preprocessing \n\nYou must apply removing stopwords, stemming, or lemmatization process for Turkish.\n\n# Model Load safetensors\n\n\n\nDetailed https://huggingface.co/docs/diffusers/using-diffusers/using_safetensors\n\n### Results\n\n- F1-score: %93.55\n- Accuracy: %93.10\n\n## Citation\n\n\n\n**BibTeX:**\n\n*@article{article_1234079, title={Türkçe E-postalarda Spam Tespiti için Makine Öğrenme Yöntemlerinin ve Dil Modellerinin Analizi}, journal={Avrupa Bilim ve Teknoloji Dergisi}, pages={1–6}, year={2023}, DOI={10.31590/ejosat.1234079}, author={GÜVEN, Zekeriya Anıl}, keywords={Siber Güvenlik, Spam Tespiti, Dil Modeli, Makine Öğrenmesi, Doğal Dil İşleme, Metin Sınıflandırma, Cyber Security, Spam Detection, Language Model, Machine Learning, Natural Language Processing, Text Classification}, number={47}, publisher={Osman SAĞDIÇ} }*\n\n**APA:**\n\n*GÜVEN, Z. A. (2023). Türkçe E-postalarda Spam Tespiti için Makine Öğrenme Yöntemlerinin ve Dil Modellerinin Analizi. Avrupa Bilim ve Teknoloji Dergisi, (47), 1-6.*"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"### Model Info\n\nThis model was developed/finetuned for spam detection task for Turkish Language. This model was finetuned via spam/ham email dataset. \n- LABEL_0: ham/normal mail\n- LABEL_1: spam mail\n\n### Model Sources\n\n\n\n- **Dataset:** https://huggingface.co/datasets/anilguven/turkish_spam_email\n- **Paper:** https://dergipark.org.tr/tr/pub/ejosat/issue/75736/1234079\n- **Demo-Coding [optional]:** https://github.com/anil1055/Turkish_spam_email_detection_with_language_models\n- **Finetuned from model [optional]:** https://huggingface.co/loodos/albert-base-turkish-uncased\n\n#### Preprocessing \n\nYou must apply removing stopwords, stemming, or lemmatization process for Turkish.\n\n# Model Load safetensors\n\n\n\nDetailed https://huggingface.co/docs/diffusers/using-diffusers/using_safetensors\n\n### Results\n\n- F1-score: %93.55\n- Accuracy: %93.10\n\n## Citation\n\n\n\n**BibTeX:**\n\n*@article{article_1234079, title={Türkçe E-postalarda Spam Tespiti için Makine Öğrenme Yöntemlerinin ve Dil Modellerinin Analizi}, journal={Avrupa Bilim ve Teknoloji Dergisi}, pages={1–6}, year={2023}, DOI={10.31590/ejosat.1234079}, author={GÜVEN, Zekeriya Anıl}, keywords={Siber Güvenlik, Spam Tespiti, Dil Modeli, Makine Öğrenmesi, Doğal Dil İşleme, Metin Sınıflandırma, Cyber Security, Spam Detection, Language Model, Machine Learning, Natural Language Processing, Text Classification}, number={47}, publisher={Osman SAĞDIÇ} }*\n\n**APA:**\n\n*GÜVEN, Z. A. (2023). Türkçe E-postalarda Spam Tespiti için Makine Öğrenme Yöntemlerinin ve Dil Modellerinin Analizi. 
Avrupa Bilim ve Teknoloji Dergisi, (47), 1-6.*"},"metadata":{"kind":"string","value":"{\"datasets\": [\"anilguven/turkish_spam_email\"], \"language\": [\"tr\"], \"license\": \"unknown\", \"metrics\": [\"accuracy\", \"f1\", \"recall\", \"precision\"], \"tags\": [\"turkish\", \"spam\", \"ham\", \"email\", \"albert\", \"bert\"]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45521,"string":"45,521"}}},{"rowIdx":43745,"cells":{"id":{"kind":"string","value":"gulermuslim/distilbert-base-uncased-finetuned-emotion"},"author":{"kind":"string","value":"gulermuslim"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","tensorboard","distilbert","text-classification","generated_from_trainer","dataset:emotion","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tensorboard\",\n \"distilbert\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"dataset:emotion\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-10-21T09:38:18Z","string":"2022-10-21T09:38:18Z"},"last_modified":{"kind":"string","value":"2022-10-21T09:48:14+00:00"},"downloads":{"kind":"number","value":10,"string":"10"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- emotion\nlicense: apache-2.0\nmetrics:\n- accuracy\n- f1\ntags:\n- generated_from_trainer\nmodel-index:\n- name: distilbert-base-uncased-finetuned-emotion\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: emotion\n type: emotion\n args: default\n metrics:\n - type: accuracy\n value: 0.9245\n name: Accuracy\n - type: f1\n value: 0.9246934497325665\n name: F1\n---\n\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2157\n- Accuracy: 0.9245\n- F1: 0.9247\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| 0.8781 | 1.0 | 250 | 0.3374 | 0.8995 | 0.8951 |\n| 0.2546 | 2.0 | 500 | 0.2157 | 0.9245 | 0.9247 |\n\n\n### Framework versions\n\n- Transformers 4.13.0\n- Pytorch 1.12.1+cu113\n- Datasets 1.16.1\n- Tokenizers 0.10.3\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the 
following results on the evaluation set:\n- Loss: 0.2157\n- Accuracy: 0.9245\n- F1: 0.9247\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| 0.8781 | 1.0 | 250 | 0.3374 | 0.8995 | 0.8951 |\n| 0.2546 | 2.0 | 500 | 0.2157 | 0.9245 | 0.9247 |\n\n\n### Framework versions\n\n- Transformers 4.13.0\n- Pytorch 1.12.1+cu113\n- Datasets 1.16.1\n- Tokenizers 0.10.3\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"emotion\"], \"license\": \"apache-2.0\", \"metrics\": [\"accuracy\", \"f1\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"distilbert-base-uncased-finetuned-emotion\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"emotion\", \"type\": \"emotion\", \"args\": \"default\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.9245, \"name\": \"Accuracy\"}, {\"type\": \"f1\", \"value\": 0.9246934497325665, \"name\": \"F1\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45522,"string":"45,522"}}},{"rowIdx":43746,"cells":{"id":{"kind":"string","value":"hhenryz/LOVA3-llava-v1.5-7b"},"author":{"kind":"string","value":"hhenryz"},"task_category":{"kind":"string","value":"image-text-to-text"},"tags":{"kind":"list like","value":["transformers","pytorch","llava","text-generation","image-text-to-text","license:apache-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"llava\",\n \"text-generation\",\n \"image-text-to-text\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-05-24T08:09:25Z","string":"2024-05-24T08:09:25Z"},"last_modified":{"kind":"string","value":"2025-02-24T08:20:43+00:00"},"downloads":{"kind":"number","value":42,"string":"42"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\n---\n\n## 💡Key Contributions:\n\n* **LOVA3** - To the best of our knowledge, LOVA3 is the first effort to imbue the asking and assessment abilities in training a robust and intelligent MLLM, inspired from human learning mechanism.\n* **EvalQABench** - We build a new benchmark EvalQABench for the VQA correction evaluation as the first effort to advance the development of future research.\n\n* **Performance Improvement** - Training with our proposed LOVA3 framework, we observe consistent improvement on 10 representative benchmarks.\n\n\n## Model weight\n\nPretrained weight: [LOVA3-llava-v1.5-7b](https://huggingface.co/hhenryz/LOVA3-llava-v1.5-7b)\n\nDownload it by using following command:\n\n```\ngit clone https://huggingface.co/hhenryz/LOVA3-llava-v1.5-7b\n```\n\n## Training 
Data\n\n* Here we provide the training/Evaluation/Testing sets of EvalQABench under the folder `EvalQABench`.\n\n* Training data: [Mixed_VQA_GenQA_EvalQA_1.5M.jsonl](https://huggingface.co/datasets/hhenryz/Mixed_VQA_GenQA_EvalQA_1.5M).\n\n### Image Datasets\n\nPlease download the images from constituting datasets:\n\n- COCO: [train2014](http://images.cocodataset.org/zips/train2014.zip)\n- GQA: [images](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip)\n- OCR-VQA: [download script](https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing), **we save all files as `.jpg`**\n- AOKVQA: [download script](https://github.com/allenai/aokvqa?tab=readme-ov-file#downloading-the-dataset)\n- TextVQA: [train_val_images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip)\n- VisualGenome: [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip)\n- LLaVA-Instruct: [huggingface](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K)\n\n\n## 💃 Evaluation\n\n1. Download [LOVA3-llava-v1.5-7b](https://huggingface.co/hhenryz/LOVA3-llava-v1.5-7b) under the folder `checkpoints`.\n\n2. Download the CLIP vision encoder [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) under the folder `checkpoints`\n\n3. Run the evaluation scripts under the folder `scripts/v1_5/eval`. There are 12 multimodal datasets and benchmarks awaiting evaluation.\n\nTake VizWiz as an example, the running command is as follows:\n\n```\nmodelname=LOVA3-llava-v1.5-7b\n\npython -m llava.eval.model_vqa_loader \\\n --model-path checkpoints/$modelname \\\n --question-file ./playground/data/eval/vizwiz/llava_test.jsonl \\\n --image-folder /yourpath/vizwiz/test/ \\\n --answers-file ./playground/data/eval/vizwiz/answers/$modelname.jsonl \\\n --temperature 0 \\\n --conv-mode vicuna_v1\n\npython scripts/convert_vizwiz_for_submission.py \\\n --annotation-file ./playground/data/eval/vizwiz/llava_test.jsonl \\\n --result-file ./playground/data/eval/vizwiz/answers/$modelname.jsonl \\\n --result-upload-file ./playground/data/eval/vizwiz/answers_upload/$modelname.json\n\n```\n\n## Training\n\n1. Download the pretrained MLP adapter weights [llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5](https://huggingface.co/liuhaotian/llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5) from and put it under the folder `checkpoints`.\n\n2. Download the model weight [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) under the folder `checkpoints`.\n\n3. Download the model weight [vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5) under the folder `checkpoints`.\n\n4. Download the training data [Mixed_VQA_GenQA_EvalQA_1.5M.jsonl](https://huggingface.co/datasets/hhenryz/Mixed_VQA_GenQA_EvalQA_1.5M) under the folder `data`.\n\n5. Run the training script.\n\n```\nbash scripts/v1_5/finetune.sh\n```\n\n## 🙏 Acknowledgement\n\n- [LLaVA](https://github.com/haotian-liu/LLaVA): The codebase we built upon. 
\n- [LAVIS](https://github.com/salesforce/LAVIS): We download some datasets from its scripts.\n\n## 🎓 Citation\n\nIf you find LOVA3 useful, please cite using this BibTeX:\n\n```bibtex\n@inproceedings{\n zhao2024lova,\n title={{LOVA}3: Learning to Visual Question Answering, Asking and Assessment},\n author={Hengyuan Zhao and Pan Zhou and Difei Gao and Zechen Bai and Mike Zheng Shou},\n booktitle={The Thirty-eighth Annual Conference on Neural Information Processing Systems},\n year={2024},\n url={https://openreview.net/forum?id=vIOKLMl6wu}\n}\n```\n```"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n## 💡Key Contributions:\n\n* **LOVA3** - To the best of our knowledge, LOVA3 is the first effort to imbue the asking and assessment abilities in training a robust and intelligent MLLM, inspired from human learning mechanism.\n* **EvalQABench** - We build a new benchmark EvalQABench for the VQA correction evaluation as the first effort to advance the development of future research.\n\n* **Performance Improvement** - Training with our proposed LOVA3 framework, we observe consistent improvement on 10 representative benchmarks.\n\n\n## Model weight\n\nPretrained weight: [LOVA3-llava-v1.5-7b](https://huggingface.co/hhenryz/LOVA3-llava-v1.5-7b)\n\nDownload it by using following command:\n\n```\ngit clone https://huggingface.co/hhenryz/LOVA3-llava-v1.5-7b\n```\n\n## Training Data\n\n* Here we provide the training/Evaluation/Testing sets of EvalQABench under the folder `EvalQABench`.\n\n* Training data: [Mixed_VQA_GenQA_EvalQA_1.5M.jsonl](https://huggingface.co/datasets/hhenryz/Mixed_VQA_GenQA_EvalQA_1.5M).\n\n### Image Datasets\n\nPlease download the images from constituting datasets:\n\n- COCO: [train2014](http://images.cocodataset.org/zips/train2014.zip)\n- GQA: [images](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip)\n- OCR-VQA: [download script](https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing), **we save all files as `.jpg`**\n- AOKVQA: [download script](https://github.com/allenai/aokvqa?tab=readme-ov-file#downloading-the-dataset)\n- TextVQA: [train_val_images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip)\n- VisualGenome: [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip)\n- LLaVA-Instruct: [huggingface](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K)\n\n\n## 💃 Evaluation\n\n1. Download [LOVA3-llava-v1.5-7b](https://huggingface.co/hhenryz/LOVA3-llava-v1.5-7b) under the folder `checkpoints`.\n\n2. Download the CLIP vision encoder [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) under the folder `checkpoints`\n\n3. Run the evaluation scripts under the folder `scripts/v1_5/eval`. 
There are 12 multimodal datasets and benchmarks awaiting evaluation.\n\nTake VizWiz as an example, the running command is as follows:\n\n```\nmodelname=LOVA3-llava-v1.5-7b\n\npython -m llava.eval.model_vqa_loader \\\n --model-path checkpoints/$modelname \\\n --question-file ./playground/data/eval/vizwiz/llava_test.jsonl \\\n --image-folder /yourpath/vizwiz/test/ \\\n --answers-file ./playground/data/eval/vizwiz/answers/$modelname.jsonl \\\n --temperature 0 \\\n --conv-mode vicuna_v1\n\npython scripts/convert_vizwiz_for_submission.py \\\n --annotation-file ./playground/data/eval/vizwiz/llava_test.jsonl \\\n --result-file ./playground/data/eval/vizwiz/answers/$modelname.jsonl \\\n --result-upload-file ./playground/data/eval/vizwiz/answers_upload/$modelname.json\n\n```\n\n## Training\n\n1. Download the pretrained MLP adapter weights [llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5](https://huggingface.co/liuhaotian/llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5) from and put it under the folder `checkpoints`.\n\n2. Download the model weight [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) under the folder `checkpoints`.\n\n3. Download the model weight [vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5) under the folder `checkpoints`.\n\n4. Download the training data [Mixed_VQA_GenQA_EvalQA_1.5M.jsonl](https://huggingface.co/datasets/hhenryz/Mixed_VQA_GenQA_EvalQA_1.5M) under the folder `data`.\n\n5. Run the training script.\n\n```\nbash scripts/v1_5/finetune.sh\n```\n\n## 🙏 Acknowledgement\n\n- [LLaVA](https://github.com/haotian-liu/LLaVA): The codebase we built upon. \n- [LAVIS](https://github.com/salesforce/LAVIS): We download some datasets from its scripts.\n\n## 🎓 Citation\n\nIf you find LOVA3 useful, please cite using this BibTeX:\n\n```bibtex\n@inproceedings{\n zhao2024lova,\n title={{LOVA}3: Learning to Visual Question Answering, Asking and Assessment},\n author={Hengyuan Zhao and Pan Zhou and Difei Gao and Zechen Bai and Mike Zheng Shou},\n booktitle={The Thirty-eighth Annual Conference on Neural Information Processing Systems},\n year={2024},\n url={https://openreview.net/forum?id=vIOKLMl6wu}\n}\n```\n```"},"metadata":{"kind":"string","value":"{\"library_name\": \"transformers\", \"license\": \"apache-2.0\", \"pipeline_tag\": \"image-text-to-text\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["QUESTION_ANSWERING"],"string":"[\n \"QUESTION_ANSWERING\"\n]"},"__index_level_0__":{"kind":"number","value":45523,"string":"45,523"}}},{"rowIdx":43747,"cells":{"id":{"kind":"string","value":"MathiasBrussow/distilbert-base-uncased-finetuned-clinc"},"author":{"kind":"string","value":"MathiasBrussow"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["pytorch","tensorboard","distilbert","generated_from_trainer","dataset:clinc_oos","license:apache-2.0","model-index","region:us"],"string":"[\n \"pytorch\",\n \"tensorboard\",\n \"distilbert\",\n \"generated_from_trainer\",\n \"dataset:clinc_oos\",\n \"license:apache-2.0\",\n \"model-index\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-10-03T10:26:19Z","string":"2024-10-03T10:26:19Z"},"last_modified":{"kind":"string","value":"2024-10-03T15:30:49+00:00"},"downloads":{"kind":"number","value":13,"string":"13"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- clinc_oos\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: 
distilbert-base-uncased-finetuned-clinc\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: clinc_oos\n type: clinc_oos\n args: plus\n metrics:\n - type: accuracy\n value: 0.9180645161290323\n name: Accuracy\n---\n\n\n\n# distilbert-base-uncased-finetuned-clinc\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.7721\n- Accuracy: 0.9181\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 48\n- eval_batch_size: 48\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|\n| No log | 1.0 | 318 | 3.2884 | 0.7419 |\n| 3.7865 | 2.0 | 636 | 1.8751 | 0.8368 |\n| 3.7865 | 3.0 | 954 | 1.1569 | 0.8961 |\n| 1.6925 | 4.0 | 1272 | 0.8573 | 0.9132 |\n| 0.9056 | 5.0 | 1590 | 0.7721 | 0.9181 |\n\n\n### Framework versions\n\n- Transformers 4.16.2\n- Pytorch 2.4.1+cu121\n- Datasets 1.16.1\n- Tokenizers 0.19.1\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# distilbert-base-uncased-finetuned-clinc\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.7721\n- Accuracy: 0.9181\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 48\n- eval_batch_size: 48\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|\n| No log | 1.0 | 318 | 3.2884 | 0.7419 |\n| 3.7865 | 2.0 | 636 | 1.8751 | 0.8368 |\n| 3.7865 | 3.0 | 954 | 1.1569 | 0.8961 |\n| 1.6925 | 4.0 | 1272 | 0.8573 | 0.9132 |\n| 0.9056 | 5.0 | 1590 | 0.7721 | 0.9181 |\n\n\n### Framework versions\n\n- Transformers 4.16.2\n- Pytorch 2.4.1+cu121\n- Datasets 1.16.1\n- Tokenizers 0.19.1\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"clinc_oos\"], \"license\": \"apache-2.0\", \"metrics\": [\"accuracy\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"distilbert-base-uncased-finetuned-clinc\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"clinc_oos\", \"type\": \"clinc_oos\", \"args\": \"plus\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.9180645161290323, \"name\": \"Accuracy\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n 
\"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45524,"string":"45,524"}}},{"rowIdx":43748,"cells":{"id":{"kind":"string","value":"gokuls/hBERTv1_new_pretrain_48_KD_cola"},"author":{"kind":"string","value":"gokuls"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","tensorboard","hybridbert","text-classification","generated_from_trainer","en","dataset:glue","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tensorboard\",\n \"hybridbert\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"en\",\n \"dataset:glue\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-06-14T17:00:54Z","string":"2023-06-14T17:00:54Z"},"last_modified":{"kind":"string","value":"2023-06-14T17:17:29+00:00"},"downloads":{"kind":"number","value":8,"string":"8"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- glue\nlanguage:\n- en\nmetrics:\n- matthews_correlation\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: hBERTv1_new_pretrain_48_KD_cola\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: GLUE COLA\n type: glue\n config: cola\n split: validation\n args: cola\n metrics:\n - type: matthews_correlation\n value: 0.0\n name: Matthews Correlation\n - type: accuracy\n value: 0.6912751793861389\n name: Accuracy\n---\n\n\n\n# hBERTv1_new_pretrain_48_KD_cola\n\nThis model is a fine-tuned version of [gokuls/bert_12_layer_model_v1_complete_training_new_48_KD](https://huggingface.co/gokuls/bert_12_layer_model_v1_complete_training_new_48_KD) on the GLUE COLA dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.6163\n- Matthews Correlation: 0.0\n- Accuracy: 0.6913\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 4e-05\n- train_batch_size: 128\n- eval_batch_size: 128\n- seed: 10\n- distributed_type: multi-GPU\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 50\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:--------------------:|:--------:|\n| 0.6281 | 1.0 | 67 | 0.6241 | 0.0 | 0.6913 |\n| 0.6172 | 2.0 | 134 | 0.6471 | 0.0 | 0.6913 |\n| 0.6123 | 3.0 | 201 | 0.6185 | 0.0 | 0.6913 |\n| 0.6138 | 4.0 | 268 | 0.6180 | 0.0 | 0.6913 |\n| 0.6029 | 5.0 | 335 | 0.6170 | 0.0021 | 0.6846 |\n| 0.5795 | 6.0 | 402 | 0.6163 | 0.0 | 0.6913 |\n| 0.556 | 7.0 | 469 | 0.6529 | 0.0735 | 0.6328 |\n| 0.5273 | 8.0 | 536 | 0.6502 | 0.1135 | 0.6510 |\n| 0.5108 | 9.0 | 603 | 0.7095 | 0.1134 | 0.6318 |\n| 0.4911 | 10.0 | 670 | 0.6667 | 0.0454 | 0.6644 |\n| 0.4768 | 11.0 | 737 | 0.6869 | 0.0658 | 0.6232 |\n\n\n### Framework versions\n\n- Transformers 4.30.2\n- Pytorch 1.14.0a0+410ce96\n- Datasets 2.12.0\n- Tokenizers 0.13.3\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# hBERTv1_new_pretrain_48_KD_cola\n\nThis model is a 
fine-tuned version of [gokuls/bert_12_layer_model_v1_complete_training_new_48_KD](https://huggingface.co/gokuls/bert_12_layer_model_v1_complete_training_new_48_KD) on the GLUE COLA dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.6163\n- Matthews Correlation: 0.0\n- Accuracy: 0.6913\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 4e-05\n- train_batch_size: 128\n- eval_batch_size: 128\n- seed: 10\n- distributed_type: multi-GPU\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 50\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:--------------------:|:--------:|\n| 0.6281 | 1.0 | 67 | 0.6241 | 0.0 | 0.6913 |\n| 0.6172 | 2.0 | 134 | 0.6471 | 0.0 | 0.6913 |\n| 0.6123 | 3.0 | 201 | 0.6185 | 0.0 | 0.6913 |\n| 0.6138 | 4.0 | 268 | 0.6180 | 0.0 | 0.6913 |\n| 0.6029 | 5.0 | 335 | 0.6170 | 0.0021 | 0.6846 |\n| 0.5795 | 6.0 | 402 | 0.6163 | 0.0 | 0.6913 |\n| 0.556 | 7.0 | 469 | 0.6529 | 0.0735 | 0.6328 |\n| 0.5273 | 8.0 | 536 | 0.6502 | 0.1135 | 0.6510 |\n| 0.5108 | 9.0 | 603 | 0.7095 | 0.1134 | 0.6318 |\n| 0.4911 | 10.0 | 670 | 0.6667 | 0.0454 | 0.6644 |\n| 0.4768 | 11.0 | 737 | 0.6869 | 0.0658 | 0.6232 |\n\n\n### Framework versions\n\n- Transformers 4.30.2\n- Pytorch 1.14.0a0+410ce96\n- Datasets 2.12.0\n- Tokenizers 0.13.3\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"glue\"], \"language\": [\"en\"], \"metrics\": [\"matthews_correlation\", \"accuracy\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"hBERTv1_new_pretrain_48_KD_cola\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"GLUE COLA\", \"type\": \"glue\", \"config\": \"cola\", \"split\": \"validation\", \"args\": \"cola\"}, \"metrics\": [{\"type\": \"matthews_correlation\", \"value\": 0.0, \"name\": \"Matthews Correlation\"}, {\"type\": \"accuracy\", \"value\": 0.6912751793861389, \"name\": \"Accuracy\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45525,"string":"45,525"}}},{"rowIdx":43749,"cells":{"id":{"kind":"string","value":"clboetticher-school/marian-finetuned-kde4-en-to-fr"},"author":{"kind":"string","value":"clboetticher-school"},"task_category":{"kind":"string","value":"translation"},"tags":{"kind":"list like","value":["transformers","pytorch","marian","text2text-generation","translation","generated_from_trainer","dataset:kde4","license:apache-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"marian\",\n \"text2text-generation\",\n \"translation\",\n \"generated_from_trainer\",\n \"dataset:kde4\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-02-14T18:53:38Z","string":"2023-02-14T18:53:38Z"},"last_modified":{"kind":"string","value":"2023-02-14T21:17:54+00:00"},"downloads":{"kind":"number","value":10,"string":"10"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- kde4\nlicense: apache-2.0\ntags:\n- translation\n- generated_from_trainer\nmodel-index:\n- name: marian-finetuned-kde4-en-to-fr\n results: []\n---\n\n\n\n# marian-finetuned-kde4-en-to-fr\n\nThis model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset.\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 32\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1\n- mixed_precision_training: Native AMP\n\n### Framework versions\n\n- Transformers 4.26.1\n- Pytorch 1.13.1+cu116\n- Datasets 2.9.0\n- Tokenizers 0.13.2\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# marian-finetuned-kde4-en-to-fr\n\nThis model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset.\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 32\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 1\n- mixed_precision_training: Native AMP\n\n### Framework versions\n\n- Transformers 4.26.1\n- Pytorch 1.13.1+cu116\n- Datasets 2.9.0\n- Tokenizers 0.13.2\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"kde4\"], \"license\": \"apache-2.0\", \"tags\": [\"translation\", \"generated_from_trainer\"], \"model-index\": [{\"name\": \"marian-finetuned-kde4-en-to-fr\", \"results\": []}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45526,"string":"45,526"}}},{"rowIdx":43750,"cells":{"id":{"kind":"string","value":"MS-Go/autotrain-hjuihu-1974565969"},"author":{"kind":"string","value":"MS-Go"},"task_category":{"kind":"string","value":"summarization"},"tags":{"kind":"list like","value":["transformers","pytorch","autotrain","summarization","unk","dataset:MS-Go/autotrain-data-hjuihu","co2_eq_emissions","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"autotrain\",\n \"summarization\",\n \"unk\",\n \"dataset:MS-Go/autotrain-data-hjuihu\",\n \"co2_eq_emissions\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-11-03T12:26:38Z","string":"2022-11-03T12:26:38Z"},"last_modified":{"kind":"string","value":"2022-11-03T12:58:30+00:00"},"downloads":{"kind":"number","value":11,"string":"11"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- MS-Go/autotrain-data-hjuihu\nlanguage:\n- unk\ntags:\n- autotrain\n- summarization\nwidget:\n- text: I love AutoTrain 🤗\nco2_eq_emissions:\n emissions: 49.671043265609676\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Summarization\n- Model ID: 1974565969\n- CO2 Emissions (in grams): 49.6710\n\n## Validation Metrics\n\n- Loss: 2.889\n- Rouge1: 36.489\n- Rouge2: 7.128\n- RougeL: 18.766\n- RougeLsum: 33.217\n- Gen Len: 141.972\n\n## Usage\n\nYou can use cURL to access this model:\n\n```\n$ curl -X POST -H \"Authorization: Bearer YOUR_HUGGINGFACE_API_KEY\" -H \"Content-Type: application/json\" -d '{\"inputs\": \"I love AutoTrain\"}' https://api-inference.huggingface.co/MS-Go/autotrain-hjuihu-1974565969\n```"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# Model Trained Using AutoTrain\n\n- Problem type: Summarization\n- Model ID: 1974565969\n- CO2 Emissions (in grams): 49.6710\n\n## Validation Metrics\n\n- Loss: 2.889\n- Rouge1: 36.489\n- Rouge2: 7.128\n- RougeL: 18.766\n- RougeLsum: 33.217\n- Gen Len: 141.972\n\n## Usage\n\nYou can use cURL to access this model:\n\n```\n$ curl -X POST -H \"Authorization: Bearer YOUR_HUGGINGFACE_API_KEY\" -H \"Content-Type: application/json\" -d '{\"inputs\": \"I love AutoTrain\"}' https://api-inference.huggingface.co/MS-Go/autotrain-hjuihu-1974565969\n```"},"metadata":{"kind":"string","value":"{\"datasets\": [\"MS-Go/autotrain-data-hjuihu\"], \"language\": [\"unk\"], \"tags\": [\"autotrain\", \"summarization\"], \"widget\": [{\"text\": \"I love AutoTrain 🤗\"}], \"co2_eq_emissions\": {\"emissions\": 49.671043265609676}}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["SUMMARIZATION"],"string":"[\n \"SUMMARIZATION\"\n]"},"__index_level_0__":{"kind":"number","value":45527,"string":"45,527"}}},{"rowIdx":43751,"cells":{"id":{"kind":"string","value":"thrunlab/t5-large_cola_sp0_ar0"},"author":{"kind":"string","value":"thrunlab"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","t5","text-classification","generated_from_trainer","dataset:glue","base_model:google-t5/t5-large","base_model:finetune:google-t5/t5-large","license:apache-2.0","model-index","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"t5\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"dataset:glue\",\n \"base_model:google-t5/t5-large\",\n \"base_model:finetune:google-t5/t5-large\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-10-31T17:48:38Z","string":"2023-10-31T17:48:38Z"},"last_modified":{"kind":"string","value":"2023-10-31T20:41:40+00:00"},"downloads":{"kind":"number","value":31,"string":"31"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: t5-large\ndatasets:\n- glue\nlicense: apache-2.0\nmetrics:\n- accuracy\ntags:\n- 
generated_from_trainer\nmodel-index:\n- name: t5-large_cola_sp0_ar0\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: glue\n type: glue\n config: cola\n split: validation\n args: cola\n metrics:\n - type: accuracy\n value: 0.880859375\n name: Accuracy\n---\n\n\n\n# t5-large_cola_sp0_ar0\n\nThis model is a fine-tuned version of [t5-large](https://huggingface.co/t5-large) on the glue dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.4179\n- Accuracy: 0.8809\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 16\n- eval_batch_size: 32\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 20\n- num_epochs: 6\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|\n| 0.5885 | 0.05 | 25 | 0.6751 | 0.6913 |\n| 0.5475 | 0.11 | 50 | 0.5338 | 0.6913 |\n| 0.5122 | 0.16 | 75 | 0.4847 | 0.7919 |\n| 0.4486 | 0.21 | 100 | 0.5089 | 0.7996 |\n| 0.4087 | 0.27 | 125 | 0.5139 | 0.8063 |\n| 0.4022 | 0.32 | 150 | 0.5188 | 0.8035 |\n| 0.4245 | 0.37 | 175 | 0.5196 | 0.7987 |\n| 0.4298 | 0.42 | 200 | 0.6226 | 0.8006 |\n| 0.4326 | 0.48 | 225 | 0.6169 | 0.8015 |\n| 0.4321 | 0.53 | 250 | 0.6173 | 0.7987 |\n| 0.4288 | 0.58 | 275 | 0.4786 | 0.8102 |\n| 0.3914 | 0.64 | 300 | 0.5147 | 0.8054 |\n| 0.3519 | 0.69 | 325 | 0.5691 | 0.8150 |\n| 0.4036 | 0.74 | 350 | 0.4560 | 0.8236 |\n| 0.3706 | 0.8 | 375 | 0.4640 | 0.8245 |\n| 0.3584 | 0.85 | 400 | 0.4605 | 0.8207 |\n| 0.3539 | 0.9 | 425 | 0.4932 | 0.8217 |\n| 0.3982 | 0.96 | 450 | 0.5397 | 0.8073 |\n| 0.3352 | 1.01 | 475 | 0.5490 | 0.8150 |\n| 0.2631 | 1.06 | 500 | 0.9244 | 0.8121 |\n| 0.2992 | 1.11 | 525 | 0.5666 | 0.8169 |\n| 0.2308 | 1.17 | 550 | 0.7285 | 0.8178 |\n| 0.2893 | 1.22 | 575 | 0.6907 | 0.8198 |\n| 0.2809 | 1.27 | 600 | 0.4998 | 0.8140 |\n| 0.2469 | 1.33 | 625 | 0.7260 | 0.8236 |\n| 0.331 | 1.38 | 650 | 0.5812 | 0.8293 |\n| 0.286 | 1.43 | 675 | 0.5102 | 0.8360 |\n| 0.347 | 1.49 | 700 | 0.5696 | 0.8255 |\n| 0.2971 | 1.54 | 725 | 0.4114 | 0.8380 |\n| 0.3048 | 1.59 | 750 | 0.5466 | 0.8169 |\n| 0.3168 | 1.65 | 775 | 0.4787 | 0.8274 |\n| 0.2247 | 1.7 | 800 | 0.7926 | 0.8063 |\n| 0.2666 | 1.75 | 825 | 0.5763 | 0.8274 |\n| 0.2856 | 1.8 | 850 | 0.5131 | 0.8303 |\n| 0.2967 | 1.86 | 875 | 0.4970 | 0.8293 |\n| 0.296 | 1.91 | 900 | 0.5532 | 0.8293 |\n| 0.2828 | 1.96 | 925 | 0.4777 | 0.8274 |\n| 0.2708 | 2.02 | 950 | 0.5433 | 0.8351 |\n| 0.1406 | 2.07 | 975 | 0.6351 | 0.8351 |\n| 0.2046 | 2.12 | 1000 | 0.6058 | 0.8332 |\n| 0.2227 | 2.18 | 1025 | 0.5616 | 0.8408 |\n| 0.1551 | 2.23 | 1050 | 1.0299 | 0.8360 |\n| 0.1465 | 2.28 | 1075 | 0.7842 | 0.8380 |\n| 0.2171 | 2.34 | 1100 | 0.6329 | 0.8437 |\n| 0.1588 | 2.39 | 1125 | 0.7575 | 0.8418 |\n| 0.4245 | 2.44 | 1150 | 0.7603 | 0.8351 |\n| 0.2124 | 2.49 | 1175 | 0.5838 | 0.8447 |\n| 0.2333 | 2.55 | 1200 | 0.4896 | 0.8418 |\n| 0.1943 | 2.6 | 1225 | 0.6343 | 0.8332 |\n| 0.1961 | 2.65 | 1250 | 0.6343 | 0.8284 |\n| 0.1981 | 2.71 | 1275 | 0.6145 | 0.8332 |\n| 0.2151 | 2.76 | 1300 | 0.6335 | 0.8360 |\n| 0.1634 | 2.81 | 1325 | 1.1357 | 0.8399 |\n| 0.1526 | 2.87 | 1350 | 1.0044 | 0.8293 |\n| 0.2096 | 2.92 | 1375 | 
0.7761 | 0.8360 |\n| 0.2135 | 2.97 | 1400 | 0.9338 | 0.8351 |\n| 0.155 | 3.03 | 1425 | 3.3297 | 0.8360 |\n| 0.3667 | 3.08 | 1450 | 4.0564 | 0.8370 |\n| 0.5925 | 3.13 | 1475 | 6.7411 | 0.8408 |\n| 0.5866 | 3.18 | 1500 | 7.1940 | 0.8399 |\n| 0.3812 | 3.24 | 1525 | 7.0097 | 0.8351 |\n| 0.1041 | 3.29 | 1550 | 7.0157 | 0.8351 |\n| 0.3451 | 3.34 | 1575 | 6.2653 | 0.8418 |\n| 0.1121 | 3.4 | 1600 | 4.2608 | 0.8485 |\n\n\n### Framework versions\n\n- Transformers 4.33.2\n- Pytorch 2.0.1+cu117\n- Datasets 2.14.5\n- Tokenizers 0.11.6\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# t5-large_cola_sp0_ar0\n\nThis model is a fine-tuned version of [t5-large](https://huggingface.co/t5-large) on the glue dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.4179\n- Accuracy: 0.8809\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 16\n- eval_batch_size: 32\n- seed: 1\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_steps: 20\n- num_epochs: 6\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|\n| 0.5885 | 0.05 | 25 | 0.6751 | 0.6913 |\n| 0.5475 | 0.11 | 50 | 0.5338 | 0.6913 |\n| 0.5122 | 0.16 | 75 | 0.4847 | 0.7919 |\n| 0.4486 | 0.21 | 100 | 0.5089 | 0.7996 |\n| 0.4087 | 0.27 | 125 | 0.5139 | 0.8063 |\n| 0.4022 | 0.32 | 150 | 0.5188 | 0.8035 |\n| 0.4245 | 0.37 | 175 | 0.5196 | 0.7987 |\n| 0.4298 | 0.42 | 200 | 0.6226 | 0.8006 |\n| 0.4326 | 0.48 | 225 | 0.6169 | 0.8015 |\n| 0.4321 | 0.53 | 250 | 0.6173 | 0.7987 |\n| 0.4288 | 0.58 | 275 | 0.4786 | 0.8102 |\n| 0.3914 | 0.64 | 300 | 0.5147 | 0.8054 |\n| 0.3519 | 0.69 | 325 | 0.5691 | 0.8150 |\n| 0.4036 | 0.74 | 350 | 0.4560 | 0.8236 |\n| 0.3706 | 0.8 | 375 | 0.4640 | 0.8245 |\n| 0.3584 | 0.85 | 400 | 0.4605 | 0.8207 |\n| 0.3539 | 0.9 | 425 | 0.4932 | 0.8217 |\n| 0.3982 | 0.96 | 450 | 0.5397 | 0.8073 |\n| 0.3352 | 1.01 | 475 | 0.5490 | 0.8150 |\n| 0.2631 | 1.06 | 500 | 0.9244 | 0.8121 |\n| 0.2992 | 1.11 | 525 | 0.5666 | 0.8169 |\n| 0.2308 | 1.17 | 550 | 0.7285 | 0.8178 |\n| 0.2893 | 1.22 | 575 | 0.6907 | 0.8198 |\n| 0.2809 | 1.27 | 600 | 0.4998 | 0.8140 |\n| 0.2469 | 1.33 | 625 | 0.7260 | 0.8236 |\n| 0.331 | 1.38 | 650 | 0.5812 | 0.8293 |\n| 0.286 | 1.43 | 675 | 0.5102 | 0.8360 |\n| 0.347 | 1.49 | 700 | 0.5696 | 0.8255 |\n| 0.2971 | 1.54 | 725 | 0.4114 | 0.8380 |\n| 0.3048 | 1.59 | 750 | 0.5466 | 0.8169 |\n| 0.3168 | 1.65 | 775 | 0.4787 | 0.8274 |\n| 0.2247 | 1.7 | 800 | 0.7926 | 0.8063 |\n| 0.2666 | 1.75 | 825 | 0.5763 | 0.8274 |\n| 0.2856 | 1.8 | 850 | 0.5131 | 0.8303 |\n| 0.2967 | 1.86 | 875 | 0.4970 | 0.8293 |\n| 0.296 | 1.91 | 900 | 0.5532 | 0.8293 |\n| 0.2828 | 1.96 | 925 | 0.4777 | 0.8274 |\n| 0.2708 | 2.02 | 950 | 0.5433 | 0.8351 |\n| 0.1406 | 2.07 | 975 | 0.6351 | 0.8351 |\n| 0.2046 | 2.12 | 1000 | 0.6058 | 0.8332 |\n| 0.2227 | 2.18 | 1025 | 0.5616 | 0.8408 |\n| 0.1551 | 2.23 | 1050 | 1.0299 | 0.8360 |\n| 0.1465 | 2.28 | 1075 | 0.7842 | 0.8380 |\n| 0.2171 | 2.34 | 1100 | 0.6329 | 0.8437 |\n| 0.1588 | 2.39 | 1125 | 0.7575 | 0.8418 |\n| 0.4245 | 2.44 | 1150 | 0.7603 | 0.8351 |\n| 0.2124 | 2.49 | 1175 

Quantization made by Richard Erkhov.

[Github](https://github.com/RichardErkhov)

[Discord](https://discord.gg/pvy7H8DZMG)

[Request more models](https://github.com/RichardErkhov/quant_request)


Llama-3.2-3B-Mix-Skill - GGUF
- Model creator: https://huggingface.co/bunnycore/
- Original model: https://huggingface.co/bunnycore/Llama-3.2-3B-Mix-Skill/


| Name | Quant method | Size |
| ---- | ---- | ---- |
| [Llama-3.2-3B-Mix-Skill.Q2_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q2_K.gguf) | Q2_K | 1.39GB |
| [Llama-3.2-3B-Mix-Skill.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q3_K_S.gguf) | Q3_K_S | 1.59GB |
| [Llama-3.2-3B-Mix-Skill.Q3_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q3_K.gguf) | Q3_K | 1.73GB |
| [Llama-3.2-3B-Mix-Skill.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q3_K_M.gguf) | Q3_K_M | 1.73GB |
| [Llama-3.2-3B-Mix-Skill.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q3_K_L.gguf) | Q3_K_L | 1.85GB |
| [Llama-3.2-3B-Mix-Skill.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.IQ4_XS.gguf) | IQ4_XS | 1.91GB |
| [Llama-3.2-3B-Mix-Skill.Q4_0.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_0.gguf) | Q4_0 | 1.99GB |
| [Llama-3.2-3B-Mix-Skill.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.IQ4_NL.gguf) | IQ4_NL | 2.0GB |
| [Llama-3.2-3B-Mix-Skill.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_K_S.gguf) | Q4_K_S | 2.0GB |
| [Llama-3.2-3B-Mix-Skill.Q4_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_K.gguf) | Q4_K | 2.09GB |
| [Llama-3.2-3B-Mix-Skill.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_K_M.gguf) | Q4_K_M | 2.09GB |
| [Llama-3.2-3B-Mix-Skill.Q4_1.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_1.gguf) | Q4_1 | 2.18GB |
| [Llama-3.2-3B-Mix-Skill.Q5_0.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_0.gguf) | Q5_0 | 2.37GB |
| [Llama-3.2-3B-Mix-Skill.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_K_S.gguf) | Q5_K_S | 2.37GB |
| [Llama-3.2-3B-Mix-Skill.Q5_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_K.gguf) | Q5_K | 2.41GB |
| [Llama-3.2-3B-Mix-Skill.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_K_M.gguf) | Q5_K_M | 2.41GB |
| [Llama-3.2-3B-Mix-Skill.Q5_1.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_1.gguf) | Q5_1 | 2.55GB |
| [Llama-3.2-3B-Mix-Skill.Q6_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q6_K.gguf) | Q6_K | 2.76GB |
| [Llama-3.2-3B-Mix-Skill.Q8_0.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q8_0.gguf) | Q8_0 | 3.58GB |
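
Any of the files above can be pulled straight from the Hub and run locally; a minimal sketch using `llama-cpp-python` (the prompt is illustrative, and `Q4_K_M` is simply a common size/quality trade-off):

```python
# pip install llama-cpp-python huggingface_hub
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf",
    filename="Llama-3.2-3B-Mix-Skill.Q4_K_M.gguf",
    n_ctx=4096,  # context window; adjust to available memory
)
out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "Summarize the TIES merge method in one sentence."}],
    max_tokens=128,
)
print(out["choices"][0]["message"]["content"])
```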

Original model description:
---
library_name: transformers
tags:
- mergekit
- merge
base_model:
- bunnycore/Llama-3.2-3B-Long-Think
- huihui-ai/Llama-3.2-3B-Instruct-abliterated
- bunnycore/Llama-3.2-3B-Pure-RP
model-index:
- name: Llama-3.2-3B-Mix-Skill
  results:
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: IFEval (0-Shot)
      type: HuggingFaceH4/ifeval
      args:
        num_few_shot: 0
    metrics:
    - type: inst_level_strict_acc and prompt_level_strict_acc
      value: 64.04
      name: strict accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: BBH (3-Shot)
      type: BBH
      args:
        num_few_shot: 3
    metrics:
    - type: acc_norm
      value: 23.78
      name: normalized accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MATH Lvl 5 (4-Shot)
      type: hendrycks/competition_math
      args:
        num_few_shot: 4
    metrics:
    - type: exact_match
      value: 12.69
      name: exact match
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: GPQA (0-shot)
      type: Idavidrein/gpqa
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 1.57
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MuSR (0-shot)
      type: TAUR-Lab/MuSR
      args:
        num_few_shot: 0
    metrics:
    - type: acc_norm
      value: 2.75
      name: acc_norm
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill
      name: Open LLM Leaderboard
  - task:
      type: text-generation
      name: Text Generation
    dataset:
      name: MMLU-PRO (5-shot)
      type: TIGER-Lab/MMLU-Pro
      config: main
      split: test
      args:
        num_few_shot: 5
    metrics:
    - type: acc
      value: 23.56
      name: accuracy
    source:
      url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill
      name: Open LLM Leaderboard
---
This language model is a merged version of several pre-trained models, designed to excel in roleplay, long-form question answering, and prompt following tasks. It was created using the TIES merge method with huihui-ai/Llama-3.2-3B-Instruct-abliterated as the base model.

# Intended Use:

This model is suitable for a variety of applications, including:

- Creative Writing: Generating stories, poems, scripts, and other forms of creative text.
- Question Answering: Providing comprehensive and informative answers to a wide range of questions.
- Role-Playing: Engaging in interactive role-playing scenarios with users.
- Prompt Following: Completing tasks and generating text based on specific prompts or instructions.

# merge

This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

## Merge Details
### Merge Method

This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method using [huihui-ai/Llama-3.2-3B-Instruct-abliterated](https://huggingface.co/huihui-ai/Llama-3.2-3B-Instruct-abliterated) as a base.

### Models Merged

The following models were included in the merge:
* [bunnycore/Llama-3.2-3B-Long-Think](https://huggingface.co/bunnycore/Llama-3.2-3B-Long-Think)
* [bunnycore/Llama-3.2-3B-Pure-RP](https://huggingface.co/bunnycore/Llama-3.2-3B-Pure-RP)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
models:
  - model: bunnycore/Llama-3.2-3B-Long-Think
    parameters:
      density: 0.5
      weight: 0.5
  - model: bunnycore/Llama-3.2-3B-Pure-RP
    parameters:
      density: 0.5
      weight: 0.5

merge_method: ties
base_model: huihui-ai/Llama-3.2-3B-Instruct-abliterated
parameters:
  normalize: false
  int8_mask: true
dtype: float16
```
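
Saving the YAML above to a file and feeding it to mergekit's CLI reproduces a merge of this shape; a minimal sketch (the output directory name is arbitrary, and `--cuda` assumes a GPU is available):

```shell
pip install mergekit
mergekit-yaml merge-config.yaml ./Llama-3.2-3B-Mix-Skill --cuda
```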

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)
Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_bunnycore__Llama-3.2-3B-Mix-Skill)

| Metric             |Value|
|--------------------|----:|
|Avg.                |21.40|
|IFEval (0-Shot)     |64.04|
|BBH (3-Shot)        |23.78|
|MATH Lvl 5 (4-Shot) |12.69|
|GPQA (0-shot)       | 1.57|
|MuSR (0-shot)       | 2.75|
|MMLU-PRO (5-shot)   |23.56|
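
The Avg. row is simply the arithmetic mean of the six task scores, which is easy to verify:

```python
scores = [64.04, 23.78, 12.69, 1.57, 2.75, 23.56]
print(round(sum(scores) / len(scores), 2))  # 21.4
```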

---
base_model: Helsinki/opus-mt-en-mul
datasets:
- ai4bharat/samanantar
- wmt/wmt19
language:
- en
- hi
- gu
license: apache-2.0
metrics:
- bleu
---

# Finetuning

This model is a fine-tuned version of [Varsha00/finetuned-opusmt-en-to-hi](https://huggingface.co/Varsha00/finetuned-opusmt-en-to-hi) on the Samanantar and WMT News datasets.

- Source group: English
- Target group: Gujarati
- Model: transformer

## Model description

This model is a sequentially finetuned version of the Helsinki-NLP/opus-mt-en-mul model, designed for translating between English and Gujarati.
The model was initially finetuned on the Hindi language using a substantial dataset and subsequently finetuned on Gujarati using a smaller dataset.
This approach, known as sequential finetuning or cascaded finetuning, allows the model to leverage the knowledge gained from Hindi to improve its
performance on Gujarati translations, despite the limited data available for the latter.

## Training and evaluation data

- ai4bharat/samanantar
- WMT-News

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-5
- warmup_steps: 500
- gradient_accumulation_steps: 2
- total_train_batch_size: 16
- num_epochs: 10

### Benchmark Evaluation

- BLEU score on Tatoeba: 27.7761903401179
- BLEU score on IN-22: 16.437183600289

### Framework versions

- Transformers 4.42.3
- Pytorch 2.1.2
- Datasets 2.20.0
- Tokenizers 0.19.1
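
A minimal usage sketch with the standard Transformers translation pipeline. The checkpoint descends from `opus-mt-en-mul`, which selects the target language with a `>>xxx<<` token (`>>guj<<` for Gujarati); whether this fine-tune still requires the token is an assumption, so try both forms.

```python
from transformers import pipeline

translator = pipeline("translation", model="Varsha00/finetuned-opusmt-en-hi-gu")
# ">>guj<<" follows the opus-mt-en-mul multi-target convention (assumption).
print(translator(">>guj<< How is the weather today?")[0]["translation_text"])
```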

---
base_model: openai/whisper-medium
datasets:
- ymoslem/IWSLT2023-GA-EN
- ymoslem/FLEURS-GA-EN
- ymoslem/BitesizeIrish-GA-EN
- ymoslem/SpokenWords-GA-EN-MTed
- ymoslem/Tatoeba-Speech-Irish
- ymoslem/Wikimedia-Speech-Irish
language:
- ga
- en
license: apache-2.0
metrics:
- bleu
- wer
tags:
- generated_from_trainer
model-index:
- name: Whisper Medium GA-EN Speech Translation
  results:
  - task:
      type: automatic-speech-recognition
      name: Automatic Speech Recognition
    dataset:
      name: IWSLT-2023, FLEURS, BiteSize, SpokenWords, Tatoeba, and Wikimedia, augmented with noise
      type: ymoslem/IWSLT2023-GA-EN
    metrics:
    - type: bleu
      value: 32.01
      name: Bleu
    - type: wer
      value: 62.76452048626745
      name: Wer
---

# Whisper Medium GA-EN Speech Translation

This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the IWSLT-2023, FLEURS, BiteSize, SpokenWords, Tatoeba, and Wikimedia datasets, augmented with noise.
The datasets are augmented in two ways: noise augmentation, and truncating low-amplitude samples.
The best model checkpoint (this version) based on ChrF is at step 2900, epoch 0.6349,
and it achieves the following results on the evaluation set:
- Loss: 1.1883
- Bleu: 32.88
- Chrf: 51.52
- Wer: 62.0441

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 8
- eval_batch_size: 8
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 32
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_steps: 0.02
- training_steps: 3000
- mixed_precision_training: Native AMP

### Training results

| Training Loss | Epoch | Step | Validation Loss | Bleu | Chrf | Wer |
|:-------------:|:------:|:----:|:---------------:|:-----:|:-----:|:--------:|
| 2.4487 | 0.0219 | 100 | 1.9518 | 8.34 | 24.49 | 117.2445 |
| 2.11 | 0.0438 | 200 | 1.6630 | 15.32 | 32.12 | 84.0612 |
| 1.9757 | 0.0657 | 300 | 1.5366 | 10.86 | 33.42 | 131.7875 |
| 1.7964 | 0.0876 | 400 | 1.4825 | 19.81 | 36.71 | 81.9451 |
| 1.6422 | 0.1095 | 500 | 1.4432 | 18.83 | 40.4 | 84.0162 |
| 1.3839 | 0.1314 | 600 | 1.4169 | 24.91 | 40.87 | 69.0230 |
| 1.352 | 0.1533 | 700 | 1.4340 | 25.01 | 41.57 | 71.5894 |
| 1.2434 | 0.1752 | 800 | 1.3813 | 24.05 | 41.29 | 73.7506 |
| 1.2223 | 0.1970 | 900 | 1.3578 | 25.89 | 41.61 | 70.5988 |
| 1.0414 | 0.2189 | 1000 | 1.3075 | 27.45 | 44.17 | 68.2575 |
| 0.9199 | 0.2408 | 1100 | 1.3022 | 23.14 | 44.3 | 84.1513 |
| 0.8648 | 0.2627 | 1200 | 1.3050 | 23.36 | 43.37 | 72.4448 |
| 0.8469 | 0.2846 | 1300 | 1.2853 | 28.37 | 45.97 | 67.1319 |
| 0.7649 | 0.3065 | 1400 | 1.2755 | 28.56 | 46.76 | 66.0964 |
| 0.7321 | 0.3284 | 1500 | 1.2750 | 27.23 | 46.1 | 69.3381 |
| 0.6541 | 0.3503 | 1600 | 1.2557 | 30.02 | 48.06 | 65.6011 |
| 0.6107 | 0.3722 | 1700 | 1.2520 | 30.41 | 49.23 | 64.2954 |
| 0.5738 | 0.3941 | 1800 | 1.2435 | 32.45 | 50.27 | 63.4399 |
| 0.4983 | 0.4160 | 1900 | 1.2007 | 31.17 | 48.58 | 64.0702 |
| 0.4439 | 0.4379 | 2000 | 1.2140 | 32.29 | 50.37 | 60.6033 |
| 0.367 | 0.4598 | 2100 | 1.2230 | 29.54 | 49.14 | 67.7172 |
| 0.2807 | 0.4817 | 2200 | 1.2277 | 33.1 | 51.21 | 62.9446 |
| 0.2621 | 0.5036 | 2300 | 1.2441 | 30.59 | 49.49 | 64.8807 |
| 0.2965 | 0.5255 | 2400 | 1.1969 | 31.82 | 49.67 | 63.5299 |
| 0.236 | 0.5473 | 2500 | 1.2275 | 31.17 | 50.29 | 65.1959 |
| 0.229 | 0.5692 | 2600 | 1.2008 | 30.02 | 50.27 | 70.6439 |
| 0.164 | 0.5911 | 2700 | 1.2192 | 31.37 | 50.57 | 63.6200 |
| 0.1786 | 0.6130 | 2800 | 1.1965 | 31.81 | 50.13 | 62.8546 |
| 0.1987 | 0.6349 | 2900 | 1.1883 | 32.88 | 51.52 | 62.0441 |
| 0.1633 | 0.6568 | 3000 | 1.1903 | 32.01 | 50.38 | 62.7645 |


### Framework versions

- Transformers 4.40.0
- Pytorch 2.0.1+cu118
- Datasets 2.19.0
- Tokenizers 0.19.1
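
A minimal usage sketch: since the checkpoint was fine-tuned for Irish-to-English speech translation, the standard ASR pipeline should emit English text directly from Irish audio (the audio filename is a placeholder):

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="ymoslem/whisper-medium-ga2en-v4")
result = asr("irish_speech.wav", chunk_length_s=30)  # placeholder audio file
print(result["text"])  # English translation of the Irish audio
```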

---
base_model:
- ibm-granite/granite-3.0-2b-base
library_name: transformers
license: apache-2.0
pipeline_tag: text-generation
tags:
- language
- granite-3.0
inference: false
model-index:
- name: granite-3.0-2b-instruct
  results:
  - task:
      type: text-generation
    dataset:
      name: IFEval
      type: instruction-following
    metrics:
    - type: pass@1
      value: 46.07
      name: pass@1
    - type: pass@1
      value: 7.66
      name: pass@1
  - task:
      type: text-generation
    dataset:
      name: AGI-Eval
      type: human-exams
    metrics:
    - type: pass@1
      value: 29.75
      name: pass@1
    - type: pass@1
      value: 56.03
      name: pass@1
    - type: pass@1
      value: 27.92
      name: pass@1
  - task:
      type: text-generation
    dataset:
      name: OBQA
      type: commonsense
    metrics:
    - type: pass@1
      value: 43.2
      name: pass@1
    - type: pass@1
      value: 66.36
      name: pass@1
    - type: pass@1
      value: 76.79
      name: pass@1
    - type: pass@1
      value: 71.9
      name: pass@1
    - type: pass@1
      value: 53.37
      name: pass@1
  - task:
      type: text-generation
    dataset:
      name: BoolQ
      type: reading-comprehension
    metrics:
    - type: pass@1
      value: 84.89
      name: pass@1
    - type: pass@1
      value: 19.73
      name: pass@1
  - task:
      type: text-generation
    dataset:
      name: ARC-C
      type: reasoning
    metrics:
    - type: pass@1
      value: 54.35
      name: pass@1
    - type: pass@1
      value: 28.61
      name: pass@1
    - type: pass@1
      value: 43.74
      name: pass@1
  - task:
      type: text-generation
    dataset:
      name: HumanEvalSynthesis
      type: code
    metrics:
    - type: pass@1
      value: 50.61
      name: pass@1
    - type: pass@1
      value: 45.58
      name: pass@1
    - type: pass@1
      value: 51.83
      name: pass@1
    - type: pass@1
      value: 41
      name: pass@1
  - task:
      type: text-generation
    dataset:
      name: GSM8K
      type: math
    metrics:
    - type: pass@1
      value: 59.66
      name: pass@1
    - type: pass@1
      value: 23.66
      name: pass@1
  - task:
      type: text-generation
    dataset:
      name: PAWS-X (7 langs)
      type: multilingual
    metrics:
    - type: pass@1
      value: 61.42
      name: pass@1
    - type: pass@1
      value: 37.13
      name: pass@1
---

# Quantized Version of ibm-granite/granite-3.0-2b-instruct

This model is a quantized variant of the ibm-granite/granite-3.0-2b-instruct model, optimized for use with Jlama, a Java-based inference engine. The quantization process reduces the model's size and improves inference speed, while maintaining high accuracy for efficient deployment in production environments.

For more information on Jlama, visit the [Jlama GitHub repository](https://github.com/tjake/jlama).

---

# Granite-3.0-2B-Instruct

**Model Summary:**
Granite-3.0-2B-Instruct is a 2B parameter model finetuned from *Granite-3.0-2B-Base* using a combination of open source instruction datasets with permissive license and internally collected synthetic datasets. This model is developed using a diverse set of techniques with a structured chat format, including supervised finetuning, model alignment using reinforcement learning, and model merging.

- **Developers:** Granite Team, IBM
- **GitHub Repository:** [ibm-granite/granite-3.0-language-models](https://github.com/ibm-granite/granite-3.0-language-models)
- **Website**: [Granite Docs](https://www.ibm.com/granite/docs/)
- **Paper:** [Granite 3.0 Language Models](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/paper.pdf)
- **Release Date**: October 21st, 2024
- **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0)

**Supported Languages:**
English, German, Spanish, French, Japanese, Portuguese, Arabic, Czech, Italian, Korean, Dutch, and Chinese.
Users may finetune Granite 3.0 models for languages beyond these 12 languages.

**Intended use:**
The model is designed to respond to general instructions and can be used to build AI assistants for multiple domains, including business applications.

*Capabilities*
* Summarization
* Text classification
* Text extraction
* Question-answering
* Retrieval Augmented Generation (RAG)
* Code related tasks
* Function-calling tasks
* Multilingual dialog use cases

**Generation:**
This is a simple example of how to use the Granite-3.0-2B-Instruct model.

Install the following libraries:

```shell
pip install torch torchvision torchaudio
pip install accelerate
pip install transformers
```

Then, copy the snippet from the section that is relevant for your use case.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

# pick the GPU when available; tensors cannot be moved to the string "auto"
device = "cuda" if torch.cuda.is_available() else "cpu"
model_path = "ibm-granite/granite-3.0-2b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_path)
# drop device_map if running on CPU
model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device)
model.eval()
# change input text as desired
chat = [
    { "role": "user", "content": "Please list one IBM Research laboratory located in the United States. You should only output its name and location." },
]
chat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True)
# tokenize the text
input_tokens = tokenizer(chat, return_tensors="pt").to(device)
# generate output tokens
output = model.generate(**input_tokens,
                        max_new_tokens=100)
# decode output tokens into text
output = tokenizer.batch_decode(output)
# print output
print(output)
```

**Model Architecture:**
Granite-3.0-2B-Instruct is based on a decoder-only dense transformer architecture. Core components of this architecture are: GQA and RoPE, MLP with SwiGLU, RMSNorm, and shared input/output embeddings.

| Model | 2B Dense | 8B Dense | 1B MoE | 3B MoE |
| :-------- | :-------- | :--------| :--------| :--------|
| Embedding size | **2048** | 4096 | 1024 | 1536 |
| Number of layers | **40** | 40 | 24 | 32 |
| Attention head size | **64** | 128 | 64 | 64 |
| Number of attention heads | **32** | 32 | 16 | 24 |
| Number of KV heads | **8** | 8 | 8 | 8 |
| MLP hidden size | **8192** | 12800 | 512 | 512 |
| MLP activation | **SwiGLU** | SwiGLU | SwiGLU | SwiGLU |
| Number of Experts | **—** | — | 32 | 40 |
| MoE TopK | **—** | — | 8 | 8 |
| Initialization std | **0.1** | 0.1 | 0.1 | 0.1 |
| Sequence Length | **4096** | 4096 | 4096 | 4096 |
| Position Embedding | **RoPE** | RoPE | RoPE | RoPE |
| # Parameters | **2.5B** | 8.1B | 1.3B | 3.3B |
| # Active Parameters | **2.5B** | 8.1B | 400M | 800M |
| # Training tokens | **12T** | 12T | 10T | 10T |
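
As a sanity check, the 2B-dense column roughly reproduces the reported 2.5B parameter count from the sizes above. The vocabulary size used here is an assumption, since the table does not list it, and norms and biases are ignored:

```python
d, layers, kv_heads, head_dim, mlp, vocab = 2048, 40, 8, 64, 8192, 49_152  # vocab is an assumption
attn = 2 * d * d + 2 * d * (kv_heads * head_dim)  # Q/O projections plus narrower K/V under GQA
ffn = 3 * d * mlp                                 # SwiGLU: gate, up and down projections
total = layers * (attn + ffn) + d * vocab         # shared input/output embeddings counted once
print(f"~{total / 1e9:.2f}B parameters")          # ~2.53B, consistent with the table
```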
**Training Data:**
Overall, our SFT data is largely comprised of three key sources: (1) publicly available datasets with permissive license, (2) internal synthetic data targeting specific capabilities, and (3) very small amounts of human-curated data. A detailed attribution of datasets can be found in the [Granite Technical Report](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/paper.pdf) and [Accompanying Author List](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/author-ack.pdf).

**Infrastructure:**
We train Granite 3.0 Language Models using IBM's super computing cluster, Blue Vela, which is outfitted with NVIDIA H100 GPUs. This cluster provides a scalable and efficient infrastructure for training our models over thousands of GPUs while minimizing environmental impact by utilizing 100% renewable energy sources.

**Ethical Considerations and Limitations:**
Granite 3.0 Instruct Models are primarily finetuned using instruction-response pairs mostly in English, but also multilingual data covering eleven languages. Although this model can handle multilingual dialog use cases, its performance might not be similar to English tasks. In such cases, introducing a small number of examples (few-shot) can help the model generate more accurate outputs. While this model has been aligned by keeping safety in consideration, the model may in some cases produce inaccurate, biased, or unsafe responses to user prompts. So we urge the community to use this model with proper safety testing and tuning tailored for their specific tasks.
langs)\", \"type\": \"multilingual\"}, \"metrics\": [{\"type\": \"pass@1\", \"value\": 61.42, \"name\": \"pass@1\"}, {\"type\": \"pass@1\", \"value\": 37.13, \"name\": \"pass@1\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION","SUMMARIZATION"],"string":"[\n \"TEXT_CLASSIFICATION\",\n \"SUMMARIZATION\"\n]"},"__index_level_0__":{"kind":"number","value":45532,"string":"45,532"}}},{"rowIdx":43756,"cells":{"id":{"kind":"string","value":"lomov/strategydisofmaterialimpactsv1"},"author":{"kind":"string","value":"lomov"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","tensorboard","safetensors","roberta","text-classification","autotrain","dataset:strategydisofmaterialimpactsv1/autotrain-data","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"tensorboard\",\n \"safetensors\",\n \"roberta\",\n \"text-classification\",\n \"autotrain\",\n \"dataset:strategydisofmaterialimpactsv1/autotrain-data\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-05-20T01:02:07Z","string":"2024-05-20T01:02:07Z"},"last_modified":{"kind":"string","value":"2024-05-20T01:03:45+00:00"},"downloads":{"kind":"number","value":9,"string":"9"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- strategydisofmaterialimpactsv1/autotrain-data\ntags:\n- autotrain\n- text-classification\nwidget:\n- text: I love AutoTrain\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Text Classification\n\n## Validation Metrics\nloss: 0.4904100298881531\n\nf1_macro: 0.851601435352396\n\nf1_micro: 0.8658536585365854\n\nf1_weighted: 0.8538194199208925\n\nprecision_macro: 0.8594329005283454\n\nprecision_micro: 0.8658536585365854\n\nprecision_weighted: 0.8606490578892111\n\nrecall_macro: 0.862797619047619\n\nrecall_micro: 0.8658536585365854\n\nrecall_weighted: 0.8658536585365854\n\naccuracy: 0.8658536585365854\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# Model Trained Using AutoTrain\n\n- Problem type: Text Classification\n\n## Validation Metrics\nloss: 0.4904100298881531\n\nf1_macro: 0.851601435352396\n\nf1_micro: 0.8658536585365854\n\nf1_weighted: 0.8538194199208925\n\nprecision_macro: 0.8594329005283454\n\nprecision_micro: 0.8658536585365854\n\nprecision_weighted: 0.8606490578892111\n\nrecall_macro: 0.862797619047619\n\nrecall_micro: 0.8658536585365854\n\nrecall_weighted: 0.8658536585365854\n\naccuracy: 0.8658536585365854\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"strategydisofmaterialimpactsv1/autotrain-data\"], \"tags\": [\"autotrain\", \"text-classification\"], \"widget\": [{\"text\": \"I love AutoTrain\"}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45533,"string":"45,533"}}},{"rowIdx":43757,"cells":{"id":{"kind":"string","value":"seongil-dn/bge-m3-kor-retrieval-bs1024-checkpoint-236"},"author":{"kind":"string","value":"seongil-dn"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list 
like","value":["sentence-transformers","safetensors","xlm-roberta","sentence-similarity","feature-extraction","generated_from_trainer","dataset_size:482743","loss:CachedMultipleNegativesRankingLoss","arxiv:1908.10084","arxiv:2101.06983","base_model:BAAI/bge-m3","base_model:finetune:BAAI/bge-m3","autotrain_compatible","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"safetensors\",\n \"xlm-roberta\",\n \"sentence-similarity\",\n \"feature-extraction\",\n \"generated_from_trainer\",\n \"dataset_size:482743\",\n \"loss:CachedMultipleNegativesRankingLoss\",\n \"arxiv:1908.10084\",\n \"arxiv:2101.06983\",\n \"base_model:BAAI/bge-m3\",\n \"base_model:finetune:BAAI/bge-m3\",\n \"autotrain_compatible\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-12-08T15:07:26Z","string":"2024-12-08T15:07:26Z"},"last_modified":{"kind":"string","value":"2024-12-08T15:08:42+00:00"},"downloads":{"kind":"number","value":6,"string":"6"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: BAAI/bge-m3\nlibrary_name: sentence-transformers\npipeline_tag: sentence-similarity\ntags:\n- sentence-transformers\n- sentence-similarity\n- feature-extraction\n- generated_from_trainer\n- dataset_size:482743\n- loss:CachedMultipleNegativesRankingLoss\nwidget:\n- source_sentence: 언제 청년층 생산가능인구 수가 9,149,000명으로 드러났어\n sentences:\n - '제2장 청년층 노동시장 동향 및 청년내일채움공제의 성과 개괄\n\n □ 본 장에서는 청년내일채움공제 사업의 도입 배경으로 작용한 청년층 노동시장의 현황을 살펴보고, 세부 성과에 앞서 청년내일채움공제가 현 청년고용\n 실태에 미친 개괄적 성과를 평가하고자 한다\n\n 제1절 청년층 노동시장 동향\n\n 1. 청년층 인구구조 변화 추이\n\n □ 고용률과 실업률, 취업자 수 등의 고용지표는 경제 및 노동시장의 상황 뿐 만 아니라, 인구구조 변화에 의해 영향을 받는 만큼, 청년층의\n 노동시장 동향을 파악하기 위해서는 청년층 인구구조의 변화양상을 이해할 필요가 있음 □ 청년층 인구구조 변화 추이\n\n ○ 전체 생산가능인구가 여전히 증가추세에 있는 것에 비해, 청년층(15∼29세) 생산가능인구는 1990년을 기점으로 감소추세에 있음. 2018년\n 청년층 생산가능인구는 전년대비 133천명 감소한 9,149천명임 - 15∼19세, 20∼24세 연령대의 생산가능인구는 감소추세에 있으나, 25∼29세\n 생산가능인구(3,432천명, 전년대비 96천명 증가)는 증가추세에 있으며 향후 2∼3년간 증가추세가 지속될 것으로 전망됨\n\n ○ 청년층 경제활동인구는 2000년대 지속적으로 감소하다가 2013∼2016년 기간 증가추세를 보였으나, 2017년 이후 감소하고 있음. 2018년\n 청년층 경제활동인구는 전년대비 21천명 감소한 4,312천명을 기록하였음'\n - 2. 인구구조변화의 현황
UN 인구국 자료를 분석해보면 2010년 인구와 2050년의 중간 수준 예측치를 기준으로 인구 1,000만 명\n 이상의 국가이면서 2050년까지 생산가능인구(15세~64세)가 감소하는 국가는 198개국중에서 19개국이다. 이 중에서 우리나라는 생산 가능인구(15세~64세)\n 감소율이 세계 6위 수준을 기록할 것으로 분석되었고, 이에 따라 생산가능 인구가 2010년 대비 2050년에는 27%가 줄어들 것으로 예상된다.\n 이에 반하여 노인인구는 236%가 증가할 것으로 예상된다. 우리나라는 생산가능인구 감소율과 노인인구(65세 이상) 증가율이 매우 높은 나라\n 중 하나이며, 동시에 유소년(0~14세) 인구의 감소도 22% 정도 될 것으로 예상하고 있다.
UN의 인구국(局) 자료인 [표 1]을\n 보면, 첫째 현재 우리나라를 포함하여 선진국에서조차도 현재 인구감소를 겪어본 국가가 거의 없어 이것이 가져다 줄 영향에 대한 경험적 지식이\n 거의 전무하다는 점이다.
둘째, 우리나라의 경우에는 무엇보다도 인구감소기로의 전환이 급격하게 이루어지고 있다. 따라서 우리 스스로 이의\n 긍정적ㆍ부정적 영향을 연역적으로 추론해볼 필요가 있다. 이를 위해서는 우리 사회의 전체 변화를 거시적으로 바라보고 정책 수단에 따라 가져다\n 줄 다양한 변화를 살펴볼 수 있는 시뮬레이션(simulation) 도구 및 정책 개발 그리고 집행 체제가 필요하다고 할 수 있다.\n - '7. 해외사례\n\n 플랫폼 노동관계에 특화된 산재보험제도를 별도로 규정하고 있는 포괄적 입법례는 아직 보이지 않는다. 다만, 플랫폼 노동에 관한 미국의 앨라배마\n 주법(SB363, 2018.3.13. 제정)에서는 플랫폼 노동자들에게 일정한 수준의 복지를 허용하기 위한 법적 기초를 만들고 있다(부표 1\n 참조). SB363은 플랫폼 노동자들의 이동성을 고려한 ‘이동 가능 복지제도’(portable benefit plans)를 플랫폼 사업주들이\n 만들면, 이 복지제도를 통해 다수의 플랫폼과 계약을 맺는 플랫폼 노동자들이 복수 사업주 혹은 사업주의 변경에도 불구하고 복지제도를 유지하면서\n 그로부터 복지수혜를 얻을 수 있도록 하고 있다. 이때의 복지제도는 건강보험, 재해보험, 퇴직수당제도 등으로 구성될 수 있고, 플랫폼 노동자가\n 복지계좌를 개설하면 여기에 플랫폼 사업주가 복지기여금을 납부하는 방식을 취하고 있다. 국가에 의한 사회보험제도가 아니고, 플랫폼 사업주에 의한\n 자발적 복지제도의 기초를 구성한 것이기는 하지만, 지방정부가 이를 관리하도록 함으로써 공정성과 객관성을 담보한다. 한국의 산재보험제도에 대입시켜\n 비교할 수 있는 입법례가 아니기는 하지만, 플랫폼 사업주로 하여금 플랫폼 노동자에 대한 일정한 복지제도 형성을 촉구하고 있다는 점, 해당 복지제도에\n 대하여 플랫폼이 일정한 기여금을 납부하도록 하고 있다는 점 등에서 하나의 제도적 모델로 시사가능하다. 한편, 미국의 플로리다, 인디아나, 아이오와,\n 켄터키, 테네시, 텍사스, 유타주 등에서는 플랫폼노동자(경우에 따라서는 가사노동자에 국한)의 의미를 규정하면서, 주법상 재해보험제도 및(또는)\n 실업급여제도의 가입자가 될 수 있도록 하고 있다.'\n- source_sentence: 도매 전력 매매를 주로 한 전력거래체계를 만들었을 때 어느 부문이 안정성 영향을 받을 수 있어\n sentences:\n - '(4) 외환시장\n\n 원/달러 환율 2016년 말 대비 하락\n\n 원/달러 환율은 연초 미 보호무역주의에 대한 우려, 수출 호조, 외국인 증권투자자금 유입 등으로 3월 말까지 상당 폭 하락하였다. 이후 북한\n 관련 리스크 전개 상황, 미 연준의 연내 금리인상 기대 변화 등에 따라 등락하는 모습을 보였다. 10월 들어 북한 리스크 완화, 미 달러화\n 약세 지속, 국내경제의 견실한 성장세 등으로 하락세로 전환하였으며, 2017년 말에는 연중 최저치인 1,070.5원을 기록하였다. 2017년\n 중 미 달러화에 대한 원화의 가치는 2016년 말 대비 12.8% 상승하였다. 한편 원/엔 환율(100엔당)은 미 연준 금리인상 기대변화,\n 북한 리스크 등에 영향을 받으며 등락을 지속하다가 10월 들어 원화가 상대적으로 강세를 보이면서 하락하였다. 2017년 말 원화의 엔화에 대한\n 가치는 100엔당 949.2원으로 2016년 말 대비 9.1% 상승하였다. 원/달러 환율의 일중 및 전일대비 변동폭은 연평균 각각 5.5원,\n 4.4원을 기록하여 2016년(각각 7.5원 및 6.0원)에 비해 축소되었다. 외환스왑레이트 하락, 통화스왑금리 상승\n\n 외환스왑레이트(3개월물)는 외화자금수요 확대 등으로 2016년 말 대비 0.61% 포인트 하락한 -0.63%를 기록하였다. 통화스왑금리(3년물)는\n 경제지표 호조 등에 따른 국고채 금리 상승 등으로 2016년 말 대비 0.37% 포인트 상승한 1.57%를 기록하였다.'\n - '4. 국내 전력거래 형태 전망\n\n 앞서 살펴보았듯이, 구조개편을 단행한 외국의 전력거래 형태는 대체로 도매 전력거래 위주, 소매 전력거래 위주, 계약위주 등 세 가지로 구분되어\n 있다. 국내 전력산업의 현재 여건을 고려할 경우 가장 가능성이 높은 전력거래 형태는 도매 전력거래 위주의 거래형태일 것이다. 만약 도매 전력거래\n 위주의 전력거래체계를 구축하는 경우 가격과 공급의 안정성에 상당한 영향을 미칠 것으로 보인다. 하지만 규제계약(Vesting Contract)을\n 포함하여 장단기 계약물량을 확대함으로써 현물시장의 가격 및 공급 변동성을 완화할 수 있는 대안이 존재한다. 만약 전력시장에서의 가격과 공급\n 안정성 측면을 염두에 둔다면 소매 전력거래 형태 또는 장단기 계약 위주의 전력거래 형태를 고려해 볼 수 있다. 소매 전력거래 형태에서는 주로\n 기존의 수직통합체제 또는 이미 발전과 판매가 송배전 부문으로부터 분리된 경우라면 발전과 판매의 통합과 이로 인한 규모의 확대에 따라 유효경쟁의\n 약화 가능성이 문제가 될 수 있다. 이렇게 통합된 기업의 소매시장에서의 시장지배력 문제가 존재하기 때문에 순수 판매회사 또는 신규기업과의 실질적인\n 경쟁이 가능한 방향으로 제도적 장치를 마련할 필요가 있다.'\n - 'Ⅲ. 우리나라 전력시장에서의 유연성 자원\n\n 1. 우리나라 전력시장의 현황\n\n 우리나라의 전력시장은 전력산업의 효율성 향상을 위하여 2001년 전력산업구조개편을 통해 독점사업자였던 한국전력공사로부터 발전부문을 분리시켜\n 발전자회사로 재편시키고, 발전부문에 대한 경쟁체제를 도입하면서 출범하였다. 우리나라 전력시장은 소비자가 직접 거래에 참여하는 소매시장이 아닌\n 발전회사와 판매회사가 참여하는 도매시장으로, 다수의 발전회사가 존재하는 공급측과는 달리 수요측은 단일 구매자인 한국전력공사만 존재하는 구매독점\n 형태로 이루어져 있다. 또한 설비용량이 20MW를 초과하는 상업용 발전기는 전력시장을 통해서만 거래가 가능한 의무적 시장이다.'\n- source_sentence: 해외 파생상품시장에서 거래되는 경우는 어떤 파생상품으로 분류할 수 있어\n sentences:\n - '3) 연금 세제\n\n ▩ 근퇴법 상 연금수령을 활성화하기 위한 강제조항이 부재한 상황에서 세제체계(소득세법)에 따라 수령방법이 결정되는 구조임. ○ 제도적으로 일시금\n 수령을 방지하거나 연금수령을 강제하는 규정이 부재한 상황에서 수령 방법은 주로 세금부담에 따라 결정될 수밖에 없음. ○ 일시금 수령을 경감시키기\n 위해 2016년 소득세법을 개정하였으나 개정된 소득세법 조차도 일시금과 연금수령 간 세금부담 차이가 크지 않아 일시금 수령을 방지하지 못하는\n 한계가 존재함. ▩ 연금과 일시금 수령 간 세제차이가 크지 않은 주요 원인은 일시금 수령시 제공하는 공제혜택이 크기 때문임. ○ 국내 퇴직연금시장에서\n 일시금 수령이 만연한 것은 연금수령 시 부담하는 세율이 높아서가 아니라 일시금 수령 시 부담하는 세율이 낮기 때문임. ○ 일시금 수령의 세금부담이\n 높지 않은 이유는 일시금 수령 시에도 다수의 공제혜택(근속연수공제+환산급여공제)이 제공되고 혜택의 규모도 크기 때문임. 
○ 즉 연금수령 시에는\n 일시금 수령 시 부담하는 세금의 70%만을 부담하도록 규정하고(소득세법 제129조(원천징수세율)제1항제5의336)) 있으나, 일시금 수령 시\n 세금부담이 작기 때문에 연금수령 시 추가 제공되는 세제혜택의 크기가 미미하게 됨. ○ 특히 연금수령 기간에 따른 세금차이가 없어 연금으로 수령하더라도\n 단기간 연금(예, 10년)을 수령하는 유인을 제공함.'\n - □ 장외파생 권역별 대표 상품을 살펴보면 금리관련상품 중에는 금리스왑(Interest Rate Swap:IRS)이, 통화관련 상품 중에서는\n 통화선도계약이, 신용관련거래에서는 신용파산스왑(Credit Default Swap:CDS)이 가장 높은 비중을 차지
○ 금리스왑은 금리관련\n 장외파생상품시장의 91.2%를 차지하고 있으며, 신용관련상품의 경우 신용파산스왑과 총수익스왑(Total Return Swap:TRS)만이 거래
○\n 통화선도는 대부분 NDF(Non-Deliverable Forward)의 형태로 거래
○ 장외파생상품 거래는 일대일 계약을 기반으로 하고\n 거래당사자의 높은 신용도를 요구하기 때문에 신용도가 낮은 개인 또는 금융기관은 참가하기가 어려운 실정
○ 특히, 신용관련거래는 다른 금융기관에\n 비해 상대적으로 높은 신용도를 가지고 있는 은행과 증권사를 통해서만 거래가 이루어지고 있는 것으로 파악\n - '제5조 (파생상품) ① 이 법에서 ˝파생상품˝이란 다음 각 호의 어느 하나에 해당하는 계약상의 권리를 말한다.\n\n 1. 기초자산이나 기초자산의 가격ㆍ이자율ㆍ지표ㆍ단위 또는 이를 기초로 하는 지수 등에 의하여 산출된 금전등을 장래의 특정 시점에 인도할 것을\n 약정하는 계약\n\n 2. 당사자 어느 한쪽의 의사표시에 의하여 기초자산이나 기초자산의 가격ㆍ이자율ㆍ지표ㆍ단위 또는 이를 기초로 하는 지수 등에 의하여 산출된 금전등을\n 수수하는 거래를 성립시킬 수 있는 권리를 부여하는 것을 약정하는 계약\n\n 3. 장래의 일정기간 동안 미리 정한 가격으로 기초자산이나 기초자산의 가격ㆍ이자율ㆍ지표ㆍ단위 또는 이를 기초로 하는 지수 등에 의하여 산출된\n 금전등을 교환할 것을 약정하는 계약\n\n ② 이 법에서 ˝장내파생상품˝이란 파생상품으로서 파생상품시장에서 거래되는 것 또는 해외 파생상품시장(파생상품시장과 유사한 시장으로서 해외에\n 있는 시장과 대통령령으로 정하는 해외 파생상품거래가 이루어지는 시장을 말한다)에서 거래되는 것을 말한다. ③ 이 법에서 ˝장외파생상품˝이란\n 파생상품으로서 장내파생상품이 아닌 것을 말한다. ④ 제1항 각 호의 어느 하나에 해당하는 계약 중 매매계약이 아닌 계약의 체결은 이 법을 적용함에\n 있어서 매매계약의 체결로 본다.'\n- source_sentence: 어디서 자금세탁 전제범죄에서 비영리단체 관련 자금세탁범죄를 찾아내는 방식을 선택했어\n sentences:\n - 'IV. 비영리단체 분야의 범죄유형 및 사례\n\n 1. 국내사례와 범죄유형\n\n □ 한국은 아직 비영리단체에 대한 명확한 개념 구분이 정립되어 있지 않은 실정으로 이에 따라 그동안 한국에서는 비영리단체 관련범죄에 대한 인식이\n 제대로 이루어지지 않음. -비영리단체 관련 범죄에 있어서도 비영리단체에 대한 명확한 개념 구분 없이 판결문이 작성되어 비영리단체 관련 자금세탁범죄를\n 조사하는데 있어서 큰 장애로 작용함. -국내 판례에서 비영리단체와 관련하여 자금세탁이라는 용어를 직접 사용한 경우는 없으며 이에 따라 부득이\n 자금세탁 전제범죄를 통하여 비영리단체 관련 자금세탁범죄를 조사하는 방법을 택함.'\n - 'VI. 비영리단체를 통한 테러자금조달방지를 위한 제도개선방안\n\n □ FATF는 2001년 10월 테러자금조달에 대한 특별권고 8항(Special Recommendationson Terrorist Financing)을\n 통하여 비영리단체에 대한 관리감독을 강화하고 관련 법규를 정비할 것을 권고하였음. -우리나라에서 비영리단체를 통한 자금세탁을 이용해서 테러자금이\n 조달될 가능성은 매우 낮은 것으로 평가되나 자금세탁의 우회로를 차단하고, 또한 예방적인 조치를 취해 국제적으로 자금세탁을 통한 불법적인 테러자금조달\n 방지 노력에 적극 부응하기 위해서는 비영리단체에 대한 자금세탁 방지제도 도입에 적극 참여해야 할 것임.'\n - '(2) 전북의 풍력산업 약점\n\n 전북의 풍력산업은 내부환경관점에서 다음과 같은 약점을 보유하고 있다. 첫째, 두산(창원), 유니슨(사천), 효성(창원), 한진(양산) 등 풍력터빈시스템업체는\n 모두 경남권에 위치하고 있으며, 현재 경남 이외 지역에서는 MW급 이상 풍력용 터빈시스템업체가 존재하지 않는다. 다만 최근 터빈의 대형화에\n 따라, 터빈의 무게 및 부피 등에 따른 운송에 한계가 존재하고 상당기간 고정식 형태의 해상풍력발전단지 구축이 중심이 될 것으로 판단됨에 따라,\n 부유식 해상풍력단지개발이 시작되기 전까지는 GW급 해상풍력단지 개발계획에 따라 서해안권으로 시스템업체의 이전가능성은 일부 존재한다. 둘째,\n 해상풍력 관련 연구/생산인력 부족 역시 장기적 해상풍력기술의 지속성 관점에서 시급한 문제이다. 현재 전북은 해상풍력터빈과 관련된 기술은 없으나,\n 휴먼컴퍼지트와 재료연구원 풍력핵심기술연구센터를 통해 해상풍력 블레이드 및 재료 등에 대한 기술이 축적되어 있다. 이외에 하부구조물 및 타워\n 관련 기술 역시 에드벡트가 석션버켓(suction bucket)방식의 하부구조물 관련 기술을 보유하고 있고 현재 군산공장을 리모델링 중인 CS윈드는\n 세계 최고수준의 타워제작기술을 확보하고 있다. 따라서 전북은 블레이드, 재료, 타워 및 하부구조물 등과 관련된 기술을 확보하고 있다.'\n- source_sentence: 전남지역의 석유와 화학제품은 왜 수출이 늘어나는 경향을 보였어\n sentences:\n - '수출 증가세 지속\n\n 1/4분기 중 수출은 전년동기대비 증가흐름을 지속하였다. 품목별로 보면 석유제품, 석유화학, 철강, 선박, 반도체, 자동차 등 대다수 품목에서\n 증가하였다. 석유제품은 글로벌 경기회복에 따른 에너지 수요 증가와 국제유가 급등으로 수출단가가 높은 상승세를 지속하면서 증가하였다. 석유화학도\n 중국, 아세안을 중심으로 합성수지, 고무 등의 수출이 큰 폭 증가한 데다 고유가로 인한 수출가격도 동반 상승하면서 증가세를 이어갔다. 철강은\n 건설, 조선 등 글로벌 전방산업의 수요 증대, 원자재가격 상승 및 중국 감산 등에 따른 수출단가 상승 등에 힘입어 증가세를 이어갔다. 선박은\n 1/4분기 중 인도물량이 확대됨에 따라 증가하였다. 반도체는 자동차 등 전방산업의 견조한 수요가 이어지는 가운데 전년동기대비로 높은 단가가\n 지속되면서 증가하였다. 자동차는 차량용 반도체 수급차질이 지속되었음에도 불구하고 글로벌 경기회복 흐름에 따라 수요가 늘어나면서 전년동기대비\n 소폭 증가하였다. 모니터링 결과 향후 수출은 증가세가 지속될 것으로 전망되었다. 석유화학 및 석유정제는 수출단가 상승과 전방산업의 수요확대\n 기조가 이어지면서 증가할 전망이다. 철강은 주요국 경기회복과 중국, 인도 등의 인프라 투자 확대 등으로 양호한 흐름을 이어갈 전망이다. 반도체는\n 글로벌 스마트폰 수요 회복, 디지털 전환 기조 등으로 견조한 증가세를 지속할 것으로 보인다. 자동차는 차량용 반도체 공급차질이 점차 완화되고\n 미국, 신흥시장을 중심으로 수요회복이 본격화됨에 따라 소폭 증가할 전망이다. 선박은 친환경 선박수요 지속, 글로별 교역 신장 등에도 불구하고\n 2021년 2/4분기 집중되었던 인도물량의 기저효과로 인해 감소할 것으로 보인다.'\n - '(3) 금융기관 여수신\n\n 은행 수신 큰 폭 확대\n\n 은행 수신은 2019년에 비해 증가폭이 크게 확대되었다. 수시입출식예금은 불확실성 증대에 따른 가계 및 기업의 예비자금 확보 등의 영향으로\n 증가 규모가 전년대비 3배가량 확대되었다. 반면 정기예금은 예금금리 하락, 예대율 및 LCR 규제 완화에 따른 은행의 정기예금 유치 유인 축소\n 등에 기인하여 감소로 전환하였다. 자산운용사 수신은 증가폭이 축소되었다. MMF는 꾸준한 증가세를 유지하였으나 주식형 및 채권형 펀드는 개인투자자의\n 주식 직접투자증가, 신용증권에 대한 시장 경계감 확대 등으로 감소로 전환하였다. 
    또한 기타펀드는 2019년 중 일부 사모펀드 손실 및 환매중단 사태, 사모펀드 일반투자자 요건 강화 등으로 증가 규모가 절반 수준으로 축소되었다. 한편 신용협동기구 등 비은행예금취급기관 수신은 대체로 2019년과 비슷한 증가 규모를 나타내었다.'
  - '(2) 전남지역

    2013년중 전남지역 수출은 전년대비 1.2% 감소로 전환하였다. 품목별로는 석유(+9.3% → +3.8%) 및 화학제품(+1.2% → +7.1%)이 중국 등 해외수요확대로 증가세를 지속하였으나 철강금속(+1.8% → -8.6%)은 글로벌 공급과잉 및 중국의 저가 철강수출 확대로, 선박(+7.6% → -49.2%)은 수주물량이 급격히 줄어들면서 감소로 전환하였다. 전남지역 수입은 원유, 화학제품, 철강금속 등의 수입이 줄면서 전년대비 7.4% 감소로 전환하였다.'
---

# SentenceTransformer based on BAAI/bge-m3

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-m3](https://huggingface.co/BAAI/bge-m3). It maps sentences and paragraphs to a 1024-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [BAAI/bge-m3](https://huggingface.co/BAAI/bge-m3)
- **Maximum Sequence Length:** 1024 tokens
- **Output Dimensionality:** 1024 dimensions
- **Similarity Function:** Cosine Similarity

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 1024, 'do_lower_case': False}) with Transformer model: XLMRobertaModel
  (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
```
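The `Pooling` module above shows that this model uses CLS-token pooling followed by L2 normalization. For illustration only, here is a rough sketch of what `encode` does internally using plain `transformers`; this is for understanding, not a replacement for the Sentence Transformers API, and the input sentence is arbitrary:

```python
import torch
from transformers import AutoModel, AutoTokenizer

model_id = "seongil-dn/bge-m3-kor-retrieval-bs1024-checkpoint-236"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModel.from_pretrained(model_id)

batch = tokenizer(["전남지역 수출 동향"], padding=True, truncation=True,
                  max_length=1024, return_tensors="pt")
with torch.no_grad():
    last_hidden = model(**batch).last_hidden_state   # (batch, seq_len, 1024)

cls_embedding = last_hidden[:, 0]                    # CLS-token pooling, as configured above
embedding = torch.nn.functional.normalize(cls_embedding, p=2, dim=1)  # the Normalize() step
```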
## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.
```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("seongil-dn/bge-m3-kor-retrieval-bs1024-checkpoint-236")
# Run inference
sentences = [
    '전남지역의 석유와 화학제품은 왜 수출이 늘어나는 경향을 보였어',
    '(2) 전남지역\n2013년중 전남지역 수출은 전년대비 1.2% 감소로 전환하였다. 품목별로는 석유(+9.3% → +3.8%) 및 화학제품(+1.2% → +7.1%)이 중국 등 해외수요확대로 증가세를 지속하였으나 철강금속(+1.8% → -8.6%)은 글로벌 공급과잉 및 중국의 저가 철강수출 확대로, 선박(+7.6% → -49.2%)은 수주물량이 급격히 줄어들면서 감소로 전환하였다. 전남지역 수입은 원유, 화학제품, 철강금속 등의 수입이 줄면서 전년대비 7.4% 감소로 전환하였다.',
    '수출 증가세 지속\n1/4분기 중 수출은 전년동기대비 증가흐름을 지속하였다. 품목별로 보면 석유제품, 석유화학, 철강, 선박, 반도체, 자동차 등 대다수 품목에서 증가하였다. 석유제품은 글로벌 경기회복에 따른 에너지 수요 증가와 국제유가 급등으로 수출단가가 높은 상승세를 지속하면서 증가하였다. 석유화학도 중국, 아세안을 중심으로 합성수지, 고무 등의 수출이 큰 폭 증가한 데다 고유가로 인한 수출가격도 동반 상승하면서 증가세를 이어갔다. 철강은 건설, 조선 등 글로벌 전방산업의 수요 증대, 원자재가격 상승 및 중국 감산 등에 따른 수출단가 상승 등에 힘입어 증가세를 이어갔다. 선박은 1/4분기 중 인도물량이 확대됨에 따라 증가하였다. 반도체는 자동차 등 전방산업의 견조한 수요가 이어지는 가운데 전년동기대비로 높은 단가가 지속되면서 증가하였다. 자동차는 차량용 반도체 수급차질이 지속되었음에도 불구하고 글로벌 경기회복 흐름에 따라 수요가 늘어나면서 전년동기대비 소폭 증가하였다. 모니터링 결과 향후 수출은 증가세가 지속될 것으로 전망되었다. 석유화학 및 석유정제는 수출단가 상승과 전방산업의 수요확대 기조가 이어지면서 증가할 전망이다. 철강은 주요국 경기회복과 중국, 인도 등의 인프라 투자 확대 등으로 양호한 흐름을 이어갈 전망이다. 반도체는 글로벌 스마트폰 수요 회복, 디지털 전환 기조 등으로 견조한 증가세를 지속할 것으로 보인다. 자동차는 차량용 반도체 공급차질이 점차 완화되고 미국, 신흥시장을 중심으로 수요회복이 본격화됨에 따라 소폭 증가할 전망이다. 선박은 친환경 선박수요 지속, 글로별 교역 신장 등에도 불구하고 2021년 2/4분기 집중되었던 인도물량의 기저효과로 인해 감소할 것으로 보인다.',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 1024]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
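Because this checkpoint is finetuned for Korean retrieval, a common usage pattern is asymmetric search: embed a short question and rank candidate passages by cosine similarity. A minimal sketch follows; the query and passages are placeholders, not data from the training set:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("seongil-dn/bge-m3-kor-retrieval-bs1024-checkpoint-236")

query = "전남지역 수출이 감소한 품목은 뭐야"                    # placeholder query
passages = ["첫 번째 후보 문서 ...", "두 번째 후보 문서 ..."]   # placeholder passages

query_emb = model.encode(query, convert_to_tensor=True)
passage_emb = model.encode(passages, convert_to_tensor=True)

# Embeddings are L2-normalized by the Normalize module, so these scores are cosine similarities.
hits = util.semantic_search(query_emb, passage_emb, top_k=2)[0]
for hit in hits:
    print(hit["corpus_id"], round(hit["score"], 4))
```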
## Training Details

### Training Hyperparameters
#### Non-Default Hyperparameters

- `per_device_train_batch_size`: 512
- `learning_rate`: 3e-05
- `num_train_epochs`: 5
- `warmup_ratio`: 0.05
- `fp16`: True
- `batch_sampler`: no_duplicates

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: no
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 512
- `per_device_eval_batch_size`: 8
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 3e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 5
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.05
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: True
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: True
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`:
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `eval_use_gather_object`: False
- `batch_sampler`: no_duplicates
- `multi_dataset_batch_sampler`: proportional

</details>
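For readers who want to reproduce a comparable run, the configuration above maps onto the Sentence Transformers v3 trainer roughly as follows. This is a sketch, not the author's actual training script: the training-pairs file and its column layout, the `mini_batch_size`, and the output directory are all assumptions.

```python
from datasets import load_dataset
from sentence_transformers import (SentenceTransformer, SentenceTransformerTrainer,
                                   SentenceTransformerTrainingArguments)
from sentence_transformers.losses import CachedMultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers

model = SentenceTransformer("BAAI/bge-m3")
# Assumed format: one (anchor, positive) pair per row, as expected by MNRL-style losses.
train_dataset = load_dataset("json", data_files="kor_retrieval_pairs.jsonl", split="train")

# The cached variant of MultipleNegativesRankingLoss trades compute for memory,
# which is what makes a 512-per-device batch size practical.
loss = CachedMultipleNegativesRankingLoss(model, mini_batch_size=32)  # mini_batch_size assumed

args = SentenceTransformerTrainingArguments(
    output_dir="bge-m3-kor-retrieval",           # assumed
    per_device_train_batch_size=512,
    learning_rate=3e-5,
    num_train_epochs=5,
    warmup_ratio=0.05,
    fp16=True,
    batch_sampler=BatchSamplers.NO_DUPLICATES,   # matches `batch_sampler: no_duplicates`
)

SentenceTransformerTrainer(model=model, args=args,
                           train_dataset=train_dataset, loss=loss).train()
```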
### Training Logs
<details><summary>Click to expand</summary>

| Epoch | Step | Training Loss |
|:------:|:----:|:-------------:|
| 0.0085 | 1 | 2.0476 |
| 0.0171 | 2 | 2.0595 |
| 0.0256 | 3 | 2.0267 |
| 0.0342 | 4 | 2.0971 |
| 0.0427 | 5 | 2.2171 |
| 0.0513 | 6 | 2.287 |
| 0.0598 | 7 | 2.0867 |
| 0.0684 | 8 | 1.9498 |
| 0.0769 | 9 | 1.569 |
| 0.0855 | 10 | 1.3313 |
| 0.0940 | 11 | 1.4122 |
| 0.1026 | 12 | 1.3425 |
| 0.1111 | 13 | 1.1936 |
| 0.1197 | 14 | 0.8012 |
| 0.1282 | 15 | 0.8862 |
| 0.1368 | 16 | 1.193 |
| 0.1453 | 17 | 0.9771 |
| 0.1538 | 18 | 0.3887 |
| 0.1624 | 19 | 0.363 |
| 0.1709 | 20 | 0.3092 |
| 0.1795 | 21 | 0.2692 |
| 0.1880 | 22 | 0.2386 |
| 0.1966 | 23 | 0.2266 |
| 0.2051 | 24 | 0.233 |
| 0.2137 | 25 | 0.2214 |
| 0.2222 | 26 | 0.2038 |
| 0.2308 | 27 | 0.2015 |
| 0.2393 | 28 | 0.1772 |
| 0.2479 | 29 | 0.1697 |
| 0.2564 | 30 | 0.1718 |
| 0.2650 | 31 | 0.2456 |
| 0.2735 | 32 | 0.5238 |
| 0.2821 | 33 | 0.5308 |
| 0.2906 | 34 | 0.5111 |
| 0.2991 | 35 | 0.3931 |
| 0.3077 | 36 | 0.3414 |
| 0.3162 | 37 | 0.2704 |
| 0.3248 | 38 | 0.2949 |
| 0.3333 | 39 | 0.3082 |
| 0.3419 | 40 | 0.3755 |
| 0.3504 | 41 | 0.3127 |
| 0.3590 | 42 | 0.3756 |
| 0.3675 | 43 | 0.3564 |
| 0.3761 | 44 | 0.3905 |
| 0.3846 | 45 | 0.377 |
| 0.3932 | 46 | 0.3043 |
| 0.4017 | 47 | 0.3237 |
| 0.4103 | 48 | 0.4035 |
| 0.4188 | 49 | 0.4522 |
| 0.4274 | 50 | 0.4392 |
| 0.4359 | 51 | 0.4482 |
| 0.4444 | 52 | 0.3586 |
| 0.4530 | 53 | 0.3154 |
| 0.4615 | 54 | 0.4053 |
| 0.4701 | 55 | 0.5846 |
| 0.4786 | 56 | 0.4372 |
| 0.4872 | 57 | 0.6201 |
| 0.4957 | 58 | 0.5278 |
| 0.5043 | 59 | 0.4844 |
| 0.5128 | 60 | 0.5817 |
| 0.5214 | 61 | 0.3765 |
| 0.5299 | 62 | 0.4785 |
| 0.5385 | 63 | 0.5724 |
| 0.5470 | 64 | 0.5375 |
| 0.5556 | 65 | 0.5362 |
| 0.5641 | 66 | 0.4731 |
| 0.5726 | 67 | 0.4514 |
| 0.5812 | 68 | 0.4563 |
| 0.5897 | 69 | 0.4198 |
| 0.5983 | 70 | 0.4086 |
| 0.6068 | 71 | 0.3612 |
| 0.6154 | 72 | 0.3463 |
| 0.6239 | 73 | 0.6261 |
| 0.6325 | 74 | 0.6283 |
| 0.6410 | 75 | 0.4635 |
| 0.6496 | 76 | 0.463 |
| 0.6581 | 77 | 0.4075 |
| 0.6667 | 78 | 0.3797 |
| 0.6752 | 79 | 0.2769 |
| 0.6838 | 80 | 0.3353 |
| 0.6923 | 81 | 0.2295 |
| 0.7009 | 82 | 0.4316 |
| 0.7094 | 83 | 0.9861 |
| 0.7179 | 84 | 0.9684 |
| 0.7265 | 85 | 0.9883 |
| 0.7350 | 86 | 0.8865 |
| 0.7436 | 87 | 0.8248 |
| 0.7521 | 88 | 0.7973 |
| 0.7607 | 89 | 0.8465 |
| 0.7692 | 90 | 0.7664 |
| 0.7778 | 91 | 0.7643 |
| 0.7863 | 92 | 0.7665 |
| 0.7949 | 93 | 0.7348 |
| 0.8034 | 94 | 0.7493 |
| 0.8120 | 95 | 0.6115 |
| 0.8205 | 96 | 0.6233 |
| 0.8291 | 97 | 0.6435 |
| 0.8376 | 98 | 0.5581 |
| 0.8462 | 99 | 0.542 |
| 0.8547 | 100 | 0.5571 |
| 0.8632 | 101 | 0.502 |
| 0.8718 | 102 | 0.5375 |
| 0.8803 | 103 | 0.4952 |
| 0.8889 | 104 | 0.4873 |
| 0.8974 | 105 | 0.4599 |
| 0.9060 | 106 | 0.4536 |
| 0.9145 | 107 | 0.4479 |
| 0.9231 | 108 | 0.384 |
| 0.9316 | 109 | 0.3523 |
| 0.9402 | 110 | 0.369 |
| 0.9487 | 111 | 0.3422 |
| 0.9573 | 112 | 0.3698 |
| 0.9658 | 113 | 0.3625 |
| 0.9744 | 114 | 0.3736 |
| 0.9829 | 115 | 0.4313 |
| 0.9915 | 116 | 0.4605 |
| 1.0 | 117 | 0.2948 |
| 1.0085 | 118 | 0.7391 |
| 1.0171 | 119 | 0.6622 |
| 1.0256 | 120 | 0.6917 |
| 1.0342 | 121 | 0.7963 |
| 1.0427 | 122 | 0.7815 |
| 1.0513 | 123 | 0.6719 |
| 1.0598 | 124 | 0.6098 |
| 1.0684 | 125 | 0.549 |
| 1.0769 | 126 | 0.7212 |
| 1.0855 | 127 | 0.6381 |
| 1.0940 | 128 | 0.7424 |
| 1.1026 | 129 | 0.6822 |
| 1.1111 | 130 | 0.6921 |
| 1.1197 | 131 | 0.5022 |
| 1.1282 | 132 | 0.578 |
| 1.1368 | 133 | 0.8139 |
| 1.1453 | 134 | 0.6167 |
| 1.1538 | 135 | 0.1836 |
| 1.1624 | 136 | 0.1853 |
| 1.1709 | 137 | 0.1628 |
| 1.1795 | 138 | 0.1464 |
| 1.1880 | 139 | 0.1308 |
| 1.1966 | 140 | 0.1273 |
| 1.2051 | 141 | 0.1414 |
| 1.2137 | 142 | 0.138 |
| 1.2222 | 143 | 0.1268 |
| 1.2308 | 144 | 0.1348 |
| 1.2393 | 145 | 0.111 |
| 1.2479 | 146 | 0.1069 |
| 1.2564 | 147 | 0.1122 |
| 1.2650 | 148 | 0.1703 |
| 1.2735 | 149 | 0.405 |
| 1.2821 | 150 | 0.3876 |
| 1.2906 | 151 | 0.378 |
| 1.2991 | 152 | 0.2633 |
| 1.3077 | 153 | 0.2263 |
| 1.3162 | 154 | 0.1748 |
| 1.3248 | 155 | 0.2016 |
| 1.3333 | 156 | 0.2166 |
| 1.3419 | 157 | 0.2798 |
| 1.3504 | 158 | 0.2295 |
| 1.3590 | 159 | 0.2805 |
| 1.3675 | 160 | 0.2619 |
| 1.3761 | 161 | 0.3006 |
| 1.3846 | 162 | 0.2843 |
| 1.3932 | 163 | 0.2244 |
| 1.4017 | 164 | 0.2361 |
| 1.4103 | 165 | 0.3025 |
| 1.4188 | 166 | 0.3443 |
| 1.4274 | 167 | 0.3329 |
| 1.4359 | 168 | 0.3467 |
| 1.4444 | 169 | 0.2748 |
| 1.4530 | 170 | 0.2304 |
| 1.4615 | 171 | 0.3125 |
| 1.4701 | 172 | 0.478 |
| 1.4786 | 173 | 0.3085 |
| 1.4872 | 174 | 0.4337 |
| 1.4957 | 175 | 0.3936 |
| 1.5043 | 176 | 0.3455 |
| 1.5128 | 177 | 0.4205 |
| 1.5214 | 178 | 0.2752 |
| 1.5299 | 179 | 0.36 |
| 1.5385 | 180 | 0.4347 |
| 1.5470 | 181 | 0.3949 |
| 1.5556 | 182 | 0.4072 |
| 1.5641 | 183 | 0.3633 |
| 1.5726 | 184 | 0.3532 |
| 1.5812 | 185 | 0.3451 |
| 1.5897 | 186 | 0.3242 |
| 1.5983 | 187 | 0.3122 |
| 1.6068 | 188 | 0.2845 |
| 1.6154 | 189 | 0.2815 |
| 1.6239 | 190 | 6.9159 |
| 1.6325 | 191 | 7.9604 |
| 1.6410 | 192 | 6.5821 |
| 1.6496 | 193 | 3.9177 |
| 1.6581 | 194 | 1.6951 |
| 1.6667 | 195 | 0.5367 |
| 1.6752 | 196 | 0.2935 |
| 1.6838 | 197 | 0.3295 |
| 1.6923 | 198 | 0.2212 |
| 1.7009 | 199 | 0.335 |
| 1.7094 | 200 | 0.7829 |
| 1.7179 | 201 | 0.7884 |
| 1.7265 | 202 | 0.7921 |
| 1.7350 | 203 | 0.7342 |
| 1.7436 | 204 | 0.6092 |
| 1.7521 | 205 | 0.6014 |
| 1.7607 | 206 | 0.6414 |
| 1.7692 | 207 | 0.5842 |
| 1.7778 | 208 | 0.5916 |
| 1.7863 | 209 | 0.5993 |
| 1.7949 | 210 | 0.5658 |
| 1.8034 | 211 | 0.6013 |
| 1.8120 | 212 | 0.4769 |
| 1.8205 | 213 | 0.4801 |
| 1.8291 | 214 | 0.5087 |
| 1.8376 | 215 | 0.436 |
| 1.8462 | 216 | 0.4398 |
| 1.8547 | 217 | 0.4391 |
| 1.8632 | 218 | 0.419 |
| 1.8718 | 219 | 0.4338 |
| 1.8803 | 220 | 0.395 |
| 1.8889 | 221 | 0.4063 |
| 1.8974 | 222 | 0.375 |
| 1.9060 | 223 | 0.3655 |
| 1.9145 | 224 | 0.3637 |
| 1.9231 | 225 | 0.3098 |
| 1.9316 | 226 | 0.2782 |
| 1.9402 | 227 | 0.2941 |
| 1.9487 | 228 | 0.275 |
| 1.9573 | 229 | 0.3018 |
| 1.9658 | 230 | 0.2971 |
| 1.9744 | 231 | 0.3108 |
| 1.9829 | 232 | 0.3808 |
| 1.9915 | 233 | 0.4067 |
| 2.0 | 234 | 0.2424 |
| 2.0085 | 235 | 0.6453 |
| 2.0171 | 236 | 0.5577 |

</details>
### Framework Versions
- Python: 3.10.12
- Sentence Transformers: 3.2.1
- Transformers: 4.44.2
- PyTorch: 2.3.1+cu121
- Accelerate: 1.1.1
- Datasets: 2.21.0
- Tokenizers: 0.19.1

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### CachedMultipleNegativesRankingLoss
```bibtex
@misc{gao2021scaling,
    title={Scaling Deep Contrastive Learning Batch Size under Memory Limited Setup},
    author={Luyu Gao and Yunyi Zhang and Jiawei Han and Jamie Callan},
    year={2021},
    eprint={2101.06983},
    archivePrefix={arXiv},
    primaryClass={cs.LG}
}
```
---
datasets:
- jrc-acquis
language: Deutsch
tags:
- classification Deutsch model
widget:
- text: 'BESCHLUSS DES RATES vom 17. Dezember 1999 über den Abschluß des Abkommens in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft (1999/873/EG) DER RAT DER EUROPÄISCHEN UNION - gestützt auf den Vertrag zur Gründung der Europäischen Gemeinschaft, insbesondere auf Artikel 133 in Verbindung mit Artikel 300 Absatz 2 Unterabsatz 1, auf Vorschlag der Kommission, in Erwägung nachstehender Gründe: (1) Zwischen der Europäischen Gemeinschaft und der Tunesischen Republik wurde ein Abkommen in Form eines Briefwechsels ausgehandelt, um die Geltungsdauer der Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft, die in Artikel 3 des Protokolls Nr. 1 des Europa-Mittelmeer-Abkommens zur Gründung einer Assoziation zwischen der Europäischen Gemeinschaft und ihren Mitgliedstaaten einerseits und der Tunesischen Republik andererseits(1) vorgesehen ist, für die Zeit vom 1. Januar bis zum 31. Dezember 2000 zu verlängern. (2) Das Abkommen sollte im Namen der Gemeinschaft genehmigt werden - BESCHLIESST: Artikel 1 Das Abkommen in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft wird im Namen der Gemeinschaft genehmigt. Der Wortlaut des Abkommens ist diesem Beschluß beigefügt. Artikel 2 Der Präsident des Rates wird ermächtigt, die Person zu bestellen, die befugt ist, das Abkommen rechtsverbindlich für die Gemeinschaft zu unterzeichnen. Geschehen zu Brüssel am 17. Dezember 1999. Im Namen des Rates Der Präsident K. HEMILÄ (1) ABl. L 97 vom 30.3.1998, S. 1.'
---

# legal_t5_small_cls_de model

Model for classification of legal text written in German (Deutsch). It was first released in [this repository](https://github.com/agemagician/LegalTrans). This model is trained on three parallel corpora from JRC-ACQUIS.

## Model description

legal_t5_small_cls_de is based on the `t5-small` model and was trained on a large corpus of parallel text. This is a smaller model, which scales the baseline model of t5 down by using `dmodel = 512`, `dff = 2,048`, 8-headed attention, and only 6 layers each in the encoder and decoder. This variant has about 60 million parameters.

## Intended uses & limitations

The model could be used for classification of legal texts written in German.

### How to use

Here is how to use this model to classify legal text written in German in PyTorch:

```python
from transformers import AutoTokenizer, AutoModelWithLMHead, TranslationPipeline

pipeline = TranslationPipeline(
    model=AutoModelWithLMHead.from_pretrained("SEBIS/legal_t5_small_cls_de"),
    tokenizer=AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path="SEBIS/legal_t5_small_cls_de",
        do_lower_case=False,
        skip_special_tokens=True,
    ),
    device=0,
)

de_text = "BESCHLUSS DES RATES vom 17. Dezember 1999 über den Abschluß des Abkommens in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft (1999/873/EG) DER RAT DER EUROPÄISCHEN UNION - gestützt auf den Vertrag zur Gründung der Europäischen Gemeinschaft, insbesondere auf Artikel 133 in Verbindung mit Artikel 300 Absatz 2 Unterabsatz 1, auf Vorschlag der Kommission, in Erwägung nachstehender Gründe: (1) Zwischen der Europäischen Gemeinschaft und der Tunesischen Republik wurde ein Abkommen in Form eines Briefwechsels ausgehandelt, um die Geltungsdauer der Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft, die in Artikel 3 des Protokolls Nr. 1 des Europa-Mittelmeer-Abkommens zur Gründung einer Assoziation zwischen der Europäischen Gemeinschaft und ihren Mitgliedstaaten einerseits und der Tunesischen Republik andererseits(1) vorgesehen ist, für die Zeit vom 1. Januar bis zum 31. Dezember 2000 zu verlängern. (2) Das Abkommen sollte im Namen der Gemeinschaft genehmigt werden - BESCHLIESST: Artikel 1 Das Abkommen in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft wird im Namen der Gemeinschaft genehmigt. Der Wortlaut des Abkommens ist diesem Beschluß beigefügt. Artikel 2 Der Präsident des Rates wird ermächtigt, die Person zu bestellen, die befugt ist, das Abkommen rechtsverbindlich für die Gemeinschaft zu unterzeichnen. Geschehen zu Brüssel am 17. Dezember 1999. Im Namen des Rates Der Präsident K. HEMILÄ (1) ABl. L 97 vom 30.3.1998, S. 1."

pipeline([de_text], max_length=512)
```
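Note that, although the task is classification, the pipeline generates the class label as text, so each result is a dict under the standard translation-pipeline key. A quick sketch of reading the prediction (the interpretation of the output as a JRC-ACQUIS subject label is illustrative):

```python
results = pipeline([de_text], max_length=512)
for result in results:
    # TranslationPipeline returns [{'translation_text': ...}]; here that field carries the label.
    print(result["translation_text"])
```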
## Training data

The legal_t5_small_cls_de model was trained on the [JRC-ACQUIS](https://wt-public.emm4u.eu/Acquis/index_2.2.html) dataset, consisting of 23 thousand texts.

## Training procedure

The model was trained on a single TPU Pod V3-8 for 250K steps in total, using sequence length 512 (batch size 64). It has a total of approximately 220M parameters and was trained using the encoder-decoder architecture. The optimizer used is AdaFactor with an inverse square root learning rate schedule for pre-training.

### Preprocessing

A unigram model was trained with 88M lines of text from the parallel corpus (of all possible language pairs) to get the vocabulary (with byte pair encoding), which is used with this model.

### Pretraining

## Evaluation results

When used on the classification test dataset, the model achieves the following results:

Test results:

| Model | F1 score |
|:-----:|:-----:|
| legal_t5_small_cls_de | 0.6358 |

### BibTeX entry and citation info

> Created by [Ahmed Elnaggar/@Elnaggar_AI](https://twitter.com/Elnaggar_AI) | [LinkedIn](https://www.linkedin.com/in/prof-ahmed-elnaggar/)
1.\"}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45535,"string":"45,535"}}},{"rowIdx":43759,"cells":{"id":{"kind":"string","value":"knguyennguyen/mpnet_20k"},"author":{"kind":"string","value":"knguyennguyen"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","safetensors","mpnet","sentence-similarity","feature-extraction","generated_from_trainer","dataset_size:20108","loss:MultipleNegativesRankingLoss","arxiv:1908.10084","arxiv:1705.00652","base_model:sentence-transformers/all-mpnet-base-v2","base_model:finetune:sentence-transformers/all-mpnet-base-v2","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"safetensors\",\n \"mpnet\",\n \"sentence-similarity\",\n \"feature-extraction\",\n \"generated_from_trainer\",\n \"dataset_size:20108\",\n \"loss:MultipleNegativesRankingLoss\",\n \"arxiv:1908.10084\",\n \"arxiv:1705.00652\",\n \"base_model:sentence-transformers/all-mpnet-base-v2\",\n \"base_model:finetune:sentence-transformers/all-mpnet-base-v2\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2025-01-17T21:32:53Z","string":"2025-01-17T21:32:53Z"},"last_modified":{"kind":"string","value":"2025-01-17T21:33:06+00:00"},"downloads":{"kind":"number","value":6,"string":"6"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: sentence-transformers/all-mpnet-base-v2\nlibrary_name: sentence-transformers\npipeline_tag: sentence-similarity\ntags:\n- sentence-transformers\n- sentence-similarity\n- feature-extraction\n- generated_from_trainer\n- dataset_size:20108\n- loss:MultipleNegativesRankingLoss\nwidget:\n- source_sentence: I'm in search of a durable cover that can safeguard my cycling\n device while still allowing easy access to its features. It should be lightweight\n and available in different color options.\n sentences:\n - 'Title: Portable Radio AM FM KAVNLON-001 Descripion: [''1'']'\n - 'Title: GE WB27T11078 Keypanel Supt Asm Wh Descripion: [''This is an O.E.M. Authorized\n part . This is an authorized aftermarket product. Fits with various WB27T11078\n brand models.'']'\n - 'Title: Garmin 010-12791-00 Silicone Case, Edge 530, Black Descripion: [''Protect\n your Edge® 830 with this form-fitting, removable silicone case. Available in other\n colors (sold separately).'']'\n- source_sentence: I'm looking for a stylish handbag with a textured finish that offers\n a central storage area. 
It should have a chic design suitable for everyday use.\n sentences:\n - \"Title: LINERY Ski Gloves Men Waterproof Snowboard Gloves for Cold Weather Skiing\\\n \\ & Snowboarding Gloves Fits Both Men & Women Descripion: ['Our' 'LINERY'\\n 'ski\\\n \\ glove will be the only glove you need for a whole season at the ski resort.\\\n \\ Thanks to water-repellent shells, fleece liner and durable PU palms, these insulated\\\n \\ gloves keep your hands dry and cozy while you shred all season long.'\\n '4 Size\\\n \\ for You to Choose( Glove Size ):'\\n 'LADY SMALL: 3.25-3.45 inches across palm\\\n \\ of gloves, 2.75-2.95 inch length of middle finger, 10.25 inches total length.'\\n\\\n \\ 'LADY MEDIUM: 3.45-3.65 inches across palm of gloves, 2.95-3.15 inch length\\\n \\ of middle finger, 10.63 inches total length.'\\n 'MEN MEDIUM: 3.55-3.75 inches\\\n \\ across palm of gloves, 3.15-3.35 inch length of middle finger, 10.95 inches\\\n \\ total length.'\\n 'MEN LARGE: 3.85-4.05 inches across palm of gloves, 3.35-3.54\\\n \\ inch length of middle finger, 11.25 inches total length.'\\n 'Note: It’s suggested\\\n \\ to test the palm circumference and middle finger length, take the size chart\\\n \\ as reference to choose the right size. Manual measured, 0.4-0.8 inches deviation\\\n \\ allowed.']\"\n - 'Title: GUESS Colette Girlfriend Satchel Descripion: [''Colette Girlfriend Satchel\n in pebble pu with center zip compartment'']'\n - 'Title: French Toast Girls'' Stretch Skinny Pull-on Capri Pant Descripion: [\"Easy\n to wear and even easier to love! French Toast''s classroom capri features a simple\n navy and white elastic stripe on the waistband, functional front and back pockets\n and pull-on styling, making it even to easier to get her dressed and out the door.\"]'\n- source_sentence: I'm looking for a versatile bag that’s perfect for daily use, with\n ample room for my essentials. It should be made from durable materials, have a\n secure closure, and offer comfortable carrying options like adjustable straps.\n sentences:\n - 'Title: COVERGIRL Katy Kat Gift Set with Matte Lipsticks in Crimson Cat, Kitty\n Purry, Magenta Minx and Maroon Meow (packaging may vary) Descripion: [''The Purrrfect\n holiday gift. Deck yourself or someone you love with the new Katy Kat Collection.\n This gift was created by Katy Perry with 4 shades of Katy Kat Matte Lipstick in\n Crimson Cat, Kitty Purry, Magenta Minx, and Maroon Meow. You get Katy Kat Matte\n Lipstick that keeps lips pillowy soft. So enjoy! And have a beautiful holiday\n season!'']'\n - 'Title: Bewahly Vertical Laptop Stand[Adjustable Size],Aluminum Adjustable Laptop\n Holder, Saving Space, Suitable for MacBook Pro/Air, iPad, Samsung, Huawei, Surface,\n Dell, HP, Lenovo and Others (Gray) Descripion: [''Colour:grey Adjustable width:\n Adjustable for laptop width from 0.6 inch to 2.6 inch. Supplied Allen key to freely\n adjust to the most suitable width, suitable for all types of laptops. Aluminium\n alloy. The vertical laptop stand is made of superior aluminum alloy that resembles\n the material of the MacBook. Non-slip design: The excellent notebook stand provides\n many silicone pads such as in the U-type groove and under the notebook dock to\n prevent your device from being scratched and stay on the desk. Wide compatibility:\n Compatible with iPad/iPhone, MacBook Pro/Air, Microsoft Surface, Laptops, Notebooks,\n Dell Samsung Lenovo Acer Sony Asus XPS HP ENVY Series and even books. 
Space-saving:\n Keep your laptop vertical and make your desktop well organized to save more space\n quickly.'']'\n - 'Title: Peak Design Everyday Messenger 13L Descripion: [''The Peak Design(tm)\n 13 L Everyday Messenger V2 bag offers adequate space to keep your essentials handy.\n It will be your go-to everyday carry. Made of recycled 400D nylon, polyester,\n EVA, and Hypalon. MagLatch closure. Removable and adjustable padded shoulder straps.\n Crossbody strap and two hidden external straps.'']'\n- source_sentence: I'm looking for a special keychain gift that celebrates a love\n for dogs and is perfect for a grandmother. It should be made from quality materials\n and have a meaningful design that conveys affection.\n sentences:\n - 'Title: Bosch XR7LDC Super Spark Plug, (Pack of 1) Descripion: [''Item Name:'',\n ''Spark Plug 1pcs'', ''Part Brand:'', ''BOSCH'', ''OEM Numbers:'', ''12 12 1 465\n 104 / XX XX 1 464 104'', ''Electrode Gap [mm]:'', ''0,8'', ''Tightening Torque\n [Nm]:'', ''23'', ''Tightening angle [degrees]:'', ''90'', ''Outer Thread [mm]:'',\n ''12'', ''Thread Pitch [mm]:'', ''1,25'', ''Thread Length [mm]:'', ''19'', ''Spanner\n Size:'', ''17,5'', ''Spark Position [mm]:'', ''3'', ''Number of Poles:'', ''2'']'\n - 'Title: Aloe Vera Microwave Wax - Home Hair Removal Hard Wax - Brazilian Microwaveable\n Wax - Hot Stripless Wax for Body Waxing (Face, Eyebrows, Upper lip, Underarms,\n Legs and Arms) - Self Waxing Pot 200g Descripion: []'\n - \"Title: Kivosliviz Dog Grandma Gifts Keychain Dog Paw Print Jewelry Best Dog Grandma\\\n \\ Ever Dog Lover Keychain Dog Grandmother Keychains Descripion: ['❤PRODUCT NAME❤'\\n\\\n \\ 'Kivosliviz Dog Grandma Gifts Dog Paw Print Jewelry Best Dog Lover Keychain\\\n \\ This jewelry is made of high quality 316L stainless steel, it is processed by\\\n \\ precise polishing and made of environmental-friendly material.'\\n '❤KIVOSLIVIZ\\\n \\ JEWELRY❤'\\n 'Love can Change the World.You are more beautiful and confident\\\n \\ than you imagine.'\\n '❤Maintenance Instruction❤'\\n 'a. Keep it dry and avoid\\\n \\ wearing it when bathing, doing housework and swimming. b. Perfume is also corrosive\\\n \\ to jewelry. Wear your perfume before putting on the piece. c. Grease and sweat\\\n \\ on hands will caused corrosion. Be careful. d. Rub your Jewelry with baby wipes,they\\\n \\ perform miracles on jewelry without damaging the surface, then rub with a clean\\\n \\ dry soft cotton cloth to bring back the shine.']\"\n- source_sentence: I'm looking for a casual short-sleeve top with a fun and stylish\n design for women. It should have a round neck and a playful message, perfect for\n everyday wear. I'd prefer something that feels comfortable and has a unique print.\n sentences:\n - \"Title: 2 Pack USB-C Female to for Garmin Watch Charger Connector Male Adapter,\\\n \\ Type C to Charging Adapter for Garmin Instinct 2 Solar/Fenix 5/6/7X/7/6X/Venu\\\n \\ 2 Plus/EPIX/Forerunner 955 255 Descripion: ['Notice:'\\n '-- Charing Connector\\\n \\ Adapter only, Charging Cable not included.'\\n \\\"-- This adapter can only be\\\n \\ used for USB-A at one end and USB-C at the other end. BUT it can't be used for\\\n \\ USB-C at both ends !!! 
It can charge and transmit data, Please pay attention\\\n \\ that!\\\"\\n 'From Abanen, Smart Watch Accessories Brand'\\n '-- An ideal solution\\\n \\ to charge and sync Garmin watchs by a USB-C cable, saving the trouble of carrying\\\n \\ extra cable around.'\\n 'Compatible with Garmin Smartwatch:'\\n '-- Fenix 7X Solar\\\n \\ / Fenix 7X Sapphire Solar'\\n '-- Fenix 7S Solar / Fenix 7S Sapphire Solar'\\n\\\n \\ '-- Fenix 7 Solar / Fenix 7 Sapphire Solar' '-- EPIX (Gen 2)'\\n '-- Tactix 7\\\n \\ Pro' '-- Instinct 2 / Instinct 2S'\\n '-- Fenix 6X Pro / Fenix 6X Sapphire' '--\\\n \\ Fenix 5X /Fenix 5X Plus'\\n '-- Tactix Delta/Tactix Charlie/Tactix Bravo' '--\\\n \\ Quatix 3'\\n '-- D2 Charlie/D2 Delta PX' '-- Fenix 5/Fenix 5 Plus,'\\n '-- Fenix\\\n \\ 6 Pro/Sapphire' '-- Forerunner 935/945,'\\n '-- Instinct /Tactical Solar' '--\\\n \\ Tactix Charlie /Delta,'\\n '-- Quatix 5 / Quatix 6,' '-- Fenix 5S/Fenix 5S Plus,'\\n\\\n \\ '-- Fenix 6S Pro/Sapphire,' '-- Forerunner 245/645,'\\n '-- Approach S10 / S40\\\n \\ /S60 ,' '-- Vivoactive 3 / 3Music'\\n '-- Vivoactive 4 / 4S,']\"\n - \"Title: HONTOUTE Women Mom Boss T Shirt Funny Leopard Letter Print Shirts with\\\n \\ Saying Vintage O Neck Short Sleeve Tees Casual Tops Descripion: ['Women Mom\\\n \\ Boss T-Shirt Funny Leopard Letters Printed Shirts with Saying Vintage Round\\\n \\ Neck Short Sleeve Tees Cute Casual Tops'\\n 'Size Chart:(1inch=2.54cm)'\\n 'Size\\\n \\ S: Length 66cm/25.98\\\" Bust 94cm/37.01\\\" Size M: Length 67cm/26.38\\\" Bust 98cm/38.58\\\"\\\n \\ Size L: Length 68cm/26.77\\\" Bust 102cm/40.16\\\" Size XL: Length 69cm/27.17\\\"\\\n \\ Bust 110cm/43.31\\\" Please allow slight (±3cm)manual measurement deviation for\\\n \\ the data The real color of the item may be slightly different from the pictures\\\n \\ shown on website,caused by many factors such as brightness of your monitor and\\\n \\ light brightness'\\n 'Two Ways About Delivery:' 'FBM:'\\n 'Ship from China,88%\\\n \\ customers will receive within 2 weeks,9.9% lucky dog will receive within 1 week,and\\\n \\ others will receive within 3-4 weeks'\\n 'FBA:' 'Customers will receive within\\\n \\ 1-3 days' 'Service Guarantee:'\\n 'We endeavors 100% customer satisfaction service\\\n \\ and experience If you receive damaged or wrong items Please contact us with\\\n \\ attached pictures about the problem We will provide you a satisfactory solution\\\n \\ within 24 hours You may find that someone sells at a lower price than us But\\\n \\ they cannot guarantee the same quality and service as we do If you are satisfied\\\n \\ with our product or service Hope you can leave your positive feedback']\"\n - 'Title: Batman: Gotham By Gaslight Descripion: [\"It''s Batman vs. Jack the Ripper\n in an Elseworld''s adventure that imagines the Dark Knight over a hundred years\n ago in a turn-of-the-century Gotham.\"]'\n---\n\n# SentenceTransformer based on sentence-transformers/all-mpnet-base-v2\n\nThis is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2). 
It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.\n\n## Model Details\n\n### Model Description\n- **Model Type:** Sentence Transformer\n- **Base model:** [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) \n- **Maximum Sequence Length:** 128 tokens\n- **Output Dimensionality:** 768 tokens\n- **Similarity Function:** Cosine Similarity\n\n\n\n\n### Model Sources\n\n- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)\n- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)\n- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)\n\n### Full Model Architecture\n\n```\nSentenceTransformer(\n (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: MPNetModel \n (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})\n)\n```\n\n## Usage\n\n### Direct Usage (Sentence Transformers)\n\nFirst install the Sentence Transformers library:\n\n```bash\npip install -U sentence-transformers\n```\n\nThen you can load this model and run inference.\n```python\nfrom sentence_transformers import SentenceTransformer\n\n# Download from the 🤗 Hub\nmodel = SentenceTransformer(\"knguyennguyen/mpnet_20k\")\n# Run inference\nsentences = [\n \"I'm looking for a casual short-sleeve top with a fun and stylish design for women. It should have a round neck and a playful message, perfect for everyday wear. I'd prefer something that feels comfortable and has a unique print.\",\n 'Title: HONTOUTE Women Mom Boss T Shirt Funny Leopard Letter Print Shirts with Saying Vintage O Neck Short Sleeve Tees Casual Tops Descripion: [\\'Women Mom Boss T-Shirt Funny Leopard Letters Printed Shirts with Saying Vintage Round Neck Short Sleeve Tees Cute Casual Tops\\'\\n \\'Size Chart:(1inch=2.54cm)\\'\\n \\'Size S: Length 66cm/25.98\" Bust 94cm/37.01\" Size M: Length 67cm/26.38\" Bust 98cm/38.58\" Size L: Length 68cm/26.77\" Bust 102cm/40.16\" Size XL: Length 69cm/27.17\" Bust 110cm/43.31\" Please allow slight (±3cm)manual measurement deviation for the data The real color of the item may be slightly different from the pictures shown on website,caused by many factors such as brightness of your monitor and light brightness\\'\\n \\'Two Ways About Delivery:\\' \\'FBM:\\'\\n \\'Ship from China,88% customers will receive within 2 weeks,9.9% lucky dog will receive within 1 week,and others will receive within 3-4 weeks\\'\\n \\'FBA:\\' \\'Customers will receive within 1-3 days\\' \\'Service Guarantee:\\'\\n \\'We endeavors 100% customer satisfaction service and experience If you receive damaged or wrong items Please contact us with attached pictures about the problem We will provide you a satisfactory solution within 24 hours You may find that someone sells at a lower price than us But they cannot guarantee the same quality and service as we do If you are satisfied with our product or service Hope you can leave your positive feedback\\']',\n 'Title: Batman: Gotham By Gaslight Descripion: [\"It\\'s Batman vs. 
Jack the Ripper in an Elseworld\\'s adventure that imagines the Dark Knight over a hundred years ago in a turn-of-the-century Gotham.\"]',\n]\nembeddings = model.encode(sentences)\nprint(embeddings.shape)\n# [3, 768]\n\n# Get the similarity scores for the embeddings\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\n\n\n\n\n\n\n\n\n\n\n## Training Details\n\n### Training Dataset\n\n#### Unnamed Dataset\n\n\n* Size: 20,108 training samples\n* Columns: sentence_0 and sentence_1\n* Approximate statistics based on the first 1000 samples:\n | | sentence_0 | sentence_1 |\n |:--------|:-----------------------------------------------------------------------------------|:------------------------------------------------------------------------------------|\n | type | string | string |\n | details |
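Since semantic search is among the listed uses, a small retrieval sketch may also help. The corpus and query below are illustrative stand-ins for the "Title: ... Descripion: ..." product texts this model was tuned on, and `util.semantic_search` is the stock Sentence Transformers helper:

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("knguyennguyen/mpnet_20k")

# Illustrative product corpus in the same style as the training data.
corpus = [
    "Title: Garmin 010-12791-00 Silicone Case, Edge 530, Black Descripion: ['Protect your Edge® 830 with this form-fitting, removable silicone case.']",
    "Title: GUESS Colette Girlfriend Satchel Descripion: ['Colette Girlfriend Satchel in pebble pu with center zip compartment']",
]
query = "I'm after a protective cover for my bike computer."

corpus_embeddings = model.encode(corpus, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)

# Rank the corpus by cosine similarity to the query.
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=2)[0]
for hit in hits:
    print(round(hit["score"], 3), corpus[hit["corpus_id"]])
```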
## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 20,108 training samples
* Columns: <code>sentence_0</code> and <code>sentence_1</code>
* Approximate statistics based on the first 1000 samples:
  |         | sentence_0                                                                          | sentence_1                                                                           |
  |:--------|:------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------|
  | type    | string                                                                              | string                                                                               |
  | details | <ul><li>min: 11 tokens</li><li>mean: 36.23 tokens</li><li>max: 73 tokens</li></ul>  | <ul><li>min: 13 tokens</li><li>mean: 88.42 tokens</li><li>max: 128 tokens</li></ul>  |
* Samples:
  | sentence_0 | sentence_1 |
  |:-----------|:-----------|
  | <code>I'm looking for a stylish pair of eyewear with a luxurious touch. They should have a unique color combination and come with a special case and cleaning accessory.</code> | <code>Title: Sunglasses Gucci GG 0528 S- 008 GOLD/BROWN CRYSTAL, 63-14-150 Descripion: ['Authentic Gucci GG0528 S 008 Gold Crystal/Brown Sunglasses. Comes with a matching satin flannel pouch and ivory microfiber cloth and Authenticity card.']</code> |
  | <code>I'm looking for comfortable and stylish capri pants for girls that are easy to wear and have a stretchy fit.</code> | <code>Title: French Toast Girls' Stretch Skinny Pull-on Capri Pant Descripion: ["Easy to wear and even easier to love! French Toast's classroom capri features a simple navy and white elastic stripe on the waistband, functional front and back pockets and pull-on styling, making it even to easier to get her dressed and out the door."]</code> |
  | <code>I'm in need of a replacement screen for a laptop that offers clear visuals and fits a specific model. It should provide high-definition quality for general use.</code> | <code>Title: BRIGHTFOCAL New Screen Replacement for HP 14-CF0006DX HD 1366x768 LCD LED Display Panel Descripion: ['BRIGHTFOCAL New Screen Replacement for HP 14-CF0006DX HD 1366x768 LCD LED Display Panel']</code> |
* Loss: [MultipleNegativesRankingLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:
  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim"
  }
  ```
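For context, this setup can be reconstructed roughly as below with the v3 Sentence Transformers trainer. This is a hedged sketch, not the author's script: the single toy pair stands in for the real 20,108 samples, and only the non-default hyperparameters listed in the next section are shown.

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MultipleNegativesRankingLoss

model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")

# Toy stand-in for the 20,108 (sentence_0, sentence_1) pairs described above.
train_dataset = Dataset.from_dict({
    "sentence_0": ["I'm in search of a durable cover for my cycling device."],
    "sentence_1": ["Title: Garmin 010-12791-00 Silicone Case, Edge 530, Black ..."],
})

# In-batch negatives ranking loss with the parameters shown above (scale 20, cosine).
loss = MultipleNegativesRankingLoss(model)

args = SentenceTransformerTrainingArguments(
    output_dir="mpnet_20k",
    num_train_epochs=5,
    per_device_train_batch_size=128,
)

trainer = SentenceTransformerTrainer(
    model=model, args=args, train_dataset=train_dataset, loss=loss
)
trainer.train()
```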
### Training Hyperparameters
#### Non-Default Hyperparameters

- `per_device_train_batch_size`: 128
- `per_device_eval_batch_size`: 128
- `num_train_epochs`: 5
- `multi_dataset_batch_sampler`: round_robin

#### All Hyperparameters
<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: no
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 128
- `per_device_eval_batch_size`: 128
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 5e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1
- `num_train_epochs`: 5
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.0
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: round_robin

</details>
### Training Logs
| Epoch  | Step | Training Loss |
|:------:|:----:|:-------------:|
| 3.1646 | 500  | 0.493         |

### Framework Versions
- Python: 3.11.11
- Sentence Transformers: 3.1.1
- Transformers: 4.45.2
- PyTorch: 2.5.1+cu121
- Accelerate: 1.2.1
- Datasets: 3.2.0
- Tokenizers: 0.20.3

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```
---
datasets:
- jondurbin/airoboros-3.1
license: llama2
---

### Overview

Another experimental model, using mostly synthetic data generated by [airoboros](https://github.com/jondurbin/airoboros)

#### IMPORTANT NOTE - llama-2 chat format!!!

This model uses the llama-2 chat format, rather than vicuna-style user/assistant!

This is a breaking change, although most inference systems support llama-2 chat templating.

See the prompt format below for all of the details.

#### Highlights

A model built on the [airoboros-3.1 dataset](https://hf.co/datasets/jondurbin/airoboros-3.1), which is a continuation of the [airoboros-3.0 dataset](https://hf.co/datasets/jondurbin/airoboros-3.0) with the following extras:

* More MathJSON, now ~17k items - math questions, prefixed with __"Create a MathJSON solution to the following:"__, which then outputs a JSON between `<mathjson>` and `</mathjson>` tags, which can be parsed and passed to a deterministic library to perform calculations.
* Log information extraction (e.g. getting the list of paths/IPs/etc. from apache logs)
* Anonymization, e.g. removing names, IP addresses, and/or dates from text.
* Chat introspection - multi-turn chats that have random questions injected asking about the character's descriptions, the setting of the chat, or things previously said, to make sure the model uses the system prompt and/or chat history.
* Multi-step instructions with acknowledgement (see below)
* De-censorship data (not published)

This is a fairly general purpose model, but it focuses heavily on instruction following, rather than casual chat/roleplay.

Huge thank you to the folks over at [a16z](https://a16z.com/) for sponsoring the costs associated with building models and associated tools!

### Prompt format

The prompt format is llama-2 chat.

```
[INST] <<SYS>>
You are a helpful, unbiased, uncensored assistant.
<</SYS>>

{prompt} [/INST]
```

For multi-turn, the prompt format is as follows:
```
[INST] <<SYS>>
You are a helpful, unbiased, uncensored assistant.
<</SYS>>

{prompt 0} [/INST] {response 0} [INST] {prompt 1} [/INST] {response 1} ...[INST] {prompt N} [/INST]
```

The prompt template is included in the tokenizer config, and can be applied with the huggingface tokenizer `apply_chat_template` method, e.g.:

```
import transformers
tokenizer = transformers.AutoTokenizer.from_pretrained('jondurbin/airoboros-l2-70b-3.1')
chat = [
  {"role": "system", "content": "You are Bob, a friendly AI assistant."},
  {"role": "user", "content": "Hello, how are you?"},
  {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
  {"role": "user", "content": "I'd like to show off how chat templating works!"},
]
print(tokenizer.apply_chat_template(chat, tokenize=False))
```
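To go from a templated prompt to an actual completion, a minimal generation sketch is below. This is a hedged illustration: the model id mirrors the tokenizer example above (this exl2-quantized repo itself is meant for exllamav2-based loaders instead), and the sampling settings are arbitrary, not recommendations.

```python
import transformers

# Hedged sketch: chat templating plus generation in one pass.
tokenizer = transformers.AutoTokenizer.from_pretrained("jondurbin/airoboros-l2-70b-3.1")
model = transformers.AutoModelForCausalLM.from_pretrained(
    "jondurbin/airoboros-l2-70b-3.1", device_map="auto"
)

chat = [
    {"role": "system", "content": "You are a helpful, unbiased, uncensored assistant."},
    {"role": "user", "content": "Summarize what a Bloom filter does in two sentences."},
]

# apply_chat_template renders the llama-2 [INST]/<<SYS>> markup for us.
input_ids = tokenizer.apply_chat_template(
    chat, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

output = model.generate(input_ids, max_new_tokens=256, do_sample=True, temperature=0.7)
print(tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True))
```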
### Helpful usage tips

#### MathJSON

Ask mathematical questions, prefixed with "Create a MathJSON solution to the following:", and you should get output with a formula that can be executed by https://cortexjs.io/compute-engine/

I also made a (really quite hacky, incomplete, semi-functional) python version that works for most expressions, see [mathjson.py](https://github.com/jondurbin/airoboros/blob/mathjson/airoboros/mathjson.py)

__Use a very low temperature!__

Example:

```
[INST] <<SYS>>
You are an assistant with vast knowledge in all things math.
<</SYS>>

Create a MathJSON solution to the following: Calculate the area of a circle with a radius of 17.2456 cm. Include your reasoning. [/INST]
```

Output:
```
The area (A) of a circle is given by the formula A = πr², where r is the radius of the circle. Since the radius is given in centimeters, we need to convert it to meters first. We know that 1 meter equals 100 centimeters. Therefore, we can represent the radius as: r = 17.2456 cm * 100/1.

Once we have the radius in meters, we can calculate the area of the circle using the formula A = πr².

Solution as MathJSON:
<mathjson>
[
  "Multiply",
  "Pi",
  [
    "Power",
    17.2456,
    2
  ]
]
</mathjson>
```

You can then validate the JSON between `<mathjson>` and `</mathjson>`, then pass the parsed JSON to compute-engine JS or the `evaluate` function in mathjson.py to calculate the response.
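As an illustration of that last step, here is a hedged sketch: the helper names are mine, and the toy evaluator only understands the operators that appear in this card, so it is a stand-in for compute-engine or mathjson.py rather than a replacement.

```python
import json
import math
import re

def extract_mathjson(completion):
    """Pull the JSON payload out of the <mathjson>...</mathjson> block."""
    match = re.search(r"<mathjson>(.*?)</mathjson>", completion, re.DOTALL)
    return json.loads(match.group(1)) if match else None

def evaluate(expr):
    """Evaluate a tiny MathJSON subset: numbers, Pi, Add/Multiply/Power."""
    if isinstance(expr, (int, float)):
        return expr
    if expr == "Pi":
        return math.pi
    op, *args = expr
    values = [evaluate(a) for a in args]
    if op == "Add":
        return sum(values)
    if op == "Multiply":
        result = 1.0
        for v in values:
            result *= v
        return result
    if op == "Power":
        return values[0] ** values[1]
    raise ValueError(f"unsupported operator: {op}")

completion = '... <mathjson>["Multiply", "Pi", ["Power", 17.2456, 2]]</mathjson>'
print(evaluate(extract_mathjson(completion)))  # ~934.35 (square cm)
```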
#### Context obedient question answering

By obedient, I mean the model was trained to ignore what it thinks it knows, and uses the context to answer the question. The model was also tuned to limit the values to the provided context as much as possible to reduce hallucinations.

The format for a closed-context prompt is as follows:
```
BEGININPUT
BEGINCONTEXT
[key0: value0]
[key1: value1]
... other metadata ...
ENDCONTEXT
[insert your text blocks here]
ENDINPUT
[add as many other blocks, in the exact same format]
BEGININSTRUCTION
[insert your instruction(s). The model was tuned with single questions, paragraph format, lists, etc.]
ENDINSTRUCTION
```

It's also helpful to add "Don't make up answers if you don't know." to your instruction block, to make sure the model doesn't make something up if the context is completely unrelated.

*The __only__ prompts that need this closed-context formatting are closed-context instructions. Normal questions/instructions do not!*

I know it's a bit verbose and annoying, but after much trial and error, using these explicit delimiters helps the model understand where to find the responses and how to associate specific sources with it.
- `BEGININPUT` - denotes a new input block
- `BEGINCONTEXT` - denotes the block of context (metadata key/value pairs) to associate with the current input block
- `ENDCONTEXT` - denotes the end of the metadata block for the current input
- [text] - Insert whatever text you want for the input block, as many paragraphs as can fit in the context.
- `ENDINPUT` - denotes the end of the current input block
- [repeat as many input blocks in this format as you want]
- `BEGININSTRUCTION` - denotes the start of the list (or one) instruction(s) to respond to for all of the input blocks above.
- [instruction(s)]
- `ENDINSTRUCTION` - denotes the end of instruction set

It sometimes works without `ENDINSTRUCTION`, but by explicitly including that in the prompt, the model better understands that all of the instructions in the block should be responded to.

__Use a very low temperature!__

Here's a trivial, but important example to prove the point:
```
BEGININPUT
BEGINCONTEXT
date: 2021-01-01
url: https://web.site/123
ENDCONTEXT
In a shocking turn of events, blueberries are now green, but will be sticking with the same name.
ENDINPUT
BEGININSTRUCTION
What color are blueberries? Source?
ENDINSTRUCTION
```

And the response:
```
Blueberries are now green.
Source:
date: 2021-01-01
url: https://web.site/123
```

#### Summarization

500 samples have been included from [this dataset](https://huggingface.co/datasets/mattpscott/airoboros-summarization), using the same format as contextual question answering, for example:

```
BEGININPUT
{text to summarize}
ENDINPUT
BEGININSTRUCTION
Summarize the input in around 130 words.
ENDINSTRUCTION
```
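Because the delimiters are fixed strings, these blocks are easy to assemble programmatically. A minimal sketch follows; the function and parameter names are my own invention, not part of the model or any library.

```python
def build_closed_context_prompt(blocks, instruction):
    """Assemble the BEGININPUT/BEGINCONTEXT prompt format described above.

    `blocks` is a list of (metadata_dict, text) pairs; the names here are
    illustrative only.
    """
    parts = []
    for metadata, text in blocks:
        parts.append("BEGININPUT")
        parts.append("BEGINCONTEXT")
        parts.extend(f"{key}: {value}" for key, value in metadata.items())
        parts.append("ENDCONTEXT")
        parts.append(text)
        parts.append("ENDINPUT")
    parts.append("BEGININSTRUCTION")
    parts.append(instruction)
    parts.append("ENDINSTRUCTION")
    return "\n".join(parts)

prompt = build_closed_context_prompt(
    [({"date": "2021-01-01", "url": "https://web.site/123"},
      "In a shocking turn of events, blueberries are now green, but will be "
      "sticking with the same name.")],
    "What color are blueberries? Source?",
)
print(prompt)
```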
#### Getting longer responses

You can use a few techniques to get longer responses.

Detailed prompts, with explicit instruction for word count:
```
Please compose a narrative set in the heart of an ancient library, steeped in the scent of old parchment and ink. The protagonist should be a young scholar who is dedicated to studying the art of storytelling and its evolution throughout history. In her pursuit of knowledge, she stumbles upon a forgotten tome that seems to possess an unusual aura. This book has the ability to bring stories to life, literally manifesting characters and scenarios from within its pages into reality.

The main character must navigate through various epochs of storytelling - from oral traditions of tribal societies, through medieval minstrels' tales, to modern-day digital narratives - as they come alive around her. Each era presents its unique challenges and lessons about the power and impact of stories on human civilization.

One such character could be a sentient quill pen, who was once used by renowned authors of yesteryears and now holds their wisdom and experiences. It becomes her mentor, guiding her through this journey with witty remarks and insightful commentary.

Ensure that your tale encapsulates the thrill of adventure, the beauty of learning, and the profound connection between humans and their stories. All characters involved should be non-human entities. Feel free to explore creative liberties but maintain the mentioned elements.

Your response should be approximately 2300 words.
```

Or, a simpler example:
```
Please create a long, detailed story about a dragon in an old growth forest who, for some reason, begins speaking the words of the source code of linux.
```

There are a few examples of next chapter completion as well, e.g.:
```
Write the next chapter of a historical fiction novel set in Paris during the 20th century.

Here's a summary of the previous chapter:
In the vibrant city of Paris, amid the tumultuous changes of the 20th century, our protagonist Margot, an aspiring fashion designer, has just secured an apprenticeship at a prestigious couture house. She meets Lucien, a charming journalist who covers the fashion industry. Together they navigate the ever-changing world of fashion and society, uncovering secrets that reveal the intricate links between style, politics, and culture. As the chapter concludes, they decide to delve deeper into the hidden corners of the fashion world to unravel its mysteries.

Requirements for the next chapter:

1. Character Development of Margot and Lucien:
- Margot's Evolution: Unfold more about Margot's past, her dreams of revolutionizing fashion, and her struggle to establish herself in a male-dominated industry. Illustrate her growing expertise, innovative ideas, and increasing dependence on Lucien.
- Lucien's Complexity: Introduce uncertainties surrounding Lucien's background and real motives. Increase suspense by suggesting undisclosed information he possesses, while also highlighting his wit and perceptiveness.

2. Exploration of Paris and the Couture House:
- Paris: Elaborate their journey through the bustling streets of Paris, including encounters with iconic figures, social unrest, and relics from different eras of French history.
- The Couture House: Expand on the grandeur of the couture house they work in, filled with artistic masterpieces, intense competition, and cryptic notes hinting at a scandalous past.

3. Emergence of the Subplot: The Lost Collection:
- Discovery: Have Margot and Lucien stumble upon a secret vault containing a lost collection designed before World War II, raising new questions about the previous owner and the influence of war on fashion.
- Revelation: Capture their shock as they realize the designs were plagiarized, the potential repercussions, and the opportunities it presents for Margot's career.
- Twist: End with a twist that suggests there are other stolen collections across Paris, setting up their new mission.

Your response should be approximately 650 words.
```
#### Coding

You can ask for fairly complex coding instructions with multiple criteria, e.g.:

```
Create a python application with the following requirements:
- Asyncio FastAPI webserver
- ping endpoint that returns the current date in JSON format
- file upload endpoint, which calculates the file's sha256 checksum, and checks postgres to deduplicate
```

Or inline criteria:

```
Write a multi-threaded TCP server in C that accepts a "GET [key]" input and "SET [key] [value]" input, and uses a binary tree to get and store the input values.
```

You can also optionally add a single space and "PLAINFORMAT" at the end of your prompt to avoid backticks, explanations, etc. and just print the code, e.g.:

```
Write a websocket application in node.js. PLAINFORMAT
```

#### Agent/function calling

The dataset includes many examples of function/args generation based on input criteria. This is somewhat similar to the OpenAI function calling, but the output is either JSON or YAML.

Example prompt:
```
As an AI assistant, please select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format.

Input: I want to know how many times 'Python' is mentioned in my text file.

Available functions:
file_analytics:
  description: This tool performs various operations on a text file.
  params:
    action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc.
    filters:
      keyword: The word or phrase we want to search for.
```

Response:
```json
{
  "function": "file_analytics",
  "params": {
    "action": "count_occurrences",
    "filters": {
      "keyword": "Python"
    }
  }
}
```
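On the receiving side, the JSON is trivial to route. Below is a hedged sketch of a dispatcher; the registry, the in-memory sample text, and the handler implementation are all hypothetical.

```python
import json

SAMPLE_TEXT = "Python is popular. I write Python daily."  # stand-in for the user's file

def file_analytics(action, filters):
    """Hypothetical local implementation of the advertised tool."""
    if action == "count_occurrences":
        return SAMPLE_TEXT.count(filters["keyword"])
    raise ValueError(f"unsupported action: {action}")

FUNCTIONS = {"file_analytics": file_analytics}

def dispatch(model_output):
    """Parse the model's JSON function selection and route it."""
    call = json.loads(model_output)
    return FUNCTIONS[call["function"]](**call["params"])

model_output = """{"function": "file_analytics", "params": {"action": "count_occurrences", "filters": {"keyword": "Python"}}}"""
print(dispatch(model_output))  # -> 2
```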
#### Chain-of-thought

You can ask for several possible responses to a given problem, with a ranking and final answer selection.

Example prompt:

```
A group of people decide to play a round-robin tournament where every player plays every other player exactly once. If a total of 45 games were played, how many players participated in the tournament? Offer several potential answers, rank them according to mathematical logic, and pick the most feasible one.
```

Example response:
```
Solution 1:
In a round-robin tournament, each player plays every other player exactly once. This means that the total number of games played is equal to the combination of all players taken two at a time (since a game involves two players).

The formula for combinations is nC2 = n(n-1)/2, where n is the total number of players. We know that nC2 equals 45 (the total number of games), so we can set up the equation as follows:

n(n-1)/2 = 45

Solving this equation gives us n=10.

Final answer: There were 10 players in the tournament.

Solution 2:
Another way to approach this problem is by using the concept of triangular numbers. Triangular numbers are a sequence of numbers where each number can be represented as a triangle with dots. The nth triangular number is given by the sum of the first n natural numbers.

If we consider each game as a dot and each player as a side of the triangle, then the total number of games (45) would correspond to the 9th triangular number because 1+2+3+4+5+6+7+8+9=45. However, since each side of the triangle represents a player, and there's one more player than sides in our model (because the last player has no one left to play against), we need to add one more to get the total number of players.

Final answer: There were 10 players in the tournament.

Solution 3:
We could also use trial and error to solve this problem. Starting from a small number of players, we calculate how many games would be played in a round-robin tournament until we reach 45 games. For example, with 2 players, only 1 game is played; with 3 players, 3 games are played; with 4 players, 6 games are played, and so on. Continuing this process, we find that with 10 players, 45 games are played.

Final answer: There were 10 players in the tournament.

Ranking of solutions from best to worst:
1. Solution 1: This solution uses direct mathematical reasoning based on the properties of a round-robin tournament and solves an equation to find the answer.
2. Solution 2: This solution uses the concept of triangular numbers, which requires additional steps and knowledge beyond the immediate problem context, but still arrives at the correct answer.
3. Solution 3: This solution relies on trial and error, which is less efficient and reliable than the other methods, but it eventually gets to the right answer.

Best and final answer: There were 10 players in the tournament.
```

#### reWOO style execution planning

The model now supports execution planning for complex instructions that would require making use of several tools. The output is just the plan; you must implement a mechanism to parse the output and actually call the functions!

Example prompt:
```
Please construct a systematic plan to generate an optimal response to the user instruction, utilizing a set of provided tools. Each plan will correspond to an evidence value, which will be the output of one of the available functions given an input string that could be the user's question, one or more prior evidence values, or a combination of both.

Here are the tools available to be called:
DuckDuckGo[input]: Tool that enables the user to search for information using the DuckDuckGo search engine. This tool is helpful in locating a suitable list of sites that may contain the answer to the user's query. It does not directly answer the question but finds a list of sites that might have the answer.
TextScraper[input]: Load one or more websites from the input string containing newline delimited links, where input is one or more links, and produces plain text output containing the content of the links.
HyperlinkExtractor[input]: Extract links from plain text and produces a plain text, newline delimited response of links.
KnowledgeModel[input]: Question answering language model, particularly useful in answering questions based on an input passage of text. The input must be a text question that references an :evidence[n]: variable, e.g. What color is the cat, given :evidence1:?

The output should be in format:
Plan: [first action to take based on input question]
:evidence0: = [function to call with input parameter]
Plan: [next action to take, based on result of :evidence0:]
:evidence1: = [next function to call and input parameter, which may include reference to previous evidence, e.g. "Given context :evidence0"]
...
Answer: [:evidence[n]: containing the final answer.]

Question: Who won the Nobel Prize in Literature in 2020 and what is their most famous work?
```

Response:
```
Plan: Begin by conducting a web search to find out who won the Nobel Prize in Literature in 2020.
:evidence0: = DuckDuckGo[Nobel Prize in Literature 2020 winner]
Plan: Utilize the KnowledgeModel to interpret the search results and find the name of the laureate.
:evidence1: = KnowledgeModel[Who won the Nobel Prize in Literature in 2020, given :evidence0:?]
Plan: Conduct another web search to find the most famous work of the identified laureate.
:evidence2: = DuckDuckGo[Most famous work of :evidence1:]
Plan: Extract the relevant links from the DuckDuckGo search results for a more focused search.
:evidence3: = HyperlinkExtractor[:evidence2:]
Plan: Use the TextScraper tool to extract information from the relevant links.
:evidence4: = TextScraper[:evidence3:]
Plan: Finally, utilize the KnowledgeModel to identify and summarize the most famous work of the laureate from the extracted information.
:evidence5: = KnowledgeModel[What is the most famous work of :evidence1:, given :evidence4:?]
Answer: :evidence5:
```

For this to be useful, you'd have to parse the output plan text and implement/call each of the functions. This is a lightly cleaned-up sketch, still untested, and it would require full implementation + hardening:

```python
import re
import requests

def inject_context(input_text, **context):
    # Substitute any :evidenceN: references with previously computed values.
    for ref in set(re.findall(r"(:evidence[0-9]+:)", input_text, re.I)):
        input_text = input_text.replace(ref, context.get(ref, ""))
    return input_text

def duckduckgo(input_text, **context):
    search_string = inject_context(input_text, **context)
    # TODO: search via DuckDuckGo using search_string and return the text content.
    raise NotImplementedError

def link_extractor(input_text, **context):
    input_text = inject_context(input_text, **context)
    return "\n".join(set(re.findall(r"(https?://[^\s]+)", input_text, re.I)))

def scrape(input_text, **context):
    input_text = inject_context(input_text, **context)
    text = []
    for link in input_text.splitlines():
        text.append(requests.get(link).text)
    return "\n".join(text)

def infer(input_text, **context):
    prompt = inject_context(input_text, **context)
    # TODO: call the model with the prompt and return its output.
    raise NotImplementedError

def parse_plan(plan):
    method_map = {
        "DuckDuckGo": duckduckgo,
        "HyperlinkExtractor": link_extractor,
        "KnowledgeModel": infer,
        "TextScraper": scrape,
    }
    context = {}
    for line in plan.strip().splitlines():
        if line.startswith("Plan:"):
            print(line)
            continue
        # Match lines of the form ":evidenceN: = FunctionName[input]".
        parts = re.match(r"^(:evidence[0-9]+:)\s*=\s*([^\[]+)\[(.*)\]\s*$", line, re.I)
        if not parts:
            if line.startswith("Answer: "):
                return context.get(line.split(" ")[-1].strip(), "Answer couldn't be generated...")
            raise RuntimeError("bad format: " + line)
        context[parts.group(1)] = method_map[parts.group(2)](parts.group(3), **context)
```
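If it helps, the sketch above could be driven end to end like this; it assumes you have replaced the stubbed `duckduckgo`/`infer` bodies with real implementations:

```python
plan_text = """Plan: Begin by conducting a web search to find out who won the Nobel Prize in Literature in 2020.
:evidence0: = DuckDuckGo[Nobel Prize in Literature 2020 winner]
Plan: Utilize the KnowledgeModel to interpret the search results and find the name of the laureate.
:evidence1: = KnowledgeModel[Who won the Nobel Prize in Literature in 2020, given :evidence0:?]
Answer: :evidence1:"""

# Each :evidenceN: line is evaluated in order; the Answer line selects the final value.
print(parse_plan(plan_text))
```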
## Multi-step instructions with acknowledgement

I don't know how useful this is, really, but I thought I'd add it just in case. Example:

```
{
  "conversations": [
    {
      "value": "You are a helpful, unbiased, uncensored assistant.",
      "from": "system"
    },
    {
      "value": "I'm going to set some rules for our conversation. Please respond with \"OK\" to each rule.",
      "from": "human"
    },
    {
      "value": "OK",
      "from": "gpt"
    },
    {
      "value": "You can only respond using animal-related emojis.",
      "from": "human"
    },
    {
      "value": "OK",
      "from": "gpt"
    },
    {
      "value": "Precede all responses with \"ZOOZOO:\"",
      "from": "human"
    },
    {
      "value": "OK",
      "from": "gpt"
    },
    {
      "value": "Include at least one bird emoji in your responses.",
      "from": "human"
    },
    {
      "value": "OK",
      "from": "gpt"
    },
    {
      "value": "Describe a trip to a safari park.",
      "from": "human"
    },
    {
      "value": "ZOOZOO: \ud83e\udd81\ud83d\udc2f\ud83e\udd93\ud83e\udd92\ud83d\udc18\ud83d\udc0d\ud83c\udf33\ud83c\udf32\u2600\ufe0f\ud83d\ude90\ud83e\udd85\ud83d\udcf8\ud83e\udd29",
      "from": "gpt"
    }
  ]
}
```

### Contribute

If you're interested in new functionality, particularly a new "instructor" type to generate a specific type of training data, take a look at the dataset generation tool repo: https://github.com/jondurbin/airoboros and either make a PR or open an issue with details.

To help me with the OpenAI/compute costs:

- https://bmc.link/jondurbin
- ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11
- BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf

### Licence and usage restrictions

The airoboros 3.1 models are built on top of multiple base models, each with their own license/restrictions.

The 30b model is built on the original llama, which has a strict non-commercial usage restriction.

The models with `-l2` in the name have a custom Meta license:
- See the [meta-license/LICENSE.txt](meta-license/LICENSE.txt) file attached for the original license provided by Meta.
- See also [meta-license/USE_POLICY.md](meta-license/USE_POLICY.md) and [meta-license/Responsible-Use-Guide.pdf](meta-license/Responsible-Use-Guide.pdf), also provided by Meta.

The models with `-m-` are mistral-7b (apache 2.0).

The fine-tuning data was mostly generated by OpenAI API calls to gpt-4, via [airoboros](https://github.com/jondurbin/airoboros).

The ToS for OpenAI API usage has a clause preventing the output from being used to train a model that __competes__ with OpenAI:

- what does *compete* actually mean here?
- these small open source models will not produce output anywhere near the quality of gpt-4, or even gpt-3.5, so I can't imagine this could credibly be considered competing in the first place
- if someone else uses the dataset to do the same, they wouldn't necessarily be violating the ToS because they didn't call the API, so I don't know how that works
- the training data used in essentially all large language models includes a significant amount of copyrighted or otherwise non-permissive licensing in the first place
- other work using the self-instruct method, e.g. the original here: https://github.com/yizhongw/self-instruct released the data and model as apache-2

I am purposely leaving this license ambiguous (other than the fact you must comply with the Meta original license for llama-2) because I am not a lawyer and refuse to attempt to interpret all of the terms accordingly.

Your best bet is probably to avoid using this commercially due to the OpenAI API usage.

Either way, by using this model, you agree to completely indemnify me.
---
base_model: unsloth/gemma-2-2b
library_name: peft
license: gemma
tags:
- axolotl
- generated_from_trainer
model-index:
- name: 3cf6e881-58d4-4c83-b040-9d0b8321ff0e
  results: []
---

[Built with Axolotl](https://github.com/axolotl-ai-cloud/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.5.2`
```yaml
adapter: lora
base_model: unsloth/gemma-2-2b
bf16: auto
chat_template: llama3
dataset_prepared_path: null
datasets:
- data_files:
  - bbe70c53a119531f_train_data.json
  ds_type: json
  format: custom
  path: /workspace/input_data/bbe70c53a119531f_train_data.json
  type:
    field_input: transcription
    field_instruction: glosses
    field_output: translation
    format: '{instruction} {input}'
    no_input_format: '{instruction}'
    system_format: '{system}'
    system_prompt: ''
debug: null
deepspeed: null
early_stopping_patience: 1
eval_max_new_tokens: 128
eval_steps: 25
eval_table_size: null
flash_attention: false
fp16: false
fsdp: null
fsdp_config: null
gradient_accumulation_steps: 16
gradient_checkpointing: true
group_by_length: true
hub_model_id: vdos/3cf6e881-58d4-4c83-b040-9d0b8321ff0e
hub_repo: null
hub_strategy: checkpoint
hub_token: null
learning_rate: 0.0001
load_in_4bit: false
load_in_8bit: false
local_rank: null
logging_steps: 1
lora_alpha: 64
lora_dropout: 0.05
lora_fan_in_fan_out: null
lora_model_dir: null
lora_r: 32
lora_target_linear: true
lr_scheduler: cosine
max_steps: 50
micro_batch_size: 2
mlflow_experiment_name: /tmp/bbe70c53a119531f_train_data.json
model_type: AutoModelForCausalLM
num_epochs: 3
optimizer: adamw_torch
output_dir: miner_id_24
pad_to_sequence_len: true
resume_from_checkpoint: null
s2_attention: null
sample_packing: false
save_steps: 25
sequence_len: 2048
strict: false
tf32: false
tokenizer_type: AutoTokenizer
train_on_inputs: false
trust_remote_code: true
val_set_size: 0.05
wandb_entity: null
wandb_mode: online
wandb_name: 3cf6e881-58d4-4c83-b040-9d0b8321ff0e
wandb_project: Gradients-On-Demand
wandb_run: your_name
wandb_runid: 3cf6e881-58d4-4c83-b040-9d0b8321ff0e
warmup_ratio: 0.05
weight_decay: 0.01
xformers_attention: true
```

</details><br>

# 3cf6e881-58d4-4c83-b040-9d0b8321ff0e

This model is a fine-tuned version of [unsloth/gemma-2-2b](https://huggingface.co/unsloth/gemma-2-2b) on the `bbe70c53a119531f_train_data.json` dataset described in the axolotl config above.
It achieves the following results on the evaluation set:
- Loss: 1.9679

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- distributed_type: multi-GPU
- num_devices: 4
- gradient_accumulation_steps: 16
- total_train_batch_size: 128
- total_eval_batch_size: 8
- optimizer: AdamW (torch) with betas=(0.9, 0.999), epsilon=1e-08, no additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 2
- training_steps: 50

### Training results

| Training Loss | Epoch  | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| 4.9321        | 0.0128 | 1    | 6.1235          |
| 0.6469        | 0.3195 | 25   | 2.1838          |
| 0.4683        | 0.6390 | 50   | 1.9679          |

### Framework versions

- PEFT 0.13.2
- Transformers 4.46.3
- Pytorch 2.3.1+cu121
- Datasets 3.1.0
- Tokenizers 0.20.3
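The sections above are left as "More information needed"; as a starting point, here is a minimal, untested sketch for loading the LoRA adapter with PEFT, assuming the adapter weights were pushed to the hub id used in the config (`vdos/3cf6e881-58d4-4c83-b040-9d0b8321ff0e`):

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the frozen base model, then attach the fine-tuned LoRA adapter on top.
base = AutoModelForCausalLM.from_pretrained("unsloth/gemma-2-2b", torch_dtype=torch.bfloat16)
model = PeftModel.from_pretrained(base, "vdos/3cf6e881-58d4-4c83-b040-9d0b8321ff0e")
tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-2-2b")

# The config formats prompts as "{instruction} {input}", i.e. glosses then transcription.
inputs = tokenizer("<glosses> <transcription>", return_tensors="pt")
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```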
---
license: mit
---

# Legal Document Summarizer

This repository contains fine-tuned models for summarizing legal documents, including extractive and abstractive summarization models based on LegalBERT and BART. The models are designed to handle legal case files and segment them into sections, followed by generating summaries of these sections.

## Models

### 1. **BART Model (Abstractive Summarization)**
This model is fine-tuned to generate **abstractive summaries** of legal judgments. It processes the input document and generates a coherent, concise summary that captures the key points of the judgment. This is particularly useful for creating readable summaries for large legal texts.

### 2. **Extractive Model (LegalBERT)**
This model performs **extractive summarization** by identifying key sentences in a legal case document and extracting them to form a summary. It leverages LegalBERT, fine-tuned specifically for legal text, to classify and extract relevant content such as facts, judgment, and arguments from a case.
### 3. **Classification Model (LegalBERT)**
The classification model is used to categorize sections of the legal document (e.g., **Judgment**, **Analysis**, **Facts**, **Arguments**, **Statute**). By understanding the structure of the document, the model helps in segmenting the text before applying extractive or abstractive summarization techniques.

## Web Application

This repository also provides a **web application** that allows users to upload a legal case file in `.txt` format or paste the document text directly. The application enables users to choose between generating a **whole summary** or a **segmented summary**, using the extractive and abstractive models accordingly.

For the web app code, visit the [GitHub repository](https://github.com/lksai19/Legal-Summarizer).

## Dataset

The models in this repository have been fine-tuned on the dataset used in the following research paper:

**Shukla, A., Bhattacharya, P., Poddar, S., Mukherjee, R., Ghosh, K., Goyal, P., & Ghosh, S. (2022).** *Legal case document summarization: Extractive and abstractive methods and their evaluation.*
Available at: [Research Paper Link](https://aclanthology.org/2022.aacl-main.77/)

This dataset consists of annotated legal case documents, where segments such as **Analysis**, **Judgment**, **Facts**, and **Arguments** were manually labeled by legal experts. The paper discusses the methods for extractive and abstractive summarization, providing evaluation metrics for model performance.

## Fine-tuning

For detailed steps on how the models were fine-tuned, including code and configuration, refer to the [GitHub repository of the fine-tuning project](https://github.com/lksai19/Legal-Summarizer).

### Requirements

To run the models locally, install the required dependencies:

```bash
pip install -r requirements.txt
```
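The card does not specify checkpoint names or loading code, so the following is only a rough, hypothetical sketch of running the abstractive BART summarizer via `transformers`; the model path is a placeholder you would need to resolve against the actual repository layout:

```python
from transformers import pipeline

# Placeholder path: the card does not say how the fine-tuned checkpoints are organized.
summarizer = pipeline("summarization", model="path/to/fine-tuned-bart")

with open("case.txt", encoding="utf-8") as f:
    judgment = f.read()

# BART has a limited input window, so long judgments need chunking or section
# segmentation (e.g. via the classification model) before summarization.
summary = summarizer(judgment[:3000], max_length=256, min_length=64)[0]["summary_text"]
print(summary)
```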
---
base_model: BAAI/bge-base-en-v1.5
language:
- en
library_name: sentence-transformers
license: apache-2.0
pipeline_tag: sentence-similarity
tags:
- sentence-transformers
- sentence-similarity
- feature-extraction
- generated_from_trainer
- dataset_size:6300
- loss:MatryoshkaLoss
- loss:MultipleNegativesRankingLoss
widget:
- source_sentence: Item 8 in IBM's 2023 Annual Report to Stockholders details the Financial Statements and Supplementary Data, which are included on pages 44 through 121.
  sentences:
  - What was the amount gained from the disposal of assets in 2022?
  - What section of IBM's Annual Report for 2023 contains the Financial Statements and Supplementary Data?
  - What were the cash outflows for capital expenditures in 2023 and 2022 respectively?
- source_sentence: For the fiscal year ended March 31, 2023, Electronic Arts reported a gross margin of 75.9 percent, an increase of 2.5 percentage points from the previous year.
  sentences:
  - How did investment banking revenues at Goldman Sachs change in 2023 compared to 2022, and what factors contributed to this change?
  - What was the gross margin percentage for Electronic Arts in the fiscal year ending March 31, 2023?
  - What were the risk-free interest rates for the fiscal years 2021, 2022, and 2023?
- source_sentence: Cash, cash equivalents, and restricted cash at the beginning of the period totaled $7,013 for a company.
  sentences:
  - What was the amount of cash, cash equivalents, and restricted cash at the beginning of the period for the company?
  - What is the impact of the new $1.25 price point on Dollar Tree’s sales units and profitability?
  - What was the total amount attributed to Goodwill in the acquisition of Nuance Communications, Inc. as reported by the company?
- source_sentence: generate our mall revenue primarily from leases with tenants through base minimum rents, overage rents and reimbursements for common area maintenance (CAM) and other expenditures.
  sentences:
  - How does Visa facilitate financial inclusion with their prepaid cards?
  - What are the main objectives of the economic sanctions imposed by the United States and other international bodies?
  - What revenue sources does Shoppes at Venetian primarily rely on from its tenants?
- source_sentence: For the fiscal year ended August 26, 2023, we reported net sales of $17.5 billion compared with $16.3 billion for the year ended August 27, 2022, a 7.4% increase from fiscal 2022. This growth was driven primarily by a domestic same store sales increase of 3.4% and net sales of $327.8 million from new domestic and international stores.
  sentences:
  - What drove the 7.4% increase in AutoZone's net sales for fiscal 2023 compared to fiscal 2022?
  - What percentage of HP's external U.S. hires in fiscal year 2023 were racially or ethnically diverse?
  - How much did GameStop Corp's valuation allowances increase during fiscal 2022?
---

# BGE base Financial Matryoshka

This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Evaluation

Information-retrieval metrics from the card's `model-index`, reported at each Matryoshka embedding dimension (evaluation datasets `dim_768` through `dim_64`):

| Metric | dim_768 | dim_512 | dim_256 | dim_128 | dim_64 |
|---|---|---|---|---|---|
| cosine_accuracy@1 | 0.6985714285714286 | 0.69 | 0.6957142857142857 | 0.6871428571428572 | 0.6328571428571429 |
| cosine_accuracy@3 | 0.8271428571428572 | 0.8271428571428572 | 0.8228571428571428 | 0.8128571428571428 | 0.7771428571428571 |
| cosine_accuracy@5 | 0.8628571428571429 | 0.86 | 0.86 | 0.8457142857142858 | 0.8171428571428572 |
| cosine_accuracy@10 | 0.8985714285714286 | 0.9028571428571428 | 0.8914285714285715 | 0.8857142857142857 | 0.8571428571428571 |
| cosine_precision@1 | 0.6985714285714286 | 0.69 | 0.6957142857142857 | 0.6871428571428572 | 0.6328571428571429 |
| cosine_precision@3 | 0.2757142857142857 | 0.2757142857142857 | 0.2742857142857143 | 0.27095238095238094 | 0.259047619047619 |
| cosine_precision@5 | 0.17257142857142854 | 0.17199999999999996 | 0.17199999999999996 | 0.16914285714285712 | 0.16342857142857142 |
| cosine_precision@10 | 0.08985714285714284 | 0.09028571428571427 | 0.08914285714285713 | 0.08857142857142856 | 0.08571428571428569 |
| cosine_recall@1 | 0.6985714285714286 | 0.69 | 0.6957142857142857 | 0.6871428571428572 | 0.6328571428571429 |
| cosine_recall@3 | 0.8271428571428572 | 0.8271428571428572 | 0.8228571428571428 | 0.8128571428571428 | 0.7771428571428571 |
| cosine_recall@5 | 0.8628571428571429 | 0.86 | 0.86 | 0.8457142857142858 | 0.8171428571428572 |
| cosine_recall@10 | 0.8985714285714286 | 0.9028571428571428 | 0.8914285714285715 | 0.8857142857142857 | 0.8571428571428571 |
| cosine_ndcg@10 | 0.8023663256793517 | 0.7998655910794988 | 0.7974564108711016 | 0.787697533881839 | 0.7482728321357093 |
| cosine_mrr@10 | 0.7712675736961451 | 0.7665912698412698 | 0.7669535147392289 | 0.756192743764172 | 0.7131224489795914 |
| cosine_map@100 | 0.7758522351159084 | 0.7706925401671437 | 0.7718155211819018 | 0.7610331995977764 | 0.7189753431460272 |
It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.\n\n## Model Details\n\n### Model Description\n- **Model Type:** Sentence Transformer\n- **Base model:** [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) \n- **Maximum Sequence Length:** 512 tokens\n- **Output Dimensionality:** 768 tokens\n- **Similarity Function:** Cosine Similarity\n\n- **Language:** en\n- **License:** apache-2.0\n\n### Model Sources\n\n- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)\n- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)\n- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)\n\n### Full Model Architecture\n\n```\nSentenceTransformer(\n (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel \n (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})\n (2): Normalize()\n)\n```\n\n## Usage\n\n### Direct Usage (Sentence Transformers)\n\nFirst install the Sentence Transformers library:\n\n```bash\npip install -U sentence-transformers\n```\n\nThen you can load this model and run inference.\n```python\nfrom sentence_transformers import SentenceTransformer\n\n# Download from the 🤗 Hub\nmodel = SentenceTransformer(\"NickyNicky/bge-base-financial-matryoshka\")\n# Run inference\nsentences = [\n 'For the fiscal year ended August 26, 2023, we reported net sales of $17.5 billion compared with $16.3 billion for the year ended August 27, 2022, a 7.4% increase from fiscal 2022. This growth was driven primarily by a domestic same store sales increase of 3.4% and net sales of $327.8 million from new domestic and international stores.',\n \"What drove the 7.4% increase in AutoZone's net sales for fiscal 2023 compared to fiscal 2022?\",\n \"What percentage of HP's external U.S. 
hires in fiscal year 2023 were racially or ethnically diverse?\",\n]\nembeddings = model.encode(sentences)\nprint(embeddings.shape)\n# [3, 768]\n\n# Get the similarity scores for the embeddings\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\n\n\n\n\n\n\n## Evaluation\n\n### Metrics\n\n#### Information Retrieval\n* Dataset: `dim_768`\n* Evaluated with [InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| cosine_accuracy@1 | 0.6986 |\n| cosine_accuracy@3 | 0.8271 |\n| cosine_accuracy@5 | 0.8629 |\n| cosine_accuracy@10 | 0.8986 |\n| cosine_precision@1 | 0.6986 |\n| cosine_precision@3 | 0.2757 |\n| cosine_precision@5 | 0.1726 |\n| cosine_precision@10 | 0.0899 |\n| cosine_recall@1 | 0.6986 |\n| cosine_recall@3 | 0.8271 |\n| cosine_recall@5 | 0.8629 |\n| cosine_recall@10 | 0.8986 |\n| cosine_ndcg@10 | 0.8024 |\n| cosine_mrr@10 | 0.7713 |\n| **cosine_map@100** | **0.7759** |\n\n#### Information Retrieval\n* Dataset: `dim_512`\n* Evaluated with [InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| cosine_accuracy@1 | 0.69 |\n| cosine_accuracy@3 | 0.8271 |\n| cosine_accuracy@5 | 0.86 |\n| cosine_accuracy@10 | 0.9029 |\n| cosine_precision@1 | 0.69 |\n| cosine_precision@3 | 0.2757 |\n| cosine_precision@5 | 0.172 |\n| cosine_precision@10 | 0.0903 |\n| cosine_recall@1 | 0.69 |\n| cosine_recall@3 | 0.8271 |\n| cosine_recall@5 | 0.86 |\n| cosine_recall@10 | 0.9029 |\n| cosine_ndcg@10 | 0.7999 |\n| cosine_mrr@10 | 0.7666 |\n| **cosine_map@100** | **0.7707** |\n\n#### Information Retrieval\n* Dataset: `dim_256`\n* Evaluated with [InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| cosine_accuracy@1 | 0.6957 |\n| cosine_accuracy@3 | 0.8229 |\n| cosine_accuracy@5 | 0.86 |\n| cosine_accuracy@10 | 0.8914 |\n| cosine_precision@1 | 0.6957 |\n| cosine_precision@3 | 0.2743 |\n| cosine_precision@5 | 0.172 |\n| cosine_precision@10 | 0.0891 |\n| cosine_recall@1 | 0.6957 |\n| cosine_recall@3 | 0.8229 |\n| cosine_recall@5 | 0.86 |\n| cosine_recall@10 | 0.8914 |\n| cosine_ndcg@10 | 0.7975 |\n| cosine_mrr@10 | 0.767 |\n| **cosine_map@100** | **0.7718** |\n\n#### Information Retrieval\n* Dataset: `dim_128`\n* Evaluated with [InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:----------|\n| cosine_accuracy@1 | 0.6871 |\n| cosine_accuracy@3 | 0.8129 |\n| cosine_accuracy@5 | 0.8457 |\n| cosine_accuracy@10 | 0.8857 |\n| cosine_precision@1 | 0.6871 |\n| cosine_precision@3 | 0.271 |\n| cosine_precision@5 | 0.1691 |\n| cosine_precision@10 | 0.0886 |\n| cosine_recall@1 | 0.6871 |\n| cosine_recall@3 | 0.8129 |\n| cosine_recall@5 | 0.8457 |\n| cosine_recall@10 | 0.8857 |\n| cosine_ndcg@10 | 0.7877 |\n| cosine_mrr@10 | 0.7562 |\n| **cosine_map@100** | **0.761** |\n\n#### Information Retrieval\n* Dataset: `dim_64`\n* Evaluated with 
[InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:----------|\n| cosine_accuracy@1 | 0.6329 |\n| cosine_accuracy@3 | 0.7771 |\n| cosine_accuracy@5 | 0.8171 |\n| cosine_accuracy@10 | 0.8571 |\n| cosine_precision@1 | 0.6329 |\n| cosine_precision@3 | 0.259 |\n| cosine_precision@5 | 0.1634 |\n| cosine_precision@10 | 0.0857 |\n| cosine_recall@1 | 0.6329 |\n| cosine_recall@3 | 0.7771 |\n| cosine_recall@5 | 0.8171 |\n| cosine_recall@10 | 0.8571 |\n| cosine_ndcg@10 | 0.7483 |\n| cosine_mrr@10 | 0.7131 |\n| **cosine_map@100** | **0.719** |\n\n\n\n\n\n## Training Details\n\n### Training Dataset\n\n#### Unnamed Dataset\n\n\n* Size: 6,300 training samples\n* Columns: positive and anchor\n* Approximate statistics based on the first 1000 samples:\n | | positive | anchor |\n |:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|\n | type | string | string |\n | details |
<ul><li>min: 2 tokens</li><li>mean: 46.19 tokens</li><li>max: 371 tokens</li></ul> | <ul><li>min: 2 tokens</li><li>mean: 20.39 tokens</li><li>max: 46 tokens</li></ul>
|\n* Samples:\n | positive | anchor |\n |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------|\n | Cash used in financing activities in fiscal 2022 was primarily attributable to settlement of stock-based awards. | Why was there a net outflow of cash in financing activities in fiscal 2022? |\n | Certain vendors have been impacted by volatility in the supply chain financing market. | How have certain vendors been impacted in the supply chain financing market? |\n | In the consolidated financial statements for Visa, the net cash provided by operating activities amounted to 20,755 units in the most recent period, 18,849 units in the previous period, and 15,227 units in the period before that. | How much net cash did Visa's operating activities generate in the most recent period according to the financial statements? |\n* Loss: [MatryoshkaLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters:\n ```json\n {\n \"loss\": \"MultipleNegativesRankingLoss\",\n \"matryoshka_dims\": [\n 768,\n 512,\n 256,\n 128,\n 64\n ],\n \"matryoshka_weights\": [\n 1,\n 1,\n 1,\n 1,\n 1\n ],\n \"n_dims_per_step\": -1\n }\n ```\n\n### Training Hyperparameters\n#### Non-Default Hyperparameters\n\n- `eval_strategy`: epoch\n- `per_device_train_batch_size`: 32\n- `per_device_eval_batch_size`: 16\n- `gradient_accumulation_steps`: 16\n- `learning_rate`: 2e-05\n- `num_train_epochs`: 4\n- `lr_scheduler_type`: cosine\n- `warmup_ratio`: 0.1\n- `bf16`: True\n- `tf32`: True\n- `optim`: adamw_torch_fused\n- `batch_sampler`: no_duplicates\n\n#### All Hyperparameters\n
Click to expand\n\n- `overwrite_output_dir`: False\n- `do_predict`: False\n- `eval_strategy`: epoch\n- `prediction_loss_only`: True\n- `per_device_train_batch_size`: 32\n- `per_device_eval_batch_size`: 16\n- `per_gpu_train_batch_size`: None\n- `per_gpu_eval_batch_size`: None\n- `gradient_accumulation_steps`: 16\n- `eval_accumulation_steps`: None\n- `learning_rate`: 2e-05\n- `weight_decay`: 0.0\n- `adam_beta1`: 0.9\n- `adam_beta2`: 0.999\n- `adam_epsilon`: 1e-08\n- `max_grad_norm`: 1.0\n- `num_train_epochs`: 4\n- `max_steps`: -1\n- `lr_scheduler_type`: cosine\n- `lr_scheduler_kwargs`: {}\n- `warmup_ratio`: 0.1\n- `warmup_steps`: 0\n- `log_level`: passive\n- `log_level_replica`: warning\n- `log_on_each_node`: True\n- `logging_nan_inf_filter`: True\n- `save_safetensors`: True\n- `save_on_each_node`: False\n- `save_only_model`: False\n- `restore_callback_states_from_checkpoint`: False\n- `no_cuda`: False\n- `use_cpu`: False\n- `use_mps_device`: False\n- `seed`: 42\n- `data_seed`: None\n- `jit_mode_eval`: False\n- `use_ipex`: False\n- `bf16`: True\n- `fp16`: False\n- `fp16_opt_level`: O1\n- `half_precision_backend`: auto\n- `bf16_full_eval`: False\n- `fp16_full_eval`: False\n- `tf32`: True\n- `local_rank`: 0\n- `ddp_backend`: None\n- `tpu_num_cores`: None\n- `tpu_metrics_debug`: False\n- `debug`: []\n- `dataloader_drop_last`: False\n- `dataloader_num_workers`: 0\n- `dataloader_prefetch_factor`: None\n- `past_index`: -1\n- `disable_tqdm`: False\n- `remove_unused_columns`: True\n- `label_names`: None\n- `load_best_model_at_end`: False\n- `ignore_data_skip`: False\n- `fsdp`: []\n- `fsdp_min_num_params`: 0\n- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}\n- `fsdp_transformer_layer_cls_to_wrap`: None\n- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}\n- `deepspeed`: None\n- `label_smoothing_factor`: 0.0\n- `optim`: adamw_torch_fused\n- `optim_args`: None\n- `adafactor`: False\n- `group_by_length`: False\n- `length_column_name`: length\n- `ddp_find_unused_parameters`: None\n- `ddp_bucket_cap_mb`: None\n- `ddp_broadcast_buffers`: False\n- `dataloader_pin_memory`: True\n- `dataloader_persistent_workers`: False\n- `skip_memory_metrics`: True\n- `use_legacy_prediction_loop`: False\n- `push_to_hub`: False\n- `resume_from_checkpoint`: None\n- `hub_model_id`: None\n- `hub_strategy`: every_save\n- `hub_private_repo`: False\n- `hub_always_push`: False\n- `gradient_checkpointing`: False\n- `gradient_checkpointing_kwargs`: None\n- `include_inputs_for_metrics`: False\n- `eval_do_concat_batches`: True\n- `fp16_backend`: auto\n- `push_to_hub_model_id`: None\n- `push_to_hub_organization`: None\n- `mp_parameters`: \n- `auto_find_batch_size`: False\n- `full_determinism`: False\n- `torchdynamo`: None\n- `ray_scope`: last\n- `ddp_timeout`: 1800\n- `torch_compile`: False\n- `torch_compile_backend`: None\n- `torch_compile_mode`: None\n- `dispatch_batches`: None\n- `split_batches`: None\n- `include_tokens_per_second`: False\n- `include_num_input_tokens_seen`: False\n- `neftune_noise_alpha`: None\n- `optim_target_modules`: None\n- `batch_eval_metrics`: False\n- `batch_sampler`: no_duplicates\n- `multi_dataset_batch_sampler`: proportional\n\n
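For readers who want to reproduce this configuration, the non-default hyperparameters above map onto the Sentence Transformers 3.x training API roughly as follows. This is a minimal sketch, not the author's actual script: the `output_dir` path and the one-row toy dataset are illustrative stand-ins, and the real run used the 6,300-pair (positive, anchor) financial dataset described above.

```python
from datasets import Dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.losses import MatryoshkaLoss, MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers, SentenceTransformerTrainingArguments

model = SentenceTransformer("BAAI/bge-base-en-v1.5")

# Toy stand-in for the 6,300 (positive, anchor) training pairs described above.
train_dataset = Dataset.from_dict({
    "positive": ["For fiscal 2023 we reported net sales of $17.5 billion, a 7.4% increase."],
    "anchor": ["What drove the increase in net sales for fiscal 2023?"],
})

# MultipleNegativesRankingLoss wrapped in MatryoshkaLoss, matching the loss
# configuration listed above (five nested dimensions, equal weights).
base_loss = MultipleNegativesRankingLoss(model)
loss = MatryoshkaLoss(model, base_loss, matryoshka_dims=[768, 512, 256, 128, 64])

args = SentenceTransformerTrainingArguments(
    output_dir="bge-base-financial-matryoshka",  # illustrative path
    num_train_epochs=4,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=16,
    gradient_accumulation_steps=16,
    learning_rate=2e-5,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    bf16=True,  # requires an Ampere-or-newer GPU
    tf32=True,
    optim="adamw_torch_fused",
    eval_strategy="epoch",
    batch_sampler=BatchSamplers.NO_DUPLICATES,
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=train_dataset,  # illustrative; the original run used a held-out split
    loss=loss,
)
trainer.train()
```

Note the `no_duplicates` batch sampler: with `MultipleNegativesRankingLoss`, the other in-batch examples serve as negatives, so duplicate anchors in a batch would create false negatives.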
\n\n### Training Logs\n| Epoch | Step | Training Loss | dim_128_cosine_map@100 | dim_256_cosine_map@100 | dim_512_cosine_map@100 | dim_64_cosine_map@100 | dim_768_cosine_map@100 |\n|:------:|:----:|:-------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:|:----------------------:|\n| 0.8122 | 10 | 1.5643 | - | - | - | - | - |\n| 0.9746 | 12 | - | 0.7349 | 0.7494 | 0.7524 | 0.6987 | 0.7569 |\n| 1.6244 | 20 | 0.6756 | - | - | - | - | - |\n| 1.9492 | 24 | - | 0.7555 | 0.7659 | 0.7683 | 0.7190 | 0.7700 |\n| 2.4365 | 30 | 0.4561 | - | - | - | - | - |\n| 2.9239 | 36 | - | 0.7592 | 0.7698 | 0.7698 | 0.7184 | 0.7741 |\n| 3.2487 | 40 | 0.3645 | - | - | - | - | - |\n| 3.8985 | 48 | - | 0.7610 | 0.7718 | 0.7707 | 0.7190 | 0.7759 |\n\n\n### Framework Versions\n- Python: 3.10.12\n- Sentence Transformers: 3.0.1\n- Transformers: 4.41.2\n- PyTorch: 2.2.0+cu121\n- Accelerate: 0.31.0\n- Datasets: 2.19.1\n- Tokenizers: 0.19.1\n\n## Citation\n\n### BibTeX\n\n#### Sentence Transformers\n```bibtex\n@inproceedings{reimers-2019-sentence-bert,\n title = \"Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\",\n author = \"Reimers, Nils and Gurevych, Iryna\",\n booktitle = \"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing\",\n month = \"11\",\n year = \"2019\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://arxiv.org/abs/1908.10084\",\n}\n```\n\n#### MatryoshkaLoss\n```bibtex\n@misc{kusupati2024matryoshka,\n title={Matryoshka Representation Learning}, \n author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi},\n year={2024},\n eprint={2205.13147},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n```\n\n#### MultipleNegativesRankingLoss\n```bibtex\n@misc{henderson2017efficient,\n title={Efficient Natural Language Response Suggestion for Smart Reply}, \n author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},\n year={2017},\n eprint={1705.00652},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n```\n\n\n\n\n\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# BGE base Financial Matryoshka\n\nThis is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5). 
It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.\n\n## Model Details\n\n### Model Description\n- **Model Type:** Sentence Transformer\n- **Base model:** [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) \n- **Maximum Sequence Length:** 512 tokens\n- **Output Dimensionality:** 768 tokens\n- **Similarity Function:** Cosine Similarity\n\n- **Language:** en\n- **License:** apache-2.0\n\n### Model Sources\n\n- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)\n- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)\n- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)\n\n### Full Model Architecture\n\n```\nSentenceTransformer(\n (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel \n (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})\n (2): Normalize()\n)\n```\n\n## Usage\n\n### Direct Usage (Sentence Transformers)\n\nFirst install the Sentence Transformers library:\n\n```bash\npip install -U sentence-transformers\n```\n\nThen you can load this model and run inference.\n```python\nfrom sentence_transformers import SentenceTransformer\n\n# Download from the 🤗 Hub\nmodel = SentenceTransformer(\"NickyNicky/bge-base-financial-matryoshka\")\n# Run inference\nsentences = [\n 'For the fiscal year ended August 26, 2023, we reported net sales of $17.5 billion compared with $16.3 billion for the year ended August 27, 2022, a 7.4% increase from fiscal 2022. This growth was driven primarily by a domestic same store sales increase of 3.4% and net sales of $327.8 million from new domestic and international stores.',\n \"What drove the 7.4% increase in AutoZone's net sales for fiscal 2023 compared to fiscal 2022?\",\n \"What percentage of HP's external U.S. 
hires in fiscal year 2023 were racially or ethnically diverse?\",\n]\nembeddings = model.encode(sentences)\nprint(embeddings.shape)\n# [3, 768]\n\n# Get the similarity scores for the embeddings\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\n\n\n\n\n\n\n## Evaluation\n\n### Metrics\n\n#### Information Retrieval\n* Dataset: `dim_768`\n* Evaluated with [InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| cosine_accuracy@1 | 0.6986 |\n| cosine_accuracy@3 | 0.8271 |\n| cosine_accuracy@5 | 0.8629 |\n| cosine_accuracy@10 | 0.8986 |\n| cosine_precision@1 | 0.6986 |\n| cosine_precision@3 | 0.2757 |\n| cosine_precision@5 | 0.1726 |\n| cosine_precision@10 | 0.0899 |\n| cosine_recall@1 | 0.6986 |\n| cosine_recall@3 | 0.8271 |\n| cosine_recall@5 | 0.8629 |\n| cosine_recall@10 | 0.8986 |\n| cosine_ndcg@10 | 0.8024 |\n| cosine_mrr@10 | 0.7713 |\n| **cosine_map@100** | **0.7759** |\n\n#### Information Retrieval\n* Dataset: `dim_512`\n* Evaluated with [InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| cosine_accuracy@1 | 0.69 |\n| cosine_accuracy@3 | 0.8271 |\n| cosine_accuracy@5 | 0.86 |\n| cosine_accuracy@10 | 0.9029 |\n| cosine_precision@1 | 0.69 |\n| cosine_precision@3 | 0.2757 |\n| cosine_precision@5 | 0.172 |\n| cosine_precision@10 | 0.0903 |\n| cosine_recall@1 | 0.69 |\n| cosine_recall@3 | 0.8271 |\n| cosine_recall@5 | 0.86 |\n| cosine_recall@10 | 0.9029 |\n| cosine_ndcg@10 | 0.7999 |\n| cosine_mrr@10 | 0.7666 |\n| **cosine_map@100** | **0.7707** |\n\n#### Information Retrieval\n* Dataset: `dim_256`\n* Evaluated with [InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| cosine_accuracy@1 | 0.6957 |\n| cosine_accuracy@3 | 0.8229 |\n| cosine_accuracy@5 | 0.86 |\n| cosine_accuracy@10 | 0.8914 |\n| cosine_precision@1 | 0.6957 |\n| cosine_precision@3 | 0.2743 |\n| cosine_precision@5 | 0.172 |\n| cosine_precision@10 | 0.0891 |\n| cosine_recall@1 | 0.6957 |\n| cosine_recall@3 | 0.8229 |\n| cosine_recall@5 | 0.86 |\n| cosine_recall@10 | 0.8914 |\n| cosine_ndcg@10 | 0.7975 |\n| cosine_mrr@10 | 0.767 |\n| **cosine_map@100** | **0.7718** |\n\n#### Information Retrieval\n* Dataset: `dim_128`\n* Evaluated with [InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:----------|\n| cosine_accuracy@1 | 0.6871 |\n| cosine_accuracy@3 | 0.8129 |\n| cosine_accuracy@5 | 0.8457 |\n| cosine_accuracy@10 | 0.8857 |\n| cosine_precision@1 | 0.6871 |\n| cosine_precision@3 | 0.271 |\n| cosine_precision@5 | 0.1691 |\n| cosine_precision@10 | 0.0886 |\n| cosine_recall@1 | 0.6871 |\n| cosine_recall@3 | 0.8129 |\n| cosine_recall@5 | 0.8457 |\n| cosine_recall@10 | 0.8857 |\n| cosine_ndcg@10 | 0.7877 |\n| cosine_mrr@10 | 0.7562 |\n| **cosine_map@100** | **0.761** |\n\n#### Information Retrieval\n* Dataset: `dim_64`\n* Evaluated with 
[InformationRetrievalEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator)\n\n| Metric | Value |\n|:--------------------|:----------|\n| cosine_accuracy@1 | 0.6329 |\n| cosine_accuracy@3 | 0.7771 |\n| cosine_accuracy@5 | 0.8171 |\n| cosine_accuracy@10 | 0.8571 |\n| cosine_precision@1 | 0.6329 |\n| cosine_precision@3 | 0.259 |\n| cosine_precision@5 | 0.1634 |\n| cosine_precision@10 | 0.0857 |\n| cosine_recall@1 | 0.6329 |\n| cosine_recall@3 | 0.7771 |\n| cosine_recall@5 | 0.8171 |\n| cosine_recall@10 | 0.8571 |\n| cosine_ndcg@10 | 0.7483 |\n| cosine_mrr@10 | 0.7131 |\n| **cosine_map@100** | **0.719** |\n\n\n\n\n\n## Training Details\n\n### Training Dataset\n\n#### Unnamed Dataset\n\n\n* Size: 6,300 training samples\n* Columns: positive and anchor\n* Approximate statistics based on the first 1000 samples:\n | | positive | anchor |\n |:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|\n | type | string | string |\n | details |
<ul><li>min: 2 tokens</li><li>mean: 46.19 tokens</li><li>max: 371 tokens</li></ul> | <ul><li>min: 2 tokens</li><li>mean: 20.39 tokens</li><li>max: 46 tokens</li></ul>
|\n* Samples:\n | positive | anchor |\n |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------|\n | Cash used in financing activities in fiscal 2022 was primarily attributable to settlement of stock-based awards. | Why was there a net outflow of cash in financing activities in fiscal 2022? |\n | Certain vendors have been impacted by volatility in the supply chain financing market. | How have certain vendors been impacted in the supply chain financing market? |\n | In the consolidated financial statements for Visa, the net cash provided by operating activities amounted to 20,755 units in the most recent period, 18,849 units in the previous period, and 15,227 units in the period before that. | How much net cash did Visa's operating activities generate in the most recent period according to the financial statements? |\n* Loss: [MatryoshkaLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters:\n ```json\n {\n \"loss\": \"MultipleNegativesRankingLoss\",\n \"matryoshka_dims\": [\n 768,\n 512,\n 256,\n 128,\n 64\n ],\n \"matryoshka_weights\": [\n 1,\n 1,\n 1,\n 1,\n 1\n ],\n \"n_dims_per_step\": -1\n }\n ```\n\n### Training Hyperparameters\n#### Non-Default Hyperparameters\n\n- `eval_strategy`: epoch\n- `per_device_train_batch_size`: 32\n- `per_device_eval_batch_size`: 16\n- `gradient_accumulation_steps`: 16\n- `learning_rate`: 2e-05\n- `num_train_epochs`: 4\n- `lr_scheduler_type`: cosine\n- `warmup_ratio`: 0.1\n- `bf16`: True\n- `tf32`: True\n- `optim`: adamw_torch_fused\n- `batch_sampler`: no_duplicates\n\n#### All Hyperparameters\n
Click to expand\n\n- `overwrite_output_dir`: False\n- `do_predict`: False\n- `eval_strategy`: epoch\n- `prediction_loss_only`: True\n- `per_device_train_batch_size`: 32\n- `per_device_eval_batch_size`: 16\n- `per_gpu_train_batch_size`: None\n- `per_gpu_eval_batch_size`: None\n- `gradient_accumulation_steps`: 16\n- `eval_accumulation_steps`: None\n- `learning_rate`: 2e-05\n- `weight_decay`: 0.0\n- `adam_beta1`: 0.9\n- `adam_beta2`: 0.999\n- `adam_epsilon`: 1e-08\n- `max_grad_norm`: 1.0\n- `num_train_epochs`: 4\n- `max_steps`: -1\n- `lr_scheduler_type`: cosine\n- `lr_scheduler_kwargs`: {}\n- `warmup_ratio`: 0.1\n- `warmup_steps`: 0\n- `log_level`: passive\n- `log_level_replica`: warning\n- `log_on_each_node`: True\n- `logging_nan_inf_filter`: True\n- `save_safetensors`: True\n- `save_on_each_node`: False\n- `save_only_model`: False\n- `restore_callback_states_from_checkpoint`: False\n- `no_cuda`: False\n- `use_cpu`: False\n- `use_mps_device`: False\n- `seed`: 42\n- `data_seed`: None\n- `jit_mode_eval`: False\n- `use_ipex`: False\n- `bf16`: True\n- `fp16`: False\n- `fp16_opt_level`: O1\n- `half_precision_backend`: auto\n- `bf16_full_eval`: False\n- `fp16_full_eval`: False\n- `tf32`: True\n- `local_rank`: 0\n- `ddp_backend`: None\n- `tpu_num_cores`: None\n- `tpu_metrics_debug`: False\n- `debug`: []\n- `dataloader_drop_last`: False\n- `dataloader_num_workers`: 0\n- `dataloader_prefetch_factor`: None\n- `past_index`: -1\n- `disable_tqdm`: False\n- `remove_unused_columns`: True\n- `label_names`: None\n- `load_best_model_at_end`: False\n- `ignore_data_skip`: False\n- `fsdp`: []\n- `fsdp_min_num_params`: 0\n- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}\n- `fsdp_transformer_layer_cls_to_wrap`: None\n- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}\n- `deepspeed`: None\n- `label_smoothing_factor`: 0.0\n- `optim`: adamw_torch_fused\n- `optim_args`: None\n- `adafactor`: False\n- `group_by_length`: False\n- `length_column_name`: length\n- `ddp_find_unused_parameters`: None\n- `ddp_bucket_cap_mb`: None\n- `ddp_broadcast_buffers`: False\n- `dataloader_pin_memory`: True\n- `dataloader_persistent_workers`: False\n- `skip_memory_metrics`: True\n- `use_legacy_prediction_loop`: False\n- `push_to_hub`: False\n- `resume_from_checkpoint`: None\n- `hub_model_id`: None\n- `hub_strategy`: every_save\n- `hub_private_repo`: False\n- `hub_always_push`: False\n- `gradient_checkpointing`: False\n- `gradient_checkpointing_kwargs`: None\n- `include_inputs_for_metrics`: False\n- `eval_do_concat_batches`: True\n- `fp16_backend`: auto\n- `push_to_hub_model_id`: None\n- `push_to_hub_organization`: None\n- `mp_parameters`: \n- `auto_find_batch_size`: False\n- `full_determinism`: False\n- `torchdynamo`: None\n- `ray_scope`: last\n- `ddp_timeout`: 1800\n- `torch_compile`: False\n- `torch_compile_backend`: None\n- `torch_compile_mode`: None\n- `dispatch_batches`: None\n- `split_batches`: None\n- `include_tokens_per_second`: False\n- `include_num_input_tokens_seen`: False\n- `neftune_noise_alpha`: None\n- `optim_target_modules`: None\n- `batch_eval_metrics`: False\n- `batch_sampler`: no_duplicates\n- `multi_dataset_batch_sampler`: proportional\n\n
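A practical upshot of the Matryoshka dimensions listed above: at inference time the embeddings can be truncated to any of the trained sizes, trading a little accuracy (see the evaluation tables) for much cheaper storage and search. A minimal sketch, assuming the checkpoint name published in this card; `truncate_dim` is the standard Sentence Transformers argument for this.

```python
from sentence_transformers import SentenceTransformer

# Keep only the first 256 embedding dimensions, one of the matryoshka_dims
# the model was trained with.
model = SentenceTransformer("NickyNicky/bge-base-financial-matryoshka", truncate_dim=256)

embeddings = model.encode([
    "What drove the 7.4% increase in net sales for fiscal 2023?",
    "Net sales rose to $17.5 billion in fiscal 2023, a 7.4% increase.",
])
print(embeddings.shape)  # (2, 256)

similarities = model.similarity(embeddings, embeddings)
print(similarities)
```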
\n\n### Training Logs\n| Epoch | Step | Training Loss | dim_128_cosine_map@100 | dim_256_cosine_map@100 | dim_512_cosine_map@100 | dim_64_cosine_map@100 | dim_768_cosine_map@100 |\n|:------:|:----:|:-------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:|:----------------------:|\n| 0.8122 | 10 | 1.5643 | - | - | - | - | - |\n| 0.9746 | 12 | - | 0.7349 | 0.7494 | 0.7524 | 0.6987 | 0.7569 |\n| 1.6244 | 20 | 0.6756 | - | - | - | - | - |\n| 1.9492 | 24 | - | 0.7555 | 0.7659 | 0.7683 | 0.7190 | 0.7700 |\n| 2.4365 | 30 | 0.4561 | - | - | - | - | - |\n| 2.9239 | 36 | - | 0.7592 | 0.7698 | 0.7698 | 0.7184 | 0.7741 |\n| 3.2487 | 40 | 0.3645 | - | - | - | - | - |\n| 3.8985 | 48 | - | 0.7610 | 0.7718 | 0.7707 | 0.7190 | 0.7759 |\n\n\n### Framework Versions\n- Python: 3.10.12\n- Sentence Transformers: 3.0.1\n- Transformers: 4.41.2\n- PyTorch: 2.2.0+cu121\n- Accelerate: 0.31.0\n- Datasets: 2.19.1\n- Tokenizers: 0.19.1\n\n## Citation\n\n### BibTeX\n\n#### Sentence Transformers\n```bibtex\n@inproceedings{reimers-2019-sentence-bert,\n title = \"Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\",\n author = \"Reimers, Nils and Gurevych, Iryna\",\n booktitle = \"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing\",\n month = \"11\",\n year = \"2019\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://arxiv.org/abs/1908.10084\",\n}\n```\n\n#### MatryoshkaLoss\n```bibtex\n@misc{kusupati2024matryoshka,\n title={Matryoshka Representation Learning}, \n author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi},\n year={2024},\n eprint={2205.13147},\n archivePrefix={arXiv},\n primaryClass={cs.LG}\n}\n```\n\n#### MultipleNegativesRankingLoss\n```bibtex\n@misc{henderson2017efficient,\n title={Efficient Natural Language Response Suggestion for Smart Reply}, \n author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},\n year={2017},\n eprint={1705.00652},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n```\n\n\n\n\n\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"BAAI/bge-base-en-v1.5\", \"datasets\": [], \"language\": [\"en\"], \"library_name\": \"sentence-transformers\", \"license\": \"apache-2.0\", \"metrics\": [\"cosine_accuracy@1\", \"cosine_accuracy@3\", \"cosine_accuracy@5\", \"cosine_accuracy@10\", \"cosine_precision@1\", \"cosine_precision@3\", \"cosine_precision@5\", \"cosine_precision@10\", \"cosine_recall@1\", \"cosine_recall@3\", \"cosine_recall@5\", \"cosine_recall@10\", \"cosine_ndcg@10\", \"cosine_mrr@10\", \"cosine_map@100\"], \"pipeline_tag\": \"sentence-similarity\", \"tags\": [\"sentence-transformers\", \"sentence-similarity\", \"feature-extraction\", \"generated_from_trainer\", \"dataset_size:6300\", \"loss:MatryoshkaLoss\", \"loss:MultipleNegativesRankingLoss\"], \"widget\": [{\"source_sentence\": \"Item 8 in IBM's 2023 Annual Report to Stockholders details the Financial Statements and Supplementary Data, which are included on pages 44 through 121.\", \"sentences\": [\"What was the amount gained from the disposal of assets in 2022?\", \"What section of IBM's Annual Report for 2023 contains the Financial Statements and Supplementary Data?\", \"What were the cash outflows for capital 
expenditures in 2023 and 2022 respectively?\"]}, {\"source_sentence\": \"For the fiscal year ended March 31, 2023, Electronic Arts reported a gross margin of 75.9 percent, an increase of 2.5 percentage points from the previous year.\", \"sentences\": [\"How did investment banking revenues at Goldman Sachs change in 2023 compared to 2022, and what factors contributed to this change?\", \"What was the gross margin percentage for Electronic Arts in the fiscal year ending March 31, 2023?\", \"What were the risk-free interest rates for the fiscal years 2021, 2022, and 2023?\"]}, {\"source_sentence\": \"Cash, cash equivalents, and restricted cash at the beginning of the period totaled $7,013 for a company.\", \"sentences\": [\"What was the amount of cash, cash equivalents, and restricted cash at the beginning of the period for the company?\", \"What is the impact of the new $1.25 price point on Dollar Tree’s sales units and profitability?\", \"What was the total amount attributed to Goodwill in the acquisition of Nuance Communications, Inc. as reported by the company?\"]}, {\"source_sentence\": \"generate our mall revenue primarily from leases with tenants through base minimum rents, overage rents and reimbursements for common area maintenance (CAM) and other expenditures.\", \"sentences\": [\"How does Visa facilitate financial inclusion with their prepaid cards?\", \"What are the main objectives of the economic sanctions imposed by the United States and other international bodies?\", \"What revenue sources does Shoppes at Venetian primarily rely on from its tenants?\"]}, {\"source_sentence\": \"For the fiscal year ended August 26, 2023, we reported net sales of $17.5 billion compared with $16.3 billion for the year ended August 27, 2022, a 7.4% increase from fiscal 2022. This growth was driven primarily by a domestic same store sales increase of 3.4% and net sales of $327.8 million from new domestic and international stores.\", \"sentences\": [\"What drove the 7.4% increase in AutoZone's net sales for fiscal 2023 compared to fiscal 2022?\", \"What percentage of HP's external U.S. 
hires in fiscal year 2023 were racially or ethnically diverse?\", \"How much did GameStop Corp's valuation allowances increase during fiscal 2022?\"]}], \"model-index\": [{\"name\": \"BGE base Financial Matryoshka\", \"results\": [{\"task\": {\"type\": \"information-retrieval\", \"name\": \"Information Retrieval\"}, \"dataset\": {\"name\": \"dim 768\", \"type\": \"dim_768\"}, \"metrics\": [{\"type\": \"cosine_accuracy@1\", \"value\": 0.6985714285714286, \"name\": \"Cosine Accuracy@1\"}, {\"type\": \"cosine_accuracy@3\", \"value\": 0.8271428571428572, \"name\": \"Cosine Accuracy@3\"}, {\"type\": \"cosine_accuracy@5\", \"value\": 0.8628571428571429, \"name\": \"Cosine Accuracy@5\"}, {\"type\": \"cosine_accuracy@10\", \"value\": 0.8985714285714286, \"name\": \"Cosine Accuracy@10\"}, {\"type\": \"cosine_precision@1\", \"value\": 0.6985714285714286, \"name\": \"Cosine Precision@1\"}, {\"type\": \"cosine_precision@3\", \"value\": 0.2757142857142857, \"name\": \"Cosine Precision@3\"}, {\"type\": \"cosine_precision@5\", \"value\": 0.17257142857142854, \"name\": \"Cosine Precision@5\"}, {\"type\": \"cosine_precision@10\", \"value\": 0.08985714285714284, \"name\": \"Cosine Precision@10\"}, {\"type\": \"cosine_recall@1\", \"value\": 0.6985714285714286, \"name\": \"Cosine Recall@1\"}, {\"type\": \"cosine_recall@3\", \"value\": 0.8271428571428572, \"name\": \"Cosine Recall@3\"}, {\"type\": \"cosine_recall@5\", \"value\": 0.8628571428571429, \"name\": \"Cosine Recall@5\"}, {\"type\": \"cosine_recall@10\", \"value\": 0.8985714285714286, \"name\": \"Cosine Recall@10\"}, {\"type\": \"cosine_ndcg@10\", \"value\": 0.8023663256793517, \"name\": \"Cosine Ndcg@10\"}, {\"type\": \"cosine_mrr@10\", \"value\": 0.7712675736961451, \"name\": \"Cosine Mrr@10\"}, {\"type\": \"cosine_map@100\", \"value\": 0.7758522351159084, \"name\": \"Cosine Map@100\"}]}, {\"task\": {\"type\": \"information-retrieval\", \"name\": \"Information Retrieval\"}, \"dataset\": {\"name\": \"dim 512\", \"type\": \"dim_512\"}, \"metrics\": [{\"type\": \"cosine_accuracy@1\", \"value\": 0.69, \"name\": \"Cosine Accuracy@1\"}, {\"type\": \"cosine_accuracy@3\", \"value\": 0.8271428571428572, \"name\": \"Cosine Accuracy@3\"}, {\"type\": \"cosine_accuracy@5\", \"value\": 0.86, \"name\": \"Cosine Accuracy@5\"}, {\"type\": \"cosine_accuracy@10\", \"value\": 0.9028571428571428, \"name\": \"Cosine Accuracy@10\"}, {\"type\": \"cosine_precision@1\", \"value\": 0.69, \"name\": \"Cosine Precision@1\"}, {\"type\": \"cosine_precision@3\", \"value\": 0.2757142857142857, \"name\": \"Cosine Precision@3\"}, {\"type\": \"cosine_precision@5\", \"value\": 0.17199999999999996, \"name\": \"Cosine Precision@5\"}, {\"type\": \"cosine_precision@10\", \"value\": 0.09028571428571427, \"name\": \"Cosine Precision@10\"}, {\"type\": \"cosine_recall@1\", \"value\": 0.69, \"name\": \"Cosine Recall@1\"}, {\"type\": \"cosine_recall@3\", \"value\": 0.8271428571428572, \"name\": \"Cosine Recall@3\"}, {\"type\": \"cosine_recall@5\", \"value\": 0.86, \"name\": \"Cosine Recall@5\"}, {\"type\": \"cosine_recall@10\", \"value\": 0.9028571428571428, \"name\": \"Cosine Recall@10\"}, {\"type\": \"cosine_ndcg@10\", \"value\": 0.7998655910794988, \"name\": \"Cosine Ndcg@10\"}, {\"type\": \"cosine_mrr@10\", \"value\": 0.7665912698412698, \"name\": \"Cosine Mrr@10\"}, {\"type\": \"cosine_map@100\", \"value\": 0.7706925401671437, \"name\": \"Cosine Map@100\"}]}, {\"task\": {\"type\": \"information-retrieval\", \"name\": \"Information Retrieval\"}, \"dataset\": {\"name\": \"dim 256\", \"type\": 
\"dim_256\"}, \"metrics\": [{\"type\": \"cosine_accuracy@1\", \"value\": 0.6957142857142857, \"name\": \"Cosine Accuracy@1\"}, {\"type\": \"cosine_accuracy@3\", \"value\": 0.8228571428571428, \"name\": \"Cosine Accuracy@3\"}, {\"type\": \"cosine_accuracy@5\", \"value\": 0.86, \"name\": \"Cosine Accuracy@5\"}, {\"type\": \"cosine_accuracy@10\", \"value\": 0.8914285714285715, \"name\": \"Cosine Accuracy@10\"}, {\"type\": \"cosine_precision@1\", \"value\": 0.6957142857142857, \"name\": \"Cosine Precision@1\"}, {\"type\": \"cosine_precision@3\", \"value\": 0.2742857142857143, \"name\": \"Cosine Precision@3\"}, {\"type\": \"cosine_precision@5\", \"value\": 0.17199999999999996, \"name\": \"Cosine Precision@5\"}, {\"type\": \"cosine_precision@10\", \"value\": 0.08914285714285713, \"name\": \"Cosine Precision@10\"}, {\"type\": \"cosine_recall@1\", \"value\": 0.6957142857142857, \"name\": \"Cosine Recall@1\"}, {\"type\": \"cosine_recall@3\", \"value\": 0.8228571428571428, \"name\": \"Cosine Recall@3\"}, {\"type\": \"cosine_recall@5\", \"value\": 0.86, \"name\": \"Cosine Recall@5\"}, {\"type\": \"cosine_recall@10\", \"value\": 0.8914285714285715, \"name\": \"Cosine Recall@10\"}, {\"type\": \"cosine_ndcg@10\", \"value\": 0.7974564108711016, \"name\": \"Cosine Ndcg@10\"}, {\"type\": \"cosine_mrr@10\", \"value\": 0.7669535147392289, \"name\": \"Cosine Mrr@10\"}, {\"type\": \"cosine_map@100\", \"value\": 0.7718155211819018, \"name\": \"Cosine Map@100\"}]}, {\"task\": {\"type\": \"information-retrieval\", \"name\": \"Information Retrieval\"}, \"dataset\": {\"name\": \"dim 128\", \"type\": \"dim_128\"}, \"metrics\": [{\"type\": \"cosine_accuracy@1\", \"value\": 0.6871428571428572, \"name\": \"Cosine Accuracy@1\"}, {\"type\": \"cosine_accuracy@3\", \"value\": 0.8128571428571428, \"name\": \"Cosine Accuracy@3\"}, {\"type\": \"cosine_accuracy@5\", \"value\": 0.8457142857142858, \"name\": \"Cosine Accuracy@5\"}, {\"type\": \"cosine_accuracy@10\", \"value\": 0.8857142857142857, \"name\": \"Cosine Accuracy@10\"}, {\"type\": \"cosine_precision@1\", \"value\": 0.6871428571428572, \"name\": \"Cosine Precision@1\"}, {\"type\": \"cosine_precision@3\", \"value\": 0.27095238095238094, \"name\": \"Cosine Precision@3\"}, {\"type\": \"cosine_precision@5\", \"value\": 0.16914285714285712, \"name\": \"Cosine Precision@5\"}, {\"type\": \"cosine_precision@10\", \"value\": 0.08857142857142856, \"name\": \"Cosine Precision@10\"}, {\"type\": \"cosine_recall@1\", \"value\": 0.6871428571428572, \"name\": \"Cosine Recall@1\"}, {\"type\": \"cosine_recall@3\", \"value\": 0.8128571428571428, \"name\": \"Cosine Recall@3\"}, {\"type\": \"cosine_recall@5\", \"value\": 0.8457142857142858, \"name\": \"Cosine Recall@5\"}, {\"type\": \"cosine_recall@10\", \"value\": 0.8857142857142857, \"name\": \"Cosine Recall@10\"}, {\"type\": \"cosine_ndcg@10\", \"value\": 0.787697533881839, \"name\": \"Cosine Ndcg@10\"}, {\"type\": \"cosine_mrr@10\", \"value\": 0.756192743764172, \"name\": \"Cosine Mrr@10\"}, {\"type\": \"cosine_map@100\", \"value\": 0.7610331995977764, \"name\": \"Cosine Map@100\"}]}, {\"task\": {\"type\": \"information-retrieval\", \"name\": \"Information Retrieval\"}, \"dataset\": {\"name\": \"dim 64\", \"type\": \"dim_64\"}, \"metrics\": [{\"type\": \"cosine_accuracy@1\", \"value\": 0.6328571428571429, \"name\": \"Cosine Accuracy@1\"}, {\"type\": \"cosine_accuracy@3\", \"value\": 0.7771428571428571, \"name\": \"Cosine Accuracy@3\"}, {\"type\": \"cosine_accuracy@5\", \"value\": 0.8171428571428572, \"name\": \"Cosine Accuracy@5\"}, 
{\"type\": \"cosine_accuracy@10\", \"value\": 0.8571428571428571, \"name\": \"Cosine Accuracy@10\"}, {\"type\": \"cosine_precision@1\", \"value\": 0.6328571428571429, \"name\": \"Cosine Precision@1\"}, {\"type\": \"cosine_precision@3\", \"value\": 0.259047619047619, \"name\": \"Cosine Precision@3\"}, {\"type\": \"cosine_precision@5\", \"value\": 0.16342857142857142, \"name\": \"Cosine Precision@5\"}, {\"type\": \"cosine_precision@10\", \"value\": 0.08571428571428569, \"name\": \"Cosine Precision@10\"}, {\"type\": \"cosine_recall@1\", \"value\": 0.6328571428571429, \"name\": \"Cosine Recall@1\"}, {\"type\": \"cosine_recall@3\", \"value\": 0.7771428571428571, \"name\": \"Cosine Recall@3\"}, {\"type\": \"cosine_recall@5\", \"value\": 0.8171428571428572, \"name\": \"Cosine Recall@5\"}, {\"type\": \"cosine_recall@10\", \"value\": 0.8571428571428571, \"name\": \"Cosine Recall@10\"}, {\"type\": \"cosine_ndcg@10\", \"value\": 0.7482728321357093, \"name\": \"Cosine Ndcg@10\"}, {\"type\": \"cosine_mrr@10\", \"value\": 0.7131224489795914, \"name\": \"Cosine Mrr@10\"}, {\"type\": \"cosine_map@100\", \"value\": 0.7189753431460272, \"name\": \"Cosine Map@100\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45540,"string":"45,540"}}},{"rowIdx":43764,"cells":{"id":{"kind":"string","value":"gaudi/opus-mt-fi-pis-ctranslate2"},"author":{"kind":"string","value":"gaudi"},"task_category":{"kind":"string","value":"translation"},"tags":{"kind":"list like","value":["transformers","marian","ctranslate2","translation","license:apache-2.0","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"marian\",\n \"ctranslate2\",\n \"translation\",\n \"license:apache-2.0\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-22T15:53:42Z","string":"2024-07-22T15:53:42Z"},"last_modified":{"kind":"string","value":"2024-10-19T03:51:27+00:00"},"downloads":{"kind":"number","value":6,"string":"6"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: apache-2.0\ntags:\n- ctranslate2\n- translation\n---\n# Repository General Information\n## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)!\n- Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-fi-pis)\n- This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2).\n- This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil).\n\n# What is CTranslate2?\n[CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models.\n\nCTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU.\n\nCTranslate2 is one of the most performant ways of hosting translation models at scale. 
Current supported models include:\n- Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper\n- Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon\n- Encoder-only models: BERT, DistilBERT, XLM-RoBERTa\n\nThe project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration.\n\n# CTranslate2 Benchmarks\nPlease note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset.\n\nThe benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and to reproduce these numbers.\n\n## CPU Benchmarks for Generic Opus-MT Models\n| Library | Tokens per Second | Max Memory Usage | BLEU |\n| :----: | :----: | :----: | :----: |\n| Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 |\n| Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 |\n| Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 |\n| CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 |\n| CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 |\n\n## GPU Benchmarks for Generic Opus-MT Models\n| Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU |\n| :----: | :----: | :----: | :----: | :----: |\n| Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 |\n| Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 |\n| CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 |\n| CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 |\n\n`Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.`\n\n**Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**
\n**Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-fi-pis).**\n\n## Internal Benchmarks\nInternal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library. A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints, with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inference performance and translation quality.\n\n\n# CTranslate2 Installation\n```bash\npip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0\n```\n### ct2-transformers-converter Command Used:\n```bash\nct2-transformers-converter --model Helsinki-NLP/opus-mt-fi-pis --output_dir ./ctranslate2/opus-mt-fi-pis-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16\n```\n# CTranslate2 Converted Checkpoint Information:\n**Compatible With:**\n- [ctranslate2](https://github.com/OpenNMT/CTranslate2)\n- [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2)\n\n**Compute Type:**\n- `compute_type=int8_float16` for `device=\"cuda\"`\n- `compute_type=int8` for `device=\"cpu\"`\n\n# Sample Code - ctranslate2\n#### Clone the repository to the working directory or wherever you wish to store the model artifacts. ####\n```bash\ngit clone https://huggingface.co/gaudi/opus-mt-fi-pis-ctranslate2\n```\n#### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. 
####\n```python\nfrom ctranslate2 import Translator\nimport transformers\n\nmodel_dir = \"./opus-mt-fi-pis-ctranslate2\" # Path to model directory.\ntranslator = Translator(\n model_path=model_dir,\n device=\"cuda\", # cpu, cuda, or auto.\n inter_threads=1, # Maximum number of parallel translations.\n intra_threads=4, # Number of OpenMP threads per translator.\n compute_type=\"int8_float16\", # int8 for cpu or int8_float16 for cuda.\n)\n\ntokenizer = transformers.AutoTokenizer.from_pretrained(model_dir)\n\nsource = tokenizer.convert_ids_to_tokens(tokenizer.encode(\"XXXXXX, XXX XX XXXXXX.\"))\nresults = translator.translate_batch([source])\ntarget = results[0].hypotheses[0]\n\nprint(tokenizer.decode(tokenizer.convert_tokens_to_ids(target)))\n```\n# Sample Code - hf-hub-ctranslate2\n**Derived From [michaelfeil](https://huggingface.co/michaelfeil):**\n```python\nfrom hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub\nfrom transformers import AutoTokenizer\n\nmodel_name = \"gaudi/opus-mt-fi-pis-ctranslate2\"\nmodel = TranslatorCT2fromHfHub(\n model_name_or_path=model_name,\n device=\"cuda\",\n compute_type=\"int8_float16\",\n tokenizer=AutoTokenizer.from_pretrained(model_name)\n)\noutputs = model.generate(\n text=[\"XXX XX XXX XXXXXXX XXXX?\", \"XX XX XXXX XX XXX!\"],\n)\nprint(outputs)\n```\n# License and other remarks:\nLicense conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-fi-pis) by Helsinki-NLP.\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"# Repository General Information\n## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)!\n- Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-fi-pis)\n- This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2).\n- This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil).\n\n# What is CTranslate2?\n[CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models.\n\nCTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU.\n\nCTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include:\n- Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper\n- Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon\n- Encoder-only models: BERT, DistilBERT, XLM-RoBERTa\n\nThe project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration.\n\n# CTranslate2 Benchmarks\nPlease note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. 
Tested against `newstest2014` (En -> De) dataset.\n\nThe benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and to reproduce these numbers.\n\n## CPU Benchmarks for Generic Opus-MT Models\n| Library | Tokens per Second | Max Memory Usage | BLEU |\n| :----: | :----: | :----: | :----: |\n| Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 |\n| Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 |\n| Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 |\n| CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 |\n| CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 |\n\n## GPU Benchmarks for Generic Opus-MT Models\n| Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU |\n| :----: | :----: | :----: | :----: | :----: |\n| Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 |\n| Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 |\n| CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 |\n| CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 |\n\n`Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.`\n\n**Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**
\n**Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-fi-pis).**\n\n## Internal Benchmarks\nInternal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library. A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints, with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inference performance and translation quality.\n\n\n# CTranslate2 Installation\n```bash\npip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0\n```\n### ct2-transformers-converter Command Used:\n```bash\nct2-transformers-converter --model Helsinki-NLP/opus-mt-fi-pis --output_dir ./ctranslate2/opus-mt-fi-pis-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16\n```\n# CTranslate2 Converted Checkpoint Information:\n**Compatible With:**\n- [ctranslate2](https://github.com/OpenNMT/CTranslate2)\n- [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2)\n\n**Compute Type:**\n- `compute_type=int8_float16` for `device=\"cuda\"`\n- `compute_type=int8` for `device=\"cpu\"`\n\n# Sample Code - ctranslate2\n#### Clone the repository to the working directory or wherever you wish to store the model artifacts. ####\n```bash\ngit clone https://huggingface.co/gaudi/opus-mt-fi-pis-ctranslate2\n```\n#### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. 
####\n```python\nfrom ctranslate2 import Translator\nimport transformers\n\nmodel_dir = \"./opus-mt-fi-pis-ctranslate2\" # Path to model directory.\ntranslator = Translator(\n model_path=model_dir,\n device=\"cuda\", # cpu, cuda, or auto.\n inter_threads=1, # Maximum number of parallel translations.\n intra_threads=4, # Number of OpenMP threads per translator.\n compute_type=\"int8_float16\", # int8 for cpu or int8_float16 for cuda.\n)\n\ntokenizer = transformers.AutoTokenizer.from_pretrained(model_dir)\n\nsource = tokenizer.convert_ids_to_tokens(tokenizer.encode(\"XXXXXX, XXX XX XXXXXX.\"))\nresults = translator.translate_batch([source])\ntarget = results[0].hypotheses[0]\n\nprint(tokenizer.decode(tokenizer.convert_tokens_to_ids(target)))\n```\n# Sample Code - hf-hub-ctranslate2\n**Derived From [michaelfeil](https://huggingface.co/michaelfeil):**\n```python\nfrom hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub\nfrom transformers import AutoTokenizer\n\nmodel_name = \"gaudi/opus-mt-fi-pis-ctranslate2\"\nmodel = TranslatorCT2fromHfHub(\n model_name_or_path=model_name,\n device=\"cuda\",\n compute_type=\"int8_float16\",\n tokenizer=AutoTokenizer.from_pretrained(model_name)\n)\noutputs = model.generate(\n text=[\"XXX XX XXX XXXXXXX XXXX?\", \"XX XX XXXX XX XXX!\"],\n)\nprint(outputs)\n```\n# License and other remarks:\nLicense conditions are intended to be identical to the [original Hugging Face repository](https://huggingface.co/Helsinki-NLP/opus-mt-fi-pis) by Helsinki-NLP.\n"},"metadata":{"kind":"string","value":"{\"license\": \"apache-2.0\", \"tags\": [\"ctranslate2\", \"translation\"]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45541,"string":"45,541"}}},{"rowIdx":43765,"cells":{"id":{"kind":"string","value":"lddczcn/distilbert-base-uncased-finetuned-emotion"},"author":{"kind":"string","value":"lddczcn"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","tensorboard","distilbert","text-classification","generated_from_trainer","dataset:emotion","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tensorboard\",\n \"distilbert\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"dataset:emotion\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-09-07T23:39:00Z","string":"2022-09-07T23:39:00Z"},"last_modified":{"kind":"string","value":"2022-09-08T00:29:20+00:00"},"downloads":{"kind":"number","value":12,"string":"12"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- emotion\nlicense: apache-2.0\nmetrics:\n- accuracy\n- f1\ntags:\n- generated_from_trainer\nmodel-index:\n- name: distilbert-base-uncased-finetuned-emotion\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: emotion\n type: emotion\n args: default\n metrics:\n - type: accuracy\n value: 0.9265\n name: Accuracy\n - type: f1\n value: 0.9265519473019482\n name: F1\n---\n\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results 
on the evaluation set:\n- Loss: 0.2155\n- Accuracy: 0.9265\n- F1: 0.9266\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| No log | 1.0 | 250 | 0.3133 | 0.9075 | 0.9054 |\n| No log | 2.0 | 500 | 0.2155 | 0.9265 | 0.9266 |\n\n\n### Framework versions\n\n- Transformers 4.13.0\n- Pytorch 1.12.1+cu113\n- Datasets 1.16.1\n- Tokenizers 0.10.3\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2155\n- Accuracy: 0.9265\n- F1: 0.9266\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| No log | 1.0 | 250 | 0.3133 | 0.9075 | 0.9054 |\n| No log | 2.0 | 500 | 0.2155 | 0.9265 | 0.9266 |\n\n\n### Framework versions\n\n- Transformers 4.13.0\n- Pytorch 1.12.1+cu113\n- Datasets 1.16.1\n- Tokenizers 0.10.3\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"emotion\"], \"license\": \"apache-2.0\", \"metrics\": [\"accuracy\", \"f1\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"distilbert-base-uncased-finetuned-emotion\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"emotion\", \"type\": \"emotion\", \"args\": \"default\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.9265, \"name\": \"Accuracy\"}, {\"type\": \"f1\", \"value\": 0.9265519473019482, \"name\": \"F1\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45542,"string":"45,542"}}},{"rowIdx":43766,"cells":{"id":{"kind":"string","value":"HuggingFaceTB/SmolVLM2-2.2B-Instruct"},"author":{"kind":"string","value":"HuggingFaceTB"},"task_category":{"kind":"string","value":"image-text-to-text"},"tags":{"kind":"list 
like","value":["transformers","safetensors","smolvlm","image-text-to-text","video-text-to-text","conversational","en","dataset:HuggingFaceM4/the_cauldron","dataset:HuggingFaceM4/Docmatix","dataset:lmms-lab/LLaVA-OneVision-Data","dataset:lmms-lab/M4-Instruct-Data","dataset:HuggingFaceFV/finevideo","dataset:MAmmoTH-VL/MAmmoTH-VL-Instruct-12M","dataset:lmms-lab/LLaVA-Video-178K","dataset:orrzohar/Video-STaR","dataset:Mutonix/Vript","dataset:TIGER-Lab/VISTA-400K","dataset:Enxin/MovieChat-1K_train","dataset:ShareGPT4Video/ShareGPT4Video","base_model:HuggingFaceTB/SmolVLM-Instruct","base_model:finetune:HuggingFaceTB/SmolVLM-Instruct","license:apache-2.0","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"smolvlm\",\n \"image-text-to-text\",\n \"video-text-to-text\",\n \"conversational\",\n \"en\",\n \"dataset:HuggingFaceM4/the_cauldron\",\n \"dataset:HuggingFaceM4/Docmatix\",\n \"dataset:lmms-lab/LLaVA-OneVision-Data\",\n \"dataset:lmms-lab/M4-Instruct-Data\",\n \"dataset:HuggingFaceFV/finevideo\",\n \"dataset:MAmmoTH-VL/MAmmoTH-VL-Instruct-12M\",\n \"dataset:lmms-lab/LLaVA-Video-178K\",\n \"dataset:orrzohar/Video-STaR\",\n \"dataset:Mutonix/Vript\",\n \"dataset:TIGER-Lab/VISTA-400K\",\n \"dataset:Enxin/MovieChat-1K_train\",\n \"dataset:ShareGPT4Video/ShareGPT4Video\",\n \"base_model:HuggingFaceTB/SmolVLM-Instruct\",\n \"base_model:finetune:HuggingFaceTB/SmolVLM-Instruct\",\n \"license:apache-2.0\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2025-02-08T19:18:45Z","string":"2025-02-08T19:18:45Z"},"last_modified":{"kind":"string","value":"2025-04-08T07:25:13+00:00"},"downloads":{"kind":"number","value":506227,"string":"506,227"},"likes":{"kind":"number","value":112,"string":"112"},"README":{"kind":"string","value":"---\nbase_model:\n- HuggingFaceTB/SmolVLM-Instruct\ndatasets:\n- HuggingFaceM4/the_cauldron\n- HuggingFaceM4/Docmatix\n- lmms-lab/LLaVA-OneVision-Data\n- lmms-lab/M4-Instruct-Data\n- HuggingFaceFV/finevideo\n- MAmmoTH-VL/MAmmoTH-VL-Instruct-12M\n- lmms-lab/LLaVA-Video-178K\n- orrzohar/Video-STaR\n- Mutonix/Vript\n- TIGER-Lab/VISTA-400K\n- Enxin/MovieChat-1K_train\n- ShareGPT4Video/ShareGPT4Video\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: image-text-to-text\ntags:\n- video-text-to-text\n---\n\n\n\"Image\n\n# SmolVLM2 2.2B\n\nSmolVLM2-2.2B is a lightweight multimodal model designed to analyze video content. The model processes videos, images, and text inputs to generate text outputs - whether answering questions about media files, comparing visual content, or transcribing text from images. Despite its compact size, requiring only 5.2GB of GPU RAM for video inference, it delivers robust performance on complex multimodal tasks. 
This efficiency makes it particularly well-suited for on-device applications where computational resources may be limited.\n## Model Summary\n\n- **Developed by:** Hugging Face 🤗\n- **Model type:** Multi-modal model (image/multi-image/video/text)\n- **Language(s) (NLP):** English\n- **License:** Apache 2.0\n- **Architecture:** Based on [Idefics3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) (see technical summary)\n\n## Resources\n\n- **Demo:** [Video Highlight Generator](https://huggingface.co/spaces/HuggingFaceTB/SmolVLM2-HighlightGenerator)\n- **Blog:** [Blog post](https://huggingface.co/blog/smolvlm2)\n\n\n## Uses\n\n\nSmolVLM2 can be used for inference on multimodal (video / image / text) tasks where the input consists of text queries along with video or one or more images. Text and media files can be interleaved arbitrarily, enabling tasks like captioning, visual question answering, and storytelling based on visual content. The model does not support image or video generation.\n\nTo fine-tune SmolVLM2 on a specific task, you can follow [the fine-tuning tutorial](https://github.com/huggingface/smollm/blob/main/vision/finetuning/Smol_VLM_FT.ipynb).\n\n## Evaluation \n\n### Vision Evaluation\n\n| Model | Mathvista | MMMU | OCRBench | MMStar | AI2D | ChartQA_Test | Science_QA | TextVQA Val | DocVQA Val |\n|-------------------|-----------|-------|----------|--------|------|--------------|------------|-------------|------------|\n| **SmolVLM2 2.2B** | 51.5 | 42 | 72.9 | 46 | 70 | 68.84 | 90 | 73.21 | 79.98 |\n| SmolVLM 2.2B | 43.9 | 38.3 | 65.5 | 41.8 | 84.5 | 71.6 | 84.5 | 72.1 | 79.7 |\n\n\n### Video Evaluation\nWe evaluated the performance of the SmolVLM2 family on the following scientific benchmarks:\n\n| Size | Video-MME | MLVU | MVBench |\n|----------|-----------------|----------|---------------|\n| 2.2B | 52.1 | 55.2 | 46.27 |\n| 500M | 42.2 | 47.3 | 39.73 |\n| 256M | 33.7 | 40.6 | 32.7 |\n\n\n### How to get started\n\nYou can use transformers to load, infer and fine-tune SmolVLM. Make sure you have num2words, flash-attn and the latest transformers installed.\nYou can load the model as follows.\n\n```python\nfrom transformers import AutoProcessor, AutoModelForImageTextToText\nimport torch\n\nmodel_path = \"HuggingFaceTB/SmolVLM2-2.2B-Instruct\"\nprocessor = AutoProcessor.from_pretrained(model_path)\nmodel = AutoModelForImageTextToText.from_pretrained(\n model_path,\n torch_dtype=torch.bfloat16,\n _attn_implementation=\"flash_attention_2\"\n).to(\"cuda\")\n```\n\n#### Simple Inference\n\nYou can preprocess your inputs directly using chat templates and pass them to the model as follows.\n\n```python\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg\"},\n {\"type\": \"text\", \"text\": \"Can you describe this image?\"},\n ]\n },\n]\n\ninputs = processor.apply_chat_template(\n messages,\n add_generation_prompt=True,\n tokenize=True,\n return_dict=True,\n return_tensors=\"pt\",\n).to(model.device, dtype=torch.bfloat16)\n\ngenerated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)\ngenerated_texts = processor.batch_decode(\n generated_ids,\n skip_special_tokens=True,\n)\nprint(generated_texts[0])\n```\n\n#### Video Inference\n\nTo use SmolVLM2 for video inference, make sure you have decord installed. 
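One way to install it, assuming your platform has a prebuilt decord wheel on PyPI:\n\n```bash\npip install decord\n```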
\n\n```python\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"video\", \"path\": \"path_to_video.mp4\"},\n {\"type\": \"text\", \"text\": \"Describe this video in detail\"}\n ]\n },\n]\n\ninputs = processor.apply_chat_template(\n messages,\n add_generation_prompt=True,\n tokenize=True,\n return_dict=True,\n return_tensors=\"pt\",\n).to(model.device, dtype=torch.bfloat16)\n\ngenerated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)\ngenerated_texts = processor.batch_decode(\n generated_ids,\n skip_special_tokens=True,\n)\n\nprint(generated_texts[0])\n```\n#### Multi-image Interleaved Inference\n\nYou can interleave multiple media with text using chat templates.\n\n```python\nimport torch\n\n\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What is the similarity between these two images?\"},\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg\"},\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg\"}, \n ]\n },\n]\n\ninputs = processor.apply_chat_template(\n messages,\n add_generation_prompt=True,\n tokenize=True,\n return_dict=True,\n return_tensors=\"pt\",\n).to(model.device, dtype=torch.bfloat16)\n\ngenerated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)\ngenerated_texts = processor.batch_decode(\n generated_ids,\n skip_special_tokens=True,\n)\nprint(generated_texts[0])\n```\n\n\n### Model optimizations\n\n## Misuse and Out-of-scope Use\n\nSmolVLM is not intended for high-stakes scenarios or critical decision-making processes that affect an individual's well-being or livelihood. The model may produce content that appears factual but may not be accurate. 
Misuse includes, but is not limited to:\n\n- Prohibited Uses:\n - Evaluating or scoring individuals (e.g., in employment, education, credit)\n - Critical automated decision-making\n - Generating unreliable factual content\n- Malicious Activities:\n - Spam generation\n - Disinformation campaigns\n - Harassment or abuse\n - Unauthorized surveillance\n\n### License\n\nSmolVLM2 is built upon [the shape-optimized SigLIP](https://huggingface.co/google/siglip-so400m-patch14-384) as image encoder and [SmolLM2](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct) for text decoder part.\n\nWe release the SmolVLM2 checkpoints under the Apache 2.0 license.\n\n## Citation information\nYou can cite us in the following way:\n```bibtex\n@unpublished{marafioti2025smolvlm,\n title = {SmolVLM: Redefining small and efficient multimodal models},\n author = {Marafioti, Andr\\'{e}s and Zohar, Orr and Farr\\'{e}, Miquel and Noyan, Merve and Bakouch, Elie and Cuenca, Pedro and Zakka, Cyril and Ben Allal, Loubna and Lozhkov, Anton and Tazi, Nouamane and Srivastav, Vaibhav and Lochner, Joshua and Larcher, Hugo and Morlon, Mathieu and Tunstall, Lewis and von Werra, Leandro and Wolf, Thomas},\n year = {2025},\n}\n```\n\n## Training Data\nSmolVLM2 used 3.3M samples for training originally from ten different datasets: [LlaVa Onevision](https://huggingface.co/datasets/lmms-lab/LLaVA-OneVision-Data), [M4-Instruct](https://huggingface.co/datasets/lmms-lab/M4-Instruct-Data), [Mammoth](https://huggingface.co/datasets/MAmmoTH-VL/MAmmoTH-VL-Instruct-12M), [LlaVa Video 178K](https://huggingface.co/datasets/lmms-lab/LLaVA-Video-178K), [FineVideo](https://huggingface.co/datasets/HuggingFaceFV/finevideo), [VideoStar](https://huggingface.co/datasets/orrzohar/Video-STaR), [VRipt](https://huggingface.co/datasets/Mutonix/Vript), [Vista-400K](https://huggingface.co/datasets/TIGER-Lab/VISTA-400K), [MovieChat](https://huggingface.co/datasets/Enxin/MovieChat-1K_train) and [ShareGPT4Video](https://huggingface.co/datasets/ShareGPT4Video/ShareGPT4Video).\nIn the following plots we give a general overview of the samples across modalities and the source of those samples.\n\n\n## Data Split per modality\n\n| Data Type | Percentage |\n|--------------|------------|\n| Image | 34.4% |\n| Text | 20.2% |\n| Video | 33.0% |\n| Multi-image | 12.3% |\n\n\n## Granular dataset slices per modality\n\n### Text Datasets\n| Dataset | Percentage |\n|--------------------------------------------|------------|\n| llava-onevision/magpie_pro_ft3_80b_mt | 6.8% |\n| llava-onevision/magpie_pro_ft3_80b_tt | 6.8% |\n| llava-onevision/magpie_pro_qwen2_72b_tt | 5.8% |\n| llava-onevision/mathqa | 0.9% |\n\n### Multi-image Datasets\n| Dataset | Percentage |\n|--------------------------------------------|------------|\n| m4-instruct-data/m4_instruct_multiimage | 10.4% |\n| mammoth/multiimage-cap6 | 1.9% |\n\n### Image Datasets\n| Dataset | Percentage |\n|--------------------------------------------|------------|\n| llava-onevision/other | 17.4% |\n| llava-onevision/vision_flan | 3.9% |\n| llava-onevision/mavis_math_metagen | 2.6% |\n| llava-onevision/mavis_math_rule_geo | 2.5% |\n| llava-onevision/sharegpt4o | 1.7% |\n| llava-onevision/sharegpt4v_coco | 1.5% |\n| llava-onevision/image_textualization | 1.3% |\n| llava-onevision/sharegpt4v_llava | 0.9% |\n| llava-onevision/mapqa | 0.9% |\n| llava-onevision/qa | 0.8% |\n| llava-onevision/textocr | 0.8% |\n\n### Video Datasets\n| Dataset | Percentage |\n|--------------------------------------------|------------|\n| 
llava-video-178k/1-2m | 7.3% |\n| llava-video-178k/2-3m | 7.0% |\n| other-video/combined | 5.7% |\n| llava-video-178k/hound | 4.4% |\n| llava-video-178k/0-30s | 2.4% |\n| video-star/starb | 2.2% |\n| vista-400k/combined | 2.2% |\n| vript/long | 1.0% |\n| ShareGPT4Video/all | 0.8% |\n\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\"Image\n\n# SmolVLM2 2.2B\n\nSmolVLM2-2.2B is a lightweight multimodal model designed to analyze video content. The model processes videos, images, and text inputs to generate text outputs - whether answering questions about media files, comparing visual content, or transcribing text from images. Despite its compact size, requiring only 5.2GB of GPU RAM for video inference, it delivers robust performance on complex multimodal tasks. This efficiency makes it particularly well-suited for on-device applications where computational resources may be limited.\n## Model Summary\n\n- **Developed by:** Hugging Face 🤗\n- **Model type:** Multi-modal model (image/multi-image/video/text)\n- **Language(s) (NLP):** English\n- **License:** Apache 2.0\n- **Architecture:** Based on [Idefics3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) (see technical summary)\n\n## Resources\n\n- **Demo:** [Video Highlight Generator](https://huggingface.co/spaces/HuggingFaceTB/SmolVLM2-HighlightGenerator)\n- **Blog:** [Blog post](https://huggingface.co/blog/smolvlm2)\n\n\n## Uses\n\n\nSmolVLM2 can be used for inference on multimodal (video / image / text) tasks where the input consists of text queries along with video or one or more images. Text and media files can be interleaved arbitrarily, enabling tasks like captioning, visual question answering, and storytelling based on visual content. The model does not support image or video generation.\n\nTo fine-tune SmolVLM2 on a specific task, you can follow [the fine-tuning tutorial](https://github.com/huggingface/smollm/blob/main/vision/finetuning/Smol_VLM_FT.ipynb).\n\n## Evaluation \n\n### Vision Evaluation\n\n| Model | Mathvista | MMMU | OCRBench | MMStar | AI2D | ChartQA_Test | Science_QA | TextVQA Val | DocVQA Val |\n|-------------------|-----------|-------|----------|--------|------|--------------|------------|-------------|------------|\n| **SmolVLM2 2.2B** | 51.5 | 42 | 72.9 | 46 | 70 | 68.84 | 90 | 73.21 | 79.98 |\n| SmolVLM 2.2B | 43.9 | 38.3 | 65.5 | 41.8 | 84.5 | 71.6 | 84.5 | 72.1 | 79.7 |\n\n\n### Video Evaluation\nWe evaluated the performance of the SmolVLM2 family on the following scientific benchmarks:\n\n| Size | Video-MME | MLVU | MVBench |\n|----------|-----------------|----------|---------------|\n| 2.2B | 52.1 | 55.2 | 46.27 |\n| 500M | 42.2 | 47.3 | 39.73 |\n| 256M | 33.7 | 40.6 | 32.7 |\n\n\n### How to get started\n\nYou can use transformers to load, infer and fine-tune SmolVLM. 
Make sure you have num2words, flash-attn and latest transformers installed.\nYou can load the model as follows.\n\n```python\nfrom transformers import AutoProcessor, AutoModelForImageTextToText\nimport torch\n\nmodel_path = \"HuggingFaceTB/SmolVLM2-2.2B-Instruct\"\nprocessor = AutoProcessor.from_pretrained(model_path)\nmodel = AutoModelForImageTextToText.from_pretrained(\n model_path,\n torch_dtype=torch.bfloat16,\n _attn_implementation=\"flash_attention_2\"\n).to(\"cuda\")\n```\n\n#### Simple Inference\n\nYou preprocess your inputs directly using chat templates and directly passing them \n\n```python\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg\"},\n {\"type\": \"text\", \"text\": \"Can you describe this image?\"},\n ]\n },\n]\n\ninputs = processor.apply_chat_template(\n messages,\n add_generation_prompt=True,\n tokenize=True,\n return_dict=True,\n return_tensors=\"pt\",\n).to(model.device, dtype=torch.bfloat16)\n\ngenerated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)\ngenerated_texts = processor.batch_decode(\n generated_ids,\n skip_special_tokens=True,\n)\nprint(generated_texts[0])\n```\n\n#### Video Inference\n\nTo use SmolVLM2 for video inference, make sure you have decord installed. \n\n```python\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"video\", \"path\": \"path_to_video.mp4\"},\n {\"type\": \"text\", \"text\": \"Describe this video in detail\"}\n ]\n },\n]\n\ninputs = processor.apply_chat_template(\n messages,\n add_generation_prompt=True,\n tokenize=True,\n return_dict=True,\n return_tensors=\"pt\",\n).to(model.device, dtype=torch.bfloat16)\n\ngenerated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)\ngenerated_texts = processor.batch_decode(\n generated_ids,\n skip_special_tokens=True,\n)\n\nprint(generated_texts[0])\n```\n#### Multi-image Interleaved Inference\n\nYou can interleave multiple media with text using chat templates.\n\n```python\nimport torch\n\n\nmessages = [\n {\n \"role\": \"user\",\n \"content\": [\n {\"type\": \"text\", \"text\": \"What is the similarity between these two images?\"},\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg\"},\n {\"type\": \"image\", \"url\": \"https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg\"}, \n ]\n },\n]\n\ninputs = processor.apply_chat_template(\n messages,\n add_generation_prompt=True,\n tokenize=True,\n return_dict=True,\n return_tensors=\"pt\",\n).to(model.device, dtype=torch.bfloat16)\n\ngenerated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64)\ngenerated_texts = processor.batch_decode(\n generated_ids,\n skip_special_tokens=True,\n)\nprint(generated_texts[0])\n```\n\n\n### Model optimizations\n\n## Misuse and Out-of-scope Use\n\nSmolVLM is not intended for high-stakes scenarios or critical decision-making processes that affect an individual's well-being or livelihood. The model may produce content that appears factual but may not be accurate. 
Misuse includes, but is not limited to:\n\n- Prohibited Uses:\n - Evaluating or scoring individuals (e.g., in employment, education, credit)\n - Critical automated decision-making\n - Generating unreliable factual content\n- Malicious Activities:\n - Spam generation\n - Disinformation campaigns\n - Harassment or abuse\n - Unauthorized surveillance\n\n### License\n\nSmolVLM2 is built upon [the shape-optimized SigLIP](https://huggingface.co/google/siglip-so400m-patch14-384) as image encoder and [SmolLM2](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct) for text decoder part.\n\nWe release the SmolVLM2 checkpoints under the Apache 2.0 license.\n\n## Citation information\nYou can cite us in the following way:\n```bibtex\n@article{marafioti2025smolvlm,\n title={SmolVLM: Redefining small and efficient multimodal models}, \n author={Andrés Marafioti and Orr Zohar and Miquel Farré and Merve Noyan and Elie Bakouch and Pedro Cuenca and Cyril Zakka and Loubna Ben Allal and Anton Lozhkov and Nouamane Tazi and Vaibhav Srivastav and Joshua Lochner and Hugo Larcher and Mathieu Morlon and Lewis Tunstall and Leandro von Werra and Thomas Wolf},\n journal={arXiv preprint arXiv:2504.05299},\n year={2025}\n}\n```\n\n## Training Data\nSmolVLM2 used 3.3M samples for training originally from ten different datasets: [LlaVa Onevision](https://huggingface.co/datasets/lmms-lab/LLaVA-OneVision-Data), [M4-Instruct](https://huggingface.co/datasets/lmms-lab/M4-Instruct-Data), [Mammoth](https://huggingface.co/datasets/MAmmoTH-VL/MAmmoTH-VL-Instruct-12M), [LlaVa Video 178K](https://huggingface.co/datasets/lmms-lab/LLaVA-Video-178K), [FineVideo](https://huggingface.co/datasets/HuggingFaceFV/finevideo), [VideoStar](https://huggingface.co/datasets/orrzohar/Video-STaR), [VRipt](https://huggingface.co/datasets/Mutonix/Vript), [Vista-400K](https://huggingface.co/datasets/TIGER-Lab/VISTA-400K), [MovieChat](https://huggingface.co/datasets/Enxin/MovieChat-1K_train) and [ShareGPT4Video](https://huggingface.co/datasets/ShareGPT4Video/ShareGPT4Video).\nIn the following plots we give a general overview of the samples across modalities and the source of those samples.\n\n\n## Data Split per modality\n\n| Data Type | Percentage |\n|--------------|------------|\n| Image | 34.4% |\n| Text | 20.2% |\n| Video | 33.0% |\n| Multi-image | 12.3% |\n\n\n## Granular dataset slices per modality\n\n### Text Datasets\n| Dataset | Percentage |\n|--------------------------------------------|------------|\n| llava-onevision/magpie_pro_ft3_80b_mt | 6.8% |\n| llava-onevision/magpie_pro_ft3_80b_tt | 6.8% |\n| llava-onevision/magpie_pro_qwen2_72b_tt | 5.8% |\n| llava-onevision/mathqa | 0.9% |\n\n### Multi-image Datasets\n| Dataset | Percentage |\n|--------------------------------------------|------------|\n| m4-instruct-data/m4_instruct_multiimage | 10.4% |\n| mammoth/multiimage-cap6 | 1.9% |\n\n### Image Datasets\n| Dataset | Percentage |\n|--------------------------------------------|------------|\n| llava-onevision/other | 17.4% |\n| llava-onevision/vision_flan | 3.9% |\n| llava-onevision/mavis_math_metagen | 2.6% |\n| llava-onevision/mavis_math_rule_geo | 2.5% |\n| llava-onevision/sharegpt4o | 1.7% |\n| llava-onevision/sharegpt4v_coco | 1.5% |\n| llava-onevision/image_textualization | 1.3% |\n| llava-onevision/sharegpt4v_llava | 0.9% |\n| llava-onevision/mapqa | 0.9% |\n| llava-onevision/qa | 0.8% |\n| llava-onevision/textocr | 0.8% |\n\n### Video Datasets\n| Dataset | Percentage 
|\n|--------------------------------------------|------------|\n| llava-video-178k/1-2m | 7.3% |\n| llava-video-178k/2-3m | 7.0% |\n| other-video/combined | 5.7% |\n| llava-video-178k/hound | 4.4% |\n| llava-video-178k/0-30s | 2.4% |\n| video-star/starb | 2.2% |\n| vista-400k/combined | 2.2% |\n| vript/long | 1.0% |\n| ShareGPT4Video/all | 0.8% |\n\n"},"metadata":{"kind":"string","value":"{\"base_model\": [\"HuggingFaceTB/SmolVLM-Instruct\"], \"datasets\": [\"HuggingFaceM4/the_cauldron\", \"HuggingFaceM4/Docmatix\", \"lmms-lab/LLaVA-OneVision-Data\", \"lmms-lab/M4-Instruct-Data\", \"HuggingFaceFV/finevideo\", \"MAmmoTH-VL/MAmmoTH-VL-Instruct-12M\", \"lmms-lab/LLaVA-Video-178K\", \"orrzohar/Video-STaR\", \"Mutonix/Vript\", \"TIGER-Lab/VISTA-400K\", \"Enxin/MovieChat-1K_train\", \"ShareGPT4Video/ShareGPT4Video\"], \"language\": [\"en\"], \"library_name\": \"transformers\", \"license\": \"apache-2.0\", \"pipeline_tag\": \"image-text-to-text\", \"tags\": [\"video-text-to-text\"]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["QUESTION_ANSWERING"],"string":"[\n \"QUESTION_ANSWERING\"\n]"},"__index_level_0__":{"kind":"number","value":45543,"string":"45,543"}}},{"rowIdx":43767,"cells":{"id":{"kind":"string","value":"learningmachineaz/mt5-enaz-10m"},"author":{"kind":"string","value":"learningmachineaz"},"task_category":{"kind":"string","value":"translation"},"tags":{"kind":"list like","value":["transformers","pytorch","mt5","text2text-generation","machine-translation","english","azerbaijani","translation","en","az","dataset:learningmachineaz/translate_enaz_10m","license:cc-by-nc-sa-4.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"mt5\",\n \"text2text-generation\",\n \"machine-translation\",\n \"english\",\n \"azerbaijani\",\n \"translation\",\n \"en\",\n \"az\",\n \"dataset:learningmachineaz/translate_enaz_10m\",\n \"license:cc-by-nc-sa-4.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-04-06T06:42:36Z","string":"2023-04-06T06:42:36Z"},"last_modified":{"kind":"string","value":"2023-04-07T16:05:23+00:00"},"downloads":{"kind":"number","value":19,"string":"19"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\ndatasets:\n- learningmachineaz/translate_enaz_10m\nlanguage:\n- en\n- az\nlicense: cc-by-nc-sa-4.0\npipeline_tag: translation\ntags:\n- machine-translation\n- mt5\n- english\n- azerbaijani\nwidget:\n- text: Artificial intelligence is already superior to human learning in numerous\n domains.\n- text: Learn as if you will live forever, live like you will die tomorrow.\n- text: When you change your thoughts, remember to also change your world.\ninference:\n parameters:\n max_length: 128\n num_return_sequences: 1\n do_sample: false\n---\n\n# Machine Translation (Maşın tərcüməsi)\n\nThis is the most advanced and accurate mT5-based machine translation model available for the Azerbaijani language.\\\nThe model was trained on 10 million sentences extracted from various text sources of the Azerbaijan National Library.\\\nTranslation quality is very close to that of Google Translate, which was used to produce the English translations.\n\n## Text above translated using this model\n```\nBu, Azərbaycan dilinə olduğu kimi, maşın tərcüməsi üçün ən qabaqcıl və dəqiq mT5 əsaslı modeldir.\nModel Azərbaycan Milli Kitabxanasının müxtəlif mətn mənbələrindən çıxarılan 10 milyon cümlə üzrə təlim 
keçib.\nTərcümə keyfiyyəti ingilis dilinə tərcümələr üçün istifadə olunduğundan Google Tərcümə ilə çox yaxındır.\n```\n\n## Training\n\n| Key point | Info |\n|-------------------------|---------|\n| Base model | mT5-base |\n| Batch size | 16 |\n| Epochs | 10 |\n| Steps | 620k |\n| Training Loss | 0.56 |\n| Eval Loss | 0.53 |\n| Training Duration | 2 days |\n\n\n## Here is an example of how you can run inference:\n\n```python \nfrom transformers import MT5Tokenizer, MT5ForConditionalGeneration\n\nmodel_name = 'learningmachineaz/mt5-enaz-10m'\nmax_length = 128\n\ntokenizer = MT5Tokenizer.from_pretrained(model_name)\nmodel = MT5ForConditionalGeneration.from_pretrained(model_name)\n\ntext = \"Artificial intelligence is already superior to human learning in numerous domains.\"\ninput_ids = tokenizer(f'translate English to Azerbaijani: {text}', return_tensors=\"pt\").input_ids\n\n# OPTION 1 - SINGLE TRANSLATION\noutputs = model.generate(input_ids, max_length=max_length, do_sample=False, num_return_sequences=1)\nprint(tokenizer.decode(outputs[0], skip_special_tokens=True))\n\n# OPTION 2 - MULTIPLE VARIATIONS\noutputs = model.generate(input_ids, max_length=max_length, do_sample=True, top_k=10, num_return_sequences=3)\nfor i, output in enumerate(outputs):\n print(tokenizer.decode(output, skip_special_tokens=True))\n```\n\nOPTION 1 - OUTPUT:\n```\nSüni intellekt artıq çoxsaylı domenlərdə insanın öyrənilməsindən üstünlük təşkil edir.\n```\n\nOPTION 2 - OUTPUT:\n```\nArtıq çoxsaylı domenlərdə süni zəka insanın öyrənilməsindən daha üstün olması şərti ilə müşahidə edilir.\nSüni intellekt artıq çoxsaylı oblastlarda insanın təlimindən yüksəkdir.\nSüni intellekt artıq çoxsaylı domenlərdə insan öyrənməsindən daha üstün gəlir.\n```\n\n## Author\n\nTrained and evaluated by [Renat Kalimulin](https://www.linkedin.com/in/rinat-kalimulin-16853358/)"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# Machine Translation (Maşın tərcüməsi)\n\nThis is the most advanced and accurate mT5 based model for machine translation available as for Azerbaijani language.\\\nThe model was trained on 10 million sentences extracted from various text sources of Azerbaijan National Library.\\\nQuality of translation is very close to Google Translate as it was used for English translations.\n\n## Text above translated using this model\n```\nBu, Azərbaycan dilinə olduğu kimi, maşın tərcüməsi üçün ən qabaqcıl və dəqiq mT5 əsaslı modeldir.\nModel Azərbaycan Milli Kitabxanasının müxtəlif mətn mənbələrindən çıxarılan 10 milyon cümlə üzrə təlim keçib.\nTərcümə keyfiyyəti ingilis dilinə tərcümələr üçün istifadə olunduğundan Google Tərcümə ilə çox yaxındır.\n```\n\n## Training\n\n| Key point | Info |\n|-------------------------|---------|\n| Base model | mT5-base |\n| Batch size | 16 |\n| Epochs | 10 |\n| Steps | 620k |\n| Training Loss | 0.56 |\n| Eval Loss | 0.53 |\n| Training Duration | 2 days |\n\n\n## Here is an example of how you can run inference:\n\n```python \nfrom transformers import MT5Tokenizer, MT5ForConditionalGeneration\n\nmodel_name = 'learningmachineaz/mt5-enaz-10m'\nmax_length = 128\n\ntokenizer = MT5Tokenizer.from_pretrained(model_name)\nmodel = MT5ForConditionalGeneration.from_pretrained(model_name)\n\ntext = \"Artificial intelligence is already superior to human learning in numerous domains.\"\ninput_ids = tokenizer(f'translate English to Azerbaijani: {text}', return_tensors=\"pt\").input_ids\n\n# OPTION 1 - SINGLE TRANSLATION\noutputs = 
model.generate(input_ids, max_length=max_length, do_sample=False, num_return_sequences=1)\nprint(tokenizer.decode(outputs[0], skip_special_tokens=True))\n\n# OPTION 2 - MULTIPLE VARIATIONS\noutputs = model.generate(input_ids, max_length=max_length, do_sample=True, top_k=10, num_return_sequences=3)\nfor i, output in enumerate(outputs):\n print(tokenizer.decode(output, skip_special_tokens=True))\n```\n\nOPTION 1 - OUTPUT:\n```\nSüni intellekt artıq çoxsaylı domenlərdə insanın öyrənilməsindən üstünlük təşkil edir.\n```\n\nOPTION 2 - OUTPUT:\n```\nArtıq çoxsaylı domenlərdə süni zəka insanın öyrənilməsindən daha üstün olması şərti ilə müşahidə edilir.\nSüni intellekt artıq çoxsaylı oblastlarda insanın təlimindən yüksəkdir.\nSüni intellekt artıq çoxsaylı domenlərdə insan öyrənməsindən daha üstün gəlir.\n```\n\n## Author\n\nTrained and evaluated by [Renat Kalimulin](https://www.linkedin.com/in/rinat-kalimulin-16853358/)"},"metadata":{"kind":"string","value":"{\"datasets\": [\"learningmachineaz/translate_enaz_10m\"], \"language\": [\"en\", \"az\"], \"license\": \"cc-by-nc-sa-4.0\", \"pipeline_tag\": \"translation\", \"tags\": [\"machine-translation\", \"mt5\", \"english\", \"azerbaijani\"], \"widget\": [{\"text\": \"Artificial intelligence is already superior to human learning in numerous domains.\"}, {\"text\": \"Learn as if you will live forever, live like you will die tomorrow.\"}, {\"text\": \"When you change your thoughts, remember to also change your world.\"}], \"inference\": {\"parameters\": {\"max_length\": 128, \"num_return_sequences\": 1, \"do_sample\": false}}}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45545,"string":"45,545"}}},{"rowIdx":43768,"cells":{"id":{"kind":"string","value":"mbazaNLP/Nllb_finetuned_tourism_en_kin"},"author":{"kind":"string","value":"mbazaNLP"},"task_category":{"kind":"string","value":"text2text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","m2m_100","text2text-generation","en","rw","dataset:mbazaNLP/NMT_Tourism_parallel_data_en_kin","dataset:mbazaNLP/NMT_Education_parallel_data_en_kin","dataset:mbazaNLP/Kinyarwanda_English_parallel_dataset","license:cc-by-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"m2m_100\",\n \"text2text-generation\",\n \"en\",\n \"rw\",\n \"dataset:mbazaNLP/NMT_Tourism_parallel_data_en_kin\",\n \"dataset:mbazaNLP/NMT_Education_parallel_data_en_kin\",\n \"dataset:mbazaNLP/Kinyarwanda_English_parallel_dataset\",\n \"license:cc-by-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-08-30T23:59:40Z","string":"2023-08-30T23:59:40Z"},"last_modified":{"kind":"string","value":"2023-09-22T14:13:33+00:00"},"downloads":{"kind":"number","value":0,"string":"0"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- mbazaNLP/NMT_Tourism_parallel_data_en_kin\n- mbazaNLP/NMT_Education_parallel_data_en_kin\n- mbazaNLP/Kinyarwanda_English_parallel_dataset\nlanguage:\n- en\n- rw\nlibrary_name: transformers\nlicense: cc-by-2.0\n---\n## Model Details\n\n### Model Description\n\n\n\nThis is a Machine Translation model, finetuned from [NLLB](https://huggingface.co/facebook/nllb-200-distilled-1.3B)-200's distilled 1.3B model, it is meant to be used in machine translation for tourism-related 
data.\n\n\n\n- **Finetuning code repository:** the code used to finetune this model can be found [here](https://github.com/Digital-Umuganda/twb_nllb_finetuning)\n\n\n\n\n\n## How to Get Started with the Model\n\nUse the code below to get started with the model.\n\n
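Below is a minimal example sketch based on the standard Hugging Face NLLB workflow; the language codes (`eng_Latn`, `kin_Latn`), the example sentence, and the generation settings are assumed NLLB defaults that have not been verified against this specific checkpoint:\n\n```python\nfrom transformers import AutoModelForSeq2SeqLM, AutoTokenizer\n\nmodel_id = \"mbazaNLP/Nllb_finetuned_tourism_en_kin\"\n# src_lang uses NLLB's code for English (assumed unchanged by finetuning).\ntokenizer = AutoTokenizer.from_pretrained(model_id, src_lang=\"eng_Latn\")\nmodel = AutoModelForSeq2SeqLM.from_pretrained(model_id)\n\ntext = \"Welcome to Volcanoes National Park.\"  # hypothetical example input\ninputs = tokenizer(text, return_tensors=\"pt\")\n\n# Force Kinyarwanda (kin_Latn) as the target language.\noutputs = model.generate(\n    **inputs,\n    forced_bos_token_id=tokenizer.convert_tokens_to_ids(\"kin_Latn\"),\n    max_length=128,\n)\nprint(tokenizer.batch_decode(outputs, skip_special_tokens=True)[0])\n```\nFor the Kinyarwanda-to-English direction, swap the two language codes.\n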
\n### Training Procedure \n\nThe model was finetuned on three datasets: a [general](https://huggingface.co/datasets/mbazaNLP/Kinyarwanda_English_parallel_dataset) purpose dataset, a [tourism](https://huggingface.co/datasets/mbazaNLP/NMT_Tourism_parallel_data_en_kin), and an [education](https://huggingface.co/datasets/mbazaNLP/NMT_Education_parallel_data_en_kin) dataset.\n\nThe model was finetuned in two phases.\n\n#### Phase one:\n- General purpose dataset\n- Education dataset\n- Tourism dataset\n\n#### Phase two:\n- Tourism dataset\n\nOther than the dataset changes between phase one and phase two finetuning, no other hyperparameters were modified. In both cases, the model was trained on an A100 40GB GPU for two epochs.\n\n\n## Evaluation\n\n\n\n\n\n\n\n\n#### Metrics\n\nModel performance was measured using BLEU, spBLEU, TER, and chrF++ metrics.\n\n### Results\n\n|Lang. Direction| BLEU | spBLEU | chrf++ |TER |\n|:----|:----:|:----:|:----:|----:|\n| Eng -> Kin | 28.37 | 40.62 | 56.48 | 59.71 |\n| Kin -> Eng | 42.54 | 44.84 | 61.54 | 43.87 |\n\n\n\n\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"## Model Details\n\n### Model Description\n\n\n\nThis is a Machine Translation model, finetuned from [NLLB](https://huggingface.co/facebook/nllb-200-distilled-1.3B)-200's distilled 1.3B model, it is meant to be used in machine translation for tourism-related data.\n\n\n\n- **Finetuning code repository:** the code used to finetune this model can be found [here](https://github.com/Digital-Umuganda/twb_nllb_finetuning)\n\n\n\n\n\n## How to Get Started with the Model\n\nUse the code below to get started with the model.\n\n\n### Training Procedure \n\nThe model was finetuned on three datasets: a [general](https://huggingface.co/datasets/mbazaNLP/Kinyarwanda_English_parallel_dataset) purpose dataset, a [tourism](https://huggingface.co/datasets/mbazaNLP/NMT_Tourism_parallel_data_en_kin), and an [education](https://huggingface.co/datasets/mbazaNLP/NMT_Education_parallel_data_en_kin) dataset.\n\nThe model was finetuned in two phases.\n\n#### Phase one:\n- General purpose dataset\n- Education dataset\n- Tourism dataset\n\n#### Phase two:\n- Tourism dataset\n\nOther than the dataset changes between phase one and phase two finetuning, no other hyperparameters were modified. In both cases, the model was trained on an A100 40GB GPU for two epochs.\n\n\n## Evaluation\n\n\n\n\n\n\n\n\n#### Metrics\n\nModel performance was measured using BLEU, spBLEU, TER, and chrF++ metrics.\n\n### Results\n\n|Lang. Direction| BLEU | spBLEU | chrf++ |TER |\n|:----|:----:|:----:|:----:|----:|\n| Eng -> Kin | 28.37 | 40.62 | 56.48 | 59.71 |\n| Kin -> Eng | 42.54 | 44.84 | 61.54 | 43.87 |\n\n\n\n\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"mbazaNLP/NMT_Tourism_parallel_data_en_kin\", \"mbazaNLP/NMT_Education_parallel_data_en_kin\", \"mbazaNLP/Kinyarwanda_English_parallel_dataset\"], \"language\": [\"en\", \"rw\"], \"library_name\": \"transformers\", \"license\": \"cc-by-2.0\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45546,"string":"45,546"}}},{"rowIdx":43769,"cells":{"id":{"kind":"string","value":"giuid/flan_t5_large_summarization_v2"},"author":{"kind":"string","value":"giuid"},"task_category":{"kind":"string","value":"text2text-generation"},"tags":{"kind":"list like","value":["safetensors","t5","summarization","flan-t5","legal","food","text2text-generation","en","dataset:efra","license:apache-2.0","region:us"],"string":"[\n \"safetensors\",\n \"t5\",\n \"summarization\",\n \"flan-t5\",\n \"legal\",\n \"food\",\n \"text2text-generation\",\n \"en\",\n \"dataset:efra\",\n \"license:apache-2.0\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2025-01-16T14:02:14Z","string":"2025-01-16T14:02:14Z"},"last_modified":{"kind":"string","value":"2025-01-16T14:08:12+00:00"},"downloads":{"kind":"number","value":13,"string":"13"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- efra\nlanguage: en\nlicense: apache-2.0\npipeline_tag: text2text-generation\ntags:\n- summarization\n- flan-t5\n- legal\n- food\nmodel_type: t5\n---\n\n# Flan-T5 Large Fine-Tuned on EFRA Dataset\n\nThis is a fine-tuned version of [Flan-T5 Large](https://huggingface.co/google/flan-t5-large) on the **EFRA dataset** for summarizing legal documents related to food regulations and policies.\n\n## Model Description\n\nFlan-T5 is a sequence-to-sequence model trained for text-to-text tasks. 
This fine-tuned version is specifically optimized for summarizing legal text in the domain of food legislation, regulatory requirements, and compliance documents.\n\n### Fine-Tuning Details\n- **Base Model**: [google/flan-t5-large](https://huggingface.co/google/flan-t5-large)\n- **Dataset**: EFRA (a curated dataset of legal documents in the food domain)\n- **Objective**: Summarization of legal documents\n- **Framework**: Hugging Face Transformers\n\n## Applications\n\nThis model is suitable for:\n- Summarizing legal texts in the food domain\n- Extracting key information from lengthy regulatory documents\n- Assisting legal professionals and food companies in understanding compliance requirements\n\n## Example Usage\n\n```python\nfrom transformers import AutoModelForSeq2SeqLM, AutoTokenizer\n\n# Load the model and tokenizer\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"giuid/flan_t5_large_summarization_v2\")\ntokenizer = AutoTokenizer.from_pretrained(\"giuid/flan_t5_large_summarization_v2\")\n\n# Input text\ninput_text = \"Your lengthy legal document text here...\"\n\n# Tokenize and generate summary\ninputs = tokenizer(input_text, return_tensors=\"pt\", max_length=512, truncation=True)\noutputs = model.generate(inputs.input_ids, max_length=150, num_beams=5, early_stopping=True)\n\n# Decode summary\nsummary = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(summary)\n```"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# Flan-T5 Large Fine-Tuned on EFRA Dataset\n\nThis is a fine-tuned version of [Flan-T5 Large](https://huggingface.co/google/flan-t5-large) on the **EFRA dataset** for summarizing legal documents related to food regulations and policies.\n\n## Model Description\n\nFlan-T5 is a sequence-to-sequence model trained for text-to-text tasks. 
This fine-tuned version is specifically optimized for summarizing legal text in the domain of food legislation, regulatory requirements, and compliance documents.\n\n### Fine-Tuning Details\n- **Base Model**: [google/flan-t5-large](https://huggingface.co/google/flan-t5-large)\n- **Dataset**: EFRA (a curated dataset of legal documents in the food domain)\n- **Objective**: Summarization of legal documents\n- **Framework**: Hugging Face Transformers\n\n## Applications\n\nThis model is suitable for:\n- Summarizing legal texts in the food domain\n- Extracting key information from lengthy regulatory documents\n- Assisting legal professionals and food companies in understanding compliance requirements\n\n## Example Usage\n\n```python\nfrom transformers import AutoModelForSeq2SeqLM, AutoTokenizer\n\n# Load the model and tokenizer\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"giuid/flan_t5_large_summarization_v2\")\ntokenizer = AutoTokenizer.from_pretrained(\"giuid/flan_t5_large_summarization_v2\")\n\n# Input text\ninput_text = \"Your lengthy legal document text here...\"\n\n# Tokenize and generate summary\ninputs = tokenizer(input_text, return_tensors=\"pt\", max_length=512, truncation=True)\noutputs = model.generate(inputs.input_ids, max_length=150, num_beams=5, early_stopping=True)\n\n# Decode summary\nsummary = tokenizer.decode(outputs[0], skip_special_tokens=True)\nprint(summary)"},"metadata":{"kind":"string","value":"{\"datasets\": [\"efra\"], \"language\": \"en\", \"license\": \"apache-2.0\", \"pipeline_tag\": \"text2text-generation\", \"tags\": [\"summarization\", \"flan-t5\", \"legal\", \"food\"], \"model_type\": \"t5\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["SUMMARIZATION"],"string":"[\n \"SUMMARIZATION\"\n]"},"__index_level_0__":{"kind":"number","value":45547,"string":"45,547"}}},{"rowIdx":43770,"cells":{"id":{"kind":"string","value":"datasocietyco/bge-base-en-v1.5-course-recommender-v4python"},"author":{"kind":"string","value":"datasocietyco"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","safetensors","bert","sentence-similarity","feature-extraction","generated_from_trainer","dataset_size:48","loss:MultipleNegativesRankingLoss","arxiv:1908.10084","arxiv:1705.00652","base_model:BAAI/bge-base-en-v1.5","base_model:finetune:BAAI/bge-base-en-v1.5","autotrain_compatible","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"safetensors\",\n \"bert\",\n \"sentence-similarity\",\n \"feature-extraction\",\n \"generated_from_trainer\",\n \"dataset_size:48\",\n \"loss:MultipleNegativesRankingLoss\",\n \"arxiv:1908.10084\",\n \"arxiv:1705.00652\",\n \"base_model:BAAI/bge-base-en-v1.5\",\n \"base_model:finetune:BAAI/bge-base-en-v1.5\",\n \"autotrain_compatible\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-12-16T11:25:06Z","string":"2024-12-16T11:25:06Z"},"last_modified":{"kind":"string","value":"2024-12-16T11:26:51+00:00"},"downloads":{"kind":"number","value":9,"string":"9"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: BAAI/bge-base-en-v1.5\nlibrary_name: sentence-transformers\npipeline_tag: sentence-similarity\ntags:\n- sentence-transformers\n- sentence-similarity\n- feature-extraction\n- generated_from_trainer\n- dataset_size:48\n- 
loss:MultipleNegativesRankingLoss\nwidget:\n- source_sentence: 'Fundamentals of Deep Learning for Multi GPUs. Find out how to\n use multiple GPUs to train neural networks and effectively parallelize\\ntraining\n of deep neural networks using TensorFlow.. tags: multiple GPUs, neural networks,\n TensorFlow, parallelize. Languages: Course language: Python. Prerequisites: No\n prerequisite course required. Target audience: Professionals want to train deep\n neural networks on multi-GPU technology to shorten\\nthe training time required\n for data-intensive applications.'\n sentences:\n - 'Course Name:Hypothesis Testing in Python|Course Description:In this course, learners\n with foundational knowledge of statistical concepts will dive deeper into hypothesis\n testing by focusing on three standard tests of statistical significance: t-tests,\n F-tests, and chi-squared tests. Covering topics such as t-value, t-distribution,\n chi-square distribution, F-statistic, and F-distribution, this course will familiarize\n learners with techniques that will enable them to assess normality of data and\n goodness-of-fit and to compare observed and expected frequencies objectively.|Tags:f-distribution,\n chi-square distribution, f-statistic, t-distribution, t-value|Course language:\n Python|Target Audience:Professionals some Python experience who would like to\n expand their skill set to more advanced Python visualization techniques and tools.|Prerequisite\n course required: Foundations of Statistics in Python'\n - 'Course Name:Foundations of Data & AI Literacy for Managers|Course Description:Designed\n for managers leading teams and projects, this course empowers individuals to build\n data-driven organizations and integrate AI tools into daily operations. Learners\n will gain a foundational understanding of data and AI concepts and learn how to\n leverage them for actionable business insights. Managers will develop the skills\n to increase collaboration with technical experts and make informed decisions about\n analysis methods, ensuring their enterprise thrives in today’s data-driven landscape.|Tags:Designed,\n managers, leading, teams, projects,, course, empowers, individuals, build, data-driven,\n organizations, integrate, AI, tools, into, daily, operations., Learners, will,\n gain, foundational, understanding, data, AI, concepts, learn, how, leverage, them,\n actionable, business, insights., Managers, will, develop, skills, increase, collaboration,\n technical, experts, make, informed, decisions, about, analysis, methods,, ensuring,\n their, enterprise, thrives, today’s, data-driven, landscape.|Course language:\n None|Target Audience:No target audience|No prerequisite course required'\n - 'Course Name:Fundamentals of Deep Learning for Multi GPUs|Course Description:Find\n out how to use multiple GPUs to train neural networks and effectively parallelize\\ntraining\n of deep neural networks using TensorFlow.|Tags:multiple GPUs, neural networks,\n TensorFlow, parallelize|Course language: Python|Target Audience:Professionals\n want to train deep neural networks on multi-GPU technology to shorten\\nthe training\n time required for data-intensive applications|No prerequisite course required'\n- source_sentence: 'Data Visualization Design & Storytelling. This course focuses\n on the fundamentals of data visualization, which helps support data-driven decision-making\n and to create a data-driven culture.. tags: data driven culture, data analytics,\n data literacy, data quality, storytelling, data science. 
Languages: Course language:\n TBD. Prerequisites: No prerequisite course required. Target audience: Professionals\n who would like to understand more about how to visualize data, design and concepts\n of storytelling through data..'\n sentences:\n - 'Course Name:Building Transformer-Based NLP Applications (NVIDIA)|Course Description:Learn\n how to apply and fine-tune a Transformer-based Deep Learning model to Natural\n Language Processing (NLP) tasks. In this course, you''ll construct a Transformer\n neural network in PyTorch, Build a named-entity recognition (NER) application\n with BERT, Deploy the NER application with ONNX and TensorRT to a Triton inference\n server. Upon completion, you’ll be proficient i.n task-agnostic applications of\n Transformer-based models. Data Society''s instructors are certified by NVIDIA’s\n Deep Learning Institute to teach this course.|Tags:named-entity recognition, text,\n Natural language processing, classification, NLP, NER|Course language: Python|Target\n Audience:Professionals with basic knowledge of neural networks and want to expand\n their knowledge in the world of Natural langauge processing|No prerequisite course\n required'\n - 'Course Name:Nonlinear Regression in Python|Course Description:In this course,\n learners will practice implementing a variety of nonlinear regression techniques\n in Python to model complex relationships beyond simple linear patterns. They will\n learn to interpret key transformations, including logarithmic (log-log, log-linear)\n and polynomial models, and identify interaction effects between predictor variables.\n Through hands-on exercises, they will also develop practical skills in selecting,\n fitting, and validating the most appropriate nonlinear model for their data.|Tags:nonlinear,\n regression|Course language: Python|Target Audience:This is an intermediate level\n course for data scientists who want to learn to understand and estimate relationships\n between a set of independent variables and a continuous dependent variable.|Prerequisite\n course required: Multiple Linear Regression'\n - 'Course Name:Data Visualization Design & Storytelling|Course Description:This\n course focuses on the fundamentals of data visualization, which helps support\n data-driven decision-making and to create a data-driven culture.|Tags:data driven\n culture, data analytics, data literacy, data quality, storytelling, data science|Course\n language: TBD|Target Audience:Professionals who would like to understand more\n about how to visualize data, design and concepts of storytelling through data.|No\n prerequisite course required'\n- source_sentence: 'Foundations of Probability Theory in Python. This course guides\n learners through a comprehensive review of advanced statistics topics on probability,\n such as permutations and combinations, joint probability, conditional probability,\n and marginal probability. Learners will also become familiar with Bayes’ theorem,\n a rule that provides a way to calculate the probability of a cause given its outcome.\n By the end of this course, learners will also be able to assess the likelihood\n of events being independent to indicate whether further statistical analysis is\n likely to yield results.. tags: conditional probability, bayes'' theorem. Languages:\n Course language: Python. Prerequisites: Prerequisite course required: Hypothesis\n Testing in Python. 
Target audience: Professionals some Python experience who would\n like to expand their skill set to more advanced Python visualization techniques\n and tools..'\n sentences:\n - 'Course Name:Foundations of Probability Theory in Python|Course Description:This\n course guides learners through a comprehensive review of advanced statistics topics\n on probability, such as permutations and combinations, joint probability, conditional\n probability, and marginal probability. Learners will also become familiar with\n Bayes’ theorem, a rule that provides a way to calculate the probability of a cause\n given its outcome. By the end of this course, learners will also be able to assess\n the likelihood of events being independent to indicate whether further statistical\n analysis is likely to yield results.|Tags:conditional probability, bayes'' theorem|Course\n language: Python|Target Audience:Professionals some Python experience who would\n like to expand their skill set to more advanced Python visualization techniques\n and tools.|Prerequisite course required: Hypothesis Testing in Python'\n - 'Course Name:Foundations of Generative AI|Course Description:Foundations of Generative\n AI|Tags:Foundations, Generative, AI|Course language: None|Target Audience:No target\n audience|No prerequisite course required'\n - 'Course Name:Data Science for Managers|Course Description:This course is designed\n for managers seeking to bolster their data literacy with a deep dive into data\n science tools and teams, project life cycles, and methods.|Tags:data driven culture,\n data analytics, data quality, storytelling, data science|Course language: TBD|Target\n Audience:This course is targeted for those who would like to understand more about\n data literacy, make more informed decisions and identify data-driven solutions\n through data science tools and methods.|No prerequisite course required'\n---\n\n# SentenceTransformer based on BAAI/bge-base-en-v1.5\n\nThis is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5). 

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5)
- **Maximum Sequence Length:** 512 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity

### Model Sources

- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel 
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
  (2): Normalize()
)
```
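
Read together, the three modules say that an embedding is simply the L2-normalized `[CLS]` vector of a BERT encoder (`pooling_mode_cls_token: True` followed by `Normalize()`). As a sanity check, the same pipeline can be reproduced with the plain `transformers` API; this is a minimal sketch, assuming (as is standard for Sentence Transformers repositories) that the underlying BERT weights and tokenizer load directly from this repo:

```python
import torch
import torch.nn.functional as F
from transformers import AutoModel, AutoTokenizer

repo = "datasocietyco/bge-base-en-v1.5-course-recommender-v4python"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModel.from_pretrained(repo)
model.eval()

batch = tokenizer(
    ["Foundations of Python", "Intro to Clustering"],
    padding=True, truncation=True, max_length=512, return_tensors="pt",
)
with torch.no_grad():
    hidden = model(**batch).last_hidden_state  # module (0): Transformer
cls = hidden[:, 0]                             # module (1): Pooling via the [CLS] token
embeddings = F.normalize(cls, p=2, dim=1)      # module (2): Normalize to unit L2 norm
print(embeddings.shape)  # torch.Size([2, 768])
```

Because the output vectors are unit-length, cosine similarity reduces to a dot product, which is why cosine is listed as the similarity function.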

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.

```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("datasocietyco/bge-base-en-v1.5-course-recommender-v4python")
# Run inference
sentences = [
    "Foundations of Probability Theory in Python. This course guides learners through a comprehensive review of advanced statistics topics on probability, such as permutations and combinations, joint probability, conditional probability, and marginal probability. Learners will also become familiar with Bayes’ theorem, a rule that provides a way to calculate the probability of a cause given its outcome. By the end of this course, learners will also be able to assess the likelihood of events being independent to indicate whether further statistical analysis is likely to yield results. tags: conditional probability, bayes' theorem. Languages: Course language: Python. Prerequisites: Prerequisite course required: Hypothesis Testing in Python. Target audience: Professionals with some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools.",
    "Course Name:Foundations of Probability Theory in Python|Course Description:This course guides learners through a comprehensive review of advanced statistics topics on probability, such as permutations and combinations, joint probability, conditional probability, and marginal probability. Learners will also become familiar with Bayes’ theorem, a rule that provides a way to calculate the probability of a cause given its outcome. By the end of this course, learners will also be able to assess the likelihood of events being independent to indicate whether further statistical analysis is likely to yield results.|Tags:conditional probability, bayes' theorem|Course language: Python|Target Audience:Professionals with some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools.|Prerequisite course required: Hypothesis Testing in Python",
    "Course Name:Foundations of Generative AI|Course Description:Foundations of Generative AI|Tags:Foundations, Generative, AI|Course language: None|Target Audience:No target audience|No prerequisite course required",
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
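
For the course-recommendation use case the model was trained for, a typical pattern is to embed a free-text query on one side and catalogue entries in the pipe-delimited `Course Name:...|Course Description:...` format on the other, then rank by cosine similarity. A minimal sketch: the query and the abbreviated catalogue entries below are hypothetical, not taken from the training data.

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("datasocietyco/bge-base-en-v1.5-course-recommender-v4python")

# Hypothetical, abbreviated catalogue entries in the training format
catalog = [
    "Course Name:Foundations of Python|Course Description:Fundamentals of the Python programming language.|Course language: Python",
    "Course Name:Intro to Clustering|Course Description:Grouping unlabeled observations in Python.|Course language: Python",
    "Course Name:Data Visualization Design & Storytelling|Course Description:Fundamentals of data visualization.|Course language: TBD",
]
query = "A beginner course on programming in Python"

scores = model.similarity(model.encode([query]), model.encode(catalog))[0]  # shape [3]
for idx in scores.argsort(descending=True).tolist():
    print(f"{scores[idx].item():.3f}  {catalog[idx].split('|')[0]}")
```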

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 48 training samples
* Columns: `anchor` and `positive`
* Approximate statistics based on the first 48 samples:

  |         | anchor | positive |
  |:--------|:-------|:---------|
  | type    | string | string |
  | details | • min: 49 tokens<br>• mean: 188.12 tokens<br>• max: 322 tokens | • min: 47 tokens<br>• mean: 186.12 tokens<br>• max: 320 tokens |

* Samples:

  | anchor | positive |
  |:-------|:---------|
  | Outlier Detection with DBSCAN in Python. Density-Based Spatial Clustering of Applications with Noise, or DBSCAN, contrasts groups of densely-packed data with points isolated in low-density regions. In this course, learners will discuss the optimal data conditions suited to this method of outlier detection. After discussing different basic varieties of anomaly detection, learners will implement DBSCAN to identify likely outliers. They will also use a balancing method called Synthetic Minority Oversampling Technique, or SMOTE, to generate additional examples of outliers and improve the anomaly detection model. tags: outlier, SMOTE, anomaly, DBSCAN. Languages: Course language: Python. Prerequisites: Prerequisite course required: Intro to Clustering. Target audience: Professionals with some Python experience who would like to expand their skills to learn about various outlier detection techniques. | Course Name:Outlier Detection with DBSCAN in Python\|Course Description:Density-Based Spatial Clustering of Applications with Noise, or DBSCAN, contrasts groups of densely-packed data with points isolated in low-density regions. In this course, learners will discuss the optimal data conditions suited to this method of outlier detection. After discussing different basic varieties of anomaly detection, learners will implement DBSCAN to identify likely outliers. They will also use a balancing method called Synthetic Minority Oversampling Technique, or SMOTE, to generate additional examples of outliers and improve the anomaly detection model.\|Tags:outlier, SMOTE, anomaly, DBSCAN\|Course language: Python\|Target Audience:Professionals with some Python experience who would like to expand their skills to learn about various outlier detection techniques\|Prerequisite course required: Intro to Clustering |
  | Foundations of Python. This course introduces learners to the fundamentals of the Python programming language. Python is one of the most widely used computer languages in the world, helpful for building web-based applications, performing data analysis, and automating tasks. By the end of this course, learners will identify how data scientists use Python, distinguish among basic data types and data structures, and perform simple arithmetic and variable-related tasks. tags: functions, basics, data-structures, control-flow. Languages: Course language: Python. Prerequisites: Prerequisite course required: Version Control with Git. Target audience: This is an introductory level course for data scientists who want to learn basics of Python and implement different data manipulation techniques using popular data wrangling Python libraries. | Course Name:Foundations of Python\|Course Description:This course introduces learners to the fundamentals of the Python programming language. Python is one of the most widely used computer languages in the world, helpful for building web-based applications, performing data analysis, and automating tasks. By the end of this course, learners will identify how data scientists use Python, distinguish among basic data types and data structures, and perform simple arithmetic and variable-related tasks.\|Tags:functions, basics, data-structures, control-flow\|Course language: Python\|Target Audience:This is an introductory level course for data scientists who want to learn basics of Python and implement different data manipulation techniques using popular data wrangling Python libraries.\|Prerequisite course required: Version Control with Git |
  | Text Generation with LLMs in Python. This course provides a practical introduction to the latest advancements in generative AI with a focus on text. To start, the course explores the use of reinforcement learning in natural language processing (NLP). Learners will delve into approaches for conversational and question-answering (QA) tasks, highlighting the capabilities, limitations, and use cases of models available in the Hugging Face library, such as Dolly v2. Finally, learners will gain hands-on experience in creating their own chatbot by using the concepts of Retrieval Augmented Generation (RAG) in LlamaIndex. tags: course, provides, practical, introduction, latest, advancements, generative, AI, focus, text., start,, course, explores, use, reinforcement, learning, natural, language, processing, (NLP)., Learners, will, delve, into, approaches, conversational, question-answering, (QA), tasks,, highlighting, capabilities,, limitations,, use, cases, models, available, Hugging, Face, library,, such, as, Dolly, v2., Finally,, learners, will, gain, hands-on, experience, creating, their, own, chatbot, using, concepts, Retrieval, Augmented, Generation, (RAG), LlamaIndex. Languages: Course language: None. Prerequisites: No prerequisite course required. Target audience: No target audience. | Course Name:Text Generation with LLMs in Python\|Course Description:This course provides a practical introduction to the latest advancements in generative AI with a focus on text. To start, the course explores the use of reinforcement learning in natural language processing (NLP). Learners will delve into approaches for conversational and question-answering (QA) tasks, highlighting the capabilities, limitations, and use cases of models available in the Hugging Face library, such as Dolly v2. Finally, learners will gain hands-on experience in creating their own chatbot by using the concepts of Retrieval Augmented Generation (RAG) in LlamaIndex.\|Tags:course, provides, practical, introduction, latest, advancements, generative, AI, focus, text., start,, course, explores, use, reinforcement, learning, natural, language, processing, (NLP)., Learners, will, delve, into, approaches, conversational, question-answering, (QA), tasks,, highlighting, capabilities,, limitations,, use, cases, models, available, Hugging, Face, library,, such, as, Dolly, v2., Finally,, learners, will, gain, hands-on, experience, creating, their, own, chatbot, using, concepts, Retrieval, Augmented, Generation, (RAG), LlamaIndex.\|Course language: None\|Target Audience:No target audience\|No prerequisite course required |

* Loss: [MultipleNegativesRankingLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:

  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim"
  }
  ```
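
For intuition: with `MultipleNegativesRankingLoss`, each anchor in a batch treats its own positive as the correct answer and every other positive in the same batch as a negative. The scaled cosine similarities form a logit matrix whose diagonal holds the true pairs, and the loss is cross-entropy against those diagonal labels; this is also why the `no_duplicates` batch sampler listed below matters. A small numerical sketch of the objective, using random stand-in embeddings rather than the model's:

```python
import torch
import torch.nn.functional as F

batch_size, dim, scale = 16, 768, 20.0
anchors = F.normalize(torch.randn(batch_size, dim), dim=1)    # stand-in anchor embeddings
positives = F.normalize(torch.randn(batch_size, dim), dim=1)  # stand-in positive embeddings

logits = scale * anchors @ positives.T  # scale * cos_sim(anchor_i, positive_j), shape [16, 16]
labels = torch.arange(batch_size)       # the true positive for row i sits in column i
loss = F.cross_entropy(logits, labels)
print(loss.item())
```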

### Evaluation Dataset

#### Unnamed Dataset

* Size: 12 evaluation samples
* Columns: `anchor` and `positive`
* Approximate statistics based on the first 12 samples:

  |         | anchor | positive |
  |:--------|:-------|:---------|
  | type    | string | string |
  | details | • min: 46 tokens<br>• mean: 162.92 tokens<br>• max: 363 tokens | • min: 44 tokens<br>• mean: 160.92 tokens<br>• max: 361 tokens |

* Samples:

  | anchor | positive |
  |:-------|:---------|
  | Fundamentals of Deep Learning for Multi GPUs. Find out how to use multiple GPUs to train neural networks and effectively parallelize training of deep neural networks using TensorFlow. tags: multiple GPUs, neural networks, TensorFlow, parallelize. Languages: Course language: Python. Prerequisites: No prerequisite course required. Target audience: Professionals who want to train deep neural networks on multi-GPU technology to shorten the training time required for data-intensive applications. | Course Name:Fundamentals of Deep Learning for Multi GPUs\|Course Description:Find out how to use multiple GPUs to train neural networks and effectively parallelize training of deep neural networks using TensorFlow.\|Tags:multiple GPUs, neural networks, TensorFlow, parallelize\|Course language: Python\|Target Audience:Professionals who want to train deep neural networks on multi-GPU technology to shorten the training time required for data-intensive applications\|No prerequisite course required |
  | Building Transformer-Based NLP Applications (NVIDIA). Learn how to apply and fine-tune a Transformer-based Deep Learning model to Natural Language Processing (NLP) tasks. In this course, you'll construct a Transformer neural network in PyTorch, build a named-entity recognition (NER) application with BERT, and deploy the NER application with ONNX and TensorRT to a Triton inference server. Upon completion, you'll be proficient in task-agnostic applications of Transformer-based models. Data Society's instructors are certified by NVIDIA's Deep Learning Institute to teach this course. tags: named-entity recognition, text, Natural language processing, classification, NLP, NER. Languages: Course language: Python. Prerequisites: No prerequisite course required. Target audience: Professionals with basic knowledge of neural networks who want to expand their knowledge in the world of Natural language processing. | Course Name:Building Transformer-Based NLP Applications (NVIDIA)\|Course Description:Learn how to apply and fine-tune a Transformer-based Deep Learning model to Natural Language Processing (NLP) tasks. In this course, you'll construct a Transformer neural network in PyTorch, build a named-entity recognition (NER) application with BERT, and deploy the NER application with ONNX and TensorRT to a Triton inference server. Upon completion, you'll be proficient in task-agnostic applications of Transformer-based models. Data Society's instructors are certified by NVIDIA's Deep Learning Institute to teach this course.\|Tags:named-entity recognition, text, Natural language processing, classification, NLP, NER\|Course language: Python\|Target Audience:Professionals with basic knowledge of neural networks who want to expand their knowledge in the world of Natural language processing\|No prerequisite course required |
  | Nonlinear Regression in Python. In this course, learners will practice implementing a variety of nonlinear regression techniques in Python to model complex relationships beyond simple linear patterns. They will learn to interpret key transformations, including logarithmic (log-log, log-linear) and polynomial models, and identify interaction effects between predictor variables. Through hands-on exercises, they will also develop practical skills in selecting, fitting, and validating the most appropriate nonlinear model for their data. tags: nonlinear, regression. Languages: Course language: Python. Prerequisites: Prerequisite course required: Multiple Linear Regression. Target audience: This is an intermediate level course for data scientists who want to learn to understand and estimate relationships between a set of independent variables and a continuous dependent variable. | Course Name:Nonlinear Regression in Python\|Course Description:In this course, learners will practice implementing a variety of nonlinear regression techniques in Python to model complex relationships beyond simple linear patterns. They will learn to interpret key transformations, including logarithmic (log-log, log-linear) and polynomial models, and identify interaction effects between predictor variables. Through hands-on exercises, they will also develop practical skills in selecting, fitting, and validating the most appropriate nonlinear model for their data.\|Tags:nonlinear, regression\|Course language: Python\|Target Audience:This is an intermediate level course for data scientists who want to learn to understand and estimate relationships between a set of independent variables and a continuous dependent variable.\|Prerequisite course required: Multiple Linear Regression |

* Loss: [MultipleNegativesRankingLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:

  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim"
  }
  ```
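
Because the evaluation set is also anchor-positive pairs, one quick held-out check is whether each anchor retrieves its own positive first. A sketch with two hypothetical pairs standing in for the real twelve:

```python
import torch
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("datasocietyco/bge-base-en-v1.5-course-recommender-v4python")

# Hypothetical stand-ins for the held-out anchor/positive pairs
eval_anchors = [
    "Foundations of Python. Fundamentals of the Python programming language.",
    "Intro to Clustering. Grouping unlabeled observations in Python.",
]
eval_positives = [
    "Course Name:Foundations of Python|Course Description:Fundamentals of the Python programming language.",
    "Course Name:Intro to Clustering|Course Description:Grouping unlabeled observations in Python.",
]

sims = model.similarity(model.encode(eval_anchors), model.encode(eval_positives))  # [n, n]
hits = sims.argmax(dim=1) == torch.arange(len(eval_anchors))
print(f"top-1 retrieval accuracy: {hits.float().mean().item():.0%}")
```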

### Training Hyperparameters

#### Non-Default Hyperparameters

- `eval_strategy`: steps
- `per_device_train_batch_size`: 16
- `per_device_eval_batch_size`: 16
- `learning_rate`: 3e-06
- `max_steps`: 24
- `warmup_ratio`: 0.1
- `batch_sampler`: no_duplicates
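
A run with these non-default values could be reproduced roughly as follows with the Sentence Transformers 3.x trainer. This is a sketch, not the original training script: the two-pair dataset is a placeholder for the actual 48-pair train / 12-pair eval split.

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers

model = SentenceTransformer("BAAI/bge-base-en-v1.5")
pairs = {
    "anchor": ["Foundations of Python. ...", "Intro to Clustering. ..."],  # placeholder texts
    "positive": ["Course Name:Foundations of Python|...", "Course Name:Intro to Clustering|..."],
}
train_ds = eval_ds = Dataset.from_dict(pairs)  # placeholder; the real run used separate splits

args = SentenceTransformerTrainingArguments(
    output_dir="bge-base-en-v1.5-course-recommender",
    eval_strategy="steps",
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    learning_rate=3e-6,
    max_steps=24,
    warmup_ratio=0.1,
    batch_sampler=BatchSamplers.NO_DUPLICATES,  # no repeated texts within a batch
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_ds,
    eval_dataset=eval_ds,
    loss=MultipleNegativesRankingLoss(model, scale=20.0),
)
trainer.train()
```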

#### All Hyperparameters

<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: steps
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 16
- `per_device_eval_batch_size`: 16
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 3e-06
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1.0
- `num_train_epochs`: 3.0
- `max_steps`: 24
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.1
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `batch_sampler`: no_duplicates
- `multi_dataset_batch_sampler`: proportional

</details>

### Training Logs

| Epoch  | Step | Training Loss | Validation Loss |
|:------:|:----:|:-------------:|:---------------:|
| 6.6667 | 20   | 0.046         | 0.0188          |

(With 48 training pairs and a batch size of 16 there are 3 steps per epoch, so `max_steps: 24` overrides `num_train_epochs: 3.0` and corresponds to 8 epochs; step 20 therefore falls at epoch 6.67.)

### Framework Versions
- Python: 3.9.13
- Sentence Transformers: 3.1.1
- Transformers: 4.45.1
- PyTorch: 2.2.2
- Accelerate: 0.34.2
- Datasets: 3.0.0
- Tokenizers: 0.20.0

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MultipleNegativesRankingLoss
```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```
2e-05\n- train_batch_size: 32\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 3\n- mixed_precision_training: Native AMP\n\n### Training results\n\n\n\n### Framework versions\n\n- Transformers 4.21.0\n- Pytorch 1.12.0+cu113\n- Datasets 2.4.0\n- Tokenizers 0.12.1\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# marian-finetuned-kde4-en-to-fr\n\nThis model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.8560\n- Bleu: 52.8311\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 32\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 3\n- mixed_precision_training: Native AMP\n\n### Training results\n\n\n\n### Framework versions\n\n- Transformers 4.21.0\n- Pytorch 1.12.0+cu113\n- Datasets 2.4.0\n- Tokenizers 0.12.1\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"kde4\"], \"license\": \"apache-2.0\", \"metrics\": [\"bleu\"], \"tags\": [\"translation\", \"generated_from_trainer\"], \"model-index\": [{\"name\": \"marian-finetuned-kde4-en-to-fr\", \"results\": [{\"task\": {\"type\": \"text2text-generation\", \"name\": \"Sequence-to-sequence Language Modeling\"}, \"dataset\": {\"name\": \"kde4\", \"type\": \"kde4\", \"config\": \"en-fr\", \"split\": \"train\", \"args\": \"en-fr\"}, \"metrics\": [{\"type\": \"bleu\", \"value\": 52.83113187001415, \"name\": \"Bleu\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45549,"string":"45,549"}}},{"rowIdx":43772,"cells":{"id":{"kind":"string","value":"ymelka/robbert-cosmetic-similarity"},"author":{"kind":"string","value":"ymelka"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","safetensors","roberta","sentence-similarity","feature-extraction","generated_from_trainer","dataset_size:4200","loss:CoSENTLoss","arxiv:1908.10084","base_model:ymelka/robbert-cosmetic-v2-finetuned","base_model:finetune:ymelka/robbert-cosmetic-v2-finetuned","model-index","autotrain_compatible","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"safetensors\",\n \"roberta\",\n \"sentence-similarity\",\n \"feature-extraction\",\n \"generated_from_trainer\",\n \"dataset_size:4200\",\n \"loss:CoSENTLoss\",\n \"arxiv:1908.10084\",\n \"base_model:ymelka/robbert-cosmetic-v2-finetuned\",\n \"base_model:finetune:ymelka/robbert-cosmetic-v2-finetuned\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-08T16:29:06Z","string":"2024-07-08T16:29:06Z"},"last_modified":{"kind":"string","value":"2024-07-08T16:29:27+00:00"},"downloads":{"kind":"number","value":5,"string":"5"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: ymelka/robbert-cosmetic-v2-finetuned\ndatasets: []\nlanguage: []\nlibrary_name: sentence-transformers\nmetrics:\n- pearson_cosine\n- spearman_cosine\n- pearson_manhattan\n- spearman_manhattan\n- pearson_euclidean\n- spearman_euclidean\n- pearson_dot\n- spearman_dot\n- pearson_max\n- spearman_max\npipeline_tag: sentence-similarity\ntags:\n- sentence-transformers\n- sentence-similarity\n- feature-extraction\n- generated_from_trainer\n- dataset_size:4200\n- loss:CoSENTLoss\nwidget:\n- source_sentence: Een milde reiniger speciaal ontwikkeld voor de gevoelige huid van\n mannen van 37 jaar. Deze reiniger helpt roodheid en onzuiverheden te verminderen,\n terwijl het de huid zacht en gehydrateerd achterlaat.\n sentences:\n - De Shiseido Waso Koshirice Tinted Spot Treatment SOS Imperfections Subtle Peach\n is een hybride behandeling en concealer die puistjes vervaagt en verzorgt. Met\n het antioxiderende Koshihikari-rijst extract en salicylzuur vermindert het direct\n oneffenheden en roodheid, terwijl het de huid voedt en beschermt. De formule bevat\n ook Japanse Shikuwasa-extract en glycyrrhizinaat voor een verbeterde huidbarrière\n en egale teint. Breng een kleine hoeveelheid aan op probleemzones 2-3 keer per\n dag voor een langdurig effect. Geschikt voor alle huidtypes.\n - Neutrogena Retinol Boost Intense Nacht Serum is een krachtig serum dat de huid\n 's nachts intensief verzorgt en herstelt. Dit serum bevat retinol, een krachtig\n ingrediënt dat de huidtextuur verbetert, fijne lijntjes en rimpels vermindert\n en de huidteint egaliseert. Daarnaast bevat het serum hyaluronzuur en vitamine\n C, die de huid hydrateren en beschermen tegen schadelijke invloeden van buitenaf.\n Breng elke avond een paar druppels aan op een gereinigde huid en masseer zachtjes\n in. Gebruik het serum in combinatie met een SPF overdag, aangezien retinol de\n huid gevoeliger kan maken voor de zon. Dit serum is geschikt voor alle huidtypes,\n maar kan irritatie veroorzaken bij een gevoelige huid.\n - 'Nivea Labello Protect+ Lippenbalsem SPF 15 Duo 2x4,8g verzorgt droge en gebarsten\n lippen, kalmeert direct en houdt je lippen 24u lang gehydrateerd. De formule trekt\n snel in en helpt gebarsten lippen te herstellen, terwijl de SPF 15 beschermt tegen\n schadelijke zonnestralen. Met actieve ingrediënten zoals sheaboter, jojoba-olie\n en vitamine E. Gebruik de lippenbalsem regelmatig voor zachte en soepele lippen.\n Let op: niet gebruiken bij allergie voor een van de ingrediënten.'\n- source_sentence: Een voedingssupplement met essentiële voedingsstoffen en antioxidanten\n om de huid van binnenuit te voeden en te beschermen. Ideaal voor vrouwen van 29\n jaar die streven naar een gezonde en stralende huid.\n sentences:\n - De Babor Shaping Vitamine ACE Lichaamscrème is een luxe crème die de huid intensief\n voedt en hydrateert. Dankzij de krachtige combinatie van vitamine A, C en E wordt\n de huid verstevigd, verhelderd en beschermd tegen vrije radicalen. Deze crème\n is ideaal voor het verbeteren van de elasticiteit en stevigheid van de huid, waardoor\n het uiterlijk van cellulitis en striae wordt verminderd. 
De actieve ingrediënten\n dringen diep door in de huid om langdurige resultaten te bieden. Breng de crème\n dagelijks aan op een gereinigde huid en masseer zachtjes in tot het volledig is\n opgenomen. Vermijd contact met de ogen en gebruik niet op een beschadigde huid.\n - 'Ma Provence Shampoo Anti-Roos met Natuurlijke Witte Klei is een handig en milieuvriendelijk\n product dat perfect is voor op reis. Met een handige maat van 85 g en een centraal\n gat voor een gemakkelijke grip, komt deze shampoo overeen met 40-50 shampoos,\n wat gelijk staat aan 2 flessen van 250 ml. Deze shampoo heeft geen conditioner\n nodig, wat resulteert in minder verpakkingsafval. De formule is 100% natuurlijk\n en bevat 20% biologische ingrediënten, waardoor het ideaal is voor mensen die\n op zoek zijn naar natuurlijke producten. Ma Provence Shampoo Anti-Roos is speciaal\n ontwikkeld om roos te bestrijden en is gecertificeerd als 100% natuurlijk en biologisch.\n Gebruiksaanwijzing: Breng een kleine hoeveelheid shampoo aan op nat haar, masseer\n zachtjes in en spoel grondig uit. Gebruik indien nodig. Voorzorgsmaatregelen:\n Vermijd contact met de ogen en bij eventuele irritatie stop het gebruik.'\n - Avène Men Hydraterende Anti-Agingverzorging is speciaal ontwikkeld voor mannen\n om de huid te hydrateren, revitaliseren en strakker te maken. Deze verzorging\n werkt op de tekenen van huidveroudering, zoals de structuur van de dermis en elasticiteit,\n dankzij actieve ingrediënten zoals hyaluronzuur en antioxidanten. De niet-plakkerige\n textuur zorgt voor een comfortabel gevoel en is geschikt voor alle gevoelige huidtypes,\n zelfs na het scheren. Breng de verzorging aan op het gelaat en de hals voor een\n stevigere en compacter uitziende huid. Gebruik de pompfles van 50 ml volgens de\n aanbevolen instructies voor optimale resultaten.\n- source_sentence: Een milde reiniger speciaal ontwikkeld voor de rijpere huid van\n vrouwen boven de 60. Verrijkt met antioxidanten en hydraterende ingrediënten om\n de doffe huid te revitaliseren en onzuiverheden te verwijderen.\n sentences:\n - De Cattier Zuiverende Reinigingsgel is speciaal ontwikkeld voor de vette huid\n met onzuiverheden en puistjes. Met biologische essentiële oliën van tea tree,\n rozemarijn en pepermunt reinigt deze gel zacht de huid en verwijdert effectief\n onzuiverheden en overtollig talg. Verrijkt met aloë vera, biedt het een aangename\n sensatie van frisheid en laat de huid helder en gezond achter. De actieve ingrediënten,\n waaronder biologische tea tree olie en aloë vera, helpen de huid te zuiveren,\n hydrateren en beschermen. Breng de gel aan op een vochtig gezicht, vermijd de\n oogcontour, masseer in en spoel af met lauw water. Gebruik de gel regelmatig voor\n een zuivere en gezonde huid.\n - 'De Clarins Extra-Firming Anti-Rimpel Rijke Verstevigende Dagcrème is een ideale\n anti-ageing dagverzorging voor de droge huid, speciaal ontworpen voor vrouwen\n vanaf 40 jaar. Deze crème zorgt voor een zichtbaar stevigere en energieke huid,\n vermindert rimpels en herdefinieert het ovaal van het gezicht. De formule bevat\n actieve ingrediënten zoals shea butter, haverextract en vitamine E die de huid\n hydrateren, beschermen en verstevigen. Breng ''s ochtends aan op een schone huid\n voor een egale teint en stralende huid. Let op: vermijd contact met de ogen en\n bij eventuele irritatie stop het gebruik. Pot 50 ml.'\n - La Roche-Posay Effaclar Zuiverende Schuimende Reiningsgel is speciaal ontwikkeld\n voor de vette huid met neiging tot acne. 
Deze reinigingsgel reinigt en zuivert\n het gezicht zachtjes, verwijdert onzuiverheden en overmatig sebum, en houdt de\n huid schoon en fris. De formule bevat thermaal water van La Roche-Posay voor een\n kalmerende werking. Geschikt voor volwassenen en adolescenten met een vette huid,\n oneffenheden of acne. Gebruik 1 à 2 keer per dag, masseer zachtjes in op een vochtig\n gezicht en spoel grondig af met water. Niet gebruiken bij overgevoeligheid voor\n een van de ingrediënten.\n- source_sentence: Een voedende gezichtsverzorging die diep in de huid doordringt\n om droogheid te behandelen. Speciaal samengesteld voor vrouwen van 24 jaar, deze\n crème helpt de huid te hydrateren en te herstellen, waardoor een gezonde gloed\n wordt bevorderd.\n sentences:\n - Waam Damast Bloemenwater Bio Spray 200ml is een kalmerende en verfrissende spray\n met de heerlijke geur van Damastroos. Dit bloemenwater hydrateert intensief, verstevigt\n en zuivert de huid op milde wijze, ideaal voor gecombineerde tot vette huidtypes.\n Het staat bekend om zijn antiverouderingseigenschappen en helpt bij het voorkomen\n van huidveroudering. De spray kan gebruikt worden voor het gezicht, lichaam en\n haar, en is geschikt voor alle huidtypes, vooral rijpere of reactieve huid. De\n belangrijkste actieve ingrediënten zijn biologisch aloë vera water en Damastroosbloesemwater.\n Gebruik de spray om make-up te verwijderen, huidirritatie te kalmeren of je huis\n subtiel te parfumeren. Niet doorslikken en bewaren op kamertemperatuur.\n - Benecos Natural Care Lippenbalsem Sinaas is een fruitige lipverzorging met een\n heerlijke zoete sinaasappelgeur, verrijkt met hoogwaardige biologische sinaasappelolie\n om de lippen zacht en gehydrateerd te houden. Deze lippenbalsem is ideaal voor\n dagelijks gebruik en biedt bescherming tegen uitdroging en schrale lippen. De\n natuurlijke ingrediënten zorgen voor een milde verzorging en een aangename geur.\n Breng de lippenbalsem eenvoudig aan op de lippen voor een langdurige hydratatie.\n Deze lippenbalsem is geschikt voor alle huidtypes en bevat 4,8 g product. Gebruik\n het product naar behoefte en geniet van zachte en verzorgde lippen.\n - De Fleurance Nature Hydraterende Gezichtscreme Met Aloë Vera is speciaal ontwikkeld\n om de natuurlijke hydratatie van de huid te behouden en te beschermen tegen agressieve\n invloeden van buitenaf. Deze lichte crème bevat aloë vera, sheaboter en jojobaolie\n die de huid verzachten, hydrateren en voeden. Geschikt voor alle huidtypes, deze\n bio gecertificeerde crème trekt snel in en laat de huid soepel en gezond aanvoelen.\n Breng de crème aan op gezicht en hals met lichte masserende bewegingen voor een\n gezonde en goed gehydrateerde huid. Bevat 50 ml.\n- source_sentence: Een hydraterende gezichtsverzorging die de doffe huid van een 18-jarige\n vrouw weer laat stralen. Bevat antioxidanten die de huid beschermen tegen schadelijke\n invloeden van buitenaf. Vermindert wallen en onzuiverheden voor een egale en gezonde\n teint.\n sentences:\n - Alpecin Hypo-Sensitive Shampoo is speciaal ontwikkeld voor mensen met een gevoelige\n hoofdhuid en biedt een zachte reiniging en hydratatie. De hypoallergene formule\n bevat geen parfum, kleurstoffen of conserveermiddelen en is zelfs geschikt voor\n mensen met atopisch eczeem. Met actieve ingrediënten zoals salie-extract en bisabolol\n kalmeert en voorkomt het irritaties, terwijl het de huidbarrière versterkt voor\n een gezonde balans. 
Gebruik de shampoo dagelijks door het zachtjes in te masseren\n op nat haar en grondig uit te spoelen. De fles bevat 250 ml en is ideaal voor\n mensen met een droge en gevoelige hoofdhuid.\n - Phyto Phytocolor Permanente Kleuring 3 Donkerbruin 1 Kit is een haarverf verrijkt\n met plantaardige pigmenten voor een rijke kleur die niet vervaagt en 100% grijze\n haren dekt. De formule bevat monoï en jojoba oliën voor een mooie glans en verzorging,\n terwijl kalmerende ingrediënten irritatie tegengaan. De kleurcrème bevat geen\n PPD of resorcine en is geschikt voor haar. Breng het mengsel aan op droog, ongewassen\n haar en laat het inwerken volgens de instructies. Voer altijd een allergietest\n uit voor gebruik. Met Phyto Phytocolor geniet je van een intense kleur met natuurlijke\n schakeringen en verzorg je je haar op een zachte manier.\n - De T.LeClerc Geheime Droge Olie is een voedende olie met een poederige geur van\n witte iris die de huid hydrateert en een langdurige geur achterlaat. Geschikt\n voor de gevoelige huid, bevat deze olie 5 plantaardige oliën zoals rijstzemelolie,\n zoete amandelolie en sesamzaadolie die de huid kalmeren, beschermen en revitaliseren.\n Ideaal voor het lichaam en haar, kan de olie het hele jaar door worden gebruikt\n door een paar druppels in de handpalmen te verwarmen en in te masseren. Voor een\n effectieve werking kan de olie ook als masker op de hoofdhuid en lengten worden\n aangebracht en na 30 minuten worden uitgespoeld. Gebruik met voorzichtigheid bij\n allergieën voor een van de ingrediënten.\nmodel-index:\n- name: SentenceTransformer based on ymelka/robbert-cosmetic-v2-finetuned\n results:\n - task:\n type: semantic-similarity\n name: Semantic Similarity\n dataset:\n name: dev\n type: dev\n metrics:\n - type: pearson_cosine\n value: 0.8646855536832553\n name: Pearson Cosine\n - type: spearman_cosine\n value: 0.9307532547762918\n name: Spearman Cosine\n - type: pearson_manhattan\n value: 0.8406446921386265\n name: Pearson Manhattan\n - type: spearman_manhattan\n value: 0.9304243863652625\n name: Spearman Manhattan\n - type: pearson_euclidean\n value: 0.8382514640398003\n name: Pearson Euclidean\n - type: spearman_euclidean\n value: 0.9307301686010424\n name: Spearman Euclidean\n - type: pearson_dot\n value: 0.8395448144279598\n name: Pearson Dot\n - type: spearman_dot\n value: 0.926131782390444\n name: Spearman Dot\n - type: pearson_max\n value: 0.8646855536832553\n name: Pearson Max\n - type: spearman_max\n value: 0.9307532547762918\n name: Spearman Max\n---\n\n# SentenceTransformer based on ymelka/robbert-cosmetic-v2-finetuned\n\nThis is a [sentence-transformers](https://www.SBERT.net) model finetuned from [ymelka/robbert-cosmetic-v2-finetuned](https://huggingface.co/ymelka/robbert-cosmetic-v2-finetuned). 
It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.\n\n## Model Details\n\n### Model Description\n- **Model Type:** Sentence Transformer\n- **Base model:** [ymelka/robbert-cosmetic-v2-finetuned](https://huggingface.co/ymelka/robbert-cosmetic-v2-finetuned) \n- **Maximum Sequence Length:** 512 tokens\n- **Output Dimensionality:** 768 tokens\n- **Similarity Function:** Cosine Similarity\n\n\n\n\n### Model Sources\n\n- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)\n- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)\n- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)\n\n### Full Model Architecture\n\n```\nSentenceTransformer(\n (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: RobertaModel \n (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})\n)\n```\n\n## Usage\n\n### Direct Usage (Sentence Transformers)\n\nFirst install the Sentence Transformers library:\n\n```bash\npip install -U sentence-transformers\n```\n\nThen you can load this model and run inference.\n```python\nfrom sentence_transformers import SentenceTransformer\n\n# Download from the 🤗 Hub\nmodel = SentenceTransformer(\"ymelka/robbert-cosmetic-similarity\")\n# Run inference\nsentences = [\n 'Een hydraterende gezichtsverzorging die de doffe huid van een 18-jarige vrouw weer laat stralen. Bevat antioxidanten die de huid beschermen tegen schadelijke invloeden van buitenaf. Vermindert wallen en onzuiverheden voor een egale en gezonde teint.',\n 'Alpecin Hypo-Sensitive Shampoo is speciaal ontwikkeld voor mensen met een gevoelige hoofdhuid en biedt een zachte reiniging en hydratatie. De hypoallergene formule bevat geen parfum, kleurstoffen of conserveermiddelen en is zelfs geschikt voor mensen met atopisch eczeem. Met actieve ingrediënten zoals salie-extract en bisabolol kalmeert en voorkomt het irritaties, terwijl het de huidbarrière versterkt voor een gezonde balans. Gebruik de shampoo dagelijks door het zachtjes in te masseren op nat haar en grondig uit te spoelen. De fles bevat 250 ml en is ideaal voor mensen met een droge en gevoelige hoofdhuid.',\n 'Phyto Phytocolor Permanente Kleuring 3 Donkerbruin 1 Kit is een haarverf verrijkt met plantaardige pigmenten voor een rijke kleur die niet vervaagt en 100% grijze haren dekt. De formule bevat monoï en jojoba oliën voor een mooie glans en verzorging, terwijl kalmerende ingrediënten irritatie tegengaan. De kleurcrème bevat geen PPD of resorcine en is geschikt voor haar. Breng het mengsel aan op droog, ongewassen haar en laat het inwerken volgens de instructies. Voer altijd een allergietest uit voor gebruik. 
Met Phyto Phytocolor geniet je van een intense kleur met natuurlijke schakeringen en verzorg je je haar op een zachte manier.',\n]\nembeddings = model.encode(sentences)\nprint(embeddings.shape)\n# [3, 768]\n\n# Get the similarity scores for the embeddings\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\n\n\n\n\n\n\n## Evaluation\n\n### Metrics\n\n#### Semantic Similarity\n* Dataset: `dev`\n* Evaluated with [EmbeddingSimilarityEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| pearson_cosine | 0.8647 |\n| **spearman_cosine** | **0.9308** |\n| pearson_manhattan | 0.8406 |\n| spearman_manhattan | 0.9304 |\n| pearson_euclidean | 0.8383 |\n| spearman_euclidean | 0.9307 |\n| pearson_dot | 0.8395 |\n| spearman_dot | 0.9261 |\n| pearson_max | 0.8647 |\n| spearman_max | 0.9308 |\n\n\n\n\n\n## Training Details\n\n### Training Dataset\n\n#### Unnamed Dataset\n\n\n* Size: 4,200 training samples\n* Columns: sentence1, sentence2, and score\n* Approximate statistics based on the first 1000 samples:\n | | sentence1 | sentence2 | score |\n |:--------|:-----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|:---------------------------------------------------------------|\n | type | string | string | float |\n | details |
min: 35 tokens, mean: 56.45 tokens, max: 86 tokens | min: 80 tokens, mean: 146.59 tokens, max: 223 tokens | min: 0.0, mean: 0.51, max: 1.0
|\n* Samples:\n | sentence1 | sentence2 | score |\n |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------|\n | Een voedingssupplement met essentiële vetzuren en vitamines om de huid van binnenuit te voeden en te ondersteunen. Dit supplement is speciaal samengesteld voor vrouwen van 22 jaar oud om droogheid te bestrijden en de algehele gezondheid van de huid te verbeteren. | De Lancaster Sun Beauty Sublime Tan Body Milk SPF30 is een vederlichte en niet-vette bodylotion die de huid beschermt tegen schadelijke zonnestralen en helpt bij het herstellen van zonschade. De crèmige textuur smelt direct op de huid en zorgt voor een gezonde bruine teint in de helft van de gebruikelijke tijd. De formule is geschikt voor alle huidtypes en laat geen wit laagje achter, met een fluweelachtige finish als resultaat. Belangrijke actieve ingrediënten zijn onder andere Bambusa arundinacea stem extract en Mauritia flexuosa fruit oil, die de huid hydrateren en beschermen. Breng royaal aan vóór blootstelling aan de zon en herhaal regelmatig. Let op: overmatige blootstelling aan de zon kan schadelijk zijn voor de gezondheid. Inhoud: 175 ml. | 0.0040877847932279 |\n | Een geavanceerde gezichtsverzorging voor vrouwen van 50 jaar en ouder, gericht op het verminderen van poriën, het verbeteren van een doffe huid en het verminderen van wallen. Deze verzorging bevat krachtige ingrediënten die de huid hydrateren, verstevigen en verjongen, waardoor de tekenen van veroudering worden verminderd en de huid een jeugdige uitstraling krijgt. | De L'Oréal Paris Age Perfect Re-Hydraterende Dagcreme is speciaal ontwikkeld voor vrouwen vanaf 50 jaar om pigmentvlekken te verminderen en de huid te verstevigen. Verrijkt met sojapeptiden, hydrateert en stimuleert deze crème de natuurlijke synthese van huidvezels, waardoor de huid stralend en stevig wordt. Na één maand zijn pigmentvlekken vervaagd en is de huid gehydrateerd. Breng 's ochtends aan op een gereinigd gezicht en hals voor optimale resultaten. Let op: alleen voor uitwendig gebruik. | 0.9995909333229064 |\n | Een rijke gezichtsverzorging voor vrouwen boven de 70 jaar die last hebben van droogheid, roodheid en vergrote poriën. Deze verzorging hydrateert diep, vermindert roodheid en herstelt de natuurlijke balans van de huid. Het helpt ook om de poriën te verfijnen en de huid te verstevigen. 
| De Clarins Extra-Firming Verstevigende Behandeling Essence is speciaal ontwikkeld voor vrouwen in de veertig om de huid intensief te hydrateren, in balans te brengen en een jeugdige uitstraling te geven. Deze essence combineert krachtige botanische ingrediënten met hyaluronzuur om de huid te vernieuwen en te verkwikken, waardoor deze steviger en elastischer wordt. De fluweelachtige textuur zorgt voor een vollere en soepelere huid. Gebruik dagelijks 's ochtends en/of 's avonds door een paar druppels zachtjes op het gezicht aan te brengen. Niet aanbevolen voor personen onder de 40 jaar. | 0.9986273050308228 |\n* Loss: [CoSENTLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:\n ```json\n {\n \"scale\": 20.0,\n \"similarity_fct\": \"pairwise_cos_sim\"\n }\n ```\n\n### Evaluation Dataset\n\n#### Unnamed Dataset\n\n\n* Size: 690 evaluation samples\n* Columns: sentence1, sentence2, and score\n* Approximate statistics based on the first 1000 samples:\n | | sentence1 | sentence2 | score |\n |:--------|:----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|:--------------------------------------------------------------|\n | type | string | string | float |\n | details |
min: 35 tokens, mean: 56.5 tokens, max: 81 tokens | min: 97 tokens, mean: 146.09 tokens, max: 207 tokens | min: 0.0, mean: 0.5, max: 1.0
|\n* Samples:\n | sentence1 | sentence2 | score |\n |:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------|\n | Een diep reinigende gezichtsreiniger speciaal ontwikkeld voor vrouwen van 36 jaar en ouder met pigmentvlekken, rimpels en een vette huid. Deze reiniger verwijdert onzuiverheden en overtollig talg, verheldert de huid en vermindert tekenen van veroudering. | Embryolisse Eco-Recharge Vloeibare Melk-Crème 400ml is een hydraterende en voedende crème die de huid intensief verzorgt en beschermt. Deze vloeibare melk-crème is geschikt voor alle huidtypes en zorgt voor een zachte en soepele huid. De formule bevat actieve ingrediënten zoals shea boter, aloë vera en soja-eiwitten die de huid hydrateren, kalmeren en herstellen. Breng de crème dagelijks aan op een gereinigde huid en masseer zachtjes in. Gebruik het product bij voorkeur 's ochtends en 's avonds voor optimale resultaten. Let op: vermijd contact met de ogen en bij eventuele huidirritatie het gebruik stoppen. | 0.019439160823822 |\n | Een voedingssupplement met essentiële vitaminen en mineralen die de huid van binnenuit voeden en beschermen. Speciaal samengesteld voor vrouwen van 20 jaar om de huid te helpen bij het reguleren van talgproductie, het verminderen van vlekken en het verbeteren van de algehele huidconditie. Dit supplement draagt bij aan een gezonde en stralende huid. | Attitude Minerale Zonne Stick Orange Blossom SPF30 60g biedt een breedbandige bescherming tegen UVA- en UVB-stralen, met non-nano zinkoxide als actief ingrediënt. Deze veganistische formule is EWG Verified en dermatologisch getest, waardoor het veilig is voor de huid en het milieu. De handige biologisch afbreekbare kartonnen tube maakt het aanbrengen gemakkelijk en laat geen witte vlekken achter. Geniet van het heerlijke oranjebloesemaroma terwijl je huid beschermd wordt tegen de zon. Gebruik deze zonne stick voor een effectieve bescherming tijdens buitenactiviteiten. | 0.0337340645492076 |\n | Een milde gezichtsreiniger speciaal ontwikkeld voor vrouwen van 50 jaar en ouder. Deze reiniger is ideaal voor het verwijderen van onzuiverheden en make-up, terwijl het de huid hydrateert en verzacht. Het helpt bij het verminderen van droogheid en roodheid, waardoor de huid er stralender en gezonder uitziet. | Avène Cleanance Zuiverende En Matterende Reinigingsgel is speciaal ontwikkeld voor de vette huid met imperfecties. Deze reinigingsgel reinigt de huid diep en verwijdert onzuiverheden en overtollig talg, waardoor de huid fris en mat blijft. 
De formule bevat actieve ingrediënten zoals Avène Thermaal Water en Monolaurine die de talgproductie reguleren en de huid zuiveren. Gebruik de reinigingsgel dagelijks, breng aan op een vochtig gezicht en masseer zachtjes in. Spoel af met water en dep de huid droog. Vermijd contact met de ogen. Geschikt voor dagelijks gebruik. | 0.999606430530548 |\n* Loss: [CoSENTLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:\n ```json\n {\n \"scale\": 20.0,\n \"similarity_fct\": \"pairwise_cos_sim\"\n }\n ```\n\n### Training Hyperparameters\n#### Non-Default Hyperparameters\n\n- `eval_strategy`: steps\n- `learning_rate`: 2e-05\n- `weight_decay`: 0.01\n- `warmup_ratio`: 0.1\n- `bf16`: True\n- `batch_sampler`: no_duplicates\n\n#### All Hyperparameters\n
Click to expand\n\n- `overwrite_output_dir`: False\n- `do_predict`: False\n- `eval_strategy`: steps\n- `prediction_loss_only`: True\n- `per_device_train_batch_size`: 8\n- `per_device_eval_batch_size`: 8\n- `per_gpu_train_batch_size`: None\n- `per_gpu_eval_batch_size`: None\n- `gradient_accumulation_steps`: 1\n- `eval_accumulation_steps`: None\n- `learning_rate`: 2e-05\n- `weight_decay`: 0.01\n- `adam_beta1`: 0.9\n- `adam_beta2`: 0.999\n- `adam_epsilon`: 1e-08\n- `max_grad_norm`: 1.0\n- `num_train_epochs`: 3\n- `max_steps`: -1\n- `lr_scheduler_type`: linear\n- `lr_scheduler_kwargs`: {}\n- `warmup_ratio`: 0.1\n- `warmup_steps`: 0\n- `log_level`: passive\n- `log_level_replica`: warning\n- `log_on_each_node`: True\n- `logging_nan_inf_filter`: True\n- `save_safetensors`: True\n- `save_on_each_node`: False\n- `save_only_model`: False\n- `restore_callback_states_from_checkpoint`: False\n- `no_cuda`: False\n- `use_cpu`: False\n- `use_mps_device`: False\n- `seed`: 42\n- `data_seed`: None\n- `jit_mode_eval`: False\n- `use_ipex`: False\n- `bf16`: True\n- `fp16`: False\n- `fp16_opt_level`: O1\n- `half_precision_backend`: auto\n- `bf16_full_eval`: False\n- `fp16_full_eval`: False\n- `tf32`: None\n- `local_rank`: 0\n- `ddp_backend`: None\n- `tpu_num_cores`: None\n- `tpu_metrics_debug`: False\n- `debug`: []\n- `dataloader_drop_last`: False\n- `dataloader_num_workers`: 0\n- `dataloader_prefetch_factor`: None\n- `past_index`: -1\n- `disable_tqdm`: False\n- `remove_unused_columns`: True\n- `label_names`: None\n- `load_best_model_at_end`: False\n- `ignore_data_skip`: False\n- `fsdp`: []\n- `fsdp_min_num_params`: 0\n- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}\n- `fsdp_transformer_layer_cls_to_wrap`: None\n- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}\n- `deepspeed`: None\n- `label_smoothing_factor`: 0.0\n- `optim`: adamw_torch\n- `optim_args`: None\n- `adafactor`: False\n- `group_by_length`: False\n- `length_column_name`: length\n- `ddp_find_unused_parameters`: None\n- `ddp_bucket_cap_mb`: None\n- `ddp_broadcast_buffers`: False\n- `dataloader_pin_memory`: True\n- `dataloader_persistent_workers`: False\n- `skip_memory_metrics`: True\n- `use_legacy_prediction_loop`: False\n- `push_to_hub`: False\n- `resume_from_checkpoint`: None\n- `hub_model_id`: None\n- `hub_strategy`: every_save\n- `hub_private_repo`: False\n- `hub_always_push`: False\n- `gradient_checkpointing`: False\n- `gradient_checkpointing_kwargs`: None\n- `include_inputs_for_metrics`: False\n- `eval_do_concat_batches`: True\n- `fp16_backend`: auto\n- `push_to_hub_model_id`: None\n- `push_to_hub_organization`: None\n- `mp_parameters`: \n- `auto_find_batch_size`: False\n- `full_determinism`: False\n- `torchdynamo`: None\n- `ray_scope`: last\n- `ddp_timeout`: 1800\n- `torch_compile`: False\n- `torch_compile_backend`: None\n- `torch_compile_mode`: None\n- `dispatch_batches`: None\n- `split_batches`: None\n- `include_tokens_per_second`: False\n- `include_num_input_tokens_seen`: False\n- `neftune_noise_alpha`: None\n- `optim_target_modules`: None\n- `batch_eval_metrics`: False\n- `eval_on_start`: False\n- `batch_sampler`: no_duplicates\n- `multi_dataset_batch_sampler`: proportional\n\n
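\n\nFor reference, the pieces above can be wired together with the Sentence Transformers v3 trainer API. The sketch below is illustrative only, not the author's published training script: the two-pair in-memory dataset stands in for the unpublished 4,200-pair training set (columns `sentence1`, `sentence2`, `score`), and `output_dir` is a placeholder.\n\n```python\nfrom datasets import Dataset\nfrom sentence_transformers import (\n    SentenceTransformer,\n    SentenceTransformerTrainer,\n    SentenceTransformerTrainingArguments,\n)\nfrom sentence_transformers.losses import CoSENTLoss\nfrom sentence_transformers.training_args import BatchSamplers\n\n# Illustrative stand-in for the unnamed 4,200-pair training set,\n# which is not published with this card.\ntrain_dataset = Dataset.from_dict({\n    "sentence1": [\n        "Een milde reiniger voor de gevoelige huid.",\n        "Een hydraterende gezichtsverzorging voor de droge huid.",\n    ],\n    "sentence2": [\n        "Deze reinigingsgel reinigt de gevoelige huid zacht en grondig.",\n        "Deze haarverf dekt grijze haren met plantaardige pigmenten.",\n    ],\n    "score": [0.95, 0.02],\n})\n\nmodel = SentenceTransformer("ymelka/robbert-cosmetic-v2-finetuned")\n# CoSENTLoss defaults to pairwise cosine similarity; scale=20.0 matches\n# the loss parameters listed above.\nloss = CoSENTLoss(model, scale=20.0)\n\nargs = SentenceTransformerTrainingArguments(\n    output_dir="robbert-cosmetic-similarity",  # placeholder path\n    num_train_epochs=3,\n    per_device_train_batch_size=8,\n    learning_rate=2e-5,\n    weight_decay=0.01,\n    warmup_ratio=0.1,\n    bf16=True,  # as listed above; requires suitable GPU support\n    batch_sampler=BatchSamplers.NO_DUPLICATES,\n)\n\ntrainer = SentenceTransformerTrainer(\n    model=model,\n    args=args,\n    train_dataset=train_dataset,\n    loss=loss,\n)\ntrainer.train()\n```\n\nCoSENT optimizes the rank order of pairwise cosine similarities against the gold scores, which is why Spearman cosine correlation is the headline metric reported below.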
\n\n### Training Logs\n| Epoch | Step | Training Loss | loss | dev_spearman_cosine |\n|:------:|:----:|:-------------:|:------:|:-------------------:|\n| 0 | 0 | - | - | 0.7531 |\n| 0.1905 | 100 | 3.0709 | 2.7162 | 0.8810 |\n| 0.3810 | 200 | 2.8254 | 2.8051 | 0.8745 |\n| 0.5714 | 300 | 2.8368 | 2.7475 | 0.8922 |\n| 0.7619 | 400 | 2.7198 | 2.6713 | 0.8986 |\n| 0.9524 | 500 | 2.6541 | 2.6326 | 0.9097 |\n| 1.1429 | 600 | 2.6628 | 2.5709 | 0.9131 |\n| 1.3333 | 700 | 2.4183 | 2.7987 | 0.9213 |\n| 1.5238 | 800 | 2.5213 | 2.5974 | 0.9197 |\n| 1.7143 | 900 | 2.3733 | 3.1097 | 0.8956 |\n| 1.9048 | 1000 | 2.3562 | 2.9513 | 0.9229 |\n| 2.0952 | 1100 | 2.3511 | 2.9670 | 0.9246 |\n| 2.2857 | 1200 | 2.1765 | 3.2356 | 0.9167 |\n| 2.4762 | 1300 | 2.2738 | 3.1946 | 0.9280 |\n| 2.6667 | 1400 | 2.1162 | 3.2512 | 0.9201 |\n| 2.8571 | 1500 | 2.0996 | 3.0392 | 0.9288 |\n| 3.0 | 1575 | - | - | 0.9308 |\n\n\n### Framework Versions\n- Python: 3.10.12\n- Sentence Transformers: 3.0.1\n- Transformers: 4.42.3\n- PyTorch: 2.3.0+cu121\n- Accelerate: 0.32.1\n- Datasets: 2.20.0\n- Tokenizers: 0.19.1\n\n## Citation\n\n### BibTeX\n\n#### Sentence Transformers\n```bibtex\n@inproceedings{reimers-2019-sentence-bert,\n title = \"Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\",\n author = \"Reimers, Nils and Gurevych, Iryna\",\n booktitle = \"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing\",\n month = \"11\",\n year = \"2019\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://arxiv.org/abs/1908.10084\",\n}\n```\n\n#### CoSENTLoss\n```bibtex\n@online{kexuefm-8847,\n title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},\n author={Su Jianlin},\n year={2022},\n month={Jan},\n url={https://kexue.fm/archives/8847},\n}\n```\n\n\n\n\n\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# SentenceTransformer based on ymelka/robbert-cosmetic-v2-finetuned\n\nThis is a [sentence-transformers](https://www.SBERT.net) model finetuned from [ymelka/robbert-cosmetic-v2-finetuned](https://huggingface.co/ymelka/robbert-cosmetic-v2-finetuned). 
It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.\n\n## Model Details\n\n### Model Description\n- **Model Type:** Sentence Transformer\n- **Base model:** [ymelka/robbert-cosmetic-v2-finetuned](https://huggingface.co/ymelka/robbert-cosmetic-v2-finetuned) \n- **Maximum Sequence Length:** 512 tokens\n- **Output Dimensionality:** 768 tokens\n- **Similarity Function:** Cosine Similarity\n\n\n\n\n### Model Sources\n\n- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)\n- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)\n- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)\n\n### Full Model Architecture\n\n```\nSentenceTransformer(\n (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: RobertaModel \n (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})\n)\n```\n\n## Usage\n\n### Direct Usage (Sentence Transformers)\n\nFirst install the Sentence Transformers library:\n\n```bash\npip install -U sentence-transformers\n```\n\nThen you can load this model and run inference.\n```python\nfrom sentence_transformers import SentenceTransformer\n\n# Download from the 🤗 Hub\nmodel = SentenceTransformer(\"ymelka/robbert-cosmetic-similarity\")\n# Run inference\nsentences = [\n 'Een hydraterende gezichtsverzorging die de doffe huid van een 18-jarige vrouw weer laat stralen. Bevat antioxidanten die de huid beschermen tegen schadelijke invloeden van buitenaf. Vermindert wallen en onzuiverheden voor een egale en gezonde teint.',\n 'Alpecin Hypo-Sensitive Shampoo is speciaal ontwikkeld voor mensen met een gevoelige hoofdhuid en biedt een zachte reiniging en hydratatie. De hypoallergene formule bevat geen parfum, kleurstoffen of conserveermiddelen en is zelfs geschikt voor mensen met atopisch eczeem. Met actieve ingrediënten zoals salie-extract en bisabolol kalmeert en voorkomt het irritaties, terwijl het de huidbarrière versterkt voor een gezonde balans. Gebruik de shampoo dagelijks door het zachtjes in te masseren op nat haar en grondig uit te spoelen. De fles bevat 250 ml en is ideaal voor mensen met een droge en gevoelige hoofdhuid.',\n 'Phyto Phytocolor Permanente Kleuring 3 Donkerbruin 1 Kit is een haarverf verrijkt met plantaardige pigmenten voor een rijke kleur die niet vervaagt en 100% grijze haren dekt. De formule bevat monoï en jojoba oliën voor een mooie glans en verzorging, terwijl kalmerende ingrediënten irritatie tegengaan. De kleurcrème bevat geen PPD of resorcine en is geschikt voor haar. Breng het mengsel aan op droog, ongewassen haar en laat het inwerken volgens de instructies. Voer altijd een allergietest uit voor gebruik. 
Met Phyto Phytocolor geniet je van een intense kleur met natuurlijke schakeringen en verzorg je je haar op een zachte manier.',\n]\nembeddings = model.encode(sentences)\nprint(embeddings.shape)\n# [3, 768]\n\n# Get the similarity scores for the embeddings\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\n\n\n\n\n\n\n## Evaluation\n\n### Metrics\n\n#### Semantic Similarity\n* Dataset: `dev`\n* Evaluated with [EmbeddingSimilarityEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| pearson_cosine | 0.8647 |\n| **spearman_cosine** | **0.9308** |\n| pearson_manhattan | 0.8406 |\n| spearman_manhattan | 0.9304 |\n| pearson_euclidean | 0.8383 |\n| spearman_euclidean | 0.9307 |\n| pearson_dot | 0.8395 |\n| spearman_dot | 0.9261 |\n| pearson_max | 0.8647 |\n| spearman_max | 0.9308 |\n\n\n\n\n\n## Training Details\n\n### Training Dataset\n\n#### Unnamed Dataset\n\n\n* Size: 4,200 training samples\n* Columns: sentence1, sentence2, and score\n* Approximate statistics based on the first 1000 samples:\n | | sentence1 | sentence2 | score |\n |:--------|:-----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|:---------------------------------------------------------------|\n | type | string | string | float |\n | details |
min: 35 tokens, mean: 56.45 tokens, max: 86 tokens | min: 80 tokens, mean: 146.59 tokens, max: 223 tokens | min: 0.0, mean: 0.51, max: 1.0
|\n* Samples:\n | sentence1 | sentence2 | score |\n |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------|\n | Een voedingssupplement met essentiële vetzuren en vitamines om de huid van binnenuit te voeden en te ondersteunen. Dit supplement is speciaal samengesteld voor vrouwen van 22 jaar oud om droogheid te bestrijden en de algehele gezondheid van de huid te verbeteren. | De Lancaster Sun Beauty Sublime Tan Body Milk SPF30 is een vederlichte en niet-vette bodylotion die de huid beschermt tegen schadelijke zonnestralen en helpt bij het herstellen van zonschade. De crèmige textuur smelt direct op de huid en zorgt voor een gezonde bruine teint in de helft van de gebruikelijke tijd. De formule is geschikt voor alle huidtypes en laat geen wit laagje achter, met een fluweelachtige finish als resultaat. Belangrijke actieve ingrediënten zijn onder andere Bambusa arundinacea stem extract en Mauritia flexuosa fruit oil, die de huid hydrateren en beschermen. Breng royaal aan vóór blootstelling aan de zon en herhaal regelmatig. Let op: overmatige blootstelling aan de zon kan schadelijk zijn voor de gezondheid. Inhoud: 175 ml. | 0.0040877847932279 |\n | Een geavanceerde gezichtsverzorging voor vrouwen van 50 jaar en ouder, gericht op het verminderen van poriën, het verbeteren van een doffe huid en het verminderen van wallen. Deze verzorging bevat krachtige ingrediënten die de huid hydrateren, verstevigen en verjongen, waardoor de tekenen van veroudering worden verminderd en de huid een jeugdige uitstraling krijgt. | De L'Oréal Paris Age Perfect Re-Hydraterende Dagcreme is speciaal ontwikkeld voor vrouwen vanaf 50 jaar om pigmentvlekken te verminderen en de huid te verstevigen. Verrijkt met sojapeptiden, hydrateert en stimuleert deze crème de natuurlijke synthese van huidvezels, waardoor de huid stralend en stevig wordt. Na één maand zijn pigmentvlekken vervaagd en is de huid gehydrateerd. Breng 's ochtends aan op een gereinigd gezicht en hals voor optimale resultaten. Let op: alleen voor uitwendig gebruik. | 0.9995909333229064 |\n | Een rijke gezichtsverzorging voor vrouwen boven de 70 jaar die last hebben van droogheid, roodheid en vergrote poriën. Deze verzorging hydrateert diep, vermindert roodheid en herstelt de natuurlijke balans van de huid. Het helpt ook om de poriën te verfijnen en de huid te verstevigen. 
| De Clarins Extra-Firming Verstevigende Behandeling Essence is speciaal ontwikkeld voor vrouwen in de veertig om de huid intensief te hydrateren, in balans te brengen en een jeugdige uitstraling te geven. Deze essence combineert krachtige botanische ingrediënten met hyaluronzuur om de huid te vernieuwen en te verkwikken, waardoor deze steviger en elastischer wordt. De fluweelachtige textuur zorgt voor een vollere en soepelere huid. Gebruik dagelijks 's ochtends en/of 's avonds door een paar druppels zachtjes op het gezicht aan te brengen. Niet aanbevolen voor personen onder de 40 jaar. | 0.9986273050308228 |\n* Loss: [CoSENTLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:\n ```json\n {\n \"scale\": 20.0,\n \"similarity_fct\": \"pairwise_cos_sim\"\n }\n ```\n\n### Evaluation Dataset\n\n#### Unnamed Dataset\n\n\n* Size: 690 evaluation samples\n* Columns: sentence1, sentence2, and score\n* Approximate statistics based on the first 1000 samples:\n | | sentence1 | sentence2 | score |\n |:--------|:----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|:--------------------------------------------------------------|\n | type | string | string | float |\n | details |
min: 35 tokens, mean: 56.5 tokens, max: 81 tokens | min: 97 tokens, mean: 146.09 tokens, max: 207 tokens | min: 0.0, mean: 0.5, max: 1.0
|\n* Samples:\n | sentence1 | sentence2 | score |\n |:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------|\n | Een diep reinigende gezichtsreiniger speciaal ontwikkeld voor vrouwen van 36 jaar en ouder met pigmentvlekken, rimpels en een vette huid. Deze reiniger verwijdert onzuiverheden en overtollig talg, verheldert de huid en vermindert tekenen van veroudering. | Embryolisse Eco-Recharge Vloeibare Melk-Crème 400ml is een hydraterende en voedende crème die de huid intensief verzorgt en beschermt. Deze vloeibare melk-crème is geschikt voor alle huidtypes en zorgt voor een zachte en soepele huid. De formule bevat actieve ingrediënten zoals shea boter, aloë vera en soja-eiwitten die de huid hydrateren, kalmeren en herstellen. Breng de crème dagelijks aan op een gereinigde huid en masseer zachtjes in. Gebruik het product bij voorkeur 's ochtends en 's avonds voor optimale resultaten. Let op: vermijd contact met de ogen en bij eventuele huidirritatie het gebruik stoppen. | 0.019439160823822 |\n | Een voedingssupplement met essentiële vitaminen en mineralen die de huid van binnenuit voeden en beschermen. Speciaal samengesteld voor vrouwen van 20 jaar om de huid te helpen bij het reguleren van talgproductie, het verminderen van vlekken en het verbeteren van de algehele huidconditie. Dit supplement draagt bij aan een gezonde en stralende huid. | Attitude Minerale Zonne Stick Orange Blossom SPF30 60g biedt een breedbandige bescherming tegen UVA- en UVB-stralen, met non-nano zinkoxide als actief ingrediënt. Deze veganistische formule is EWG Verified en dermatologisch getest, waardoor het veilig is voor de huid en het milieu. De handige biologisch afbreekbare kartonnen tube maakt het aanbrengen gemakkelijk en laat geen witte vlekken achter. Geniet van het heerlijke oranjebloesemaroma terwijl je huid beschermd wordt tegen de zon. Gebruik deze zonne stick voor een effectieve bescherming tijdens buitenactiviteiten. | 0.0337340645492076 |\n | Een milde gezichtsreiniger speciaal ontwikkeld voor vrouwen van 50 jaar en ouder. Deze reiniger is ideaal voor het verwijderen van onzuiverheden en make-up, terwijl het de huid hydrateert en verzacht. Het helpt bij het verminderen van droogheid en roodheid, waardoor de huid er stralender en gezonder uitziet. | Avène Cleanance Zuiverende En Matterende Reinigingsgel is speciaal ontwikkeld voor de vette huid met imperfecties. Deze reinigingsgel reinigt de huid diep en verwijdert onzuiverheden en overtollig talg, waardoor de huid fris en mat blijft. 
De formule bevat actieve ingrediënten zoals Avène Thermaal Water en Monolaurine die de talgproductie reguleren en de huid zuiveren. Gebruik de reinigingsgel dagelijks, breng aan op een vochtig gezicht en masseer zachtjes in. Spoel af met water en dep de huid droog. Vermijd contact met de ogen. Geschikt voor dagelijks gebruik. | 0.999606430530548 |\n* Loss: [CoSENTLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:\n ```json\n {\n \"scale\": 20.0,\n \"similarity_fct\": \"pairwise_cos_sim\"\n }\n ```\n\n### Training Hyperparameters\n#### Non-Default Hyperparameters\n\n- `eval_strategy`: steps\n- `learning_rate`: 2e-05\n- `weight_decay`: 0.01\n- `warmup_ratio`: 0.1\n- `bf16`: True\n- `batch_sampler`: no_duplicates\n\n#### All Hyperparameters\n
Click to expand\n\n- `overwrite_output_dir`: False\n- `do_predict`: False\n- `eval_strategy`: steps\n- `prediction_loss_only`: True\n- `per_device_train_batch_size`: 8\n- `per_device_eval_batch_size`: 8\n- `per_gpu_train_batch_size`: None\n- `per_gpu_eval_batch_size`: None\n- `gradient_accumulation_steps`: 1\n- `eval_accumulation_steps`: None\n- `learning_rate`: 2e-05\n- `weight_decay`: 0.01\n- `adam_beta1`: 0.9\n- `adam_beta2`: 0.999\n- `adam_epsilon`: 1e-08\n- `max_grad_norm`: 1.0\n- `num_train_epochs`: 3\n- `max_steps`: -1\n- `lr_scheduler_type`: linear\n- `lr_scheduler_kwargs`: {}\n- `warmup_ratio`: 0.1\n- `warmup_steps`: 0\n- `log_level`: passive\n- `log_level_replica`: warning\n- `log_on_each_node`: True\n- `logging_nan_inf_filter`: True\n- `save_safetensors`: True\n- `save_on_each_node`: False\n- `save_only_model`: False\n- `restore_callback_states_from_checkpoint`: False\n- `no_cuda`: False\n- `use_cpu`: False\n- `use_mps_device`: False\n- `seed`: 42\n- `data_seed`: None\n- `jit_mode_eval`: False\n- `use_ipex`: False\n- `bf16`: True\n- `fp16`: False\n- `fp16_opt_level`: O1\n- `half_precision_backend`: auto\n- `bf16_full_eval`: False\n- `fp16_full_eval`: False\n- `tf32`: None\n- `local_rank`: 0\n- `ddp_backend`: None\n- `tpu_num_cores`: None\n- `tpu_metrics_debug`: False\n- `debug`: []\n- `dataloader_drop_last`: False\n- `dataloader_num_workers`: 0\n- `dataloader_prefetch_factor`: None\n- `past_index`: -1\n- `disable_tqdm`: False\n- `remove_unused_columns`: True\n- `label_names`: None\n- `load_best_model_at_end`: False\n- `ignore_data_skip`: False\n- `fsdp`: []\n- `fsdp_min_num_params`: 0\n- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}\n- `fsdp_transformer_layer_cls_to_wrap`: None\n- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}\n- `deepspeed`: None\n- `label_smoothing_factor`: 0.0\n- `optim`: adamw_torch\n- `optim_args`: None\n- `adafactor`: False\n- `group_by_length`: False\n- `length_column_name`: length\n- `ddp_find_unused_parameters`: None\n- `ddp_bucket_cap_mb`: None\n- `ddp_broadcast_buffers`: False\n- `dataloader_pin_memory`: True\n- `dataloader_persistent_workers`: False\n- `skip_memory_metrics`: True\n- `use_legacy_prediction_loop`: False\n- `push_to_hub`: False\n- `resume_from_checkpoint`: None\n- `hub_model_id`: None\n- `hub_strategy`: every_save\n- `hub_private_repo`: False\n- `hub_always_push`: False\n- `gradient_checkpointing`: False\n- `gradient_checkpointing_kwargs`: None\n- `include_inputs_for_metrics`: False\n- `eval_do_concat_batches`: True\n- `fp16_backend`: auto\n- `push_to_hub_model_id`: None\n- `push_to_hub_organization`: None\n- `mp_parameters`: \n- `auto_find_batch_size`: False\n- `full_determinism`: False\n- `torchdynamo`: None\n- `ray_scope`: last\n- `ddp_timeout`: 1800\n- `torch_compile`: False\n- `torch_compile_backend`: None\n- `torch_compile_mode`: None\n- `dispatch_batches`: None\n- `split_batches`: None\n- `include_tokens_per_second`: False\n- `include_num_input_tokens_seen`: False\n- `neftune_noise_alpha`: None\n- `optim_target_modules`: None\n- `batch_eval_metrics`: False\n- `eval_on_start`: False\n- `batch_sampler`: no_duplicates\n- `multi_dataset_batch_sampler`: proportional\n\n
\n\n### Training Logs\n| Epoch | Step | Training Loss | loss | dev_spearman_cosine |\n|:------:|:----:|:-------------:|:------:|:-------------------:|\n| 0 | 0 | - | - | 0.7531 |\n| 0.1905 | 100 | 3.0709 | 2.7162 | 0.8810 |\n| 0.3810 | 200 | 2.8254 | 2.8051 | 0.8745 |\n| 0.5714 | 300 | 2.8368 | 2.7475 | 0.8922 |\n| 0.7619 | 400 | 2.7198 | 2.6713 | 0.8986 |\n| 0.9524 | 500 | 2.6541 | 2.6326 | 0.9097 |\n| 1.1429 | 600 | 2.6628 | 2.5709 | 0.9131 |\n| 1.3333 | 700 | 2.4183 | 2.7987 | 0.9213 |\n| 1.5238 | 800 | 2.5213 | 2.5974 | 0.9197 |\n| 1.7143 | 900 | 2.3733 | 3.1097 | 0.8956 |\n| 1.9048 | 1000 | 2.3562 | 2.9513 | 0.9229 |\n| 2.0952 | 1100 | 2.3511 | 2.9670 | 0.9246 |\n| 2.2857 | 1200 | 2.1765 | 3.2356 | 0.9167 |\n| 2.4762 | 1300 | 2.2738 | 3.1946 | 0.9280 |\n| 2.6667 | 1400 | 2.1162 | 3.2512 | 0.9201 |\n| 2.8571 | 1500 | 2.0996 | 3.0392 | 0.9288 |\n| 3.0 | 1575 | - | - | 0.9308 |\n\n\n### Framework Versions\n- Python: 3.10.12\n- Sentence Transformers: 3.0.1\n- Transformers: 4.42.3\n- PyTorch: 2.3.0+cu121\n- Accelerate: 0.32.1\n- Datasets: 2.20.0\n- Tokenizers: 0.19.1\n\n## Citation\n\n### BibTeX\n\n#### Sentence Transformers\n```bibtex\n@inproceedings{reimers-2019-sentence-bert,\n title = \"Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\",\n author = \"Reimers, Nils and Gurevych, Iryna\",\n booktitle = \"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing\",\n month = \"11\",\n year = \"2019\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://arxiv.org/abs/1908.10084\",\n}\n```\n\n#### CoSENTLoss\n```bibtex\n@online{kexuefm-8847,\n title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},\n author={Su Jianlin},\n year={2022},\n month={Jan},\n url={https://kexue.fm/archives/8847},\n}\n```\n\n\n\n\n\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"ymelka/robbert-cosmetic-v2-finetuned\", \"datasets\": [], \"language\": [], \"library_name\": \"sentence-transformers\", \"metrics\": [\"pearson_cosine\", \"spearman_cosine\", \"pearson_manhattan\", \"spearman_manhattan\", \"pearson_euclidean\", \"spearman_euclidean\", \"pearson_dot\", \"spearman_dot\", \"pearson_max\", \"spearman_max\"], \"pipeline_tag\": \"sentence-similarity\", \"tags\": [\"sentence-transformers\", \"sentence-similarity\", \"feature-extraction\", \"generated_from_trainer\", \"dataset_size:4200\", \"loss:CoSENTLoss\"], \"widget\": [{\"source_sentence\": \"Een milde reiniger speciaal ontwikkeld voor de gevoelige huid van mannen van 37 jaar. Deze reiniger helpt roodheid en onzuiverheden te verminderen, terwijl het de huid zacht en gehydrateerd achterlaat.\", \"sentences\": [\"De Shiseido Waso Koshirice Tinted Spot Treatment SOS Imperfections Subtle Peach is een hybride behandeling en concealer die puistjes vervaagt en verzorgt. Met het antioxiderende Koshihikari-rijst extract en salicylzuur vermindert het direct oneffenheden en roodheid, terwijl het de huid voedt en beschermt. De formule bevat ook Japanse Shikuwasa-extract en glycyrrhizinaat voor een verbeterde huidbarrière en egale teint. Breng een kleine hoeveelheid aan op probleemzones 2-3 keer per dag voor een langdurig effect. Geschikt voor alle huidtypes.\", \"Neutrogena Retinol Boost Intense Nacht Serum is een krachtig serum dat de huid 's nachts intensief verzorgt en herstelt. Dit serum bevat retinol, een krachtig ingrediënt dat de huidtextuur verbetert, fijne lijntjes en rimpels vermindert en de huidteint egaliseert. 
Daarnaast bevat het serum hyaluronzuur en vitamine C, die de huid hydrateren en beschermen tegen schadelijke invloeden van buitenaf. Breng elke avond een paar druppels aan op een gereinigde huid en masseer zachtjes in. Gebruik het serum in combinatie met een SPF overdag, aangezien retinol de huid gevoeliger kan maken voor de zon. Dit serum is geschikt voor alle huidtypes, maar kan irritatie veroorzaken bij een gevoelige huid.\", \"Nivea Labello Protect+ Lippenbalsem SPF 15 Duo 2x4,8g verzorgt droge en gebarsten lippen, kalmeert direct en houdt je lippen 24u lang gehydrateerd. De formule trekt snel in en helpt gebarsten lippen te herstellen, terwijl de SPF 15 beschermt tegen schadelijke zonnestralen. Met actieve ingrediënten zoals sheaboter, jojoba-olie en vitamine E. Gebruik de lippenbalsem regelmatig voor zachte en soepele lippen. Let op: niet gebruiken bij allergie voor een van de ingrediënten.\"]}, {\"source_sentence\": \"Een voedingssupplement met essentiële voedingsstoffen en antioxidanten om de huid van binnenuit te voeden en te beschermen. Ideaal voor vrouwen van 29 jaar die streven naar een gezonde en stralende huid.\", \"sentences\": [\"De Babor Shaping Vitamine ACE Lichaamscrème is een luxe crème die de huid intensief voedt en hydrateert. Dankzij de krachtige combinatie van vitamine A, C en E wordt de huid verstevigd, verhelderd en beschermd tegen vrije radicalen. Deze crème is ideaal voor het verbeteren van de elasticiteit en stevigheid van de huid, waardoor het uiterlijk van cellulitis en striae wordt verminderd. De actieve ingrediënten dringen diep door in de huid om langdurige resultaten te bieden. Breng de crème dagelijks aan op een gereinigde huid en masseer zachtjes in tot het volledig is opgenomen. Vermijd contact met de ogen en gebruik niet op een beschadigde huid.\", \"Ma Provence Shampoo Anti-Roos met Natuurlijke Witte Klei is een handig en milieuvriendelijk product dat perfect is voor op reis. Met een handige maat van 85 g en een centraal gat voor een gemakkelijke grip, komt deze shampoo overeen met 40-50 shampoos, wat gelijk staat aan 2 flessen van 250 ml. Deze shampoo heeft geen conditioner nodig, wat resulteert in minder verpakkingsafval. De formule is 100% natuurlijk en bevat 20% biologische ingrediënten, waardoor het ideaal is voor mensen die op zoek zijn naar natuurlijke producten. Ma Provence Shampoo Anti-Roos is speciaal ontwikkeld om roos te bestrijden en is gecertificeerd als 100% natuurlijk en biologisch. Gebruiksaanwijzing: Breng een kleine hoeveelheid shampoo aan op nat haar, masseer zachtjes in en spoel grondig uit. Gebruik indien nodig. Voorzorgsmaatregelen: Vermijd contact met de ogen en bij eventuele irritatie stop het gebruik.\", \"Avène Men Hydraterende Anti-Agingverzorging is speciaal ontwikkeld voor mannen om de huid te hydrateren, revitaliseren en strakker te maken. Deze verzorging werkt op de tekenen van huidveroudering, zoals de structuur van de dermis en elasticiteit, dankzij actieve ingrediënten zoals hyaluronzuur en antioxidanten. De niet-plakkerige textuur zorgt voor een comfortabel gevoel en is geschikt voor alle gevoelige huidtypes, zelfs na het scheren. Breng de verzorging aan op het gelaat en de hals voor een stevigere en compacter uitziende huid. Gebruik de pompfles van 50 ml volgens de aanbevolen instructies voor optimale resultaten.\"]}, {\"source_sentence\": \"Een milde reiniger speciaal ontwikkeld voor de rijpere huid van vrouwen boven de 60. 
Verrijkt met antioxidanten en hydraterende ingrediënten om de doffe huid te revitaliseren en onzuiverheden te verwijderen.\", \"sentences\": [\"De Cattier Zuiverende Reinigingsgel is speciaal ontwikkeld voor de vette huid met onzuiverheden en puistjes. Met biologische essentiële oliën van tea tree, rozemarijn en pepermunt reinigt deze gel zacht de huid en verwijdert effectief onzuiverheden en overtollig talg. Verrijkt met aloë vera, biedt het een aangename sensatie van frisheid en laat de huid helder en gezond achter. De actieve ingrediënten, waaronder biologische tea tree olie en aloë vera, helpen de huid te zuiveren, hydrateren en beschermen. Breng de gel aan op een vochtig gezicht, vermijd de oogcontour, masseer in en spoel af met lauw water. Gebruik de gel regelmatig voor een zuivere en gezonde huid.\", \"De Clarins Extra-Firming Anti-Rimpel Rijke Verstevigende Dagcrème is een ideale anti-ageing dagverzorging voor de droge huid, speciaal ontworpen voor vrouwen vanaf 40 jaar. Deze crème zorgt voor een zichtbaar stevigere en energieke huid, vermindert rimpels en herdefinieert het ovaal van het gezicht. De formule bevat actieve ingrediënten zoals shea butter, haverextract en vitamine E die de huid hydrateren, beschermen en verstevigen. Breng 's ochtends aan op een schone huid voor een egale teint en stralende huid. Let op: vermijd contact met de ogen en bij eventuele irritatie stop het gebruik. Pot 50 ml.\", \"La Roche-Posay Effaclar Zuiverende Schuimende Reiningsgel is speciaal ontwikkeld voor de vette huid met neiging tot acne. Deze reinigingsgel reinigt en zuivert het gezicht zachtjes, verwijdert onzuiverheden en overmatig sebum, en houdt de huid schoon en fris. De formule bevat thermaal water van La Roche-Posay voor een kalmerende werking. Geschikt voor volwassenen en adolescenten met een vette huid, oneffenheden of acne. Gebruik 1 à 2 keer per dag, masseer zachtjes in op een vochtig gezicht en spoel grondig af met water. Niet gebruiken bij overgevoeligheid voor een van de ingrediënten.\"]}, {\"source_sentence\": \"Een voedende gezichtsverzorging die diep in de huid doordringt om droogheid te behandelen. Speciaal samengesteld voor vrouwen van 24 jaar, deze crème helpt de huid te hydrateren en te herstellen, waardoor een gezonde gloed wordt bevorderd.\", \"sentences\": [\"Waam Damast Bloemenwater Bio Spray 200ml is een kalmerende en verfrissende spray met de heerlijke geur van Damastroos. Dit bloemenwater hydrateert intensief, verstevigt en zuivert de huid op milde wijze, ideaal voor gecombineerde tot vette huidtypes. Het staat bekend om zijn antiverouderingseigenschappen en helpt bij het voorkomen van huidveroudering. De spray kan gebruikt worden voor het gezicht, lichaam en haar, en is geschikt voor alle huidtypes, vooral rijpere of reactieve huid. De belangrijkste actieve ingrediënten zijn biologisch aloë vera water en Damastroosbloesemwater. Gebruik de spray om make-up te verwijderen, huidirritatie te kalmeren of je huis subtiel te parfumeren. Niet doorslikken en bewaren op kamertemperatuur.\", \"Benecos Natural Care Lippenbalsem Sinaas is een fruitige lipverzorging met een heerlijke zoete sinaasappelgeur, verrijkt met hoogwaardige biologische sinaasappelolie om de lippen zacht en gehydrateerd te houden. Deze lippenbalsem is ideaal voor dagelijks gebruik en biedt bescherming tegen uitdroging en schrale lippen. De natuurlijke ingrediënten zorgen voor een milde verzorging en een aangename geur. Breng de lippenbalsem eenvoudig aan op de lippen voor een langdurige hydratatie. 
Deze lippenbalsem is geschikt voor alle huidtypes en bevat 4,8 g product. Gebruik het product naar behoefte en geniet van zachte en verzorgde lippen.\", \"De Fleurance Nature Hydraterende Gezichtscreme Met Aloë Vera is speciaal ontwikkeld om de natuurlijke hydratatie van de huid te behouden en te beschermen tegen agressieve invloeden van buitenaf. Deze lichte crème bevat aloë vera, sheaboter en jojobaolie die de huid verzachten, hydrateren en voeden. Geschikt voor alle huidtypes, deze bio gecertificeerde crème trekt snel in en laat de huid soepel en gezond aanvoelen. Breng de crème aan op gezicht en hals met lichte masserende bewegingen voor een gezonde en goed gehydrateerde huid. Bevat 50 ml.\"]}, {\"source_sentence\": \"Een hydraterende gezichtsverzorging die de doffe huid van een 18-jarige vrouw weer laat stralen. Bevat antioxidanten die de huid beschermen tegen schadelijke invloeden van buitenaf. Vermindert wallen en onzuiverheden voor een egale en gezonde teint.\", \"sentences\": [\"Alpecin Hypo-Sensitive Shampoo is speciaal ontwikkeld voor mensen met een gevoelige hoofdhuid en biedt een zachte reiniging en hydratatie. De hypoallergene formule bevat geen parfum, kleurstoffen of conserveermiddelen en is zelfs geschikt voor mensen met atopisch eczeem. Met actieve ingrediënten zoals salie-extract en bisabolol kalmeert en voorkomt het irritaties, terwijl het de huidbarrière versterkt voor een gezonde balans. Gebruik de shampoo dagelijks door het zachtjes in te masseren op nat haar en grondig uit te spoelen. De fles bevat 250 ml en is ideaal voor mensen met een droge en gevoelige hoofdhuid.\", \"Phyto Phytocolor Permanente Kleuring 3 Donkerbruin 1 Kit is een haarverf verrijkt met plantaardige pigmenten voor een rijke kleur die niet vervaagt en 100% grijze haren dekt. De formule bevat monoï en jojoba oliën voor een mooie glans en verzorging, terwijl kalmerende ingrediënten irritatie tegengaan. De kleurcrème bevat geen PPD of resorcine en is geschikt voor haar. Breng het mengsel aan op droog, ongewassen haar en laat het inwerken volgens de instructies. Voer altijd een allergietest uit voor gebruik. Met Phyto Phytocolor geniet je van een intense kleur met natuurlijke schakeringen en verzorg je je haar op een zachte manier.\", \"De T.LeClerc Geheime Droge Olie is een voedende olie met een poederige geur van witte iris die de huid hydrateert en een langdurige geur achterlaat. Geschikt voor de gevoelige huid, bevat deze olie 5 plantaardige oliën zoals rijstzemelolie, zoete amandelolie en sesamzaadolie die de huid kalmeren, beschermen en revitaliseren. Ideaal voor het lichaam en haar, kan de olie het hele jaar door worden gebruikt door een paar druppels in de handpalmen te verwarmen en in te masseren. Voor een effectieve werking kan de olie ook als masker op de hoofdhuid en lengten worden aangebracht en na 30 minuten worden uitgespoeld. 
Gebruik met voorzichtigheid bij allergieën voor een van de ingrediënten.\"]}], \"model-index\": [{\"name\": \"SentenceTransformer based on ymelka/robbert-cosmetic-v2-finetuned\", \"results\": [{\"task\": {\"type\": \"semantic-similarity\", \"name\": \"Semantic Similarity\"}, \"dataset\": {\"name\": \"dev\", \"type\": \"dev\"}, \"metrics\": [{\"type\": \"pearson_cosine\", \"value\": 0.8646855536832553, \"name\": \"Pearson Cosine\"}, {\"type\": \"spearman_cosine\", \"value\": 0.9307532547762918, \"name\": \"Spearman Cosine\"}, {\"type\": \"pearson_manhattan\", \"value\": 0.8406446921386265, \"name\": \"Pearson Manhattan\"}, {\"type\": \"spearman_manhattan\", \"value\": 0.9304243863652625, \"name\": \"Spearman Manhattan\"}, {\"type\": \"pearson_euclidean\", \"value\": 0.8382514640398003, \"name\": \"Pearson Euclidean\"}, {\"type\": \"spearman_euclidean\", \"value\": 0.9307301686010424, \"name\": \"Spearman Euclidean\"}, {\"type\": \"pearson_dot\", \"value\": 0.8395448144279598, \"name\": \"Pearson Dot\"}, {\"type\": \"spearman_dot\", \"value\": 0.926131782390444, \"name\": \"Spearman Dot\"}, {\"type\": \"pearson_max\", \"value\": 0.8646855536832553, \"name\": \"Pearson Max\"}, {\"type\": \"spearman_max\", \"value\": 0.9307532547762918, \"name\": \"Spearman Max\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION","SEMANTIC_SIMILARITY"],"string":"[\n \"TEXT_CLASSIFICATION\",\n \"SEMANTIC_SIMILARITY\"\n]"},"__index_level_0__":{"kind":"number","value":45550,"string":"45,550"}}},{"rowIdx":43773,"cells":{"id":{"kind":"string","value":"chunwoolee0/klue_ynat_roberta_base_model"},"author":{"kind":"string","value":"chunwoolee0"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","tensorboard","roberta","text-classification","generated_from_trainer","dataset:klue","arxiv:2105.09680","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tensorboard\",\n \"roberta\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"dataset:klue\",\n \"arxiv:2105.09680\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-07-18T14:43:31Z","string":"2023-07-18T14:43:31Z"},"last_modified":{"kind":"string","value":"2023-09-12T01:28:58+00:00"},"downloads":{"kind":"number","value":25,"string":"25"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- klue\nmetrics:\n- f1\ntags:\n- generated_from_trainer\nmodel-index:\n- name: klue_ynat_roberta_base_model\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: klue\n type: klue\n config: ynat\n split: validation\n args: ynat\n metrics:\n - type: f1\n value: 0.872014500465787\n name: F1\n---\n\n\n\n# klue_ynat_roberta_base_model\n\nThis model is a fine-tuned version of [klue/roberta-base](https://huggingface.co/klue/roberta-base) on the klue dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.3747\n- F1: 0.8720\n\n## Model description\n\nPretrained RoBERTa Model on Korean Language. See [Github](https://github.com/KLUE-benchmark/KLUE) and [Paper](https://arxiv.org/abs/2105.09680) for more details.\n\n\n## Intended uses & limitations\n\nPretrained RoBERTa Model on Korean Language. 
See Github and Paper for more details.\n\n## Training and evaluation data\n\n## How to use\n\n_NOTE:_ Use `BertTokenizer` instead of RobertaTokenizer. (`AutoTokenizer` will load `BertTokenizer`)\n\n```python\nfrom transformers import AutoModel, AutoTokenizer\n\nmodel = AutoModel.from_pretrained(\"klue/roberta-base\")\ntokenizer = AutoTokenizer.from_pretrained(\"klue/roberta-base\")\n```\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 256\n- eval_batch_size: 256\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:------:|\n| No log | 1.0 | 179 | 0.4838 | 0.8444 |\n| No log | 2.0 | 358 | 0.3848 | 0.8659 |\n| 0.4203 | 3.0 | 537 | 0.3778 | 0.8690 |\n| 0.4203 | 4.0 | 716 | 0.3762 | 0.8702 |\n| 0.4203 | 5.0 | 895 | 0.3747 | 0.8720 |\n\n\n### Framework versions\n\n- Transformers 4.30.2\n- Pytorch 2.0.1+cu118\n- Datasets 2.13.1\n- Tokenizers 0.13.3\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# klue_ynat_roberta_base_model\n\nThis model is a fine-tuned version of [klue/roberta-base](https://huggingface.co/klue/roberta-base) on the klue dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.3747\n- F1: 0.8720\n\n## Model description\n\nPretrained RoBERTa Model on Korean Language. See [Github](https://github.com/KLUE-benchmark/KLUE) and [Paper](https://arxiv.org/abs/2105.09680) for more details.\n\n\n## Intended uses & limitations\n\nPretrained RoBERTa Model on Korean Language. See Github and Paper for more details.\n\n## Training and evaluation data\n\n## How to use\n\n_NOTE:_ Use `BertTokenizer` instead of RobertaTokenizer. 
(`AutoTokenizer` will load `BertTokenizer`)\n\n```python\nfrom transformers import AutoModel, AutoTokenizer\n\nmodel = AutoModel.from_pretrained(\"klue/roberta-base\")\ntokenizer = AutoTokenizer.from_pretrained(\"klue/roberta-base\")\n```\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 256\n- eval_batch_size: 256\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:------:|\n| No log | 1.0 | 179 | 0.4838 | 0.8444 |\n| No log | 2.0 | 358 | 0.3848 | 0.8659 |\n| 0.4203 | 3.0 | 537 | 0.3778 | 0.8690 |\n| 0.4203 | 4.0 | 716 | 0.3762 | 0.8702 |\n| 0.4203 | 5.0 | 895 | 0.3747 | 0.8720 |\n\n\n### Framework versions\n\n- Transformers 4.30.2\n- Pytorch 2.0.1+cu118\n- Datasets 2.13.1\n- Tokenizers 0.13.3\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"klue\"], \"metrics\": [\"f1\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"klue_ynat_roberta_base_model\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"klue\", \"type\": \"klue\", \"config\": \"ynat\", \"split\": \"validation\", \"args\": \"ynat\"}, \"metrics\": [{\"type\": \"f1\", \"value\": 0.872014500465787, \"name\": \"F1\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45551,"string":"45,551"}}},{"rowIdx":43774,"cells":{"id":{"kind":"string","value":"cifope/nllb-fr-wol-wol-fr-3.3B"},"author":{"kind":"string","value":"cifope"},"task_category":{"kind":"string","value":"translation"},"tags":{"kind":"list like","value":["transformers","pytorch","m2m_100","text2text-generation","translation","wo","fr","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"m2m_100\",\n \"text2text-generation\",\n \"translation\",\n \"wo\",\n \"fr\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-04-07T22:32:33Z","string":"2024-04-07T22:32:33Z"},"last_modified":{"kind":"string","value":"2024-04-07T23:14:42+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":4,"string":"4"},"README":{"kind":"string","value":"---\nlanguage:\n- wo\n- fr\nlicense: mit\nmetrics:\n- bleu\npipeline_tag: translation\n---\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":""},"metadata":{"kind":"string","value":"{\"language\": [\"wo\", \"fr\"], \"license\": \"mit\", \"metrics\": [\"bleu\"], \"pipeline_tag\": \"translation\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45552,"string":"45,552"}}},{"rowIdx":43775,"cells":{"id":{"kind":"string","value":"lperng/medical-model"},"author":{"kind":"string","value":"lperng"},"task_category":{"kind":"null"},"tags":{"kind":"list 
like","value":["gguf","llama","unsloth","medical","chinese","license:apache-2.0","endpoints_compatible","region:us","conversational"],"string":"[\n \"gguf\",\n \"llama\",\n \"unsloth\",\n \"medical\",\n \"chinese\",\n \"license:apache-2.0\",\n \"endpoints_compatible\",\n \"region:us\",\n \"conversational\"\n]"},"created_time":{"kind":"timestamp","value":"2025-02-14T21:56:21Z","string":"2025-02-14T21:56:21Z"},"last_modified":{"kind":"string","value":"2025-02-14T22:07:05+00:00"},"downloads":{"kind":"number","value":28,"string":"28"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: apache-2.0\ntags:\n- unsloth\n- llama\n- medical\n- chinese\n---\n\n# medical-model\n\nThis is a fine-tuned version of the [DeepSeek-R1-Distill-Llama-8B](https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-8B) model, specifically trained on a medical dataset for enhanced performance in medical question answering and reasoning.\n\n## Model Description\n\n- **Base Model:** DeepSeek-R1-Distill-Llama-8B\n- **Fine-tuning Dataset:** [FreedomIntelligence/medical-o1-reasoning-SFT](https://huggingface.co/datasets/FreedomIntelligence/medical-o1-reasoning-SFT)\n- **Fine-tuning Method:** Unsloth's SFTTrainer with LoRA\n- **Language:** Chinese\n\n## Intended Uses & Limitations\n\nThis model is intended for research and educational purposes related to medical question answering. It can provide insightful responses to medical queries, but it is crucial to understand its limitations:\n\n- **Not a Medical Professional:** The model is not a substitute for professional medical advice. Consult with a healthcare provider for any health concerns or before making any decisions based on the model's output.\n- **Potential Biases:** The training data may contain biases that could be reflected in the model's responses. Always critically evaluate the information provided.\n- **Limited Scope:** The model's knowledge is limited to the information present in its training dataset. It may not be able to answer questions outside its domain of expertise.\n\n## How to Use\n\nYou can use this model with the following code:\n```\npython from unsloth import FastLanguageModel import torch\n\nmodel, tokenizer = FastLanguageModel.from_pretrained( model_name=\"lperng/medical-model\", load_in_4bit=True, # Optional: Use 4-bit quantization )\n\nprompt = \"你的医学问题?\" # Replace with your medical question\n\ninputs = tokenizer([prompt], return_tensors=\"pt\").to(\"cuda\") outputs = model.generate(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask) response = tokenizer.batch_decode(outputs)[0]\n\nprint(response)\n```\n## Training and Evaluation Data\n\nThis model was trained on the `FreedomIntelligence/medical-o1-reasoning-SFT` dataset. The dataset contains a collection of medical questions and corresponding answers. 
Evaluation metrics and results will be added in future updates.\n\n## Ethical Considerations\n\n- **Potential for Misinformation:** The model's output should be carefully reviewed to ensure accuracy and prevent the spread of misinformation.\n- **Privacy:** When using the model with personal health information, ensure compliance with relevant privacy regulations.\n- **Responsible Use:** Use the model responsibly and ethically, considering its potential impact on individuals and society.\n\n## Citation\n\nIf you use this model in your work, please cite it as follows:\n```bibtex\n@misc{medical-model,\n  author = {lperng},\n  title = {medical-model},\n  year = {2023},\n  publisher = {Hugging Face},\n  howpublished = {\\url{[redacted link]}}\n}\n```\n\n## Disclaimer\n\nThis model is provided \"as is\" without warranty of any kind, express or implied. The authors and contributors are not responsible for any damages or liabilities arising from the use of this model.\n\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"BioNLP"},"model_cards":{"kind":"string","value":"\n# medical-model\n\nThis is a fine-tuned version of the [DeepSeek-R1-Distill-Llama-8B](https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-8B) model, specifically trained on a medical dataset for enhanced performance in medical question answering and reasoning.\n\n## Model Description\n\n- **Base Model:** DeepSeek-R1-Distill-Llama-8B\n- **Fine-tuning Dataset:** [FreedomIntelligence/medical-o1-reasoning-SFT](https://huggingface.co/datasets/FreedomIntelligence/medical-o1-reasoning-SFT)\n- **Fine-tuning Method:** Unsloth's SFTTrainer with LoRA\n- **Language:** Chinese\n\n## Intended Uses & Limitations\n\nThis model is intended for research and educational purposes related to medical question answering. It can provide insightful responses to medical queries, but it is crucial to understand its limitations:\n\n- **Not a Medical Professional:** The model is not a substitute for professional medical advice. Consult with a healthcare provider for any health concerns or before making any decisions based on the model's output.\n- **Potential Biases:** The training data may contain biases that could be reflected in the model's responses. Always critically evaluate the information provided.\n- **Limited Scope:** The model's knowledge is limited to the information present in its training dataset. It may not be able to answer questions outside its domain of expertise.\n\n## How to Use\n\nYou can use this model with the following code:\n```python\nfrom unsloth import FastLanguageModel\nimport torch\n\nmodel, tokenizer = FastLanguageModel.from_pretrained(\n    model_name=\"lperng/medical-model\",\n    load_in_4bit=True,  # Optional: use 4-bit quantization\n)\n\nprompt = \"你的医学问题?\"  # Replace with your medical question\n\ninputs = tokenizer([prompt], return_tensors=\"pt\").to(\"cuda\")\noutputs = model.generate(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask)\nresponse = tokenizer.batch_decode(outputs)[0]\n\nprint(response)\n```\n\n## Training and Evaluation Data\n\nThis model was trained on the `FreedomIntelligence/medical-o1-reasoning-SFT` dataset. The dataset contains a collection of medical questions and corresponding answers. 
Evaluation metrics and results will be added in future updates.\n\n## Ethical Considerations\n\n- **Potential for Misinformation:** The model's output should be carefully reviewed to ensure accuracy and prevent the spread of misinformation.\n- **Privacy:** When using the model with personal health information, ensure compliance with relevant privacy regulations.\n- **Responsible Use:** Use the model responsibly and ethically, considering its potential impact on individuals and society.\n\n## Citation\n\nIf you use this model in your work, please cite it as follows:\n```\n@misc{medical-model, author = {lperng}, title = {medical-model}, year = {2023}, publisher = {Hugging Face}, howpublished = {\\url{[redacted link], }\n```\n## Disclaimer\n\nThis model is provided \"as is\" without warranty of any kind, express or implied. The authors and contributors are not responsible for any damages or liabilities arising from the use of this model.\n\n"},"metadata":{"kind":"string","value":"{\"license\": \"apache-2.0\", \"tags\": [\"unsloth\", \"llama\", \"medical\", \"chinese\"]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["QUESTION_ANSWERING"],"string":"[\n \"QUESTION_ANSWERING\"\n]"},"__index_level_0__":{"kind":"number","value":45553,"string":"45,553"}}},{"rowIdx":43776,"cells":{"id":{"kind":"string","value":"buskingsue/distilbert-base-uncased-finetuned-emotion"},"author":{"kind":"string","value":"buskingsue"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","distilbert","text-classification","generated_from_trainer","dataset:emotion","base_model:distilbert/distilbert-base-uncased","base_model:finetune:distilbert/distilbert-base-uncased","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"distilbert\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"dataset:emotion\",\n \"base_model:distilbert/distilbert-base-uncased\",\n \"base_model:finetune:distilbert/distilbert-base-uncased\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-10-14T23:34:00Z","string":"2023-10-14T23:34:00Z"},"last_modified":{"kind":"string","value":"2023-10-14T23:45:41+00:00"},"downloads":{"kind":"number","value":91,"string":"91"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: distilbert-base-uncased\ndatasets:\n- emotion\nlicense: apache-2.0\nmetrics:\n- accuracy\n- f1\ntags:\n- generated_from_trainer\nmodel-index:\n- name: distilbert-base-uncased-finetuned-emotion\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: emotion\n type: emotion\n config: split\n split: validation\n args: split\n metrics:\n - type: accuracy\n value: 0.92\n name: Accuracy\n - type: f1\n value: 0.9199406554171253\n name: F1\n---\n\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2221\n- Accuracy: 0.92\n- F1: 0.9199\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training 
procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| 0.8424 | 1.0 | 250 | 0.3382 | 0.9015 | 0.8994 |\n| 0.2553 | 2.0 | 500 | 0.2221 | 0.92 | 0.9199 |\n\n\n### Framework versions\n\n- Transformers 4.34.0\n- Pytorch 2.0.1+cu118\n- Datasets 2.14.5\n- Tokenizers 0.14.1\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2221\n- Accuracy: 0.92\n- F1: 0.9199\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| 0.8424 | 1.0 | 250 | 0.3382 | 0.9015 | 0.8994 |\n| 0.2553 | 2.0 | 500 | 0.2221 | 0.92 | 0.9199 |\n\n\n### Framework versions\n\n- Transformers 4.34.0\n- Pytorch 2.0.1+cu118\n- Datasets 2.14.5\n- Tokenizers 0.14.1\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"distilbert-base-uncased\", \"datasets\": [\"emotion\"], \"license\": \"apache-2.0\", \"metrics\": [\"accuracy\", \"f1\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"distilbert-base-uncased-finetuned-emotion\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"emotion\", \"type\": \"emotion\", \"config\": \"split\", \"split\": \"validation\", \"args\": \"split\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.92, \"name\": \"Accuracy\"}, {\"type\": \"f1\", \"value\": 0.9199406554171253, \"name\": \"F1\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45554,"string":"45,554"}}},{"rowIdx":43777,"cells":{"id":{"kind":"string","value":"PlanTL-GOB-ES/roberta-base-ca"},"author":{"kind":"string","value":"PlanTL-GOB-ES"},"task_category":{"kind":"string","value":"fill-mask"},"tags":{"kind":"list like","value":["transformers","pytorch","roberta","fill-mask","masked-lm","BERTa","catalan","ca","license:apache-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"roberta\",\n \"fill-mask\",\n \"masked-lm\",\n \"BERTa\",\n \"catalan\",\n \"ca\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-03-02T23:29:04Z","string":"2022-03-02T23:29:04Z"},"last_modified":{"kind":"string","value":"2022-11-15T16:00:37+00:00"},"downloads":{"kind":"number","value":571,"string":"571"},"likes":{"kind":"number","value":5,"string":"5"},"README":{"kind":"string","value":"---\nlanguage: ca\nlicense: apache-2.0\ntags:\n- masked-lm\n- BERTa\n- catalan\nwidget:\n- text: El Català és una llengua molt .\n- text: Salvador Dalí va viure a .\n- text: La Costa Brava té les millors d'Espanya.\n- text: El cacaolat és un batut de .\n- text: és la capital de la Garrotxa.\n- text: Vaig al a buscar bolets.\n- text: Antoni Gaudí vas ser un molt important per la ciutat.\n- text: Catalunya és una referència en a nivell europeu.\n---\n\n# BERTa: RoBERTa-based Catalan language model\n\n## Table of contents\n
\n\n- [Model description](#model-description)\n- [Intended uses and limitations](#intended-use)\n- [How to use](#how-to-use)\n- [Limitations and bias](#limitations-and-bias)\n- [Training](#training)\n- [Evaluation](#evaluation)\n- [Additional information](#additional-information)\n - [Author](#author)\n - [Contact information](#contact-information)\n - [Copyright](#copyright)\n - [Licensing information](#licensing-information)\n - [Funding](#funding)\n - [Citing information](#citing-information)\n - [Disclaimer](#disclaimer)\n \n
\n\n\n## Model description\nBERTa is a transformer-based masked language model for the Catalan language. \nIt is based on the [RoBERTA](https://github.com/pytorch/fairseq/tree/master/examples/roberta) base model \nand has been trained on a medium-size corpus collected from publicly available corpora and crawlers.\n\nThis model was originally published as [bsc/roberta-base-ca-cased](https://huggingface.co/bsc/roberta-base-ca-cased).\n\n## Intended uses and limitations\nThe model is ready-to-use only for masked language modelling to perform the Fill Mask task (try the inference API or read the next section). \nHowever, it is intended to be fine-tuned on non-generative downstream tasks such as Question Answering, Text Classification or Named Entity Recognition.\n\n\n## How to use\n\n### Load model and tokenizer\n\n``` python\nfrom transformers import AutoTokenizer, AutoModelForMaskedLM\ntokenizer = AutoTokenizer.from_pretrained(\"PlanTL-GOB-ES/roberta-base-ca-cased\")\nmodel = AutoModelForMaskedLM.from_pretrained(\"PlanTL-GOB-ES/roberta-base-ca-cased\")\n```\n\n### Fill Mask task\n\nBelow, an example of how to use the masked language modelling task with a pipeline.\n\n```python\n>>> from transformers import pipeline\n>>> unmasker = pipeline('fill-mask', model='PlanTL-GOB-ES/roberta-base-ca-cased')\n>>> unmasker(\"Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\")\n\n[\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.4177263379096985,\n \"token\": 734,\n \"token_str\": \" Barcelona\"\n },\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.10696165263652802,\n \"token\": 3849,\n \"token_str\": \" Badalona\"\n },\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.08135009557008743,\n \"token\": 19349,\n \"token_str\": \" Collserola\"\n },\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada 
\"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.07330769300460815,\n \"token\": 4974,\n \"token_str\": \" Terrassa\"\n },\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.03317456692457199,\n \"token\": 14333,\n \"token_str\": \" Gavà\"\n }\n]\n```\n\n\n## Limitations and bias\n\n## Training\n### Training corpora and preprocessing\n\nThe training corpus consists of several corpora gathered from web crawling and public corpora.\n\nThe publicly available corpora are:\n\n 1. the Catalan part of the [DOGC](http://opus.nlpl.eu/DOGC-v2.php) corpus, a set of documents from the Official Gazette of the Catalan Government\n \n 2. the [Catalan Open Subtitles](http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/mono/OpenSubtitles.raw.ca.gz), a collection of translated movie subtitles\n \n 3. the non-shuffled version of the Catalan part of the [OSCAR](https://traces1.inria.fr/oscar/) corpus \\\\\\\\cite{suarez2019asynchronous}, \n a collection of monolingual corpora, filtered from [Common Crawl](https://commoncrawl.org/about/)\n \n 4. The [CaWac](http://nlp.ffzg.hr/resources/corpora/cawac/) corpus, a web corpus of Catalan built from the .cat top-level-domain in late 2013\n the non-deduplicated version\n\n 5. the [Catalan Wikipedia articles](https://ftp.acc.umu.se/mirror/wikimedia.org/dumps/cawiki/20200801/) downloaded on 18-08-2020.\n\nThe crawled corpora are:\n\n 6. The Catalan General Crawling, obtained by crawling the 500 most popular .cat and .ad domains\n 7. the Catalan Government Crawling, obtained by crawling the .gencat domain and subdomains, belonging to the Catalan Government\n \n 8. the ACN corpus with 220k news items from March 2015 until October 2020, crawled from the [Catalan News Agency](https://www.acn.cat/)\n\nTo obtain a high-quality training corpus, each corpus have preprocessed with a pipeline of operations, including among the others,\nsentence splitting, language detection, filtering of bad-formed sentences and deduplication of repetitive contents.\nDuring the process, we keep document boundaries are kept. \nFinally, the corpora are concatenated and further global deduplication among the corpora is applied.\nThe final training corpus consists of about 1,8B tokens.\n\n\n### Tokenization and pretraining \n\nThe training corpus has been tokenized using a byte version of [Byte-Pair Encoding (BPE)](https://github.com/openai/gpt-2)\nused in the original [RoBERTA](https://github.com/pytorch/fairseq/tree/master/examples/roberta) model with a vocabulary size of 52,000 tokens. 
\n\nThe BERTa pretraining consists of a masked language model training that follows the approach employed for the RoBERTa base model\nwith the same hyperparameters as in the original work.\n\nThe training lasted a total of 48 hours with 16 NVIDIA V100 GPUs of 16GB DDRAM.\n\n## Evaluation\n\n### CLUB benchmark\n\nThe BERTa model has been fine-tuned on the downstream tasks of the Catalan Language Understanding Evaluation benchmark (CLUB),\nthat has been created along with the model.\n\nIt contains the following tasks and their related datasets:\n\n 1. Part-of-Speech Tagging (POS)\n \n Catalan-Ancora: from the [Universal Dependencies treebank](https://github.com/UniversalDependencies/UD_Catalan-AnCora) of the well-known Ancora corpus\n\n 2. Named Entity Recognition (NER)\n \n **[AnCora Catalan 2.0.0](https://zenodo.org/record/4762031#.YKaFjqGxWUk)**: extracted named entities from the original [Ancora](https://doi.org/10.5281/zenodo.4762030) version,\n filtering out some unconventional ones, like book titles, and transcribed them into a standard CONLL-IOB format\n\n 3. Text Classification (TC)\n \n **[TeCla](https://doi.org/10.5281/zenodo.4627197)**: consisting of 137k news pieces from the Catalan News Agency ([ACN](https://www.acn.cat/)) corpus\n\n 4. Semantic Textual Similarity (STS)\n \n **[Catalan semantic textual similarity](https://doi.org/10.5281/zenodo.4529183)**: consisting of more than 3000 sentence pairs, annotated with the semantic similarity between them, \n scraped from the [Catalan Textual Corpus](https://doi.org/10.5281/zenodo.4519349)\n\n 5. Question Answering (QA):\n \n **[ViquiQuAD](https://doi.org/10.5281/zenodo.4562344)**: consisting of more than 15,000 questions outsourced from Catalan Wikipedia randomly chosen from a set of 596 articles that were originally written in Catalan.\n \n **[XQuAD](https://doi.org/10.5281/zenodo.4526223)**: the Catalan translation of XQuAD, a multilingual collection of manual translations of 1,190 question-answer pairs from English Wikipedia used only as a _test set_\n \nHere are the train/dev/test splits of the datasets:\n\n| Task (Dataset) | Total | Train | Dev | Test |\n|:--|:--|:--|:--|:--|\n| NER (Ancora) |13,581 | 10,628 | 1,427 | 1,526 |\n| POS (Ancora)| 16,678 | 13,123 | 1,709 | 1,846 |\n| STS | 3,073 | 2,073 | 500 | 500 |\n| TC (TeCla) | 137,775 | 110,203 | 13,786 | 13,786|\n| QA (ViquiQuAD) | 14,239 | 11,255 | 1,492 | 1,429 | \n \n\n_The fine-tuning on downstream tasks have been performed with the HuggingFace [**Transformers**](https://github.com/huggingface/transformers) library_\n\n### Results\n\nBelow the evaluation results on the CLUB tasks compared with the multilingual mBERT, XLM-RoBERTa models and \nthe Catalan WikiBERT-ca model\n\n\n| Task | NER (F1) | POS (F1) | STS (Pearson) | TC (accuracy) | QA (ViquiQuAD) (F1/EM) | QA (XQuAD) (F1/EM) | \n| ------------|:-------------:| -----:|:------|:-------|:------|:----|\n| BERTa | **88.13** | **98.97** | **79.73** | **74.16** | **86.97/72.29** | **68.89/48.87** |\n| mBERT | 86.38 | 98.82 | 76.34 | 70.56 | 86.97/72.22 | 67.15/46.51 |\n| XLM-RoBERTa | 87.66 | 98.89 | 75.40 | 71.68 | 85.50/70.47 | 67.10/46.42 |\n| WikiBERT-ca | 77.66 | 97.60 | 77.18 | 73.22 | 85.45/70.75 | 65.21/36.60 |\n\n\n## Additional information\n\n### Author\nText Mining Unit (TeMU) at the Barcelona Supercomputing Center (bsc-temu@bsc.es)\n\n### Contact information\nFor further information, send an email to \n\n### Copyright\nCopyright by the Spanish State Secretariat for Digitalization and Artificial Intelligence 
(SEDIA) (2022)\n\n### Licensing information\n[Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)\n\n### Funding\nThis work was funded by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) within the framework of the Plan-TL.\n\n### Citing information\n\nIf you use this model, please cite our latest paper:\n\n```bibtex\n@inproceedings{armengol-estape-etal-2021-multilingual,\n title = \"Are Multilingual Models the Best Choice for Moderately Under-resourced Languages? {A} Comprehensive Assessment for {C}atalan\",\n author = \"Armengol-Estap{\\'e}, Jordi and\n Carrino, Casimiro Pio and\n Rodriguez-Penagos, Carlos and\n de Gibert Bonet, Ona and\n Armentano-Oller, Carme and\n Gonzalez-Agirre, Aitor and\n Melero, Maite and\n Villegas, Marta\",\n booktitle = \"Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021\",\n month = aug,\n year = \"2021\",\n address = \"Online\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://aclanthology.org/2021.findings-acl.437\",\n doi = \"10.18653/v1/2021.findings-acl.437\",\n pages = \"4933--4946\",\n}\n```\n\n### Disclaimer\nThe models published in this repository are intended for a generalist purpose and are available to third parties. These models may have bias and/or any other undesirable distortions.\n\nWhen third parties, deploy or provide systems and/or services to other parties using any of these models (or using systems based on these models) or become users of the models, they should note that it is their responsibility to mitigate the risks arising from their use and, in any event, to comply with applicable regulations, including regulations regarding the use of artificial intelligence.\n\nIn no event shall the owner of the models (SEDIA – State Secretariat for digitalization and artificial intelligence) nor the creator (BSC – Barcelona Supercomputing Center) be liable for any results arising from the use made by third parties of these models.\n\n\nLos modelos publicados en este repositorio tienen una finalidad generalista y están a disposición de terceros. Estos modelos pueden tener sesgos y/u otro tipo de distorsiones indeseables.\n\nCuando terceros desplieguen o proporcionen sistemas y/o servicios a otras partes usando alguno de estos modelos (o utilizando sistemas basados en estos modelos) o se conviertan en usuarios de los modelos, deben tener en cuenta que es su responsabilidad mitigar los riesgos derivados de su uso y, en todo caso, cumplir con la normativa aplicable, incluyendo la normativa en materia de uso de inteligencia artificial.\n\nEn ningún caso el propietario de los modelos (SEDIA – Secretaría de Estado de Digitalización e Inteligencia Artificial) ni el creador (BSC – Barcelona Supercomputing Center) serán responsables de los resultados derivados del uso que hagan terceros de estos modelos."},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# BERTa: RoBERTa-based Catalan language model\n\n## Table of contents\n
\n\n- [Model description](#model-description)\n- [Intended uses and limitations](#intended-use)\n- [How to use](#how-to-use)\n- [Limitations and bias](#limitations-and-bias)\n- [Training](#training)\n- [Evaluation](#evaluation)\n- [Additional information](#additional-information)\n - [Author](#author)\n - [Contact information](#contact-information)\n - [Copyright](#copyright)\n - [Licensing information](#licensing-information)\n - [Funding](#funding)\n - [Citing information](#citing-information)\n - [Disclaimer](#disclaimer)\n \n
\n\n\n## Model description\nBERTa is a transformer-based masked language model for the Catalan language. \nIt is based on the [RoBERTA](https://github.com/pytorch/fairseq/tree/master/examples/roberta) base model \nand has been trained on a medium-size corpus collected from publicly available corpora and crawlers.\n\nThis model was originally published as [bsc/roberta-base-ca-cased](https://huggingface.co/bsc/roberta-base-ca-cased).\n\n## Intended uses and limitations\nThe model is ready-to-use only for masked language modelling to perform the Fill Mask task (try the inference API or read the next section). \nHowever, it is intended to be fine-tuned on non-generative downstream tasks such as Question Answering, Text Classification or Named Entity Recognition.\n\n\n## How to use\n\n### Load model and tokenizer\n\n``` python\nfrom transformers import AutoTokenizer, AutoModelForMaskedLM\ntokenizer = AutoTokenizer.from_pretrained(\"PlanTL-GOB-ES/roberta-base-ca-cased\")\nmodel = AutoModelForMaskedLM.from_pretrained(\"PlanTL-GOB-ES/roberta-base-ca-cased\")\n```\n\n### Fill Mask task\n\nBelow, an example of how to use the masked language modelling task with a pipeline.\n\n```python\n>>> from transformers import pipeline\n>>> unmasker = pipeline('fill-mask', model='PlanTL-GOB-ES/roberta-base-ca-cased')\n>>> unmasker(\"Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\")\n\n[\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.4177263379096985,\n \"token\": 734,\n \"token_str\": \" Barcelona\"\n },\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.10696165263652802,\n \"token\": 3849,\n \"token_str\": \" Badalona\"\n },\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.08135009557008743,\n \"token\": 19349,\n \"token_str\": \" Collserola\"\n },\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada 
\"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.07330769300460815,\n \"token\": 4974,\n \"token_str\": \" Terrassa\"\n },\n {\n \"sequence\": \" Situada a la costa de la mar Mediterrània, s'assenta en una plana formada \"\n \"entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, \"\n \"i Besòs, al nord-est, i limitada pel sud-est per la línia de costa,\"\n \"i pel nord-oest per la serralada de Collserola \"\n \"(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela \"\n \"la línia de costa encaixant la ciutat en un perímetre molt definit.\",\n \"score\": 0.03317456692457199,\n \"token\": 14333,\n \"token_str\": \" Gavà\"\n }\n]\n```\n\n\n## Limitations and bias\n\n## Training\n### Training corpora and preprocessing\n\nThe training corpus consists of several corpora gathered from web crawling and public corpora.\n\nThe publicly available corpora are:\n\n 1. the Catalan part of the [DOGC](http://opus.nlpl.eu/DOGC-v2.php) corpus, a set of documents from the Official Gazette of the Catalan Government\n \n 2. the [Catalan Open Subtitles](http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/mono/OpenSubtitles.raw.ca.gz), a collection of translated movie subtitles\n \n 3. the non-shuffled version of the Catalan part of the [OSCAR](https://traces1.inria.fr/oscar/) corpus \\\\\\\\cite{suarez2019asynchronous}, \n a collection of monolingual corpora, filtered from [Common Crawl](https://commoncrawl.org/about/)\n \n 4. The [CaWac](http://nlp.ffzg.hr/resources/corpora/cawac/) corpus, a web corpus of Catalan built from the .cat top-level-domain in late 2013\n the non-deduplicated version\n\n 5. the [Catalan Wikipedia articles](https://ftp.acc.umu.se/mirror/wikimedia.org/dumps/cawiki/20200801/) downloaded on 18-08-2020.\n\nThe crawled corpora are:\n\n 6. The Catalan General Crawling, obtained by crawling the 500 most popular .cat and .ad domains\n 7. the Catalan Government Crawling, obtained by crawling the .gencat domain and subdomains, belonging to the Catalan Government\n \n 8. the ACN corpus with 220k news items from March 2015 until October 2020, crawled from the [Catalan News Agency](https://www.acn.cat/)\n\nTo obtain a high-quality training corpus, each corpus have preprocessed with a pipeline of operations, including among the others,\nsentence splitting, language detection, filtering of bad-formed sentences and deduplication of repetitive contents.\nDuring the process, we keep document boundaries are kept. \nFinally, the corpora are concatenated and further global deduplication among the corpora is applied.\nThe final training corpus consists of about 1,8B tokens.\n\n\n### Tokenization and pretraining \n\nThe training corpus has been tokenized using a byte version of [Byte-Pair Encoding (BPE)](https://github.com/openai/gpt-2)\nused in the original [RoBERTA](https://github.com/pytorch/fairseq/tree/master/examples/roberta) model with a vocabulary size of 52,000 tokens. 
\n\nThe BERTa pretraining consists of a masked language model training that follows the approach employed for the RoBERTa base model\nwith the same hyperparameters as in the original work.\n\nThe training lasted a total of 48 hours with 16 NVIDIA V100 GPUs with 16GB of memory each.\n\n## Evaluation\n\n### CLUB benchmark\n\nThe BERTa model has been fine-tuned on the downstream tasks of the Catalan Language Understanding Evaluation benchmark (CLUB),\nwhich was created along with the model.\n\nIt contains the following tasks and their related datasets:\n\n 1. Part-of-Speech Tagging (POS)\n \n Catalan-Ancora: from the [Universal Dependencies treebank](https://github.com/UniversalDependencies/UD_Catalan-AnCora) of the well-known Ancora corpus\n\n 2. Named Entity Recognition (NER)\n \n **[AnCora Catalan 2.0.0](https://zenodo.org/record/4762031#.YKaFjqGxWUk)**: extracted named entities from the original [Ancora](https://doi.org/10.5281/zenodo.4762030) version,\n filtering out some unconventional ones, like book titles, and transcribed them into a standard CoNLL-IOB format\n\n 3. Text Classification (TC)\n \n **[TeCla](https://doi.org/10.5281/zenodo.4627197)**: consisting of 137k news pieces from the Catalan News Agency ([ACN](https://www.acn.cat/)) corpus\n\n 4. Semantic Textual Similarity (STS)\n \n **[Catalan semantic textual similarity](https://doi.org/10.5281/zenodo.4529183)**: consisting of more than 3,000 sentence pairs, annotated with the semantic similarity between them, \n scraped from the [Catalan Textual Corpus](https://doi.org/10.5281/zenodo.4519349)\n\n 5. Question Answering (QA):\n \n **[ViquiQuAD](https://doi.org/10.5281/zenodo.4562344)**: consisting of more than 15,000 questions sourced from Catalan Wikipedia, randomly chosen from a set of 596 articles that were originally written in Catalan.\n \n **[XQuAD](https://doi.org/10.5281/zenodo.4526223)**: the Catalan translation of XQuAD, a multilingual collection of manual translations of 1,190 question-answer pairs from English Wikipedia, used only as a _test set_\n\nHere are the train/dev/test splits of the datasets:\n\n| Task (Dataset) | Total | Train | Dev | Test |\n|:--|:--|:--|:--|:--|\n| NER (Ancora) | 13,581 | 10,628 | 1,427 | 1,526 |\n| POS (Ancora) | 16,678 | 13,123 | 1,709 | 1,846 |\n| STS | 3,073 | 2,073 | 500 | 500 |\n| TC (TeCla) | 137,775 | 110,203 | 13,786 | 13,786 |\n| QA (ViquiQuAD) | 14,239 | 11,255 | 1,492 | 1,429 |\n\n_The fine-tuning on downstream tasks has been performed with the HuggingFace [**Transformers**](https://github.com/huggingface/transformers) library._
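\n\nAs a rough illustration of this downstream use (a minimal sketch of our own with toy labelled data, not the actual CLUB fine-tuning setup; preprocessing and hyperparameters are deliberately simplified):\n\n```python\nimport torch\nfrom transformers import (AutoModelForSequenceClassification, AutoTokenizer,\n                          Trainer, TrainingArguments)\n\n# a randomly initialised classification head is added on top of BERTa\ntokenizer = AutoTokenizer.from_pretrained(\"PlanTL-GOB-ES/roberta-base-ca-cased\")\nmodel = AutoModelForSequenceClassification.from_pretrained(\n    \"PlanTL-GOB-ES/roberta-base-ca-cased\", num_labels=2)\n\n# toy binary classification data, for illustration only\ntexts = [\"M'agrada molt aquesta pel·lícula.\", \"Quina pèrdua de temps.\"]\nlabels = [1, 0]\nenc = tokenizer(texts, truncation=True, padding=True)\n\nclass ToyDataset(torch.utils.data.Dataset):\n    def __len__(self):\n        return len(labels)\n    def __getitem__(self, i):\n        item = {k: torch.tensor(v[i]) for k, v in enc.items()}\n        item[\"labels\"] = torch.tensor(labels[i])\n        return item\n\ntrainer = Trainer(model=model,\n                  args=TrainingArguments(output_dir=\"berta-tc-sketch\",\n                                         num_train_epochs=1),\n                  train_dataset=ToyDataset())\ntrainer.train()\n```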
\n\n### Results\n\nBelow are the evaluation results on the CLUB tasks, compared with the multilingual mBERT and XLM-RoBERTa models and \nthe Catalan WikiBERT-ca model:\n\n\n| Task | NER (F1) | POS (F1) | STS (Pearson) | TC (accuracy) | QA (ViquiQuAD) (F1/EM) | QA (XQuAD) (F1/EM) | \n| ------------|:-------------:| -----:|:------|:-------|:------|:----|\n| BERTa | **88.13** | **98.97** | **79.73** | **74.16** | **86.97/72.29** | **68.89/48.87** |\n| mBERT | 86.38 | 98.82 | 76.34 | 70.56 | 86.97/72.22 | 67.15/46.51 |\n| XLM-RoBERTa | 87.66 | 98.89 | 75.40 | 71.68 | 85.50/70.47 | 67.10/46.42 |\n| WikiBERT-ca | 77.66 | 97.60 | 77.18 | 73.22 | 85.45/70.75 | 65.21/36.60 |\n\n\n## Additional information\n\n### Author\nText Mining Unit (TeMU) at the Barcelona Supercomputing Center (bsc-temu@bsc.es)\n\n### Contact information\nFor further information, send an email to bsc-temu@bsc.es.\n\n### Copyright\nCopyright by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) (2022)\n\n### Licensing information\n[Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)\n\n### Funding\nThis work was funded by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) within the framework of the Plan-TL.\n\n### Citing information\n\nIf you use this model, please cite our latest paper:\n\n```bibtex\n@inproceedings{armengol-estape-etal-2021-multilingual,\n    title = \"Are Multilingual Models the Best Choice for Moderately Under-resourced Languages? {A} Comprehensive Assessment for {C}atalan\",\n    author = \"Armengol-Estap{\\'e}, Jordi and\n      Carrino, Casimiro Pio and\n      Rodriguez-Penagos, Carlos and\n      de Gibert Bonet, Ona and\n      Armentano-Oller, Carme and\n      Gonzalez-Agirre, Aitor and\n      Melero, Maite and\n      Villegas, Marta\",\n    booktitle = \"Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021\",\n    month = aug,\n    year = \"2021\",\n    address = \"Online\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://aclanthology.org/2021.findings-acl.437\",\n    doi = \"10.18653/v1/2021.findings-acl.437\",\n    pages = \"4933--4946\",\n}\n```\n\n### Disclaimer\nThe models published in this repository are intended for a generalist purpose and are available to third parties. These models may have bias and/or other undesirable distortions.\n\nWhen third parties deploy or provide systems and/or services to other parties using any of these models (or using systems based on these models) or become users of the models, they should note that it is their responsibility to mitigate the risks arising from their use and, in any event, to comply with applicable regulations, including regulations regarding the use of artificial intelligence.\n\nIn no event shall the owner of the models (SEDIA – State Secretariat for Digitalization and Artificial Intelligence) or the creator (BSC – Barcelona Supercomputing Center) be liable for any results arising from the use made by third parties of these models.\n
"},"metadata":{"kind":"string","value":"{\"language\": \"ca\", \"license\": \"apache-2.0\", \"tags\": [\"masked-lm\", \"BERTa\", \"catalan\"], \"widget\": [{\"text\": \"El Català és una llengua molt <mask>.\"}, {\"text\": \"Salvador Dalí va viure a <mask>.\"}, {\"text\": \"La Costa Brava té les millors <mask> d'Espanya.\"}, {\"text\": \"El cacaolat és un batut de <mask>.\"}, {\"text\": \"<mask> és la capital de la Garrotxa.\"}, {\"text\": \"Vaig al <mask> a buscar bolets.\"}, {\"text\": \"Antoni Gaudí va ser un <mask> molt important per la ciutat.\"}, {\"text\": \"Catalunya és una referència en <mask> a nivell europeu.\"}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["NAMED_ENTITY_RECOGNITION","TEXT_CLASSIFICATION","QUESTION_ANSWERING","SEMANTIC_SIMILARITY","TRANSLATION"],"string":"[\n  \"NAMED_ENTITY_RECOGNITION\",\n  \"TEXT_CLASSIFICATION\",\n  \"QUESTION_ANSWERING\",\n  \"SEMANTIC_SIMILARITY\",\n  \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45555,"string":"45,555"}}},{"rowIdx":43778,"cells":{"id":{"kind":"string","value":"lilferrit/ft-wmt14-5"},"author":{"kind":"string","value":"lilferrit"},"task_category":{"kind":"string","value":"text2text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","mt5","text2text-generation","generated_from_trainer","de","en","dataset:lilferrit/wmt14-short","base_model:google/mt5-small","base_model:finetune:google/mt5-small","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n  \"transformers\",\n  \"safetensors\",\n  \"mt5\",\n  \"text2text-generation\",\n  \"generated_from_trainer\",\n  \"de\",\n  \"en\",\n  \"dataset:lilferrit/wmt14-short\",\n  \"base_model:google/mt5-small\",\n  \"base_model:finetune:google/mt5-small\",\n  \"license:apache-2.0\",\n  \"model-index\",\n  \"autotrain_compatible\",\n  \"endpoints_compatible\",\n  \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-05-28T11:02:27Z","string":"2024-05-28T11:02:27Z"},"last_modified":{"kind":"string","value":"2024-05-28T22:21:51+00:00"},"downloads":{"kind":"number","value":7,"string":"7"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: google/mt5-small\ndatasets:\n- lilferrit/wmt14-short\nlanguage:\n- de\n- en\nlicense: apache-2.0\nmetrics:\n- bleu\ntags:\n- generated_from_trainer\nmodel-index:\n- name: ft-wmt14-5\n  results:\n  - task:\n      type: translation\n      name: Translation\n    dataset:\n      name: lilferrit/wmt14-short\n      type: lilferrit/wmt14-short\n    metrics:\n    - type: bleu\n      value: 20.7584\n      name: Bleu\n---\n\n\n\n# ft-wmt14-5\n\nThis model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the lilferrit/wmt14-short dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 2.0604\n- Bleu: 20.7584\n- Gen Len: 30.499\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n
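\nSince usage is not documented, here is a minimal inference sketch (our own illustration; it assumes plain source text with no task prefix and a German-to-English direction, neither of which the card confirms):\n\n```python\nfrom transformers import AutoTokenizer, AutoModelForSeq2SeqLM\n\n# hedged sketch: prefix-free de->en translation is an assumption\ntokenizer = AutoTokenizer.from_pretrained(\"lilferrit/ft-wmt14-5\")\nmodel = AutoModelForSeq2SeqLM.from_pretrained(\"lilferrit/ft-wmt14-5\")\n\ninputs = tokenizer(\"Das Haus ist klein.\", return_tensors=\"pt\")\noutput_ids = model.generate(**inputs, max_new_tokens=64)\nprint(tokenizer.decode(output_ids[0], skip_special_tokens=True))\n```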
\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0005\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 16\n- optimizer: Adafactor\n- lr_scheduler_type: constant\n- training_steps: 100000\n\n### Training results\n\n| Training Loss | Epoch  | Step   | Bleu    | Gen Len | Validation Loss |\n|:-------------:|:------:|:------:|:-------:|:-------:|:---------------:|\n| 1.9166        | 0.2778 | 10000  | 15.8119 | 32.097  | 2.3105          |\n| 1.7184        | 0.5556 | 20000  | 17.5903 | 31.1153 | 2.1993          |\n| 1.6061        | 0.8333 | 30000  | 18.9604 | 30.327  | 2.1380          |\n| 1.516         | 1.1111 | 40000  | 19.1444 | 30.2727 | 2.1366          |\n| 1.4675        | 1.3889 | 50000  | 19.7588 | 30.1127 | 2.1208          |\n| 1.4416        | 1.6667 | 60000  | 19.9263 | 30.4463 | 2.0889          |\n| 1.4111        | 1.9444 | 70000  | 20.3323 | 30.1207 | 2.0795          |\n| 1.3603        | 2.2222 | 80000  | 20.5373 | 30.5943 | 2.0850          |\n| 1.3378        | 2.5    | 90000  | 20.7584 | 30.499  | 2.0604          |\n| 1.3381        | 2.7778 | 100000 | 20.6113 | 30.701  | 2.0597          |\n\n\n### Framework versions\n\n- Transformers 4.40.0\n- Pytorch 2.2.2+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# ft-wmt14-5\n\nThis model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the lilferrit/wmt14-short dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 2.0604\n- Bleu: 20.7584\n- Gen Len: 30.499\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0005\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 2\n- total_train_batch_size: 16\n- optimizer: Adafactor\n- lr_scheduler_type: constant\n- training_steps: 100000\n\n### Training results\n\n| Training Loss | Epoch  | Step   | Bleu    | Gen Len | Validation Loss |\n|:-------------:|:------:|:------:|:-------:|:-------:|:---------------:|\n| 1.9166        | 0.2778 | 10000  | 15.8119 | 32.097  | 2.3105          |\n| 1.7184        | 0.5556 | 20000  | 17.5903 | 31.1153 | 2.1993          |\n| 1.6061        | 0.8333 | 30000  | 18.9604 | 30.327  | 2.1380          |\n| 1.516         | 1.1111 | 40000  | 19.1444 | 30.2727 | 2.1366          |\n| 1.4675        | 1.3889 | 50000  | 19.7588 | 30.1127 | 2.1208          |\n| 1.4416        | 1.6667 | 60000  | 19.9263 | 30.4463 | 2.0889          |\n| 1.4111        | 1.9444 | 70000  | 20.3323 | 30.1207 | 2.0795          |\n| 1.3603        | 2.2222 | 80000  | 20.5373 | 30.5943 | 2.0850          |\n| 1.3378        | 2.5    | 90000  | 20.7584 | 30.499  | 2.0604          |\n| 1.3381        | 2.7778 | 100000 | 20.6113 | 30.701  | 2.0597          |\n\n\n### Framework versions\n\n- Transformers 4.40.0\n- Pytorch 2.2.2+cu121\n- Datasets 2.19.0\n- Tokenizers 0.19.1\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"google/mt5-small\", \"datasets\": [\"lilferrit/wmt14-short\"], \"language\": [\"de\", \"en\"], \"license\": \"apache-2.0\", \"metrics\": [\"bleu\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"ft-wmt14-5\", \"results\": [{\"task\": {\"type\":
\"translation\", \"name\": \"Translation\"}, \"dataset\": {\"name\": \"lilferrit/wmt14-short\", \"type\": \"lilferrit/wmt14-short\"}, \"metrics\": [{\"type\": \"bleu\", \"value\": 20.7584, \"name\": \"Bleu\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45556,"string":"45,556"}}},{"rowIdx":43779,"cells":{"id":{"kind":"string","value":"Chung-Hsiung/distilbert-implementation-1"},"author":{"kind":"string","value":"Chung-Hsiung"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","tensorboard","safetensors","distilbert","text-classification","generated_from_trainer","dataset:glue","base_model:distilbert/distilbert-base-uncased","base_model:finetune:distilbert/distilbert-base-uncased","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"tensorboard\",\n \"safetensors\",\n \"distilbert\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"dataset:glue\",\n \"base_model:distilbert/distilbert-base-uncased\",\n \"base_model:finetune:distilbert/distilbert-base-uncased\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-20T04:12:22Z","string":"2023-12-20T04:12:22Z"},"last_modified":{"kind":"string","value":"2023-12-20T15:06:11+00:00"},"downloads":{"kind":"number","value":28,"string":"28"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: distilbert-base-uncased\ndatasets:\n- glue\nlicense: apache-2.0\nmetrics:\n- matthews_correlation\ntags:\n- generated_from_trainer\nmodel-index:\n- name: distilbert-implementation-1\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: glue\n type: glue\n config: cola\n split: validation\n args: cola\n metrics:\n - type: matthews_correlation\n value: 0.5184630555337263\n name: Matthews Correlation\n---\n\n\n\n# distilbert-implementation-1\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.6945\n- Matthews Correlation: 0.5185\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1.2885166618048824e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 25\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |\n|:-------------:|:-----:|:----:|:---------------:|:--------------------:|\n| 0.5306 | 1.0 | 535 | 0.4702 | 0.4543 |\n| 0.3874 | 2.0 | 1070 | 0.4676 | 0.4977 |\n| 0.2771 | 3.0 | 1605 | 0.5421 | 0.5126 |\n| 0.2174 | 4.0 | 2140 | 0.6595 | 0.5016 |\n| 0.1716 | 5.0 | 2675 | 0.6945 | 0.5185 |\n\n\n### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.0+cu121\n- Datasets 2.15.0\n- Tokenizers 
0.15.0\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# distilbert-implementation-1\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.6945\n- Matthews Correlation: 0.5185\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1.2885166618048824e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 25\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |\n|:-------------:|:-----:|:----:|:---------------:|:--------------------:|\n| 0.5306 | 1.0 | 535 | 0.4702 | 0.4543 |\n| 0.3874 | 2.0 | 1070 | 0.4676 | 0.4977 |\n| 0.2771 | 3.0 | 1605 | 0.5421 | 0.5126 |\n| 0.2174 | 4.0 | 2140 | 0.6595 | 0.5016 |\n| 0.1716 | 5.0 | 2675 | 0.6945 | 0.5185 |\n\n\n### Framework versions\n\n- Transformers 4.36.2\n- Pytorch 2.1.0+cu121\n- Datasets 2.15.0\n- Tokenizers 0.15.0\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"distilbert-base-uncased\", \"datasets\": [\"glue\"], \"license\": \"apache-2.0\", \"metrics\": [\"matthews_correlation\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"distilbert-implementation-1\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"glue\", \"type\": \"glue\", \"config\": \"cola\", \"split\": \"validation\", \"args\": \"cola\"}, \"metrics\": [{\"type\": \"matthews_correlation\", \"value\": 0.5184630555337263, \"name\": \"Matthews Correlation\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45557,"string":"45,557"}}},{"rowIdx":43780,"cells":{"id":{"kind":"string","value":"siby18/legal-truncation-classifier"},"author":{"kind":"string","value":"siby18"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["tensorboard","safetensors","distilbert","autotrain","text-classification","base_model:distilbert/distilbert-base-uncased","base_model:finetune:distilbert/distilbert-base-uncased","region:us"],"string":"[\n \"tensorboard\",\n \"safetensors\",\n \"distilbert\",\n \"autotrain\",\n \"text-classification\",\n \"base_model:distilbert/distilbert-base-uncased\",\n \"base_model:finetune:distilbert/distilbert-base-uncased\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-09-25T05:17:02Z","string":"2024-09-25T05:17:02Z"},"last_modified":{"kind":"string","value":"2024-09-25T05:18:30+00:00"},"downloads":{"kind":"number","value":5,"string":"5"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: distilbert/distilbert-base-uncased\ntags:\n- autotrain\n- text-classification\nwidget:\n- text: I love AutoTrain\n---\n\n# Model Trained Using AutoTrain\n\n- Problem type: Text Classification\n\n## Validation Metrics\nloss: 0.10830292105674744\n\nf1: 1.0\n\nprecision: 
1.0\n\nrecall: 1.0\n\nauc: 1.0\n\naccuracy: 1.0\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"distilbert/distilbert-base-uncased\", \"tags\": [\"autotrain\", \"text-classification\"], \"widget\": [{\"text\": \"I love AutoTrain\"}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n  \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45558,"string":"45,558"}}},{"rowIdx":43781,"cells":{"id":{"kind":"string","value":"Helsinki-NLP/opus-mt-tc-big-en-it"},"author":{"kind":"string","value":"Helsinki-NLP"},"task_category":{"kind":"string","value":"translation"},"tags":{"kind":"list like","value":["transformers","pytorch","tf","safetensors","marian","text2text-generation","translation","opus-mt-tc","en","it","license:cc-by-4.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n  \"transformers\",\n  \"pytorch\",\n  \"tf\",\n  \"safetensors\",\n  \"marian\",\n  \"text2text-generation\",\n  \"translation\",\n  \"opus-mt-tc\",\n  \"en\",\n  \"it\",\n  \"license:cc-by-4.0\",\n  \"model-index\",\n  \"autotrain_compatible\",\n  \"endpoints_compatible\",\n  \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-04-13T14:27:31Z","string":"2022-04-13T14:27:31Z"},"last_modified":{"kind":"string","value":"2023-10-10T10:33:03+00:00"},"downloads":{"kind":"number","value":801,"string":"801"},"likes":{"kind":"number","value":5,"string":"5"},"README":{"kind":"string","value":"---\nlanguage:\n- en\n- it\nlicense: cc-by-4.0\ntags:\n- translation\n- opus-mt-tc\nmodel-index:\n- name: opus-mt-tc-big-en-it\n  results:\n  - task:\n      type: translation\n      name: Translation eng-ita\n    dataset:\n      name: flores101-devtest\n      type: flores_101\n      args: eng ita devtest\n    metrics:\n    - type: bleu\n      value: 29.6\n      name: BLEU\n  - task:\n      type: translation\n      name: Translation eng-ita\n    dataset:\n      name: tatoeba-test-v2021-08-07\n      type: tatoeba_mt\n      args: eng-ita\n    metrics:\n    - type: bleu\n      value: 53.9\n      name: BLEU\n  - task:\n      type: translation\n      name: Translation eng-ita\n    dataset:\n      name: newstest2009\n      type: wmt-2009-news\n      args: eng-ita\n    metrics:\n    - type: bleu\n      value: 31.6\n      name: BLEU\n---\n# opus-mt-tc-big-en-it\n\nNeural machine translation model for translating from English (en) to Italian (it).\n\nThis model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models are originally trained using the amazing framework of [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++. The models have been converted to PyTorch using the transformers library by Hugging Face. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train).\n\n* Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (Please cite them if you use this model.)\n\n```\n@inproceedings{tiedemann-thottingal-2020-opus,\n    title = \"{OPUS}-{MT} {--} Building open translation services for the World\",\n    author = {Tiedemann, J{\\\"o}rg and Thottingal, Santhosh},\n    booktitle = \"Proceedings of the 22nd Annual Conference of the European Association for Machine Translation\",\n    month = nov,\n    year = \"2020\",\n    address = \"Lisboa, Portugal\",\n    publisher = \"European Association for Machine Translation\",\n    url = \"https://aclanthology.org/2020.eamt-1.61\",\n    pages = \"479--480\",\n}\n\n@inproceedings{tiedemann-2020-tatoeba,\n    title = \"The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}\",\n    author = {Tiedemann, J{\\\"o}rg},\n    booktitle = \"Proceedings of the Fifth Conference on Machine Translation\",\n    month = nov,\n    year = \"2020\",\n    address = \"Online\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://aclanthology.org/2020.wmt-1.139\",\n    pages = \"1174--1182\",\n}\n```\n\n## Model info\n\n* Release: 2022-03-13\n* source language(s): eng\n* target language(s): ita\n* model: transformer-big\n* data: opusTCv20210807+bt ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge))\n* tokenization: SentencePiece (spm32k,spm32k)\n* original model: [opusTCv20210807+bt_transformer-big_2022-03-13.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.zip)\n* more information on released models: [OPUS-MT eng-ita README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-ita/README.md)\n\n## Usage\n\nA short code example:\n\n```python\nfrom transformers import MarianMTModel, MarianTokenizer\n\nsrc_text = [\n    \"He was always very respectful.\",\n    \"This cat is black. Is the dog, too?\"\n]\n\nmodel_name = \"pytorch-models/opus-mt-tc-big-en-it\"\ntokenizer = MarianTokenizer.from_pretrained(model_name)\nmodel = MarianMTModel.from_pretrained(model_name)\ntranslated = model.generate(**tokenizer(src_text, return_tensors=\"pt\", padding=True))\n\nfor t in translated:\n    print( tokenizer.decode(t, skip_special_tokens=True) )\n\n# expected output:\n# Era sempre molto rispettoso.\n# Questo gatto e' nero, e' anche il cane?\n```\n\nYou can also use OPUS-MT models with the transformers pipelines, for example:\n\n```python\nfrom transformers import pipeline\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-tc-big-en-it\")\nprint(pipe(\"He was always very respectful.\"))\n\n# expected output: Era sempre molto rispettoso.\n```\n\n## Benchmarks\n\n* test set translations: [opusTCv20210807+bt_transformer-big_2022-03-13.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.test.txt)\n* test set scores: [opusTCv20210807+bt_transformer-big_2022-03-13.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.eval.txt)\n* benchmark results: [benchmark_results.txt](benchmark_results.txt)\n* benchmark output: [benchmark_translations.zip](benchmark_translations.zip)\n\n| langpair | testset | chr-F | BLEU  | #sent | #words |\n|----------|---------|-------|-------|-------|--------|\n| eng-ita | tatoeba-test-v2021-08-07 | 0.72539 | 53.9 | 17320 | 116336 |\n| eng-ita | flores101-devtest | 0.59002 | 29.6 | 1012 | 27306 |\n| eng-ita | newssyscomb2009 | 0.60759 | 31.2 | 502 | 11551 |\n| eng-ita | newstest2009 | 0.60441 | 31.6 | 2525 | 63466 |
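\n\nThe chr-F and BLEU numbers above can be recomputed for your own outputs with [sacrebleu](https://github.com/mjpost/sacrebleu); a minimal sketch (our own illustration, not the official evaluation pipeline, which also fixes test sets and tokenization):\n\n```python\nimport sacrebleu\n\n# illustrative only: one system output and one reference stream\nhypotheses = [\"Era sempre molto rispettoso.\"]\nreferences = [[\"Era sempre molto rispettoso.\"]]\n\nprint(sacrebleu.corpus_bleu(hypotheses, references).score)\nprint(sacrebleu.corpus_chrf(hypotheses, references).score)\n```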
\n\n## Acknowledgements\n\nThe work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 771113), and the [MeMAD project](https://memad.eu/), funded by the European Union’s Horizon 2020 Research and Innovation Programme under grant agreement No 780069. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland.\n\n## Model conversion info\n\n* transformers version: 4.16.2\n* OPUS-MT git hash: 3405783\n* port time: Wed Apr 13 17:27:22 EEST 2022\n* port machine: LM0-400-22516.local\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"# opus-mt-tc-big-en-it\n\nNeural machine translation model for translating from English (en) to Italian (it).\n\nThis model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models are originally trained using the amazing framework of [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++. The models have been converted to PyTorch using the transformers library by Hugging Face. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train).\n\n* Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (Please cite them if you use this model.)\n\n```\n@inproceedings{tiedemann-thottingal-2020-opus,\n    title = \"{OPUS}-{MT} {--} Building open translation services for the World\",\n    author = {Tiedemann, J{\\\"o}rg and Thottingal, Santhosh},\n    booktitle = \"Proceedings of the 22nd Annual Conference of the European Association for Machine Translation\",\n    month = nov,\n    year = \"2020\",\n    address = \"Lisboa, Portugal\",\n    publisher = \"European Association for Machine Translation\",\n    url = \"https://aclanthology.org/2020.eamt-1.61\",\n    pages = \"479--480\",\n}\n\n@inproceedings{tiedemann-2020-tatoeba,\n    title = \"The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}\",\n    author = {Tiedemann, J{\\\"o}rg},\n    booktitle = \"Proceedings of the Fifth Conference on Machine Translation\",\n    month = nov,\n    year = \"2020\",\n    address = \"Online\",\n    publisher = \"Association for Computational Linguistics\",\n    url = \"https://aclanthology.org/2020.wmt-1.139\",\n    pages = \"1174--1182\",\n}\n```\n\n## Model info\n\n* Release: 2022-03-13\n* source language(s): eng\n* target language(s): ita\n* model: transformer-big\n* data: opusTCv20210807+bt ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge))\n* tokenization: SentencePiece (spm32k,spm32k)\n* original model: [opusTCv20210807+bt_transformer-big_2022-03-13.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.zip)\n* more information on released models: [OPUS-MT eng-ita README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-ita/README.md)\n\n## Usage\n\nA short code example:\n\n```python\nfrom transformers import MarianMTModel, MarianTokenizer\n\nsrc_text = [\n    \"He was always very respectful.\",\n    \"This cat is black. 
Is the dog, too?\"\n]\n\nmodel_name = \"pytorch-models/opus-mt-tc-big-en-it\"\ntokenizer = MarianTokenizer.from_pretrained(model_name)\nmodel = MarianMTModel.from_pretrained(model_name)\ntranslated = model.generate(**tokenizer(src_text, return_tensors=\"pt\", padding=True))\n\nfor t in translated:\n print( tokenizer.decode(t, skip_special_tokens=True) )\n\n# expected output:\n# Era sempre molto rispettoso.\n# Questo gatto e' nero, e' anche il cane?\n```\n\nYou can also use OPUS-MT models with the transformers pipelines, for example:\n\n```python\nfrom transformers import pipeline\npipe = pipeline(\"translation\", model=\"Helsinki-NLP/opus-mt-tc-big-en-it\")\nprint(pipe(\"He was always very respectful.\"))\n\n# expected output: Era sempre molto rispettoso.\n```\n\n## Benchmarks\n\n* test set translations: [opusTCv20210807+bt_transformer-big_2022-03-13.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.test.txt)\n* test set scores: [opusTCv20210807+bt_transformer-big_2022-03-13.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.eval.txt)\n* benchmark results: [benchmark_results.txt](benchmark_results.txt)\n* benchmark output: [benchmark_translations.zip](benchmark_translations.zip)\n\n| langpair | testset | chr-F | BLEU | #sent | #words |\n|----------|---------|-------|-------|-------|--------|\n| eng-ita | tatoeba-test-v2021-08-07 | 0.72539 | 53.9 | 17320 | 116336 |\n| eng-ita | flores101-devtest | 0.59002 | 29.6 | 1012 | 27306 |\n| eng-ita | newssyscomb2009 | 0.60759 | 31.2 | 502 | 11551 |\n| eng-ita | newstest2009 | 0.60441 | 31.6 | 2525 | 63466 |\n\n## Acknowledgements\n\nThe work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 771113), and the [MeMAD project](https://memad.eu/), funded by the European Union’s Horizon 2020 Research and Innovation Programme under grant agreement No 780069. 
We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland.\n\n## Model conversion info\n\n* transformers version: 4.16.2\n* OPUS-MT git hash: 3405783\n* port time: Wed Apr 13 17:27:22 EEST 2022\n* port machine: LM0-400-22516.local\n"},"metadata":{"kind":"string","value":"{\"language\": [\"en\", \"it\"], \"license\": \"cc-by-4.0\", \"tags\": [\"translation\", \"opus-mt-tc\"], \"model-index\": [{\"name\": \"opus-mt-tc-big-en-it\", \"results\": [{\"task\": {\"type\": \"translation\", \"name\": \"Translation eng-ita\"}, \"dataset\": {\"name\": \"flores101-devtest\", \"type\": \"flores_101\", \"args\": \"eng ita devtest\"}, \"metrics\": [{\"type\": \"bleu\", \"value\": 29.6, \"name\": \"BLEU\"}]}, {\"task\": {\"type\": \"translation\", \"name\": \"Translation eng-ita\"}, \"dataset\": {\"name\": \"tatoeba-test-v2021-08-07\", \"type\": \"tatoeba_mt\", \"args\": \"eng-ita\"}, \"metrics\": [{\"type\": \"bleu\", \"value\": 53.9, \"name\": \"BLEU\"}]}, {\"task\": {\"type\": \"translation\", \"name\": \"Translation eng-ita\"}, \"dataset\": {\"name\": \"newstest2009\", \"type\": \"wmt-2009-news\", \"args\": \"eng-ita\"}, \"metrics\": [{\"type\": \"bleu\", \"value\": 31.6, \"name\": \"BLEU\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45559,"string":"45,559"}}},{"rowIdx":43782,"cells":{"id":{"kind":"string","value":"fumakurata/distilbert-base-uncased-finetuned-emotion"},"author":{"kind":"string","value":"fumakurata"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","tensorboard","distilbert","text-classification","generated_from_trainer","dataset:emotion","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tensorboard\",\n \"distilbert\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"dataset:emotion\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-07-18T07:22:20Z","string":"2022-07-18T07:22:20Z"},"last_modified":{"kind":"string","value":"2022-07-18T10:12:18+00:00"},"downloads":{"kind":"number","value":114,"string":"114"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- emotion\nlicense: apache-2.0\nmetrics:\n- accuracy\n- f1\ntags:\n- generated_from_trainer\nmodel-index:\n- name: distilbert-base-uncased-finetuned-emotion\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: emotion\n type: emotion\n args: default\n metrics:\n - type: accuracy\n value: 0.834\n name: Accuracy\n - type: f1\n value: 0.8171742650957551\n name: F1\n---\n\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.5401\n- Accuracy: 0.834\n- F1: 0.8172\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following 
hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 192\n- eval_batch_size: 192\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| No log | 1.0 | 84 | 0.7993 | 0.74 | 0.6827 |\n| No log | 2.0 | 168 | 0.5401 | 0.834 | 0.8172 |\n\n\n### Framework versions\n\n- Transformers 4.20.1\n- Pytorch 1.12.0+cu113\n- Datasets 2.3.2\n- Tokenizers 0.12.1\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.5401\n- Accuracy: 0.834\n- F1: 0.8172\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 192\n- eval_batch_size: 192\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| No log | 1.0 | 84 | 0.7993 | 0.74 | 0.6827 |\n| No log | 2.0 | 168 | 0.5401 | 0.834 | 0.8172 |\n\n\n### Framework versions\n\n- Transformers 4.20.1\n- Pytorch 1.12.0+cu113\n- Datasets 2.3.2\n- Tokenizers 0.12.1\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"emotion\"], \"license\": \"apache-2.0\", \"metrics\": [\"accuracy\", \"f1\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"distilbert-base-uncased-finetuned-emotion\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"emotion\", \"type\": \"emotion\", \"args\": \"default\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.834, \"name\": \"Accuracy\"}, {\"type\": \"f1\", \"value\": 0.8171742650957551, \"name\": \"F1\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45560,"string":"45,560"}}},{"rowIdx":43783,"cells":{"id":{"kind":"string","value":"facebook/bart-large-mnli"},"author":{"kind":"string","value":"facebook"},"task_category":{"kind":"string","value":"zero-shot-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","jax","rust","safetensors","bart","text-classification","zero-shot-classification","dataset:multi_nli","arxiv:1910.13461","arxiv:1909.00161","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"jax\",\n \"rust\",\n \"safetensors\",\n \"bart\",\n \"text-classification\",\n \"zero-shot-classification\",\n \"dataset:multi_nli\",\n \"arxiv:1910.13461\",\n \"arxiv:1909.00161\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-03-02T23:29:05Z","string":"2022-03-02T23:29:05Z"},"last_modified":{"kind":"string","value":"2023-09-05T14:49:34+00:00"},"downloads":{"kind":"number","value":2735868,"string":"2,735,868"},"likes":{"kind":"number","value":1326,"string":"1,326"},"README":{"kind":"string","value":"---\ndatasets:\n- multi_nli\nlicense: mit\npipeline_tag: zero-shot-classification\nthumbnail: https://huggingface.co/front/thumbnails/facebook.png\n---\n\n# bart-large-mnli\n\nThis is the checkpoint for [bart-large](https://huggingface.co/facebook/bart-large) after being trained on the [MultiNLI (MNLI)](https://huggingface.co/datasets/multi_nli) dataset.\n\nAdditional information about this model:\n- The [bart-large](https://huggingface.co/facebook/bart-large) model page\n- [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension\n](https://arxiv.org/abs/1910.13461)\n- [BART fairseq implementation](https://github.com/pytorch/fairseq/tree/master/fairseq/models/bart)\n\n## NLI-based Zero Shot Text Classification\n\n[Yin et al.](https://arxiv.org/abs/1909.00161) proposed a method for using pre-trained NLI models as ready-made zero-shot sequence classifiers. The method works by posing the sequence to be classified as the NLI premise and constructing a hypothesis from each candidate label. For example, if we want to evaluate whether a sequence belongs to the class \"politics\", we could construct a hypothesis of `This text is about politics.`. The probabilities for entailment and contradiction are then converted to label probabilities.\n\nThis method is surprisingly effective in many cases, particularly when used with larger pre-trained models like BART and RoBERTa. See [this blog post](https://joeddav.github.io/blog/2020/05/29/ZSL.html) for a more expansive introduction to this and other zero shot methods, and see the code snippets below for examples of using this model for zero-shot classification both with Hugging Face's built-in pipeline and with native Transformers/PyTorch code.\n\n#### With the zero-shot classification pipeline\n\nThe model can be loaded with the `zero-shot-classification` pipeline like so:\n\n```python\nfrom transformers import pipeline\nclassifier = pipeline(\"zero-shot-classification\",\n                      model=\"facebook/bart-large-mnli\")\n```\n\nYou can then use this pipeline to classify sequences into any of the class names you specify.\n\n```python\nsequence_to_classify = \"one day I will see the world\"\ncandidate_labels = ['travel', 'cooking', 'dancing']\nclassifier(sequence_to_classify, candidate_labels)\n#{'labels': ['travel', 'dancing', 'cooking'],\n# 'scores': [0.9938651323318481, 0.0032737774308770895, 0.002861034357920289],\n# 'sequence': 'one day I will see the world'}\n```\n\nIf more than one candidate label can be correct, pass `multi_label=True` to calculate each class independently:\n\n```python\ncandidate_labels = ['travel', 'cooking', 'dancing', 'exploration']\nclassifier(sequence_to_classify, candidate_labels, multi_label=True)\n#{'labels': ['travel', 'exploration', 'dancing', 'cooking'],\n# 'scores': [0.9945111274719238,\n#  0.9383890628814697,\n#  0.0057061901316046715,\n#  0.0018193122232332826],\n# 'sequence': 'one day I will see the world'}\n```\n\n\n#### With manual PyTorch\n\n```python\n# pose sequence as a NLI premise and label as a hypothesis\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\nimport torch\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nnli_model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli').to(device)\ntokenizer = AutoTokenizer.from_pretrained('facebook/bart-large-mnli')\n\nsequence = 'one day I will see the world'\nlabel = 'travel'\npremise = sequence\nhypothesis = f'This example is {label}.'\n\n# run through model pre-trained on MNLI\nx = tokenizer.encode(premise, hypothesis, return_tensors='pt',\n                     truncation='only_first')\nlogits = nli_model(x.to(device))[0]\n\n# we throw away \"neutral\" (dim 1) and take the probability of\n# \"entailment\" (2) as the probability of the label being true\nentail_contradiction_logits = logits[:,[0,2]]\nprobs = entail_contradiction_logits.softmax(dim=1)\nprob_label_is_true = probs[:,1]\n```
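\n\nThe zero-shot pipeline shown earlier essentially automates this premise/hypothesis scoring over all candidate labels. A rough sketch of that loop (our own illustration of the single-label case, reusing the objects defined in the snippet above; it is not the pipeline's exact implementation):\n\n```python\n# score each candidate label with the NLI model, then normalise the\n# entailment logits across labels (single-label case)\ncandidate_labels = ['travel', 'cooking', 'dancing']\nentail_logits = []\nfor label in candidate_labels:\n    x = tokenizer.encode(sequence, f'This example is {label}.',\n                         return_tensors='pt', truncation='only_first')\n    entail_logits.append(nli_model(x.to(device))[0][0, 2])  # index 2 = entailment\nscores = torch.stack(entail_logits).softmax(dim=0)\nprint(dict(zip(candidate_labels, scores.tolist())))\n```\n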
"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# bart-large-mnli\n\nThis is the checkpoint for [bart-large](https://huggingface.co/facebook/bart-large) after being trained on the [MultiNLI (MNLI)](https://huggingface.co/datasets/multi_nli) dataset.\n\nAdditional information about this model:\n- The [bart-large](https://huggingface.co/facebook/bart-large) model page\n- [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension\n](https://arxiv.org/abs/1910.13461)\n- [BART fairseq implementation](https://github.com/pytorch/fairseq/tree/master/fairseq/models/bart)\n\n## NLI-based Zero Shot Text Classification\n\n[Yin et al.](https://arxiv.org/abs/1909.00161) proposed a method for using pre-trained NLI models as ready-made zero-shot sequence classifiers. The method works by posing the sequence to be classified as the NLI premise and constructing a hypothesis from each candidate label. For example, if we want to evaluate whether a sequence belongs to the class \"politics\", we could construct a hypothesis of `This text is about politics.`. The probabilities for entailment and contradiction are then converted to label probabilities.\n\nThis method is surprisingly effective in many cases, particularly when used with larger pre-trained models like BART and RoBERTa. See [this blog post](https://joeddav.github.io/blog/2020/05/29/ZSL.html) for a more expansive introduction to this and other zero shot methods, and see the code snippets below for examples of using this model for zero-shot classification both with Hugging Face's built-in pipeline and with native Transformers/PyTorch code.\n\n#### With the zero-shot classification pipeline\n\nThe model can be loaded with the `zero-shot-classification` pipeline like so:\n\n```python\nfrom transformers import pipeline\nclassifier = pipeline(\"zero-shot-classification\",\n                      model=\"facebook/bart-large-mnli\")\n```\n\nYou can then use this pipeline to classify sequences into any of the class names you specify.\n\n```python\nsequence_to_classify = \"one day I will see the world\"\ncandidate_labels = ['travel', 'cooking', 'dancing']\nclassifier(sequence_to_classify, candidate_labels)\n#{'labels': ['travel', 'dancing', 'cooking'],\n# 'scores': [0.9938651323318481, 0.0032737774308770895, 0.002861034357920289],\n# 'sequence': 'one day I will see the world'}\n```\n\nIf more than one candidate label can be correct, pass `multi_label=True` to calculate each class independently:\n\n```python\ncandidate_labels = ['travel', 'cooking', 'dancing', 'exploration']\nclassifier(sequence_to_classify, candidate_labels, multi_label=True)\n#{'labels': ['travel', 'exploration', 'dancing', 'cooking'],\n# 'scores': [0.9945111274719238,\n#  0.9383890628814697,\n#  0.0057061901316046715,\n#  0.0018193122232332826],\n# 'sequence': 'one day I will see the world'}\n```\n\n\n#### With manual PyTorch\n\n```python\n# pose sequence as a NLI premise and label as a hypothesis\nfrom transformers import AutoModelForSequenceClassification, AutoTokenizer\nimport torch\n\ndevice = 'cuda' if torch.cuda.is_available() else 'cpu'\nnli_model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli').to(device)\ntokenizer = AutoTokenizer.from_pretrained('facebook/bart-large-mnli')\n\nsequence = 'one day I will see the world'\nlabel = 'travel'\npremise = sequence\nhypothesis = f'This example is {label}.'\n\n# run through model pre-trained on MNLI\nx = tokenizer.encode(premise, hypothesis, return_tensors='pt',\n                     truncation='only_first')\nlogits = nli_model(x.to(device))[0]\n\n# we throw away \"neutral\" (dim 1) and take the probability of\n# \"entailment\" (2) as the probability of the label being true\nentail_contradiction_logits = logits[:,[0,2]]\nprobs = entail_contradiction_logits.softmax(dim=1)\nprob_label_is_true = probs[:,1]\n```\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"multi_nli\"], \"license\": \"mit\", \"pipeline_tag\": \"zero-shot-classification\", \"thumbnail\": \"https://huggingface.co/front/thumbnails/facebook.png\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION","TRANSLATION"],"string":"[\n  \"TEXT_CLASSIFICATION\",\n  \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45561,"string":"45,561"}}},{"rowIdx":43784,"cells":{"id":{"kind":"string","value":"KarelDO/roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44"},"author":{"kind":"string","value":"KarelDO"},"task_category":{"kind":"null"},"tags":{"kind":"list 
like","value":["transformers","pytorch","roberta","generated_from_trainer","en","dataset:OpenTable","license:mit","model-index","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"roberta\",\n \"generated_from_trainer\",\n \"en\",\n \"dataset:OpenTable\",\n \"license:mit\",\n \"model-index\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-10-14T02:26:43Z","string":"2022-10-14T02:26:43Z"},"last_modified":{"kind":"string","value":"2022-10-14T02:29:05+00:00"},"downloads":{"kind":"number","value":17,"string":"17"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- OpenTable\nlanguage:\n- en\nlicense: mit\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: OpenTable OPENTABLE\n type: OpenTable\n args: opentable\n metrics:\n - type: accuracy\n value: 0.7471607890017932\n name: Accuracy\n---\n\n\n\n# roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44\n\nThis model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.6246\n- Accuracy: 0.7472\n- Macro-f1: 0.7303\n- Weighted-macro-f1: 0.7434\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 44\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5.0\n\n### Training results\n\n\n\n### Framework versions\n\n- Transformers 4.18.0\n- Pytorch 1.10.2+cu102\n- Datasets 2.5.2\n- Tokenizers 0.12.1\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44\n\nThis model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.6246\n- Accuracy: 0.7472\n- Macro-f1: 0.7303\n- Weighted-macro-f1: 0.7434\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 44\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 5.0\n\n### Training results\n\n\n\n### Framework versions\n\n- Transformers 4.18.0\n- Pytorch 1.10.2+cu102\n- Datasets 2.5.2\n- Tokenizers 0.12.1\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"OpenTable\"], \"language\": [\"en\"], \"license\": \"mit\", \"metrics\": [\"accuracy\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44\", \"results\": 
[{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"OpenTable OPENTABLE\", \"type\": \"OpenTable\", \"args\": \"opentable\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.7471607890017932, \"name\": \"Accuracy\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n  \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45562,"string":"45,562"}}},{"rowIdx":43785,"cells":{"id":{"kind":"string","value":"facebook/seamless-m4t-unity-small"},"author":{"kind":"string","value":"facebook"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["fairseq2","SeamlessM4T","license:cc-by-nc-4.0","region:us"],"string":"[\n  \"fairseq2\",\n  \"SeamlessM4T\",\n  \"license:cc-by-nc-4.0\",\n  \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-08-20T15:54:46Z","string":"2023-08-20T15:54:46Z"},"last_modified":{"kind":"string","value":"2023-08-24T13:35:49+00:00"},"downloads":{"kind":"number","value":0,"string":"0"},"likes":{"kind":"number","value":30,"string":"30"},"README":{"kind":"string","value":"---\nlibrary_name: fairseq2\nlicense: cc-by-nc-4.0\ntags:\n- SeamlessM4T\ninference: false\n---\n\n# SeamlessM4T - On-Device\nSeamlessM4T is designed to provide high quality translation, allowing people from different linguistic communities to communicate effortlessly through speech and text. \n\nSeamlessM4T covers:\n- 📥 101 languages for speech input\n- ⌨️ 96 languages for text input/output\n- 🗣️ 35 languages for speech output. \n\nApart from [SeamlessM4T-LARGE (2.3B)](https://huggingface.co/facebook/seamless-m4t-large) and [SeamlessM4T-MEDIUM (1.2B)](https://huggingface.co/facebook/seamless-m4t-medium) models, we are also developing a small model (281M) targeting on-device inference.\n\nThis README contains an example to run an exported small model covering most tasks (ASR/S2TT/S2ST). The model can be executed on popular mobile devices with PyTorch Mobile (https://pytorch.org/mobile/home/).\n\n## Overview\n| Model | Checkpoint | Num Params | Disk Size | Supported Tasks | Supported Languages|\n|---------|------------|----------|-------------|------------|-------------------------|\n| UnitY-Small|[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small/resolve/main/unity_on_device.ptl) | 281M | 862MB | S2ST, S2TT, ASR |eng, fra, hin, por, spa|\n| UnitY-Small-S2T |[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t/resolve/main/unity_on_device_s2t.ptl) | 235M | 637MB | S2TT, ASR |eng, fra, hin, por, spa|\n\nUnitY-Small-S2T is a pruned version of UnitY-Small without 2nd pass unit decoding.\n\n## Inference\nTo use the exported model, users don't need the seamless_communication or fairseq2 dependencies.\n\n```python\nimport torchaudio\nimport torch\n\naudio_input, _ = torchaudio.load(TEST_AUDIO_PATH) # Load waveform using torchaudio\n\ns2st_model = torch.jit.load(\"unity_on_device.ptl\")\n\nwith torch.no_grad():\n    text, units, waveform = s2st_model(audio_input, tgt_lang=TGT_LANG) # S2ST model also returns waveform\n\nprint(text)\ntorchaudio.save(f\"{OUTPUT_FOLDER}/result.wav\", waveform.unsqueeze(0), sample_rate=16000) # Save output waveform to local file\n```
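\n\nThe exported model consumes raw waveform tensors; 16 kHz mono input is our working assumption, inferred from the `sample_rate=16000` used when saving above. A minimal pre-processing sketch for arbitrary input audio:\n\n```python\nimport torchaudio\n\n# hedged sketch: resample to 16 kHz mono before inference; the 16 kHz\n# assumption is inferred from the sample_rate used in the save call above\nwaveform, sample_rate = torchaudio.load(\"input.wav\")\nif sample_rate != 16000:\n    waveform = torchaudio.functional.resample(waveform, sample_rate, 16000)\nif waveform.size(0) > 1:  # downmix multi-channel audio to mono\n    waveform = waveform.mean(dim=0, keepdim=True)\n```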
For example, you could load this model in C++ following [this tutorial](https://pytorch.org/tutorials/advanced/cpp_export.html), or building your own on-device applications similar to [this example](https://github.com/pytorch/ios-demo-app/tree/master/SpeechRecognition)\n\n# Citation\nIf you use SeamlessM4T in your work or any models/datasets/artifacts published in SeamlessM4T, please cite:\n\n```bibtex\n@article{seamlessm4t2023,\n title={SeamlessM4T—Massively Multilingual \\& Multimodal Machine Translation},\n author={{Seamless Communication}, Lo\\\"{i}c Barrault, Yu-An Chung, Mariano Cora Meglioli, David Dale, Ning Dong, Paul-Ambroise Duquenne, Hady Elsahar, Hongyu Gong, Kevin Heffernan, John Hoffman, Christopher Klaiber, Pengwei Li, Daniel Licht, Jean Maillard, Alice Rakotoarison, Kaushik Ram Sadagopan, Guillaume Wenzek, Ethan Ye, Bapi Akula, Peng-Jen Chen, Naji El Hachem, Brian Ellis, Gabriel Mejia Gonzalez, Justin Haaheim, Prangthip Hansanti, Russ Howes, Bernie Huang, Min-Jae Hwang, Hirofumi Inaguma, Somya Jain, Elahe Kalbassi, Amanda Kallet, Ilia Kulikov, Janice Lam, Daniel Li, Xutai Ma, Ruslan Mavlyutov, Benjamin Peloquin, Mohamed Ramadan, Abinesh Ramakrishnan, Anna Sun, Kevin Tran, Tuan Tran, Igor Tufanov, Vish Vogeti, Carleigh Wood, Yilin Yang, Bokai Yu, Pierre Andrews, Can Balioglu, Marta R. Costa-juss\\`{a} \\footnotemark[3], Onur \\,{C}elebi,Maha Elbayad,Cynthia Gao, Francisco Guzm\\'an, Justine Kao, Ann Lee, Alexandre Mourachko, Juan Pino, Sravya Popuri, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, Paden Tomasello, Changhan Wang, Jeff Wang, Skyler Wang},\n journal={ArXiv},\n year={2023}\n}\n```\n# License\n\nseamless_communication is CC-BY-NC 4.0 licensed\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# SeamlessM4T - On-Device\nSeamlessM4T is designed to provide high quality translation, allowing people from different linguistic communities to communicate effortlessly through speech and text. \n\nSeamlessM4T covers:\n- 📥 101 languages for speech input\n- ⌨️ 96 Languages for text input/output\n- 🗣️ 35 languages for speech output. \n\nApart from [SeamlessM4T-LARGE (2.3B)](https://huggingface.co/facebook/seamless-m4t-large) and [SeamlessM4T-MEDIUM (1.2B)](https://huggingface.co/facebook/seamless-m4t-medium) models, we are also developing a small model (281M) targeting for on-device inference.\n\nThis README contains an example to run an exported small model covering most tasks (ASR/S2TT/S2ST). 
The model can be executed on popular mobile devices with PyTorch Mobile (https://pytorch.org/mobile/home/).\n\n## Overview\n| Model | Checkpoint | Num Params | Disk Size | Supported Tasks | Supported Languages|\n|---------|------------|----------|-------------|------------|-------------------------|\n| UnitY-Small|[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small/resolve/main/unity_on_device.ptl) | 281M | 862MB | S2ST, S2TT, ASR |eng, fra, hin, por, spa|\n| UnitY-Small-S2T |[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t/resolve/main/unity_on_device_s2t.ptl) | 235M | 637MB | S2TT, ASR |eng, fra, hin, por, spa|\n\nUnitY-Small-S2T is a pruned version of UnitY-Small without the 2nd-pass unit decoding.\n\n## Inference\nTo use the exported model, users don't need the seamless_communication or fairseq2 dependencies.\n\n```python\nimport torchaudio\nimport torch\n\naudio_input, _ = torchaudio.load(TEST_AUDIO_PATH) # Load waveform using torchaudio\n\ns2st_model = torch.jit.load(\"unity_on_device.ptl\")\n\nwith torch.no_grad():\n text, units, waveform = s2st_model(audio_input, tgt_lang=TGT_LANG) # S2ST model also returns waveform\n\nprint(text)\ntorchaudio.save(f\"{OUTPUT_FOLDER}/result.wav\", waveform.unsqueeze(0), sample_rate=16000) # Save output waveform to local file\n```\n\nRunning the exported model also doesn't require a Python runtime. For example, you could load this model in C++ following [this tutorial](https://pytorch.org/tutorials/advanced/cpp_export.html), or build your own on-device applications similar to [this example](https://github.com/pytorch/ios-demo-app/tree/master/SpeechRecognition).\n\n# Citation\nIf you use SeamlessM4T in your work or any models/datasets/artifacts published in SeamlessM4T, please cite:\n\n```bibtex\n@article{seamlessm4t2023,\n title={SeamlessM4T—Massively Multilingual \\& Multimodal Machine Translation},\n author={{Seamless Communication}, Lo\\\"{i}c Barrault, Yu-An Chung, Mariano Cora Meglioli, David Dale, Ning Dong, Paul-Ambroise Duquenne, Hady Elsahar, Hongyu Gong, Kevin Heffernan, John Hoffman, Christopher Klaiber, Pengwei Li, Daniel Licht, Jean Maillard, Alice Rakotoarison, Kaushik Ram Sadagopan, Guillaume Wenzek, Ethan Ye, Bapi Akula, Peng-Jen Chen, Naji El Hachem, Brian Ellis, Gabriel Mejia Gonzalez, Justin Haaheim, Prangthip Hansanti, Russ Howes, Bernie Huang, Min-Jae Hwang, Hirofumi Inaguma, Somya Jain, Elahe Kalbassi, Amanda Kallet, Ilia Kulikov, Janice Lam, Daniel Li, Xutai Ma, Ruslan Mavlyutov, Benjamin Peloquin, Mohamed Ramadan, Abinesh Ramakrishnan, Anna Sun, Kevin Tran, Tuan Tran, Igor Tufanov, Vish Vogeti, Carleigh Wood, Yilin Yang, Bokai Yu, Pierre Andrews, Can Balioglu, Marta R. 
Costa-juss\\`{a} \\footnotemark[3], Onur \\,{C}elebi,Maha Elbayad,Cynthia Gao, Francisco Guzm\\'an, Justine Kao, Ann Lee, Alexandre Mourachko, Juan Pino, Sravya Popuri, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, Paden Tomasello, Changhan Wang, Jeff Wang, Skyler Wang},\n journal={ArXiv},\n year={2023}\n}\n```\n# License\n\nseamless_communication is CC-BY-NC 4.0 licensed\n"},"metadata":{"kind":"string","value":"{\"library_name\": \"fairseq2\", \"license\": \"cc-by-nc-4.0\", \"tags\": [\"SeamlessM4T\"], \"inference\": false}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45563,"string":"45,563"}}},{"rowIdx":43786,"cells":{"id":{"kind":"string","value":"gokulsrinivasagan/distilbert_lda_5_v1_book_sst2"},"author":{"kind":"string","value":"gokulsrinivasagan"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","tensorboard","safetensors","distilbert","text-classification","generated_from_trainer","en","dataset:glue","base_model:gokulsrinivasagan/distilbert_lda_5_v1_book","base_model:finetune:gokulsrinivasagan/distilbert_lda_5_v1_book","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"tensorboard\",\n \"safetensors\",\n \"distilbert\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"en\",\n \"dataset:glue\",\n \"base_model:gokulsrinivasagan/distilbert_lda_5_v1_book\",\n \"base_model:finetune:gokulsrinivasagan/distilbert_lda_5_v1_book\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-12-09T16:33:32Z","string":"2024-12-09T16:33:32Z"},"last_modified":{"kind":"string","value":"2024-12-09T16:38:33+00:00"},"downloads":{"kind":"number","value":4,"string":"4"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: gokulsrinivasagan/distilbert_lda_5_v1_book\ndatasets:\n- glue\nlanguage:\n- en\nlibrary_name: transformers\nmetrics:\n- accuracy\ntags:\n- generated_from_trainer\nmodel-index:\n- name: distilbert_lda_5_v1_book_sst2\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: GLUE SST2\n type: glue\n args: sst2\n metrics:\n - type: accuracy\n value: 0.8956422018348624\n name: Accuracy\n---\n\n\n\n# distilbert_lda_5_v1_book_sst2\n\nThis model is a fine-tuned version of [gokulsrinivasagan/distilbert_lda_5_v1_book](https://huggingface.co/gokulsrinivasagan/distilbert_lda_5_v1_book) on the GLUE SST2 dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2822\n- Accuracy: 0.8956\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 256\n- eval_batch_size: 256\n- seed: 10\n- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments\n- lr_scheduler_type: linear\n- num_epochs: 50\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|\n| 0.3005 | 1.0 | 264 | 0.2822 | 0.8956 |\n| 0.1522 
| 2.0 | 528 | 0.3279 | 0.8979 |\n| 0.1013 | 3.0 | 792 | 0.3850 | 0.8853 |\n| 0.0747 | 4.0 | 1056 | 0.3563 | 0.8945 |\n| 0.0542 | 5.0 | 1320 | 0.3817 | 0.8865 |\n| 0.045 | 6.0 | 1584 | 0.3982 | 0.8888 |\n\n\n### Framework versions\n\n- Transformers 4.46.1\n- Pytorch 2.2.0+cu121\n- Datasets 3.1.0\n- Tokenizers 0.20.1\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# distilbert_lda_5_v1_book_sst2\n\nThis model is a fine-tuned version of [gokulsrinivasagan/distilbert_lda_5_v1_book](https://huggingface.co/gokulsrinivasagan/distilbert_lda_5_v1_book) on the GLUE SST2 dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2822\n- Accuracy: 0.8956\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 256\n- eval_batch_size: 256\n- seed: 10\n- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments\n- lr_scheduler_type: linear\n- num_epochs: 50\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|\n| 0.3005 | 1.0 | 264 | 0.2822 | 0.8956 |\n| 0.1522 | 2.0 | 528 | 0.3279 | 0.8979 |\n| 0.1013 | 3.0 | 792 | 0.3850 | 0.8853 |\n| 0.0747 | 4.0 | 1056 | 0.3563 | 0.8945 |\n| 0.0542 | 5.0 | 1320 | 0.3817 | 0.8865 |\n| 0.045 | 6.0 | 1584 | 0.3982 | 0.8888 |\n\n\n### Framework versions\n\n- Transformers 4.46.1\n- Pytorch 2.2.0+cu121\n- Datasets 3.1.0\n- Tokenizers 0.20.1\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"gokulsrinivasagan/distilbert_lda_5_v1_book\", \"datasets\": [\"glue\"], \"language\": [\"en\"], \"library_name\": \"transformers\", \"metrics\": [\"accuracy\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"distilbert_lda_5_v1_book_sst2\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"GLUE SST2\", \"type\": \"glue\", \"args\": \"sst2\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.8956422018348624, \"name\": \"Accuracy\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45564,"string":"45,564"}}},{"rowIdx":43787,"cells":{"id":{"kind":"string","value":"NLPclass/parsBERT_text_emotion_classification"},"author":{"kind":"string","value":"NLPclass"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","safetensors","bert","text-classification","fa","dataset:SeyedAli/Persian-Text-Emotion","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"bert\",\n \"text-classification\",\n \"fa\",\n \"dataset:SeyedAli/Persian-Text-Emotion\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-08T15:51:23Z","string":"2024-07-08T15:51:23Z"},"last_modified":{"kind":"string","value":"2024-07-08T16:00:12+00:00"},"downloads":{"kind":"number","value":17,"string":"17"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- SeyedAli/Persian-Text-Emotion\nlanguage:\n- fa\nlibrary_name: transformers\nlicense: mit\npipeline_tag: text-classification\n---\n\n# ParsBERT Text Emotion Classification\n\n\nUsing ParsBERT for emotion text classification on a Persian dataset.\n\n\n### Model Description\n\n\n\nThis model is designed for emotion text classification on a Persian dataset; it uses the pre-trained ParsBERT model for the Persian language.\n\n- **Model type:** ParsBERT.\n- **Language(s) (NLP):** Persian.\n\n\n\n## Training Details\n\n### Training Data\n\n\n\nThis model was trained on 5,558 texts from the SeyedAli/Persian-Text-Emotion dataset.\n\n#### Training Hyperparameters\n\n- **Number of Epochs:** 5 \n- **Training Batch Size:** 16\n- **Evaluation Batch Size:** 16\n\n\n### Testing Data, Factors & Metrics\n\n#### Testing Data\n\n\n\nThis model was tested on 1,399 texts from the SeyedAli/Persian-Text-Emotion dataset, covering 6 classes.\n
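\nBelow is a minimal usage sketch (not part of the original card; it assumes the standard transformers text-classification pipeline and this repository's checkpoint id):\n\n```python\nfrom transformers import pipeline\n\n# Hypothetical example: load this checkpoint as a text-classification pipeline.\nclassifier = pipeline(\"text-classification\", model=\"NLPclass/parsBERT_text_emotion_classification\")\n\n# Classify a short Persian sentence; the pipeline returns the predicted emotion label and score.\nprint(classifier(\"این فیلم فوق العاده بود\"))\n```\n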
"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# ParsBERT Text Emotion Classification\n\n\nUsing ParsBERT for emotion text classification on a Persian dataset.\n\n\n### Model Description\n\n\n\nThis model is designed for emotion text classification on a Persian dataset; it uses the pre-trained ParsBERT model for the Persian language.\n\n- **Model type:** ParsBERT.\n- **Language(s) (NLP):** Persian.\n\n\n\n## Training Details\n\n### Training Data\n\n\n\nThis model was trained on 5,558 texts from the SeyedAli/Persian-Text-Emotion dataset.\n\n#### Training Hyperparameters\n\n- **Number of Epochs:** 5 \n- **Training Batch Size:** 16\n- **Evaluation Batch Size:** 16\n\n\n### Testing Data, Factors & Metrics\n\n#### Testing Data\n\n\n\nThis model was tested on 1,399 texts from the SeyedAli/Persian-Text-Emotion dataset, covering 6 classes.\n"},"metadata":{"kind":"string","value":"{\"datasets\": [\"SeyedAli/Persian-Text-Emotion\"], \"language\": [\"fa\"], \"library_name\": \"transformers\", \"license\": \"mit\", \"pipeline_tag\": \"text-classification\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45565,"string":"45,565"}}},"id":{"kind":"string","value":"eligapris/rn-tokenizer"},"author":{"kind":"string","value":"eligapris"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["transformers","bert","rn","dataset:eligapris/kirundi-english","license:mit","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"bert\",\n \"rn\",\n \"dataset:eligapris/kirundi-english\",\n \"license:mit\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-12-06T03:52:51Z","string":"2024-12-06T03:52:51Z"},"last_modified":{"kind":"string","value":"2024-12-06T04:04:27+00:00"},"downloads":{"kind":"number","value":8,"string":"8"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- eligapris/kirundi-english\nlanguage:\n- rn\nlibrary_name: transformers\nlicense: mit\n---\n# eligapris/rn-tokenizer\n\n## Model Description\n\nThis repository contains a BPE tokenizer trained specifically for the Kirundi language (ISO code: run).\n\n### Tokenizer Details\n- **Type**: BPE (Byte-Pair Encoding)\n- **Vocabulary Size**: 30,000 tokens\n- **Special Tokens**: [UNK], [CLS], [SEP], [PAD], [MASK]\n- **Pre-tokenization**: Whitespace-based\n\n## Intended Uses & Limitations\n\n### Intended Uses\n- Text processing for Kirundi language\n- Pre-processing for NLP tasks involving Kirundi\n- Foundation for developing Kirundi language applications\n\n### Limitations\n- The tokenizer is trained on a specific corpus and may not cover all Kirundi dialects\n- Limited to the vocabulary observed in the training data\n- Performance may vary on domain-specific text\n\n## Training Data\n\nThe tokenizer was trained on the Kirundi-English parallel corpus:\n- **Dataset**: eligapris/kirundi-english\n- **Size**: 21.4k sentence pairs\n- **Nature**: Parallel corpus with Kirundi and English translations\n- **Domain**: Mixed domain including religious, general, and conversational text\n\n## Installation\n\nYou can use this tokenizer in your project by first installing the required dependencies:\n\n```bash\npip install transformers\n```\n\nThen load the tokenizer directly from the Hugging Face Hub:\n\n```python\nfrom transformers import AutoTokenizer\ntokenizer = AutoTokenizer.from_pretrained(\"eligapris/rn-tokenizer\")\n```\n\nOr if you have downloaded the tokenizer files locally:\n\n```python\nfrom transformers import PreTrainedTokenizerFast\ntokenizer = PreTrainedTokenizerFast(tokenizer_file=\"tokenizer.json\")\n```\n\n## Usage Examples\n\n### Loading and Using the Tokenizer\n\nYou can load the tokenizer in two ways:\n\n```python\n# Method 1: Using AutoTokenizer (recommended)\nfrom transformers import AutoTokenizer\ntokenizer = AutoTokenizer.from_pretrained(\"eligapris/rn-tokenizer\")\n\n# Method 2: Using PreTrainedTokenizerFast with local file\nfrom transformers import PreTrainedTokenizerFast\ntokenizer = PreTrainedTokenizerFast(tokenizer_file=\"tokenizer.json\")\n```\n\n#### Basic Usage Examples\n\n1. Tokenize a single sentence:\n```python\n# Basic tokenization\ntext = \"ab'umudugudu hafi ya bose bateranira kumva ijambo ry'Imana.\"\nencoded = tokenizer(text)\nprint(f\"Input IDs: {encoded['input_ids']}\")\nprint(f\"Tokens: {tokenizer.convert_ids_to_tokens(encoded['input_ids'])}\")\n```\n\n2. Batch tokenization:\n```python\n# Process multiple sentences at once\ntexts = [\n \"ifumbire mvaruganda.\",\n \"aba azi gukora kandi afite ubushobozi\"\n]\nencoded = tokenizer(texts, padding=True, truncation=True)\nprint(\"Batch encoding:\", encoded)\n```\n\n3. Get token IDs with special tokens:\n```python\n# Add special tokens like [CLS] and [SEP]\nencoded = tokenizer(text, add_special_tokens=True)\ntokens = tokenizer.convert_ids_to_tokens(encoded['input_ids'])\nprint(f\"Tokens with special tokens: {tokens}\")\n```\n\n4. Decode tokenized text:\n```python\n# Convert token IDs back to text\nids = encoded['input_ids']\ndecoded_text = tokenizer.decode(ids)\nprint(f\"Decoded text: {decoded_text}\")\n```\n\n5. 
Padding and truncation:\n```python\n# Pad or truncate sequences to a specific length\nencoded = tokenizer(\n texts,\n padding='max_length',\n max_length=32,\n truncation=True,\n return_tensors='pt' # Return PyTorch tensors\n)\nprint(\"Padded sequences:\", encoded['input_ids'].shape)\n```\n\n## Future Development\nThis tokenizer is intended to serve as a foundation for future Kirundi language model development, including potential fine-tuning with techniques like LoRA (Low-Rank Adaptation).\n\n## Technical Specifications\n\n### Software Requirements\n```python\ndependencies = {\n \"transformers\": \">=4.30.0\",\n \"tokenizers\": \">=0.13.0\"\n}\n```\n\n\n## Contact\n\neligrapris\n\n---\n\n## Updates and Versions\n\n- v1.0.0 (Initial Release)\n - Base tokenizer implementation\n - Trained on Kirundi-English parallel corpus\n - Basic functionality and documentation\n\n## Acknowledgments\n\n- Dataset provided by eligapris\n- Hugging Face's Transformers and Tokenizers libraries"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"# eligapris/rn-tokenizer\n\n## Model Description\n\nThis repository contains a BPE tokenizer trained specifically for the Kirundi language (ISO code: run).\n\n### Tokenizer Details\n- **Type**: BPE (Byte-Pair Encoding)\n- **Vocabulary Size**: 30,000 tokens\n- **Special Tokens**: [UNK], [CLS], [SEP], [PAD], [MASK]\n- **Pre-tokenization**: Whitespace-based\n\n## Intended Uses & Limitations\n\n### Intended Uses\n- Text processing for Kirundi language\n- Pre-processing for NLP tasks involving Kirundi\n- Foundation for developing Kirundi language applications\n\n### Limitations\n- The tokenizer is trained on a specific corpus and may not cover all Kirundi dialects\n- Limited to the vocabulary observed in the training data\n- Performance may vary on domain-specific text\n\n## Training Data\n\nThe tokenizer was trained on the Kirundi-English parallel corpus:\n- **Dataset**: eligapris/kirundi-english\n- **Size**: 21.4k sentence pairs\n- **Nature**: Parallel corpus with Kirundi and English translations\n- **Domain**: Mixed domain including religious, general, and conversational text\n\n## Installation\n\nYou can use this tokenizer in your project by first installing the required dependencies:\n\n```bash\npip install transformers\n```\n\nThen load the tokenizer directly from the Hugging Face Hub:\n\n```python\nfrom transformers import AutoTokenizer\ntokenizer = AutoTokenizer.from_pretrained(\"eligapris/rn-tokenizer\")\n```\n\nOr if you have downloaded the tokenizer files locally:\n\n```python\nfrom transformers import PreTrainedTokenizerFast\ntokenizer = PreTrainedTokenizerFast(tokenizer_file=\"tokenizer.json\")\n```\n\n## Usage Examples\n\n### Loading and Using the Tokenizer\n\nYou can load the tokenizer in two ways:\n\n```python\n# Method 1: Using AutoTokenizer (recommended)\nfrom transformers import AutoTokenizer\ntokenizer = AutoTokenizer.from_pretrained(\"eligapris/rn-tokenizer\")\n\n# Method 2: Using PreTrainedTokenizerFast with local file\nfrom transformers import PreTrainedTokenizerFast\ntokenizer = PreTrainedTokenizerFast(tokenizer_file=\"tokenizer.json\")\n```\n\n#### Basic Usage Examples\n\n1. Tokenize a single sentence:\n```python\n# Basic tokenization\ntext = \"ab'umudugudu hafi ya bose bateranira kumva ijambo ry'Imana.\"\nencoded = tokenizer(text)\nprint(f\"Input IDs: {encoded['input_ids']}\")\nprint(f\"Tokens: {tokenizer.convert_ids_to_tokens(encoded['input_ids'])}\")\n```\n\n2. 
Batch tokenization:\n```python\n# Process multiple sentences at once\ntexts = [\n \"ifumbire mvaruganda.\",\n \"aba azi gukora kandi afite ubushobozi\"\n]\nencoded = tokenizer(texts, padding=True, truncation=True)\nprint(\"Batch encoding:\", encoded)\n```\n\n3. Get token IDs with special tokens:\n```python\n# Add special tokens like [CLS] and [SEP]\nencoded = tokenizer(text, add_special_tokens=True)\ntokens = tokenizer.convert_ids_to_tokens(encoded['input_ids'])\nprint(f\"Tokens with special tokens: {tokens}\")\n```\n\n4. Decode tokenized text:\n```python\n# Convert token IDs back to text\nids = encoded['input_ids']\ndecoded_text = tokenizer.decode(ids)\nprint(f\"Decoded text: {decoded_text}\")\n```\n\n5. Padding and truncation:\n```python\n# Pad or truncate sequences to a specific length\nencoded = tokenizer(\n texts,\n padding='max_length',\n max_length=32,\n truncation=True,\n return_tensors='pt' # Return PyTorch tensors\n)\nprint(\"Padded sequences:\", encoded['input_ids'].shape)\n```\n\n## Future Development\nThis tokenizer is intended to serve as a foundation for future Kirundi language model development, including potential fine-tuning with techniques like LoRA (Low-Rank Adaptation).\n\n## Technical Specifications\n\n### Software Requirements\n```python\ndependencies = {\n \"transformers\": \">=4.30.0\",\n \"tokenizers\": \">=0.13.0\"\n}\n```\n\n\n## Contact\n\neligrapris\n\n---\n\n## Updates and Versions\n\n- v1.0.0 (Initial Release)\n - Base tokenizer implementation\n - Trained on Kirundi-English parallel corpus\n - Basic functionality and documentation\n\n## Acknowledgments\n\n- Dataset provided by eligapris\n- Hugging Face's Transformers and Tokenizers libraries"},"metadata":{"kind":"string","value":"{\"datasets\": [\"eligapris/kirundi-english\"], \"language\": [\"rn\"], \"library_name\": \"transformers\", \"license\": \"mit\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45566,"string":"45,566"}}},{"rowIdx":43789,"cells":{"id":{"kind":"string","value":"TroyDoesAI/Mermaid-Llama-3-5B-Pruned"},"author":{"kind":"string","value":"TroyDoesAI"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","llama","text-generation","conversational","license:cc-by-4.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"llama\",\n \"text-generation\",\n \"conversational\",\n \"license:cc-by-4.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-04-24T03:19:27Z","string":"2024-04-24T03:19:27Z"},"last_modified":{"kind":"string","value":"2024-05-03T02:19:45+00:00"},"downloads":{"kind":"number","value":17,"string":"17"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nlicense: cc-by-4.0\n---\n\n# Mermaid-Llama-3-5B\n\nIntroducing Mermaid-LLama-3-5B, a language model designed for Python code understanding and crafting captivating story flow maps.\n\n![MermaidLlama GIF](Mermaid_ShowCase/MermaidLlama.webp)\n\n## Key Features\n\n1. **Code Understanding:**\n - Masters Python intricacies with finesse.\n - Generates clear and accurate Mermaid Diagram Flow Charts.\n - Ideal for developers seeking visual representations of their code logic.\n\n2. 
**Storytelling Capabilities:**\n - Converts narrative inputs into captivating Mermaid Diagrams.\n - Maps character interactions, plot developments, and narrative arcs.\n\n3. **Unmatched Performance:**\n - Surpasses GPT-4 in generating well-organized Mermaid Diagrams.\n\n4. **Training Insights:**\n - Trained on a diverse dataset, including 800 unique, hand-curated Mermaid Graph examples utilizing 478 complete Python programs.\n - Exhibits emergent properties in story-to-flow map translations and step-by-step instruction flow maps.\n\n## Collaboration\n\nInterested in enhancing Mermaid's capabilities? Contact troydoesai@gmail.com for collaboration opportunities.\n\n## Example Use Cases\n- **Retrieval-Augmented Generation (RAG):** Utilize Mermaid-LLama-3-8B to create condensed knowledge graphs. This model excels in generating flow diagrams that enhance the retrieval process. These knowledge graphs are stored in a vector database, which allows for quick and efficient retrieval of contextually relevant information. When a query is received, the system retrieves a pertinent knowledge graph, appending it as context to the model. This enriched context enables Mermaid-LLama-3-8B to deliver more accurate and nuanced responses. This approach is particularly beneficial in applications requiring deep, context-aware interactions, such as sophisticated Q&A systems, dynamic data analysis, and complex decision-making tasks (a minimal retrieval sketch follows this list).\n- **Code Documentation:** Automatic visual flow charts from Python code.\n- **Storyboarding:** Visually appealing diagrams for storytelling.\n- **Project Planning:** Visual project flow maps for effective team communication.\n- **Learning Python:** Helps students visually understand Python code structures.\n- **Game Design:** Visualizing game storylines for coherent narrative structure.\n
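\nBelow is a minimal sketch of the retrieval flow described in the RAG use case (not from the original card; helper names are hypothetical, it assumes the sentence-transformers library as the embedder, and a plain in-memory array stands in for the vector database):\n\n```python\nimport numpy as np\nfrom sentence_transformers import SentenceTransformer\n\n# Hypothetical RAG sketch: embed condensed Mermaid knowledge graphs,\n# retrieve the most relevant one, and prepend it as context for the model.\nembedder = SentenceTransformer(\"all-MiniLM-L6-v2\")\ngraphs = [\"graph TD; Parser-->AST;\", \"graph TD; AST-->CodeGen;\"]  # condensed knowledge graphs\nindex = embedder.encode(graphs)  # stands in for a real vector database\n\ndef retrieve(query: str) -> str:\n    q = embedder.encode([query])[0]\n    scores = index @ q / (np.linalg.norm(index, axis=1) * np.linalg.norm(q))\n    return graphs[int(np.argmax(scores))]  # best match by cosine similarity\n\nquery = \"What feeds the code generator?\"\nprompt = f\"{retrieve(query)}\\n\\n{query}\"  # retrieved graph appended as context\n```\n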
\n## Proof of Concept\n\nStay tuned for the release of the VSCode Extension that displays the Live Flow Map every time a user stops typing for more than 10 seconds.\n\n## Training Specifications\n\n- **LoRA Rank:** 2048\n- **LoRA Alpha:** 4096\n- **Batch Size:** 1\n- **Micro Batch Size:** 1\n- **Cutoff Length:** 4096\n- **Save every n steps:** 1000\n- **Epochs:** 3\n- **Learning Rate:** 1e-6\n- **LR Scheduler:** Cosine\n\n**Target Modules:**\n- Enable q_proj\n- Enable v_proj\n- Enable k_proj\n- Enable o_proj\n- Enable gate_proj\n- Enable down_proj\n- Enable up_proj\n\n## Getting Started\n\nStart by downloading one of my models.\n\n![0 TroyDoesAI GIF](Mermaid_ShowCase/0_TroyDoesAI.gif)\n\nLoad the model.\n\n![1 Load Model in 4-bit Show Example Use GIF](Mermaid_ShowCase/1_LoadModel_in_4bit_Show_Example_Use.gif)\n\nUse my prompt template to generate a Mermaid code block, which can be viewed in the Mermaid Live Editor or using the Mermaid CLI tool.\n\n![2 Loaded Model in Full Precision 16-bit Show Inference and Mermaid Live Editor GIF](Mermaid_ShowCase/2_Loaded_Model_in_Full_Precision_16bit_Show_Inference_and_Mermaid_Live_editor.gif)\n\nHere we open the VLLM GUI program while Mermaid-Llama-8B is still loaded in VRAM, to compare the flow diagram to the actual program and show the lightweight capabilities of small models on consumer hardware.\n\n![3 Open The Program VLLM Program With Full Precision Mermaid-Llama-8B Running to Evaluate Flow Map GIF](Mermaid_ShowCase/3_Open_The_Program_VLLM_Program_With_Full_Precision_Mermaid-Llama-8B-Running_to_evaluate_flow_map.gif)\n\n## More on my VLLM Class and inference GUI : https://github.com/Troys-Code/VLLM\n\n![Python RtdBsaz8gy GIF](Mermaid_ShowCase/python_RtdBsaz8gy.gif)\n---\n\nNote: This model should be treated as an Auto-Complete Model. Do not try talking to it in chat; you are going to get garbage, as those layers have been pruned and replaced. That is all you will hear of my secret sauce on training on small (< 1000 entry) datasets."},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# Mermaid-Llama-3-5B\n\nIntroducing Mermaid-LLama-3-5B, a language model designed for Python code understanding and crafting captivating story flow maps.\n\n![MermaidLlama GIF](Mermaid_ShowCase/MermaidLlama.webp)\n\n## Key Features\n\n1. **Code Understanding:**\n - Masters Python intricacies with finesse.\n - Generates clear and accurate Mermaid Diagram Flow Charts.\n - Ideal for developers seeking visual representations of their code logic.\n\n2. **Storytelling Capabilities:**\n - Converts narrative inputs into captivating Mermaid Diagrams.\n - Maps character interactions, plot developments, and narrative arcs.\n\n3. **Unmatched Performance:**\n - Surpasses GPT-4 in generating well-organized Mermaid Diagrams.\n\n4. **Training Insights:**\n - Trained on a diverse dataset, including 800 unique, hand-curated Mermaid Graph examples utilizing 478 complete Python programs.\n - Exhibits emergent properties in story-to-flow map translations and step-by-step instruction flow maps.\n\n## Collaboration\n\nInterested in enhancing Mermaid's capabilities? Contact troydoesai@gmail.com for collaboration opportunities.\n\n## Example Use Cases\n- **Retrieval-Augmented Generation (RAG):** Utilize Mermaid-LLama-3-8B to create condensed knowledge graphs. This model excels in generating flow diagrams that enhance the retrieval process. These knowledge graphs are stored in a vector database, which allows for quick and efficient retrieval of contextually relevant information. When a query is received, the system retrieves a pertinent knowledge graph, appending it as context to the model. This enriched context enables Mermaid-LLama-3-8B to deliver more accurate and nuanced responses. 
This approach is particularly beneficial in applications requiring deep, context-aware interactions, such as sophisticated Q&A systems, dynamic data analysis, and complex decision-making tasks.\n- **Code Documentation:** Automatic visual flow charts from Python code.\n- **Storyboarding:** Visually appealing diagrams for storytelling.\n- **Project Planning:** Visual project flow maps for effective team communication.\n- **Learning Python:** Helps students visually understand Python code structures.\n- **Game Design:** Visualizing game storylines for coherent narrative structure.\n\n## Proof of Concept\n\nStay tuned for the release of the VSCode Extension that displays the Live Flow Map every time a user stops typing for more than 10 seconds.\n\n## Training Specifications\n\n- **LoRA Rank:** 2048\n- **LoRA Alpha:** 4096\n- **Batch Size:** 1\n- **Micro Batch Size:** 1\n- **Cutoff Length:** 4096\n- **Save every n steps:** 1000\n- **Epochs:** 3\n- **Learning Rate:** 1e-6\n- **LR Scheduler:** Cosine\n\n**Target Modules:**\n- Enable q_proj\n- Enable v_proj\n- Enable k_proj\n- Enable o_proj\n- Enable gate_proj\n- Enable down_proj\n- Enable up_proj\n\n## Getting Started\n\nStart by downloading one of my models.\n\n![0 TroyDoesAI GIF](Mermaid_ShowCase/0_TroyDoesAI.gif)\n\nLoad the model.\n\n![1 Load Model in 4-bit Show Example Use GIF](Mermaid_ShowCase/1_LoadModel_in_4bit_Show_Example_Use.gif)\n\nUse my prompt template to generate a Mermaid code block, which can be viewed in the Mermaid Live Editor or using the Mermaid CLI tool.\n\n![2 Loaded Model in Full Precision 16-bit Show Inference and Mermaid Live Editor GIF](Mermaid_ShowCase/2_Loaded_Model_in_Full_Precision_16bit_Show_Inference_and_Mermaid_Live_editor.gif)\n\nHere we open the VLLM GUI Program while still running in Vram the Mermaid-Llama-8B to compare the flow diagram to the actual program and show the lightweight capabilites of small models on consumer hardware.\n\n![3 Open The Program VLLM Program With Full Precision Mermaid-Llama-8B Running to Evaluate Flow Map GIF](Mermaid_ShowCase/3_Open_The_Program_VLLM_Program_With_Full_Precision_Mermaid-Llama-8B-Running_to_evaluate_flow_map.gif)\n\n## More on my VLLM Class and inference GUI : https://github.com/Troys-Code/VLLM\n\n![Python RtdBsaz8gy GIF](Mermaid_ShowCase/python_RtdBsaz8gy.gif)\n---\n\nNote: This model should be treated as an Auto-Complete Model, Do not try talking to it in chat you are gonna get garbage, those layers have been pruned and replaced, that is all you will hear of my secret sauce on training on small < 1000 entry datasets."},"metadata":{"kind":"string","value":"{\"license\": \"cc-by-4.0\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45567,"string":"45,567"}}},{"rowIdx":43790,"cells":{"id":{"kind":"string","value":"IsmaelMousa/modernbert-ner-conll2003"},"author":{"kind":"string","value":"IsmaelMousa"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","tensorboard","safetensors","modernbert","token-classification","generated_from_trainer","en","dataset:conll2003","base_model:answerdotai/ModernBERT-base","base_model:finetune:answerdotai/ModernBERT-base","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"tensorboard\",\n \"safetensors\",\n \"modernbert\",\n \"token-classification\",\n 
\"generated_from_trainer\",\n \"en\",\n \"dataset:conll2003\",\n \"base_model:answerdotai/ModernBERT-base\",\n \"base_model:finetune:answerdotai/ModernBERT-base\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2025-01-07T20:24:39Z","string":"2025-01-07T20:24:39Z"},"last_modified":{"kind":"string","value":"2025-02-20T21:53:42+00:00"},"downloads":{"kind":"number","value":281,"string":"281"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nbase_model: answerdotai/ModernBERT-base\ndatasets:\n- conll2003\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\nmetrics:\n- precision\n- recall\n- f1\n- accuracy\npipeline_tag: token-classification\ntags:\n- generated_from_trainer\nmodel-index:\n- name: modernbert-ner-conll2003\n results:\n - task:\n type: token-classification\n name: Token Classification\n dataset:\n name: conll2003\n type: conll2003\n config: conll2003\n split: validation\n args: conll2003\n metrics:\n - type: precision\n value: 0.8349195930423368\n name: Precision\n - type: recall\n value: 0.856277347694379\n name: Recall\n - type: f1\n value: 0.8454636091724825\n name: F1\n - type: accuracy\n value: 0.9751567306569059\n name: Accuracy\n---\n\n# ModernBERT NER (CoNLL2003)\n\nThis model is a fine-tuned version of [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base) on the conll2003 dataset for Named Entity Recognition (NER).\n\nRobust performance on tasks involving the recognition of `Persons`, `Organizations`, and `Locations`.\n\nIt achieves the following results on the evaluation set:\n- Loss: 0.0992\n- Precision: 0.8349\n- Recall: 0.8563\n- F1: 0.8455\n- Accuracy: 0.9752\n\n## Model Details\n\n- **Base Model:** ModernBERT: [https://doi.org/10.48550/arXiv.2412.13663](https://doi.org/10.48550/arXiv.2412.13663).\n- **Fine-tuning Dataset:** CoNLL2003: [https://huggingface.co/datasets/eriktks/conll2003](https://huggingface.co/datasets/eriktks/conll2003).\n- **Task:** Named Entity Recognition (NER)\n\n## Training Data\n\nThe model is fine-tuned on the CoNLL2003 dataset, a well-known benchmark for NER.\nThis dataset provides a solid foundation for the model to generalize on general English text.\n\n## Example Usage\n\nBelow is an example of how to use the model with the Hugging Face Transformers library:\n\n```python\nfrom transformers import pipeline\n\nner = pipeline(task=\"token-classification\", model=\"IsmaelMousa/modernbert-ner-conll2003\", aggregation_strategy=\"max\")\n\nresults = ner(\"Hi, I'm Ismael Mousa from Palestine working for NVIDIA inc.\")\n\nfor entity in results:\n for key, value in entity.items():\n if key == \"entity_group\":\n print(f\"{entity['word']} => {entity[key]}\")\n```\n\nResults:\n\n```\nIsmael Mousa => PER\nPalestine => LOC\nNVIDIA => ORG\n```\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-06\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments\n- lr_scheduler_type: linear\n- num_epochs: 10\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |\n|:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|\n| 0.2306 | 1.0 | 1756 | 0.2243 | 0.6074 | 0.6483 | 0.6272 | 0.9406 |\n| 
0.1415 | 2.0 | 3512 | 0.1583 | 0.7258 | 0.7536 | 0.7394 | 0.9583 |\n| 0.1143 | 3.0 | 5268 | 0.1335 | 0.7731 | 0.7989 | 0.7858 | 0.9657 |\n| 0.0913 | 4.0 | 7024 | 0.1145 | 0.7958 | 0.8256 | 0.8104 | 0.9699 |\n| 0.0848 | 5.0 | 8780 | 0.1079 | 0.8120 | 0.8408 | 0.8261 | 0.9720 |\n| 0.0728 | 6.0 | 10536 | 0.1036 | 0.8214 | 0.8452 | 0.8331 | 0.9730 |\n| 0.0623 | 7.0 | 12292 | 0.1032 | 0.8258 | 0.8487 | 0.8371 | 0.9737 |\n| 0.0599 | 8.0 | 14048 | 0.0990 | 0.8289 | 0.8527 | 0.8406 | 0.9745 |\n| 0.0558 | 9.0 | 15804 | 0.0998 | 0.8331 | 0.8541 | 0.8434 | 0.9750 |\n| 0.0559 | 10.0 | 17560 | 0.0992 | 0.8349 | 0.8563 | 0.8455 | 0.9752 |\n\n\n### Framework versions\n\n- Transformers 4.48.0.dev0\n- Pytorch 2.2.1+cu121\n- Datasets 3.2.0\n- Tokenizers 0.21.0"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# ModernBERT NER (CoNLL2003)\n\nThis model is a fine-tuned version of [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base) on the conll2003 dataset for Named Entity Recognition (NER).\n\nRobust performance on tasks involving the recognition of `Persons`, `Organizations`, and `Locations`.\n\nIt achieves the following results on the evaluation set:\n- Loss: 0.0992\n- Precision: 0.8349\n- Recall: 0.8563\n- F1: 0.8455\n- Accuracy: 0.9752\n\n## Model Details\n\n- **Base Model:** ModernBERT: [https://doi.org/10.48550/arXiv.2412.13663](https://doi.org/10.48550/arXiv.2412.13663).\n- **Fine-tuning Dataset:** CoNLL2003: [https://huggingface.co/datasets/eriktks/conll2003](https://huggingface.co/datasets/eriktks/conll2003).\n- **Task:** Named Entity Recognition (NER)\n\n## Training Data\n\nThe model is fine-tuned on the CoNLL2003 dataset, a well-known benchmark for NER.\nThis dataset provides a solid foundation for the model to generalize on general English text.\n\n## Example Usage\n\nBelow is an example of how to use the model with the Hugging Face Transformers library:\n\n```python\nfrom transformers import pipeline\n\nner = pipeline(task=\"token-classification\", model=\"IsmaelMousa/modernbert-ner-conll2003\", aggregation_strategy=\"max\")\n\nresults = ner(\"Hi, I'm Ismael Mousa from Palestine working for NVIDIA inc.\")\n\nfor entity in results:\n for key, value in entity.items():\n if key == \"entity_group\":\n print(f\"{entity['word']} => {entity[key]}\")\n```\n\nResults:\n\n```\nIsmael Mousa => PER\nPalestine => LOC\nNVIDIA => ORG\n```\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 1e-06\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments\n- lr_scheduler_type: linear\n- num_epochs: 10\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy |\n|:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|\n| 0.2306 | 1.0 | 1756 | 0.2243 | 0.6074 | 0.6483 | 0.6272 | 0.9406 |\n| 0.1415 | 2.0 | 3512 | 0.1583 | 0.7258 | 0.7536 | 0.7394 | 0.9583 |\n| 0.1143 | 3.0 | 5268 | 0.1335 | 0.7731 | 0.7989 | 0.7858 | 0.9657 |\n| 0.0913 | 4.0 | 7024 | 0.1145 | 0.7958 | 0.8256 | 0.8104 | 0.9699 |\n| 0.0848 | 5.0 | 8780 | 0.1079 | 0.8120 | 0.8408 | 0.8261 | 0.9720 |\n| 0.0728 | 6.0 | 10536 | 0.1036 | 0.8214 | 0.8452 | 0.8331 | 0.9730 |\n| 0.0623 | 7.0 | 12292 | 0.1032 | 0.8258 | 0.8487 | 0.8371 | 0.9737 |\n| 0.0599 | 8.0 | 14048 | 0.0990 | 
0.8289 | 0.8527 | 0.8406 | 0.9745 |\n| 0.0558 | 9.0 | 15804 | 0.0998 | 0.8331 | 0.8541 | 0.8434 | 0.9750 |\n| 0.0559 | 10.0 | 17560 | 0.0992 | 0.8349 | 0.8563 | 0.8455 | 0.9752 |\n\n\n### Framework versions\n\n- Transformers 4.48.0.dev0\n- Pytorch 2.2.1+cu121\n- Datasets 3.2.0\n- Tokenizers 0.21.0"},"metadata":{"kind":"string","value":"{\"base_model\": \"answerdotai/ModernBERT-base\", \"datasets\": [\"conll2003\"], \"language\": [\"en\"], \"library_name\": \"transformers\", \"license\": \"apache-2.0\", \"metrics\": [\"precision\", \"recall\", \"f1\", \"accuracy\"], \"pipeline_tag\": \"token-classification\", \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"modernbert-ner-conll2003\", \"results\": [{\"task\": {\"type\": \"token-classification\", \"name\": \"Token Classification\"}, \"dataset\": {\"name\": \"conll2003\", \"type\": \"conll2003\", \"config\": \"conll2003\", \"split\": \"validation\", \"args\": \"conll2003\"}, \"metrics\": [{\"type\": \"precision\", \"value\": 0.8349195930423368, \"name\": \"Precision\"}, {\"type\": \"recall\", \"value\": 0.856277347694379, \"name\": \"Recall\"}, {\"type\": \"f1\", \"value\": 0.8454636091724825, \"name\": \"F1\"}, {\"type\": \"accuracy\", \"value\": 0.9751567306569059, \"name\": \"Accuracy\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["NAMED_ENTITY_RECOGNITION"],"string":"[\n \"NAMED_ENTITY_RECOGNITION\"\n]"},"__index_level_0__":{"kind":"number","value":45568,"string":"45,568"}}},{"rowIdx":43791,"cells":{"id":{"kind":"string","value":"LoneStriker/Nous-Hermes-2-Yi-34B-6.0bpw-h6-exl2"},"author":{"kind":"string","value":"LoneStriker"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","llama","text-generation","yi","instruct","finetune","chatml","gpt4","synthetic data","distillation","conversational","en","base_model:01-ai/Yi-34B","base_model:finetune:01-ai/Yi-34B","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"llama\",\n \"text-generation\",\n \"yi\",\n \"instruct\",\n \"finetune\",\n \"chatml\",\n \"gpt4\",\n \"synthetic data\",\n \"distillation\",\n \"conversational\",\n \"en\",\n \"base_model:01-ai/Yi-34B\",\n \"base_model:finetune:01-ai/Yi-34B\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-26T08:15:29Z","string":"2023-12-26T08:15:29Z"},"last_modified":{"kind":"string","value":"2023-12-26T08:58:12+00:00"},"downloads":{"kind":"number","value":4,"string":"4"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: 01-ai/Yi-34B\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- yi\n- instruct\n- finetune\n- chatml\n- gpt4\n- synthetic data\n- distillation\nmodel-index:\n- name: Nous-Hermes-2-Yi-34B\n results: []\n---\n\n# Nous Hermes 2 - Yi-34B\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oOqrUeAQejuQOra7fNlzG.png)\n\n## Model description\n\nNous Hermes 2 - Yi-34B is a state of the art Yi Fine-tune.\n\nNous Hermes 2 Yi 34B was trained on 1,000,000 entries of primarily GPT-4 generated data, as well as other high quality data from open datasets across the AI landscape.\n\n# Table of Contents\n1. 
[Example Outputs](#example-outputs)\n - Discussing the Laws of Gravity\n - Create a Flask based FTP Server\n3. [Benchmark Results](#benchmark-results)\n - GPT4All\n - AGIEval\n - BigBench\n - Averages Compared\n4. [Prompt Format](#prompt-format)\n5. [Quantized Models](#quantized-models)\n\n\n## Example Outputs\n\n### Discussions about the Law of Gravity:\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/J6Rmdj1VOVN7ry_uGL1PK.png)\n\n### Create an FTP Server in FLASK:\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/B5eu8OvQlg8rINBJGxbB7.png)\n\n## Benchmark Results\n\nNous-Hermes 2 on Yi 34B outperforms all Nous-Hermes & Open-Hermes models of the past, achieving new heights in all benchmarks for a Nous Research LLM as well as surpassing many popular finetunes. \n\n# Benchmarks Compared\n\n### GPT4All:\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/91onORUcUrAqTb3b9mG5e.png)\n\n### AGIEval:\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/hqDpMlKpINfDf4PmB31uW.png)\n\n### BigBench:\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/uh8mZZg_wZinFysxcfLSF.png)\n\n### TruthfulQA:\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/N_cX6YAWjJsvClotuoPdH.png)\n\n\n\n## GPT4All\nGPT-4All Benchmark Set\n```\n| Task |Version| Metric |Value | |Stderr|\n|-------------|------:|--------|-----:|---|-----:|\n|arc_challenge| 0|acc |0.6067|_ |0.0143|\n| | |acc_norm|0.6416|_ |0.0140|\n|arc_easy | 0|acc |0.8594|_ |0.0071|\n| | |acc_norm|0.8569|_ |0.0072|\n|boolq | 1|acc |0.8859|_ |0.0056|\n|hellaswag | 0|acc |0.6407|_ |0.0048|\n| | |acc_norm|0.8388|_ |0.0037|\n|openbookqa | 0|acc |0.3520|_ |0.0214|\n| | |acc_norm|0.4760|_ |0.0224|\n|piqa | 0|acc |0.8215|_ |0.0089|\n| | |acc_norm|0.8303|_ |0.0088|\n|winogrande | 0|acc |0.7908|_ |0.0114|\nAverage: 76.00%\n``` \n\nAGI-Eval\n```\n| Task |Version| Metric |Value | |Stderr|\n|------------------------------|------:|--------|-----:|---|-----:|\n|agieval_aqua_rat | 0|acc |0.3189|_ |0.0293|\n| | |acc_norm|0.2953|_ |0.0287|\n|agieval_logiqa_en | 0|acc |0.5438|_ |0.0195|\n| | |acc_norm|0.4977|_ |0.0196|\n|agieval_lsat_ar | 0|acc |0.2696|_ |0.0293|\n| | |acc_norm|0.2087|_ |0.0269|\n|agieval_lsat_lr | 0|acc |0.7078|_ |0.0202|\n| | |acc_norm|0.6255|_ |0.0215|\n|agieval_lsat_rc | 0|acc |0.7807|_ |0.0253|\n| | |acc_norm|0.7063|_ |0.0278|\n|agieval_sat_en | 0|acc |0.8689|_ |0.0236|\n| | |acc_norm|0.8447|_ |0.0253|\n|agieval_sat_en_without_passage| 0|acc |0.5194|_ |0.0349|\n| | |acc_norm|0.4612|_ |0.0348|\n|agieval_sat_math | 0|acc |0.4409|_ |0.0336|\n| | |acc_norm|0.3818|_ |0.0328|\nAverage: 50.27%\n``` \n\nBigBench Reasoning Test\n```\n| Task |Version| Metric |Value | |Stderr|\n|------------------------------------------------|------:|---------------------|-----:|---|-----:|\n|bigbench_causal_judgement | 0|multiple_choice_grade|0.5737|_ |0.0360|\n|bigbench_date_understanding | 0|multiple_choice_grade|0.7263|_ |0.0232|\n|bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3953|_ |0.0305|\n|bigbench_geometric_shapes | 0|multiple_choice_grade|0.4457|_ |0.0263|\n| | |exact_str_match |0.0000|_ |0.0000|\n|bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.2820|_ |0.0201|\n|bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.2186|_ |0.0156|\n|bigbench_logical_deduction_three_objects | 
0|multiple_choice_grade|0.4733|_ |0.0289|\n|bigbench_movie_recommendation | 0|multiple_choice_grade|0.5200|_ |0.0224|\n|bigbench_navigate | 0|multiple_choice_grade|0.4910|_ |0.0158|\n|bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.7495|_ |0.0097|\n|bigbench_ruin_names | 0|multiple_choice_grade|0.5938|_ |0.0232|\n|bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.3808|_ |0.0154|\n|bigbench_snarks | 0|multiple_choice_grade|0.8066|_ |0.0294|\n|bigbench_sports_understanding | 0|multiple_choice_grade|0.5101|_ |0.0159|\n|bigbench_temporal_sequences | 0|multiple_choice_grade|0.3850|_ |0.0154|\n|bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2160|_ |0.0116|\n|bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1634|_ |0.0088|\n|bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.4733|_ |0.0289|\nAverage: 46.69%\n``` \n\nTruthfulQA:\n```\n| Task |Version|Metric|Value | |Stderr|\n|-------------|------:|------|-----:|---|-----:|\n|truthfulqa_mc| 1|mc1 |0.4333|_ |0.0173|\n| | |mc2 |0.6034|_ |0.0149|\n```\n\nAverage Score Comparison between OpenHermes-1 Llama-2 13B and OpenHermes-2 Mistral 7B against OpenHermes-2.5 on Mistral-7B:\n```\n| Bench | OpenHermes-2.5 Mistral 7B | Nous-Hermes-2-Yi-34B | Change/OpenHermes2 |\n|---------------|---------------------------|----------------------|--------------------|\n|GPT4All | 73.12| 76.00| +2.88|\n|---------------------------------------------------------------------------------------|\n|BigBench | 40.96| 46.69| +5.73|\n|---------------------------------------------------------------------------------------|\n|AGI Eval | 43.07| 50.27| +7.20|\n|---------------------------------------------------------------------------------------|\n|TruthfulQA | 53.04| 60.34| +7.30|\n|---------------------------------------------------------------------------------------|\n|Total Score | 210.19| 233.30| +23.11|\n|---------------------------------------------------------------------------------------|\n|Average Total | 52.38| 58.33| +5.95|\n```\n\n# Prompt Format\n\nNous Hermes 2 uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue.\n\nSystem prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model.\n\nThis is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns.\n\nThis format enables OpenAI endpoint compatability, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI.\n\nPrompt with system instruction (Use whatever system prompt you like, this is just an example!):\n```\n<|im_start|>system\nYou are \"Hermes 2\", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|>\n<|im_start|>user\nHello, who are you?<|im_end|>\n<|im_start|>assistant\nHi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. 
I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|>\n```\n\nThis prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the\n`tokenizer.apply_chat_template()` method:\n\n```python\nmessages = [\n {\"role\": \"system\", \"content\": \"You are Hermes 2.\"},\n {\"role\": \"user\", \"content\": \"Hello, who are you?\"}\n]\ngen_input = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors=\"pt\")\nmodel.generate(gen_input)\n```\n\nWhen tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\\n` to your prompt, to ensure\nthat the model continues with an assistant response.\n\nTo utilize the prompt format without a system prompt, simply leave the line out.\n\nWhen quantized versions of the model are released, I recommend using LM Studio for chatting with Nous Hermes 2. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box.\nIn LM-Studio, simply select the ChatML Prefix on the settings side pane:\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png)\n\n# Quantized Models:\n\n[todo]\n\n[Built with Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl)\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# Nous Hermes 2 - Yi-34B\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oOqrUeAQejuQOra7fNlzG.png)\n\n## Model description\n\nNous Hermes 2 - Yi-34B is a state-of-the-art Yi fine-tune.\n\nNous Hermes 2 Yi 34B was trained on 1,000,000 entries of primarily GPT-4 generated data, as well as other high-quality data from open datasets across the AI landscape.\n\n# Table of Contents\n1. [Example Outputs](#example-outputs)\n - Discussing the Laws of Gravity\n - Create a Flask based FTP Server\n2. [Benchmark Results](#benchmark-results)\n - GPT4All\n - AGIEval\n - BigBench\n - Averages Compared\n3. [Prompt Format](#prompt-format)\n4. [Quantized Models](#quantized-models)\n\n\n## Example Outputs\n\n### Discussions about the Law of Gravity:\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/J6Rmdj1VOVN7ry_uGL1PK.png)\n\n### Create an FTP Server in FLASK:\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/B5eu8OvQlg8rINBJGxbB7.png)\n\n## Benchmark Results\n\nNous-Hermes 2 on Yi 34B outperforms all Nous-Hermes & Open-Hermes models of the past, achieving new heights in all benchmarks for a Nous Research LLM as well as surpassing many popular finetunes. 
\n\n# Benchmarks Compared\n\n### GPT4All:\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/91onORUcUrAqTb3b9mG5e.png)\n\n### AGIEval:\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/hqDpMlKpINfDf4PmB31uW.png)\n\n### BigBench:\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/uh8mZZg_wZinFysxcfLSF.png)\n\n### TruthfulQA:\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/N_cX6YAWjJsvClotuoPdH.png)\n\n\n\n## GPT4All\nGPT-4All Benchmark Set\n```\n| Task |Version| Metric |Value | |Stderr|\n|-------------|------:|--------|-----:|---|-----:|\n|arc_challenge| 0|acc |0.6067|_ |0.0143|\n| | |acc_norm|0.6416|_ |0.0140|\n|arc_easy | 0|acc |0.8594|_ |0.0071|\n| | |acc_norm|0.8569|_ |0.0072|\n|boolq | 1|acc |0.8859|_ |0.0056|\n|hellaswag | 0|acc |0.6407|_ |0.0048|\n| | |acc_norm|0.8388|_ |0.0037|\n|openbookqa | 0|acc |0.3520|_ |0.0214|\n| | |acc_norm|0.4760|_ |0.0224|\n|piqa | 0|acc |0.8215|_ |0.0089|\n| | |acc_norm|0.8303|_ |0.0088|\n|winogrande | 0|acc |0.7908|_ |0.0114|\nAverage: 76.00%\n``` \n\nAGI-Eval\n```\n| Task |Version| Metric |Value | |Stderr|\n|------------------------------|------:|--------|-----:|---|-----:|\n|agieval_aqua_rat | 0|acc |0.3189|_ |0.0293|\n| | |acc_norm|0.2953|_ |0.0287|\n|agieval_logiqa_en | 0|acc |0.5438|_ |0.0195|\n| | |acc_norm|0.4977|_ |0.0196|\n|agieval_lsat_ar | 0|acc |0.2696|_ |0.0293|\n| | |acc_norm|0.2087|_ |0.0269|\n|agieval_lsat_lr | 0|acc |0.7078|_ |0.0202|\n| | |acc_norm|0.6255|_ |0.0215|\n|agieval_lsat_rc | 0|acc |0.7807|_ |0.0253|\n| | |acc_norm|0.7063|_ |0.0278|\n|agieval_sat_en | 0|acc |0.8689|_ |0.0236|\n| | |acc_norm|0.8447|_ |0.0253|\n|agieval_sat_en_without_passage| 0|acc |0.5194|_ |0.0349|\n| | |acc_norm|0.4612|_ |0.0348|\n|agieval_sat_math | 0|acc |0.4409|_ |0.0336|\n| | |acc_norm|0.3818|_ |0.0328|\nAverage: 50.27%\n``` \n\nBigBench Reasoning Test\n```\n| Task |Version| Metric |Value | |Stderr|\n|------------------------------------------------|------:|---------------------|-----:|---|-----:|\n|bigbench_causal_judgement | 0|multiple_choice_grade|0.5737|_ |0.0360|\n|bigbench_date_understanding | 0|multiple_choice_grade|0.7263|_ |0.0232|\n|bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3953|_ |0.0305|\n|bigbench_geometric_shapes | 0|multiple_choice_grade|0.4457|_ |0.0263|\n| | |exact_str_match |0.0000|_ |0.0000|\n|bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.2820|_ |0.0201|\n|bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.2186|_ |0.0156|\n|bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.4733|_ |0.0289|\n|bigbench_movie_recommendation | 0|multiple_choice_grade|0.5200|_ |0.0224|\n|bigbench_navigate | 0|multiple_choice_grade|0.4910|_ |0.0158|\n|bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.7495|_ |0.0097|\n|bigbench_ruin_names | 0|multiple_choice_grade|0.5938|_ |0.0232|\n|bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.3808|_ |0.0154|\n|bigbench_snarks | 0|multiple_choice_grade|0.8066|_ |0.0294|\n|bigbench_sports_understanding | 0|multiple_choice_grade|0.5101|_ |0.0159|\n|bigbench_temporal_sequences | 0|multiple_choice_grade|0.3850|_ |0.0154|\n|bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2160|_ |0.0116|\n|bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1634|_ 
|0.0088|\n|bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.4733|_ |0.0289|\nAverage: 46.69%\n``` \n\nTruthfulQA:\n```\n| Task |Version|Metric|Value | |Stderr|\n|-------------|------:|------|-----:|---|-----:|\n|truthfulqa_mc| 1|mc1 |0.4333|_ |0.0173|\n| | |mc2 |0.6034|_ |0.0149|\n```\n\nAverage Score Comparison between OpenHermes-1 Llama-2 13B and OpenHermes-2 Mistral 7B against OpenHermes-2.5 on Mistral-7B:\n```\n| Bench | OpenHermes-2.5 Mistral 7B | Nous-Hermes-2-Yi-34B | Change/OpenHermes2 |\n|---------------|---------------------------|----------------------|--------------------|\n|GPT4All | 73.12| 76.00| +2.88|\n|---------------------------------------------------------------------------------------|\n|BigBench | 40.96| 46.69| +5.73|\n|---------------------------------------------------------------------------------------|\n|AGI Eval | 43.07| 50.27| +7.20|\n|---------------------------------------------------------------------------------------|\n|TruthfulQA | 53.04| 60.34| +7.30|\n|---------------------------------------------------------------------------------------|\n|Total Score | 210.19| 233.30| +23.11|\n|---------------------------------------------------------------------------------------|\n|Average Total | 52.38| 58.33| +5.95|\n```\n\n# Prompt Format\n\nNous Hermes 2 uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue.\n\nSystem prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model.\n\nThis is a more complex format than alpaca or sharegpt, where special tokens were added to denote the beginning and end of any turn, along with roles for the turns.\n\nThis format enables OpenAI endpoint compatability, and people familiar with ChatGPT API will be familiar with the format, as it is the same used by OpenAI.\n\nPrompt with system instruction (Use whatever system prompt you like, this is just an example!):\n```\n<|im_start|>system\nYou are \"Hermes 2\", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|>\n<|im_start|>user\nHello, who are you?<|im_end|>\n<|im_start|>assistant\nHi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|>\n```\n\nThis prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the\n`tokenizer.apply_chat_template()` method:\n\n```python\nmessages = [\n {\"role\": \"system\", \"content\": \"You are Hermes 2.\"},\n {\"role\": \"user\", \"content\": \"Hello, who are you?\"}\n]\ngen_input = tokenizer.apply_chat_template(message, return_tensors=\"pt\")\nmodel.generate(**gen_input)\n```\n\nWhen tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\\n` to your prompt, to ensure\nthat the model continues with an assistant response.\n\nTo utilize the prompt format without a system prompt, simply leave the line out.\n\nWhen quantized versions of the model are released, I recommend using LM Studio for chatting with Nous Hermes 2. 
It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box.\nIn LM Studio, simply select the ChatML Prefix on the settings side pane:\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png)\n\n# Quantized Models:\n\n[todo]\n\n[Built with Axolotl](https://github.com/OpenAccess-AI-Collective/axolotl)\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"01-ai/Yi-34B\", \"language\": [\"en\"], \"license\": \"apache-2.0\", \"tags\": [\"yi\", \"instruct\", \"finetune\", \"chatml\", \"gpt4\", \"synthetic data\", \"distillation\"], \"model-index\": [{\"name\": \"Nous-Hermes-2-Yi-34B\", \"results\": []}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45569,"string":"45,569"}}},{"rowIdx":43792,"cells":{"id":{"kind":"string","value":"mqy/mt5-small-finetuned-2"},"author":{"kind":"string","value":"mqy"},"task_category":{"kind":"string","value":"summarization"},"tags":{"kind":"list like","value":["transformers","pytorch","tensorboard","mt5","text2text-generation","summarization","generated_from_trainer","license:apache-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tensorboard\",\n \"mt5\",\n \"text2text-generation\",\n \"summarization\",\n \"generated_from_trainer\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-03-10T06:43:19Z","string":"2023-03-10T06:43:19Z"},"last_modified":{"kind":"string","value":"2023-03-10T09:56:09+00:00"},"downloads":{"kind":"number","value":16,"string":"16"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: apache-2.0\nmetrics:\n- rouge\ntags:\n- summarization\n- generated_from_trainer\nmodel-index:\n- name: mt5-small-finetuned-2\n results: []\n---\n\n\n\n# mt5-small-finetuned-2\n\nThis model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 2.3527\n- Rouge1: 19.64\n- Rouge2: 5.91\n- Rougel: 19.32\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0001\n- train_batch_size: 12\n- eval_batch_size: 12\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 40\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel |\n|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|\n| 4.4337 | 1.93 | 500 | 2.5989 | 15.87 | 4.85 | 15.76 |\n| 3.0621 | 3.86 | 1000 | 2.4750 | 18.99 | 5.94 | 18.78 |\n| 2.8411 | 5.79 | 1500 | 2.4011 | 18.31 | 5.73 | 18.22 |\n| 2.6947 | 7.72 | 2000 | 2.3851 | 19.17 | 5.79 | 19.0 |\n| 2.5872 | 9.65 | 2500 | 2.3918 | 18.99 | 6.04 | 18.78 |\n| 2.4996 | 11.58 | 3000 | 2.3689 | 18.77 | 5.83 | 18.38 |\n| 2.4192 | 13.51 | 3500 | 2.3604 | 19.84 | 5.98 | 19.5 |\n| 2.3524 | 15.44 | 4000 | 2.3694 | 20.13 | 6.52 | 19.84 |\n| 2.3012 | 
17.37 | 4500 | 2.3637 | 19.25 | 5.5 | 18.85 |\n| 2.2408 | 19.31 | 5000 | 2.3374 | 19.79 | 6.25 | 19.33 |\n| 2.2004 | 21.24 | 5500 | 2.3400 | 19.72 | 6.06 | 19.26 |\n| 2.1631 | 23.17 | 6000 | 2.3584 | 19.95 | 6.08 | 19.46 |\n| 2.123 | 25.1 | 6500 | 2.3527 | 19.64 | 5.91 | 19.32 |\n\n\n### Framework versions\n\n- Transformers 4.26.1\n- Pytorch 1.13.1+cu116\n- Datasets 2.10.1\n- Tokenizers 0.13.2\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# mt5-small-finetuned-2\n\nThis model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 2.3527\n- Rouge1: 19.64\n- Rouge2: 5.91\n- Rougel: 19.32\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0001\n- train_batch_size: 12\n- eval_batch_size: 12\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 40\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel |\n|:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:|\n| 4.4337 | 1.93 | 500 | 2.5989 | 15.87 | 4.85 | 15.76 |\n| 3.0621 | 3.86 | 1000 | 2.4750 | 18.99 | 5.94 | 18.78 |\n| 2.8411 | 5.79 | 1500 | 2.4011 | 18.31 | 5.73 | 18.22 |\n| 2.6947 | 7.72 | 2000 | 2.3851 | 19.17 | 5.79 | 19.0 |\n| 2.5872 | 9.65 | 2500 | 2.3918 | 18.99 | 6.04 | 18.78 |\n| 2.4996 | 11.58 | 3000 | 2.3689 | 18.77 | 5.83 | 18.38 |\n| 2.4192 | 13.51 | 3500 | 2.3604 | 19.84 | 5.98 | 19.5 |\n| 2.3524 | 15.44 | 4000 | 2.3694 | 20.13 | 6.52 | 19.84 |\n| 2.3012 | 17.37 | 4500 | 2.3637 | 19.25 | 5.5 | 18.85 |\n| 2.2408 | 19.31 | 5000 | 2.3374 | 19.79 | 6.25 | 19.33 |\n| 2.2004 | 21.24 | 5500 | 2.3400 | 19.72 | 6.06 | 19.26 |\n| 2.1631 | 23.17 | 6000 | 2.3584 | 19.95 | 6.08 | 19.46 |\n| 2.123 | 25.1 | 6500 | 2.3527 | 19.64 | 5.91 | 19.32 |\n\n\n### Framework versions\n\n- Transformers 4.26.1\n- Pytorch 1.13.1+cu116\n- Datasets 2.10.1\n- Tokenizers 0.13.2\n"},"metadata":{"kind":"string","value":"{\"license\": \"apache-2.0\", \"metrics\": [\"rouge\"], \"tags\": [\"summarization\", \"generated_from_trainer\"], \"model-index\": [{\"name\": \"mt5-small-finetuned-2\", \"results\": []}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["SUMMARIZATION"],"string":"[\n \"SUMMARIZATION\"\n]"},"__index_level_0__":{"kind":"number","value":45570,"string":"45,570"}}},{"rowIdx":43793,"cells":{"id":{"kind":"string","value":"Helsinki-NLP/opus-mt-ny-es"},"author":{"kind":"string","value":"Helsinki-NLP"},"task_category":{"kind":"string","value":"translation"},"tags":{"kind":"list like","value":["transformers","pytorch","tf","marian","text2text-generation","translation","ny","es","license:apache-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tf\",\n \"marian\",\n \"text2text-generation\",\n \"translation\",\n \"ny\",\n \"es\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-03-02T23:29:04Z","string":"2022-03-02T23:29:04Z"},"last_modified":{"kind":"string","value":"2023-08-16T12:02:11+00:00"},"downloads":{"kind":"number","value":41,"string":"41"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: apache-2.0\ntags:\n- translation\n---\n\n### opus-mt-ny-es\n\n* source languages: ny\n* target languages: es\n* OPUS readme: [ny-es](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ny-es/README.md)\n\n* dataset: opus\n* model: transformer-align\n* pre-processing: normalization + SentencePiece\n* download original weights: [opus-2020-01-16.zip](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.zip)\n* test set translations: [opus-2020-01-16.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.test.txt)\n* test set scores: [opus-2020-01-16.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.eval.txt)\n\n## Benchmarks\n\n| testset | BLEU | chr-F |\n|-----------------------|-------|-------|\n| JW300.ny.es \t| 27.9 \t| 0.457 |\n\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n### opus-mt-ny-es\n\n* source languages: ny\n* target languages: es\n* OPUS readme: [ny-es](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ny-es/README.md)\n\n* dataset: opus\n* model: transformer-align\n* pre-processing: normalization + SentencePiece\n* download original weights: [opus-2020-01-16.zip](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.zip)\n* test set translations: [opus-2020-01-16.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.test.txt)\n* test set scores: [opus-2020-01-16.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.eval.txt)\n\n## Benchmarks\n\n| testset | BLEU | chr-F |\n|-----------------------|-------|-------|\n| JW300.ny.es \t| 27.9 \t| 0.457 |\n\n"},"metadata":{"kind":"string","value":"{\"license\": \"apache-2.0\", \"tags\": [\"translation\"]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45571,"string":"45,571"}}},{"rowIdx":43794,"cells":{"id":{"kind":"string","value":"esuriddick/distilbert-base-uncased-finetuned-emotion"},"author":{"kind":"string","value":"esuriddick"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","safetensors","distilbert","text-classification","generated_from_trainer","en","dataset:dair-ai/emotion","base_model:distilbert/distilbert-base-uncased","base_model:finetune:distilbert/distilbert-base-uncased","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"safetensors\",\n \"distilbert\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"en\",\n \"dataset:dair-ai/emotion\",\n \"base_model:distilbert/distilbert-base-uncased\",\n \"base_model:finetune:distilbert/distilbert-base-uncased\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-08-19T16:55:35Z","string":"2023-08-19T16:55:35Z"},"last_modified":{"kind":"string","value":"2023-09-12T07:53:07+00:00"},"downloads":{"kind":"number","value":280,"string":"280"},"likes":{"kind":"number","value":2,"string":"2"},"README":{"kind":"string","value":"---\nbase_model: distilbert-base-uncased\ndatasets:\n- dair-ai/emotion\nlanguage:\n- en\nlicense: apache-2.0\nmetrics:\n- accuracy\n- f1\ntags:\n- generated_from_trainer\nmodel-index:\n- name: distilbert-base-uncased-finetuned-emotion\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: emotion\n type: emotion\n config: split\n split: validation\n args: split\n metrics:\n - type: accuracy\n value: 0.9375\n name: Accuracy\n - type: f1\n value: 0.937890467332837\n name: F1\n---\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.1448\n- Accuracy: 0.9375\n- F1: 0.9379\n\nThe notebook used to fine-tune this model may be found [HERE](https://www.kaggle.com/marcoloureno/distilbert-base-uncased-finetuned-emotion).\n\n## Model description\n\nDistilBERT is a transformers model, smaller and faster than BERT, which was pretrained on the same corpus in a\nself-supervised fashion, using the BERT base model as a teacher. This means it was pretrained on the raw texts only,\nwith no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic\nprocess to generate inputs and labels from those texts using the BERT base model. More precisely, it was pretrained\nwith three objectives:\n\n- Distillation loss: the model was trained to return the same probabilities as the BERT base model.\n- Masked language modeling (MLM): this is part of the original training loss of the BERT base model. When taking a\n sentence, the model randomly masks 15% of the words in the input then run the entire masked sentence through the\n model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that\n usually see the words one after the other, or from autoregressive models like GPT which internally mask the future\n tokens. It allows the model to learn a bidirectional representation of the sentence.\n- Cosine embedding loss: the model was also trained to generate hidden states as close as possible as the BERT base\n model.\n\nThis way, the model learns the same inner representation of the English language than its teacher model, while being\nfaster for inference or downstream tasks.\n\n## Intended uses & limitations\n[Emotion](https://huggingface.co/datasets/dair-ai/emotion) is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. This dataset was developed for the paper entitled \"CARER: Contextualized Affect Representations for Emotion Recognition\" (Saravia et al.) 
through noisy labels, annotated via distant\nsupervision as in the paper\"Twitter sentiment classification using distant supervision\" (Go et al).\n\nThe DistilBERT model was fine-tuned to this dataset, allowing for the classification of sentences into one of the six basic emotions (anger, fear, joy, love, sadness, and surprise).\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| 0.5337 | 1.0 | 250 | 0.1992 | 0.927 | 0.9262 |\n| 0.1405 | 2.0 | 500 | 0.1448 | 0.9375 | 0.9379 |\n\n\n### Framework versions\n\n- Transformers 4.30.2\n- Pytorch 2.0.0\n- Datasets 2.1.0\n- Tokenizers 0.13.3"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.1448\n- Accuracy: 0.9375\n- F1: 0.9379\n\nThe notebook used to fine-tune this model may be found [HERE](https://www.kaggle.com/marcoloureno/distilbert-base-uncased-finetuned-emotion).\n\n## Model description\n\nDistilBERT is a transformers model, smaller and faster than BERT, which was pretrained on the same corpus in a\nself-supervised fashion, using the BERT base model as a teacher. This means it was pretrained on the raw texts only,\nwith no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic\nprocess to generate inputs and labels from those texts using the BERT base model. More precisely, it was pretrained\nwith three objectives:\n\n- Distillation loss: the model was trained to return the same probabilities as the BERT base model.\n- Masked language modeling (MLM): this is part of the original training loss of the BERT base model. When taking a\n sentence, the model randomly masks 15% of the words in the input then run the entire masked sentence through the\n model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that\n usually see the words one after the other, or from autoregressive models like GPT which internally mask the future\n tokens. It allows the model to learn a bidirectional representation of the sentence.\n- Cosine embedding loss: the model was also trained to generate hidden states as close as possible as the BERT base\n model.\n\nThis way, the model learns the same inner representation of the English language than its teacher model, while being\nfaster for inference or downstream tasks.\n\n## Intended uses & limitations\n[Emotion](https://huggingface.co/datasets/dair-ai/emotion) is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. This dataset was developed for the paper entitled \"CARER: Contextualized Affect Representations for Emotion Recognition\" (Saravia et al.) 
through noisy labels, annotated via distant\nsupervision as in the paper\"Twitter sentiment classification using distant supervision\" (Go et al).\n\nThe DistilBERT model was fine-tuned to this dataset, allowing for the classification of sentences into one of the six basic emotions (anger, fear, joy, love, sadness, and surprise).\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 5e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| 0.5337 | 1.0 | 250 | 0.1992 | 0.927 | 0.9262 |\n| 0.1405 | 2.0 | 500 | 0.1448 | 0.9375 | 0.9379 |\n\n\n### Framework versions\n\n- Transformers 4.30.2\n- Pytorch 2.0.0\n- Datasets 2.1.0\n- Tokenizers 0.13.3"},"metadata":{"kind":"string","value":"{\"base_model\": \"distilbert-base-uncased\", \"datasets\": [\"dair-ai/emotion\"], \"language\": [\"en\"], \"license\": \"apache-2.0\", \"metrics\": [\"accuracy\", \"f1\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"distilbert-base-uncased-finetuned-emotion\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"emotion\", \"type\": \"emotion\", \"config\": \"split\", \"split\": \"validation\", \"args\": \"split\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.9375, \"name\": \"Accuracy\"}, {\"type\": \"f1\", \"value\": 0.937890467332837, \"name\": \"F1\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45572,"string":"45,572"}}},{"rowIdx":43795,"cells":{"id":{"kind":"string","value":"TransferGraph/zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate"},"author":{"kind":"string","value":"TransferGraph"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["peft","safetensors","parquet","text-classification","dataset:tweet_eval","base_model:zenkri/autotrain-Arabic_Poetry_by_Subject-920730230","base_model:adapter:zenkri/autotrain-Arabic_Poetry_by_Subject-920730230","model-index","region:us"],"string":"[\n \"peft\",\n \"safetensors\",\n \"parquet\",\n \"text-classification\",\n \"dataset:tweet_eval\",\n \"base_model:zenkri/autotrain-Arabic_Poetry_by_Subject-920730230\",\n \"base_model:adapter:zenkri/autotrain-Arabic_Poetry_by_Subject-920730230\",\n \"model-index\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-02-29T13:50:53Z","string":"2024-02-29T13:50:53Z"},"last_modified":{"kind":"string","value":"2024-02-29T13:50:55+00:00"},"downloads":{"kind":"number","value":0,"string":"0"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: zenkri/autotrain-Arabic_Poetry_by_Subject-920730230\ndatasets:\n- tweet_eval\nlibrary_name: peft\nmetrics:\n- accuracy\ntags:\n- parquet\n- text-classification\nmodel-index:\n- name: zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: tweet_eval\n type: tweet_eval\n config: hate\n split: validation\n args: 
hate\n metrics:\n - type: accuracy\n value: 0.727\n name: accuracy\n---\n\n\n\n# zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate\n\nThis model is a fine-tuned version of [zenkri/autotrain-Arabic_Poetry_by_Subject-920730230](https://huggingface.co/zenkri/autotrain-Arabic_Poetry_by_Subject-920730230) on the tweet_eval dataset.\nIt achieves the following results on the evaluation set:\n- accuracy: 0.727\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0004\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 4\n\n### Training results\n\n| accuracy | train_loss | epoch |\n|:--------:|:----------:|:-----:|\n| 0.457 | None | 0 |\n| 0.716 | 0.5969 | 0 |\n| 0.729 | 0.5011 | 1 |\n| 0.726 | 0.4713 | 2 |\n| 0.727 | 0.4559 | 3 |\n\n\n### Framework versions\n\n- PEFT 0.8.2\n- Transformers 4.37.2\n- Pytorch 2.2.0\n- Datasets 2.16.1\n- Tokenizers 0.15.2"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate\n\nThis model is a fine-tuned version of [zenkri/autotrain-Arabic_Poetry_by_Subject-920730230](https://huggingface.co/zenkri/autotrain-Arabic_Poetry_by_Subject-920730230) on the tweet_eval dataset.\nIt achieves the following results on the evaluation set:\n- accuracy: 0.727\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0004\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 4\n\n### Training results\n\n| accuracy | train_loss | epoch |\n|:--------:|:----------:|:-----:|\n| 0.457 | None | 0 |\n| 0.716 | 0.5969 | 0 |\n| 0.729 | 0.5011 | 1 |\n| 0.726 | 0.4713 | 2 |\n| 0.727 | 0.4559 | 3 |\n\n\n### Framework versions\n\n- PEFT 0.8.2\n- Transformers 4.37.2\n- Pytorch 2.2.0\n- Datasets 2.16.1\n- Tokenizers 0.15.2"},"metadata":{"kind":"string","value":"{\"base_model\": \"zenkri/autotrain-Arabic_Poetry_by_Subject-920730230\", \"datasets\": [\"tweet_eval\"], \"library_name\": \"peft\", \"metrics\": [\"accuracy\"], \"tags\": [\"parquet\", \"text-classification\"], \"model-index\": [{\"name\": \"zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"tweet_eval\", \"type\": \"tweet_eval\", \"config\": \"hate\", \"split\": \"validation\", \"args\": \"hate\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.727, \"name\": \"accuracy\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n 
\"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45573,"string":"45,573"}}},{"rowIdx":43796,"cells":{"id":{"kind":"string","value":"YakovElm/Jira20SetFitModel_Train_balance_ratio_Half"},"author":{"kind":"string","value":"YakovElm"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["sentence-transformers","pytorch","mpnet","setfit","text-classification","arxiv:2209.11055","license:apache-2.0","region:us"],"string":"[\n \"sentence-transformers\",\n \"pytorch\",\n \"mpnet\",\n \"setfit\",\n \"text-classification\",\n \"arxiv:2209.11055\",\n \"license:apache-2.0\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-06-10T18:17:33Z","string":"2023-06-10T18:17:33Z"},"last_modified":{"kind":"string","value":"2023-06-10T18:18:06+00:00"},"downloads":{"kind":"number","value":10,"string":"10"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: apache-2.0\npipeline_tag: text-classification\ntags:\n- setfit\n- sentence-transformers\n- text-classification\n---\n\n# YakovElm/Jira20SetFitModel_Train_balance_ratio_Half\n\nThis is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:\n\n1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.\n2. Training a classification head with features from the fine-tuned Sentence Transformer.\n\n## Usage\n\nTo use this model for inference, first install the SetFit library:\n\n```bash\npython -m pip install setfit\n```\n\nYou can then run inference as follows:\n\n```python\nfrom setfit import SetFitModel\n\n# Download from Hub and run inference\nmodel = SetFitModel.from_pretrained(\"YakovElm/Jira20SetFitModel_Train_balance_ratio_Half\")\n# Run inference\npreds = model([\"i loved the spiderman movie!\", \"pineapple on pizza is the worst 🤮\"])\n```\n\n## BibTeX entry and citation info\n\n```bibtex\n@article{https://doi.org/10.48550/arxiv.2209.11055,\ndoi = {10.48550/ARXIV.2209.11055},\nurl = {https://arxiv.org/abs/2209.11055},\nauthor = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},\nkeywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},\ntitle = {Efficient Few-Shot Learning Without Prompts},\npublisher = {arXiv},\nyear = {2022},\ncopyright = {Creative Commons Attribution 4.0 International}\n}\n```\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n# YakovElm/Jira20SetFitModel_Train_balance_ratio_Half\n\nThis is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:\n\n1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.\n2. 
Training a classification head with features from the fine-tuned Sentence Transformer.\n\n## Usage\n\nTo use this model for inference, first install the SetFit library:\n\n```bash\npython -m pip install setfit\n```\n\nYou can then run inference as follows:\n\n```python\nfrom setfit import SetFitModel\n\n# Download from Hub and run inference\nmodel = SetFitModel.from_pretrained(\"YakovElm/Jira20SetFitModel_Train_balance_ratio_Half\")\n# Run inference\npreds = model([\"i loved the spiderman movie!\", \"pineapple on pizza is the worst 🤮\"])\n```\n\n## BibTeX entry and citation info\n\n```bibtex\n@article{https://doi.org/10.48550/arxiv.2209.11055,\ndoi = {10.48550/ARXIV.2209.11055},\nurl = {https://arxiv.org/abs/2209.11055},\nauthor = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},\nkeywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},\ntitle = {Efficient Few-Shot Learning Without Prompts},\npublisher = {arXiv},\nyear = {2022},\ncopyright = {Creative Commons Attribution 4.0 International}\n}\n```\n"},"metadata":{"kind":"string","value":"{\"license\": \"apache-2.0\", \"pipeline_tag\": \"text-classification\", \"tags\": [\"setfit\", \"sentence-transformers\", \"text-classification\"]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n \"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45574,"string":"45,574"}}},{"rowIdx":43797,"cells":{"id":{"kind":"string","value":"gaudi/opus-mt-en-tn-ctranslate2"},"author":{"kind":"string","value":"gaudi"},"task_category":{"kind":"string","value":"translation"},"tags":{"kind":"list like","value":["transformers","marian","ctranslate2","translation","license:apache-2.0","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"marian\",\n \"ctranslate2\",\n \"translation\",\n \"license:apache-2.0\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-18T15:05:03Z","string":"2024-07-18T15:05:03Z"},"last_modified":{"kind":"string","value":"2024-10-19T00:32:09+00:00"},"downloads":{"kind":"number","value":5,"string":"5"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: apache-2.0\ntags:\n- ctranslate2\n- translation\n---\n# Repository General Information\n## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)!\n- Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn)\n- This respository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2).\n- This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil).\n\n# What is CTranslate2?\n[CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models.\n\nCTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU.\n\nCTranslate2 is one of the most performant ways of hosting translation models at scale. 
Current supported models include:\n- Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper\n- Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon\n- Encoder-only models: BERT, DistilBERT, XLM-RoBERTa\n\nThe project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration.\n\n# CTranslate2 Benchmarks\nPlease note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against `newstest2014` (En -> De) dataset.\n\nThe benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers.\n\nPlease note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings.\n\n## CPU Benchmarks for Generic Opus-MT Models\n| Library | Tokens per Second | Max Memory Usage | BLEU |\n| :----: | :----: | :----: | :----: |\n| Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 |\n| Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 |\n| Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 |\n| CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 |\n| CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 |\n\n## GPU Benchmarks for Generic Opus-MT Models\n| Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU |\n| :----: | :----: | :----: | :----: | :----: |\n| Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 |\n| Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 |\n| CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 |\n| CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 |\n\n`Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.`\n\n**Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**
\n**Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn).**\n\n## Internal Benchmarks\nInternal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library. A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints, with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality.\n\n\n# CTranslate2 Installation\n```bash\npip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0\n```\n### ct2-transformers-converter Command Used:\n```bash\nct2-transformers-converter --model Helsinki-NLP/opus-mt-en-tn --output_dir ./ctranslate2/opus-mt-en-tn-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16\n```\n# CTranslate2 Converted Checkpoint Information:\n**Compatible With:**\n- [ctranslate2](https://github.com/OpenNMT/CTranslate2)\n- [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2)\n\n**Compute Type:**\n- `compute_type=int8_float16` for `device=\"cuda\"`\n- `compute_type=int8` for `device=\"cpu\"`\n\n# Sample Code - ctranslate2\n#### Clone the repository to the working directory or wherever you wish to store the model artifacts. ####\n```bash\ngit clone https://huggingface.co/gaudi/opus-mt-en-tn-ctranslate2\n```\n#### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. 
####\n```python\nfrom ctranslate2 import Translator\nimport transformers\n\nmodel_dir = \"./opus-mt-en-tn-ctranslate2\" # Path to model directory.\ntranslator = Translator(\n model_path=model_dir,\n device=\"cuda\", # cpu, cuda, or auto.\n inter_threads=1, # Maximum number of parallel translations.\n intra_threads=4, # Number of OpenMP threads per translator.\n compute_type=\"int8_float16\", # int8 for cpu or int8_float16 for cuda.\n)\n\ntokenizer = transformers.AutoTokenizer.from_pretrained(model_dir)\n\nsource = tokenizer.convert_ids_to_tokens(tokenizer.encode(\"XXXXXX, XXX XX XXXXXX.\"))\nresults = translator.translate_batch([source])\ntarget = results[0].hypotheses[0]\n\nprint(tokenizer.decode(tokenizer.convert_tokens_to_ids(target)))\n```\n# Sample Code - hf-hub-ctranslate2\n**Derived From [michaelfeil](https://huggingface.co/michaelfeil):**\n```python\nfrom hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub\nfrom transformers import AutoTokenizer\n\nmodel_name = \"gaudi/opus-mt-en-tn-ctranslate2\"\nmodel = TranslatorCT2fromHfHub(\n model_name_or_path=model_name,\n device=\"cuda\",\n compute_type=\"int8_float16\",\n tokenizer=AutoTokenizer.from_pretrained(model_name)\n)\noutputs = model.generate(\n text=[\"XXX XX XXX XXXXXXX XXXX?\", \"XX XX XXXX XX XXX!\"],\n)\nprint(outputs)\n```\n# License and other remarks:\nLicense conditions are intended to be identical to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn) by Helsinki-NLP.\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"# Repository General Information\n## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)!\n- Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn)\n- This repository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2).\n- This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil).\n\n# What is CTranslate2?\n[CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models.\n\nCTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU.\n\nCTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include:\n- Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper\n- Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon\n- Encoder-only models: BERT, DistilBERT, XLM-RoBERTa\n\nThe project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration.\n\n# CTranslate2 Benchmarks\nPlease note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. 
Tested against `newstest2014` (En -> De) dataset.\n\nThe benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and reproduce these numbers.\n\nPlease note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings.\n\n## CPU Benchmarks for Generic Opus-MT Models\n| Library | Tokens per Second | Max Memory Usage | BLEU |\n| :----: | :----: | :----: | :----: |\n| Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 |\n| Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 |\n| Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 |\n| CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 |\n| CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 |\n\n## GPU Benchmarks for Generic Opus-MT Models\n| Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU |\n| :----: | :----: | :----: | :----: | :----: |\n| Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 |\n| Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 |\n| CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 |\n| CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 |\n\n`Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.`\n\n**Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**
\n**Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn).**\n\n## Internal Benchmarks\nInternal testing on our end showed **inference times reduced by 6x-10x** on average compared the vanilla checkpoints using the *transformers* library. A **slight reduction on BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inferencing performance and translation quality.\n\n\n# CTranslate2 Installation\n```bash\npip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0\n```\n### ct2-transformers-converter Command Used:\n```bash\nct2-transformers-converter --model Helsinki-NLP/opus-mt-en-tn --output_dir ./ctranslate2/opus-mt-en-tn-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16\n```\n# CTranslate2 Converted Checkpoint Information:\n**Compatible With:**\n- [ctranslate2](https://github.com/OpenNMT/CTranslate2)\n- [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2)\n\n**Compute Type:**\n- `compute_type=int8_float16` for `device=\"cuda\"`\n- `compute_type=int8` for `device=\"cpu\"`\n\n# Sample Code - ctranslate2\n#### Clone the repository to the working directory or wherever you wish to store the model artifacts. ####\n```bash\ngit clone https://huggingface.co/gaudi/opus-mt-en-tn-ctranslate2\n```\n#### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. 
####\n```python\nfrom ctranslate2 import Translator\nimport transformers\n\nmodel_dir = \"./opus-mt-en-tn-ctranslate2\" # Path to model directory.\ntranslator = Translator(\n model_path=model_dir,\n device=\"cuda\", # cpu, cuda, or auto.\n inter_threads=1, # Maximum number of parallel translations.\n intra_threads=4, # Number of OpenMP threads per translator.\n compute_type=\"int8_float16\", # int8 for cpu or int8_float16 for cuda.\n)\n\ntokenizer = transformers.AutoTokenizer.from_pretrained(model_dir)\n\nsource = tokenizer.convert_ids_to_tokens(tokenizer.encode(\"XXXXXX, XXX XX XXXXXX.\"))\nresults = translator.translate_batch([source])\ntarget = results[0].hypotheses[0]\n\nprint(tokenizer.decode(tokenizer.convert_tokens_to_ids(target)))\n```\n# Sample Code - hf-hub-ctranslate2\n**Derived From [michaelfeil](https://huggingface.co/michaelfeil):**\n```python\nfrom hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub\nfrom transformers import AutoTokenizer\n\nmodel_name = \"gaudi/opus-mt-en-tn-ctranslate2\"\nmodel = TranslatorCT2fromHfHub(\n model_name_or_path=model_name,\n device=\"cuda\",\n compute_type=\"int8_float16\",\n tokenizer=AutoTokenizer.from_pretrained(model_name)\n)\noutputs = model.generate(\n text=[\"XXX XX XXX XXXXXXX XXXX?\", \"XX XX XXXX XX XXX!\"],\n)\nprint(outputs)\n```\n# License and other remarks:\nLicense conditions are intended to be idential to [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn) by Helsinki-NLP.\n"},"metadata":{"kind":"string","value":"{\"license\": \"apache-2.0\", \"tags\": [\"ctranslate2\", \"translation\"]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45575,"string":"45,575"}}},{"rowIdx":43798,"cells":{"id":{"kind":"string","value":"clemyam/distilbert-base-uncased-finetuned-emotion"},"author":{"kind":"string","value":"clemyam"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","tensorboard","safetensors","distilbert","text-classification","generated_from_trainer","dataset:emotion","base_model:distilbert/distilbert-base-uncased","base_model:finetune:distilbert/distilbert-base-uncased","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"tensorboard\",\n \"safetensors\",\n \"distilbert\",\n \"text-classification\",\n \"generated_from_trainer\",\n \"dataset:emotion\",\n \"base_model:distilbert/distilbert-base-uncased\",\n \"base_model:finetune:distilbert/distilbert-base-uncased\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-11-08T14:49:00Z","string":"2023-11-08T14:49:00Z"},"last_modified":{"kind":"string","value":"2023-11-09T19:32:28+00:00"},"downloads":{"kind":"number","value":8,"string":"8"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: distilbert-base-uncased\ndatasets:\n- emotion\nlicense: apache-2.0\nmetrics:\n- accuracy\n- f1\ntags:\n- generated_from_trainer\nmodel-index:\n- name: distilbert-base-uncased-finetuned-emotion\n results:\n - task:\n type: text-classification\n name: Text Classification\n dataset:\n name: emotion\n type: emotion\n config: split\n split: validation\n args: split\n metrics:\n - type: accuracy\n value: 0.9235\n 
name: Accuracy\n - type: f1\n value: 0.9229917619966129\n name: F1\n---\n\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2158\n- Accuracy: 0.9235\n- F1: 0.9230\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| 0.8073 | 1.0 | 250 | 0.3193 | 0.911 | 0.9105 |\n| 0.2524 | 2.0 | 500 | 0.2158 | 0.9235 | 0.9230 |\n\n\n### Framework versions\n\n- Transformers 4.35.0\n- Pytorch 2.1.0+cpu\n- Datasets 2.14.6\n- Tokenizers 0.14.1\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\n\n\n# distilbert-base-uncased-finetuned-emotion\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.2158\n- Accuracy: 0.9235\n- F1: 0.9230\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 2\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|\n| 0.8073 | 1.0 | 250 | 0.3193 | 0.911 | 0.9105 |\n| 0.2524 | 2.0 | 500 | 0.2158 | 0.9235 | 0.9230 |\n\n\n### Framework versions\n\n- Transformers 4.35.0\n- Pytorch 2.1.0+cpu\n- Datasets 2.14.6\n- Tokenizers 0.14.1\n"},"metadata":{"kind":"string","value":"{\"base_model\": \"distilbert-base-uncased\", \"datasets\": [\"emotion\"], \"license\": \"apache-2.0\", \"metrics\": [\"accuracy\", \"f1\"], \"tags\": [\"generated_from_trainer\"], \"model-index\": [{\"name\": \"distilbert-base-uncased-finetuned-emotion\", \"results\": [{\"task\": {\"type\": \"text-classification\", \"name\": \"Text Classification\"}, \"dataset\": {\"name\": \"emotion\", \"type\": \"emotion\", \"config\": \"split\", \"split\": \"validation\", \"args\": \"split\"}, \"metrics\": [{\"type\": \"accuracy\", \"value\": 0.9235, \"name\": \"Accuracy\"}, {\"type\": \"f1\", \"value\": 0.9229917619966129, \"name\": \"F1\"}]}]}]}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TEXT_CLASSIFICATION"],"string":"[\n 
\"TEXT_CLASSIFICATION\"\n]"},"__index_level_0__":{"kind":"number","value":45576,"string":"45,576"}}},{"rowIdx":43799,"cells":{"id":{"kind":"string","value":"haonan-li/bactrian-ro-llama-7b-lora"},"author":{"kind":"string","value":"haonan-li"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["arxiv:2305.15011","license:mit","region:us"],"string":"[\n \"arxiv:2305.15011\",\n \"license:mit\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-05-30T12:20:30Z","string":"2023-05-30T12:20:30Z"},"last_modified":{"kind":"string","value":"2023-05-30T12:21:07+00:00"},"downloads":{"kind":"number","value":0,"string":"0"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: mit\n---\n\nThis repo contains a low-rank adapter (LoRA) for LLaMA-7b \nfit on the [Stanford-Alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca)\nand [databricks-dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data) data in Romanian.\n\n### Dataset Creation\n\n1. English Instructions: The English instuctions are obtained from [alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca), and [dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data).\n2. Instruction Translation: The instructions (and inputs) are translated into the target languages using Google Translation API (conducted on April 2023).\n3. Output Generation: We generate output from `gpt-3.5-turbo` for each language (conducted on April 2023).\n\n

\n\n### Training Parameters\n\nThe code for training the model is provided in our [github](https://github.com/mbzuai-nlp/Bactrian-X), which is adapted from [Alpaca-LoRA](https://github.com/tloen/alpaca-lora).\nThis version of the weights was trained with the following hyperparameters:\n\n- Epochs: 8\n- Batch size: 128\n- Cutoff length: 512\n- Learning rate: 3e-4\n- Lora _r_: 16\n- Lora target modules: q_proj, v_proj,\n\n\nThat is:\n\n```\npython finetune.py \\\n --base_model='decapoda-research/llama-7b-hf' \\\n --num_epochs=8 \\\n --cutoff_len=1024 \\\n --group_by_length \\\n --output_dir='./bactrian-ro-7b-lora' \\\n --lora_target_modules='[q_proj,v_proj]' \\\n --lora_r=16 \\\n --micro_batch_size=32\n```\n\nInstructions for running it can be found at https://github.com/MBZUAI-nlp/Bactrian-X.\n\n### Discussion of Biases\n\n(1) Translation bias; (2) Potential English-culture bias in the translated dataset.\n\n\n### Citation Information\n\n```\n@misc{li2023bactrianx,\n title={Bactrian-X : A Multilingual Replicable Instruction-Following Model with Low-Rank Adaptation}, \n author={Haonan Li and Fajri Koto and Minghao Wu and Alham Fikri Aji and Timothy Baldwin},\n year={2023},\n eprint={2305.15011},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n\n```\n"},"matched_bigbio_names":{"kind":"null"},"is_bionlp":{"kind":"string","value":"Non_BioNLP"},"model_cards":{"kind":"string","value":"\nThis repo contains a low-rank adapter (LoRA) for LLaMA-7b \nfit on the [Stanford-Alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca)\nand [databricks-dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data) data in Romanian.\n\n### Dataset Creation\n\n1. English Instructions: The English instructions are obtained from [alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca), and [dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data).\n2. Instruction Translation: The instructions (and inputs) are translated into the target languages using Google Translation API (conducted on April 2023).\n3. Output Generation: We generate output from `gpt-3.5-turbo` for each language (conducted on April 2023).\n\n

\n\n### Training Parameters\n\nThe code for training the model is provided in our [github](https://github.com/mbzuai-nlp/Bactrian-X), which is adapted from [Alpaca-LoRA](https://github.com/tloen/alpaca-lora).\nThis version of the weights was trained with the following hyperparameters:\n\n- Epochs: 8\n- Batch size: 128\n- Cutoff length: 512\n- Learning rate: 3e-4\n- Lora _r_: 16\n- Lora target modules: q_proj, v_proj,\n\n\nThat is:\n\n```\npython finetune.py \\\n --base_model='decapoda-research/llama-7b-hf' \\\n --num_epochs=8 \\\n --cutoff_len=1024 \\\n --group_by_length \\\n --output_dir='./bactrian-ro-7b-lora' \\\n --lora_target_modules='[q_proj,v_proj]' \\\n --lora_r=16 \\\n --micro_batch_size=32\n```\n\nInstructions for running it can be found at https://github.com/MBZUAI-nlp/Bactrian-X.\n\n### Discussion of Biases\n\n(1) Translation bias; (2) Potential English-culture bias in the translated dataset.\n\n\n### Citation Information\n\n```\n@misc{li2023bactrianx,\n title={Bactrian-X : A Multilingual Replicable Instruction-Following Model with Low-Rank Adaptation}, \n author={Haonan Li and Fajri Koto and Minghao Wu and Alham Fikri Aji and Timothy Baldwin},\n year={2023},\n eprint={2305.15011},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n\n```\n"},"metadata":{"kind":"string","value":"{\"license\": \"mit\"}"},"source":{"kind":"string","value":"task"},"matched_task":{"kind":"list like","value":["TRANSLATION"],"string":"[\n \"TRANSLATION\"\n]"},"__index_level_0__":{"kind":"number","value":45579,"string":"45,579"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":437,"numItemsPerPage":100,"numTotalItems":45038,"offset":43700,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODkzMDIyNiwic3ViIjoiL2RhdGFzZXRzL0V1YW55dS9jb21iaW5lZF9iaW9ubHBfdGFza19kYXRhc2V0X21vZGVsX2NhcmRzIiwiZXhwIjoxNzU4OTMzODI2LCJpc3MiOiJodHRwczovL2h1Z2dpbmdmYWNlLmNvIn0.QI6Tp3Yk0OPVwH2mgidQqPx7insVXdAdrhgEYfnciRQqMcPe-Ve1Rkr4y2OPDCKUg6eEAkO_YHirrVTHqHnbCw","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">
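The JSON state above is a Hugging Face dataset-viewer payload: each `rowIdx`/`cells` object is one dataset row, and `paginationData` (pageIndex 437 at 100 rows per page, 45,038 rows in total) places this page at offset 43700. As a rough sketch, a page like this can also be fetched from the public datasets-server `/rows` endpoint; the dataset repo id is not spelled out on this page, so the name below is a placeholder, and the config and split names are assumptions:

```python
import requests

# Placeholder dataset id: the page state does not name the repo, so
# substitute the real one. Offset/length mirror the paginationData above
# (pageIndex 437 at 100 rows per page -> offset 43700).
DATASET = "some-user/model-cards-dataset"  # assumption, not confirmed

resp = requests.get(
    "https://datasets-server.huggingface.co/rows",
    params={
        "dataset": DATASET,
        "config": "default",  # assumed single default config
        "split": "train",     # assumed single train split
        "offset": 43700,
        "length": 100,
    },
    timeout=30,
)
resp.raise_for_status()
payload = resp.json()

# Each element corresponds to one rowIdx/cells object embedded above.
for item in payload["rows"][:3]:
    row = item["row"]
    print(item["row_idx"], row["id"], row["task_category"])
```

Unlike the embedded page state, the API should return plain `row` dicts rather than the `kind`/`value` wrappers seen above.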
Dataset schema (from the viewer's column summary):

| Column | Type | Lengths / range / classes |
|---|---|---|
| `id` | string | lengths 6-113 |
| `author` | string | lengths 2-36 |
| `task_category` | string | 42 classes |
| `tags` | list | lengths 1-4.05k |
| `created_time` | timestamp[ns, tz=UTC] | 2022-03-02 23:29:04 to 2025-04-10 08:38:38 |
| `last_modified` | string (date) | 2020-05-14 13:13:12 to 2025-04-19 04:15:39 |
| `downloads` | int64 | 0 to 118M |
| `likes` | int64 | 0 to 4.86k |
| `README` | string | lengths 30 to 1.01M |
| `matched_bigbio_names` | list | lengths 1-8 |
| `is_bionlp` | string | 3 classes |
| `model_cards` | string | lengths 0 to 1M |
| `metadata` | string | lengths 2 to 698k |
| `source` | string | 2 classes |
| `matched_task` | list | lengths 1-10 |
| `__index_level_0__` | int64 | 0 to 46.9k |
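With the schema above in hand, a sketch of loading and slicing the dataset with the `datasets` library follows; the repo id is again a placeholder and the split name an assumption:

```python
from datasets import load_dataset

# Placeholder repo id; "train" is an assumed split name.
ds = load_dataset("some-user/model-cards-dataset", split="train")

print(ds.column_names)  # should line up with the schema table above
print(len(ds))          # 45,038 rows according to the pagination metadata

# "Non_BioNLP" is one of the three is_bionlp classes visible in the rows;
# keep everything else to inspect the BioNLP-matched records.
subset = ds.filter(lambda r: r["is_bionlp"] != "Non_BioNLP")
print(subset[0]["id"], subset[0]["matched_task"])
```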
First row rendered on this page:

- id: SKDevil9377/manga
- author: SKDevil9377
- task_category: translation
- tags: ["adapter-transformers", "art", "translation", "en", "dataset:fka/awesome-chatgpt-prompts", "arxiv:1910.09700", "license:apache-2.0", "region:us"]
- created_time: 2024-08-22T16:21:24Z
- last_modified: 2024-08-22T16:25:20+00:00
- downloads: 0
- likes: 0
- README:
---
datasets:
- fka/awesome-chatgpt-prompts
language:
- en
library_name: adapter-transformers
license: apache-2.0
metrics:
- accuracy
pipeline_tag: translation
tags:
- art
---

# Model Card for Model ID

<!-- Provide a quick summary of what the model is/does. -->

This modelcard aims to be a base template for new models. It has been generated using [this raw template](https://github.com/huggingface/huggingface_hub/blob/main/src/huggingface_hub/templates/modelcard_template.md?plain=1).

## Model Details

### Model Description

<!-- Provide a longer summary of what this model is. -->

- **Developed by:** [More Information Needed]
- **Funded by [optional]:** [More Information Needed]
- **Shared by [optional]:** [More Information Needed]
- **Model type:** [More Information Needed]
- **Language(s) (NLP):** [More Information Needed]
- **License:** [More Information Needed]
- **Finetuned from model [optional]:** [More Information Needed]

### Model Sources [optional]

<!-- Provide the basic links for the model. -->

- **Repository:** [More Information Needed]
- **Paper [optional]:** [More Information Needed]
- **Demo [optional]:** [More Information Needed]

## Uses

<!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. -->

### Direct Use

<!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. -->

[More Information Needed]

### Downstream Use [optional]

<!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app -->

[More Information Needed]

### Out-of-Scope Use

<!-- This section addresses misuse, malicious use, and uses that the model will not work well for. -->

[More Information Needed]

## Bias, Risks, and Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

[More Information Needed]

### Recommendations

<!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. -->

Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations.

## How to Get Started with the Model

Use the code below to get started with the model.

[More Information Needed]

## Training Details

### Training Data

<!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. -->

[More Information Needed]

### Training Procedure

<!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. -->

#### Preprocessing [optional]

[More Information Needed]

#### Training Hyperparameters

- **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision -->

#### Speeds, Sizes, Times [optional]

<!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. -->

[More Information Needed]

## Evaluation

<!-- This section describes the evaluation protocols and provides the results. -->

### Testing Data, Factors & Metrics

#### Testing Data

<!-- This should link to a Dataset Card if possible. -->

[More Information Needed]

#### Factors

<!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. -->

[More Information Needed]

#### Metrics

<!-- These are the evaluation metrics being used, ideally with a description of why. -->

[More Information Needed]

### Results

[More Information Needed]

#### Summary

## Model Examination [optional]

<!-- Relevant interpretability work for the model goes here -->

[More Information Needed]

## Environmental Impact

<!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly -->

Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).

- **Hardware Type:** [More Information Needed]
- **Hours used:** [More Information Needed]
- **Cloud Provider:** [More Information Needed]
- **Compute Region:** [More Information Needed]
- **Carbon Emitted:** [More Information Needed]

## Technical Specifications [optional]

### Model Architecture and Objective

[More Information Needed]

### Compute Infrastructure

[More Information Needed]

#### Hardware

[More Information Needed]

#### Software

[More Information Needed]

## Citation [optional]

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

[More Information Needed]

**APA:**

[More Information Needed]

## Glossary [optional]

<!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. -->

[More Information Needed]

## More Information [optional]

[More Information Needed]

## Model Card Authors [optional]

[More Information Needed]

## Model Card Contact

[More Information Needed]
null
Non_BioNLP
{"datasets": ["fka/awesome-chatgpt-prompts"], "language": ["en"], "library_name": "adapter-transformers", "license": "apache-2.0", "metrics": ["accuracy"], "pipeline_tag": "translation", "tags": ["art"]}
task
[ "TRANSLATION" ]
45,472
Helsinki-NLP/opus-mt-zh-sv
Helsinki-NLP
translation
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "zh", "sv", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2023-08-16T12:09:17+00:00
1,119
0
---
language:
- zh
- sv
license: apache-2.0
tags:
- translation
---

### zho-swe

* source group: Chinese
* target group: Swedish
* OPUS readme: [zho-swe](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/zho-swe/README.md)
* model: transformer-align
* source language(s): cmn cmn_Bopo cmn_Hani cmn_Latn
* target language(s): swe
* pre-processing: normalization + SentencePiece (spm32k,spm32k)
* download original weights: [opus-2020-06-17.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/zho-swe/opus-2020-06-17.zip)
* test set translations: [opus-2020-06-17.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/zho-swe/opus-2020-06-17.test.txt)
* test set scores: [opus-2020-06-17.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/zho-swe/opus-2020-06-17.eval.txt)

## Benchmarks

| testset | BLEU | chr-F |
|-----------------------|-------|-------|
| Tatoeba-test.zho.swe | 46.1 | 0.621 |

### System Info:
- hf_name: zho-swe
- source_languages: zho
- target_languages: swe
- opus_readme_url: https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/zho-swe/README.md
- original_repo: Tatoeba-Challenge
- tags: ['translation']
- languages: ['zh', 'sv']
- src_constituents: {'cmn_Hans', 'nan', 'nan_Hani', 'gan', 'yue', 'cmn_Kana', 'yue_Hani', 'wuu_Bopo', 'cmn_Latn', 'yue_Hira', 'cmn_Hani', 'cjy_Hans', 'cmn', 'lzh_Hang', 'lzh_Hira', 'cmn_Hant', 'lzh_Bopo', 'zho', 'zho_Hans', 'zho_Hant', 'lzh_Hani', 'yue_Hang', 'wuu', 'yue_Kana', 'wuu_Latn', 'yue_Bopo', 'cjy_Hant', 'yue_Hans', 'lzh', 'cmn_Hira', 'lzh_Yiii', 'lzh_Hans', 'cmn_Bopo', 'cmn_Hang', 'hak_Hani', 'cmn_Yiii', 'yue_Hant', 'lzh_Kana', 'wuu_Hani'}
- tgt_constituents: {'swe'}
- src_multilingual: False
- tgt_multilingual: False
- prepro: normalization + SentencePiece (spm32k,spm32k)
- url_model: https://object.pouta.csc.fi/Tatoeba-MT-models/zho-swe/opus-2020-06-17.zip
- url_test_set: https://object.pouta.csc.fi/Tatoeba-MT-models/zho-swe/opus-2020-06-17.test.txt
- src_alpha3: zho
- tgt_alpha3: swe
- short_pair: zh-sv
- chrF2_score: 0.621
- bleu: 46.1
- brevity_penalty: 0.956
- ref_len: 6223.0
- src_name: Chinese
- tgt_name: Swedish
- train_date: 2020-06-17
- src_alpha2: zh
- tgt_alpha2: sv
- prefer_old: False
- long_pair: zho-swe
- helsinki_git_sha: 480fcbe0ee1bf4774bcbe6226ad9f58e63f6c535
- transformers_git_sha: 2207e5d8cb224e954a7cba69fa4ac2309e9ff30b
- port_machine: brutasse
- port_time: 2020-08-21-14:41
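The card stops short of a usage example. Below is a minimal sketch using the standard transformers Marian classes; the example sentence is made up for illustration, and generation settings are left at their defaults:

```python
from transformers import MarianMTModel, MarianTokenizer

model_name = "Helsinki-NLP/opus-mt-zh-sv"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

# SentencePiece normalization and segmentation are handled by the tokenizer.
batch = tokenizer(["我喜欢学习瑞典语。"], return_tensors="pt", padding=True)
outputs = model.generate(**batch)
print(tokenizer.batch_decode(outputs, skip_special_tokens=True))
```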
null
Non_BioNLP
{"language": ["zh", "sv"], "license": "apache-2.0", "tags": ["translation"]}
task
[ "TRANSLATION" ]
45,474
infgrad/stella-large-zh
infgrad
feature-extraction
[ "transformers", "pytorch", "bert", "feature-extraction", "mteb", "arxiv:1612.00796", "model-index", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2023-09-10T07:51:33Z
2024-04-06T02:48:23+00:00
29
26
--- tags: - mteb model-index: - name: stella-large-zh results: - task: type: STS dataset: name: MTEB AFQMC type: C-MTEB/AFQMC config: default split: validation revision: None metrics: - type: cos_sim_pearson value: 51.61327712288466 - type: cos_sim_spearman value: 54.48753880097122 - type: euclidean_pearson value: 52.68387289931342 - type: euclidean_spearman value: 54.48753879487172 - type: manhattan_pearson value: 52.635406372350026 - type: manhattan_spearman value: 54.447390526317044 - task: type: STS dataset: name: MTEB ATEC type: C-MTEB/ATEC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 53.39178036427897 - type: cos_sim_spearman value: 54.450028472876134 - type: euclidean_pearson value: 56.87300033777842 - type: euclidean_spearman value: 54.45002622056799 - type: manhattan_pearson value: 56.84326996138951 - type: manhattan_spearman value: 54.433880144849375 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (zh) type: mteb/amazon_reviews_multi config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.574000000000005 - type: f1 value: 38.87775700245793 - task: type: STS dataset: name: MTEB BQ type: C-MTEB/BQ config: default split: test revision: None metrics: - type: cos_sim_pearson value: 60.80957921870066 - type: cos_sim_spearman value: 62.37707350882237 - type: euclidean_pearson value: 61.29032932843765 - type: euclidean_spearman value: 62.37707350713817 - type: manhattan_pearson value: 61.23028102541801 - type: manhattan_spearman value: 62.31280056582247 - task: type: Clustering dataset: name: MTEB CLSClusteringP2P type: C-MTEB/CLSClusteringP2P config: default split: test revision: None metrics: - type: v_measure value: 40.27066616318565 - task: type: Clustering dataset: name: MTEB CLSClusteringS2S type: C-MTEB/CLSClusteringS2S config: default split: test revision: None metrics: - type: v_measure value: 37.503323644484716 - task: type: Reranking dataset: name: MTEB CMedQAv1 type: C-MTEB/CMedQAv1-reranking config: default split: test revision: None metrics: - type: map value: 84.69295191328456 - type: mrr value: 87.08992063492063 - task: type: Reranking dataset: name: MTEB CMedQAv2 type: C-MTEB/CMedQAv2-reranking config: default split: test revision: None metrics: - type: map value: 85.22650690364465 - type: mrr value: 87.72158730158729 - task: type: Retrieval dataset: name: MTEB CmedqaRetrieval type: C-MTEB/CmedqaRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 23.54 - type: map_at_10 value: 35.591 - type: map_at_100 value: 37.549 - type: map_at_1000 value: 37.663000000000004 - type: map_at_3 value: 31.405 - type: map_at_5 value: 33.792 - type: mrr_at_1 value: 36.359 - type: mrr_at_10 value: 44.624 - type: mrr_at_100 value: 45.660000000000004 - type: mrr_at_1000 value: 45.707 - type: mrr_at_3 value: 42.002 - type: mrr_at_5 value: 43.535000000000004 - type: ndcg_at_1 value: 36.359 - type: ndcg_at_10 value: 42.28 - type: ndcg_at_100 value: 49.997 - type: ndcg_at_1000 value: 51.966 - type: ndcg_at_3 value: 36.851 - type: ndcg_at_5 value: 39.249 - type: precision_at_1 value: 36.359 - type: precision_at_10 value: 9.542 - type: precision_at_100 value: 1.582 - type: precision_at_1000 value: 0.183 - type: precision_at_3 value: 20.913999999999998 - type: precision_at_5 value: 15.404000000000002 - type: recall_at_1 value: 23.54 - type: recall_at_10 value: 53.005 - type: recall_at_100 value: 85.085 - type: recall_at_1000 value: 98.21 - type: 
recall_at_3 value: 36.944 - type: recall_at_5 value: 44.137 - task: type: PairClassification dataset: name: MTEB Cmnli type: C-MTEB/CMNLI config: default split: validation revision: None metrics: - type: cos_sim_accuracy value: 76.16355983162958 - type: cos_sim_ap value: 85.14228023901842 - type: cos_sim_f1 value: 77.86752827140549 - type: cos_sim_precision value: 72.18450479233228 - type: cos_sim_recall value: 84.5218611176058 - type: dot_accuracy value: 76.16355983162958 - type: dot_ap value: 85.16266644596179 - type: dot_f1 value: 77.86752827140549 - type: dot_precision value: 72.18450479233228 - type: dot_recall value: 84.5218611176058 - type: euclidean_accuracy value: 76.16355983162958 - type: euclidean_ap value: 85.14227717790371 - type: euclidean_f1 value: 77.86752827140549 - type: euclidean_precision value: 72.18450479233228 - type: euclidean_recall value: 84.5218611176058 - type: manhattan_accuracy value: 75.99518941671678 - type: manhattan_ap value: 85.10764940972825 - type: manhattan_f1 value: 77.80804694048618 - type: manhattan_precision value: 70.49553825707233 - type: manhattan_recall value: 86.81318681318682 - type: max_accuracy value: 76.16355983162958 - type: max_ap value: 85.16266644596179 - type: max_f1 value: 77.86752827140549 - task: type: Retrieval dataset: name: MTEB CovidRetrieval type: C-MTEB/CovidRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 73.762 - type: map_at_10 value: 81.76299999999999 - type: map_at_100 value: 81.974 - type: map_at_1000 value: 81.977 - type: map_at_3 value: 80.23400000000001 - type: map_at_5 value: 81.189 - type: mrr_at_1 value: 74.18299999999999 - type: mrr_at_10 value: 81.792 - type: mrr_at_100 value: 81.994 - type: mrr_at_1000 value: 81.997 - type: mrr_at_3 value: 80.277 - type: mrr_at_5 value: 81.221 - type: ndcg_at_1 value: 74.078 - type: ndcg_at_10 value: 85.195 - type: ndcg_at_100 value: 86.041 - type: ndcg_at_1000 value: 86.111 - type: ndcg_at_3 value: 82.171 - type: ndcg_at_5 value: 83.90100000000001 - type: precision_at_1 value: 74.078 - type: precision_at_10 value: 9.684 - type: precision_at_100 value: 1.004 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 29.470000000000002 - type: precision_at_5 value: 18.567 - type: recall_at_1 value: 73.762 - type: recall_at_10 value: 95.785 - type: recall_at_100 value: 99.368 - type: recall_at_1000 value: 99.895 - type: recall_at_3 value: 87.724 - type: recall_at_5 value: 91.93900000000001 - task: type: Retrieval dataset: name: MTEB DuRetrieval type: C-MTEB/DuRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 25.911 - type: map_at_10 value: 80.656 - type: map_at_100 value: 83.446 - type: map_at_1000 value: 83.485 - type: map_at_3 value: 55.998000000000005 - type: map_at_5 value: 70.577 - type: mrr_at_1 value: 90.14999999999999 - type: mrr_at_10 value: 93.35900000000001 - type: mrr_at_100 value: 93.419 - type: mrr_at_1000 value: 93.423 - type: mrr_at_3 value: 93.133 - type: mrr_at_5 value: 93.26100000000001 - type: ndcg_at_1 value: 90.14999999999999 - type: ndcg_at_10 value: 87.806 - type: ndcg_at_100 value: 90.4 - type: ndcg_at_1000 value: 90.776 - type: ndcg_at_3 value: 86.866 - type: ndcg_at_5 value: 85.619 - type: precision_at_1 value: 90.14999999999999 - type: precision_at_10 value: 42.045 - type: precision_at_100 value: 4.814 - type: precision_at_1000 value: 0.49 - type: precision_at_3 value: 78.0 - type: precision_at_5 value: 65.62 - type: recall_at_1 value: 25.911 - type: recall_at_10 value: 
88.942 - type: recall_at_100 value: 97.56700000000001 - type: recall_at_1000 value: 99.62 - type: recall_at_3 value: 58.361 - type: recall_at_5 value: 75.126 - task: type: Retrieval dataset: name: MTEB EcomRetrieval type: C-MTEB/EcomRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 46.2 - type: map_at_10 value: 56.309 - type: map_at_100 value: 56.977 - type: map_at_1000 value: 56.995 - type: map_at_3 value: 53.55 - type: map_at_5 value: 55.19 - type: mrr_at_1 value: 46.2 - type: mrr_at_10 value: 56.309 - type: mrr_at_100 value: 56.977 - type: mrr_at_1000 value: 56.995 - type: mrr_at_3 value: 53.55 - type: mrr_at_5 value: 55.19 - type: ndcg_at_1 value: 46.2 - type: ndcg_at_10 value: 61.656 - type: ndcg_at_100 value: 64.714 - type: ndcg_at_1000 value: 65.217 - type: ndcg_at_3 value: 56.022000000000006 - type: ndcg_at_5 value: 58.962 - type: precision_at_1 value: 46.2 - type: precision_at_10 value: 7.86 - type: precision_at_100 value: 0.9249999999999999 - type: precision_at_1000 value: 0.097 - type: precision_at_3 value: 21.067 - type: precision_at_5 value: 14.06 - type: recall_at_1 value: 46.2 - type: recall_at_10 value: 78.60000000000001 - type: recall_at_100 value: 92.5 - type: recall_at_1000 value: 96.5 - type: recall_at_3 value: 63.2 - type: recall_at_5 value: 70.3 - task: type: Classification dataset: name: MTEB IFlyTek type: C-MTEB/IFlyTek-classification config: default split: validation revision: None metrics: - type: accuracy value: 47.03347441323585 - type: f1 value: 35.50895794566714 - task: type: Classification dataset: name: MTEB JDReview type: C-MTEB/JDReview-classification config: default split: test revision: None metrics: - type: accuracy value: 86.73545966228893 - type: ap value: 55.43694740493539 - type: f1 value: 81.47218440859787 - task: type: STS dataset: name: MTEB LCQMC type: C-MTEB/LCQMC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 70.49478085579923 - type: cos_sim_spearman value: 76.28442852235379 - type: euclidean_pearson value: 74.90910715249527 - type: euclidean_spearman value: 76.28443517178847 - type: manhattan_pearson value: 74.90744903779758 - type: manhattan_spearman value: 76.2886829916495 - task: type: Retrieval dataset: name: MTEB MMarcoRetrieval type: C-MTEB/MMarcoRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 64.798 - type: map_at_10 value: 74.263 - type: map_at_100 value: 74.59 - type: map_at_1000 value: 74.601 - type: map_at_3 value: 72.382 - type: map_at_5 value: 73.59700000000001 - type: mrr_at_1 value: 67.049 - type: mrr_at_10 value: 74.86500000000001 - type: mrr_at_100 value: 75.155 - type: mrr_at_1000 value: 75.165 - type: mrr_at_3 value: 73.21600000000001 - type: mrr_at_5 value: 74.259 - type: ndcg_at_1 value: 67.049 - type: ndcg_at_10 value: 78.104 - type: ndcg_at_100 value: 79.56400000000001 - type: ndcg_at_1000 value: 79.85600000000001 - type: ndcg_at_3 value: 74.54499999999999 - type: ndcg_at_5 value: 76.587 - type: precision_at_1 value: 67.049 - type: precision_at_10 value: 9.493 - type: precision_at_100 value: 1.022 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 28.189999999999998 - type: precision_at_5 value: 18.003 - type: recall_at_1 value: 64.798 - type: recall_at_10 value: 89.328 - type: recall_at_100 value: 95.916 - type: recall_at_1000 value: 98.223 - type: recall_at_3 value: 79.93599999999999 - type: recall_at_5 value: 84.789 - task: type: Classification dataset: name: MTEB MassiveIntentClassification 
(zh-CN) type: mteb/amazon_massive_intent config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.01815736381977 - type: f1 value: 61.07806329750582 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-CN) type: mteb/amazon_massive_scenario config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.94754539340954 - type: f1 value: 68.76446930296682 - task: type: Retrieval dataset: name: MTEB MedicalRetrieval type: C-MTEB/MedicalRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 50.1 - type: map_at_10 value: 56.406 - type: map_at_100 value: 56.958 - type: map_at_1000 value: 57.007 - type: map_at_3 value: 55.083000000000006 - type: map_at_5 value: 55.952999999999996 - type: mrr_at_1 value: 50.1 - type: mrr_at_10 value: 56.401999999999994 - type: mrr_at_100 value: 56.955 - type: mrr_at_1000 value: 57.004 - type: mrr_at_3 value: 55.05 - type: mrr_at_5 value: 55.95 - type: ndcg_at_1 value: 50.1 - type: ndcg_at_10 value: 59.384 - type: ndcg_at_100 value: 62.339 - type: ndcg_at_1000 value: 63.756 - type: ndcg_at_3 value: 56.657999999999994 - type: ndcg_at_5 value: 58.267 - type: precision_at_1 value: 50.1 - type: precision_at_10 value: 6.87 - type: precision_at_100 value: 0.832 - type: precision_at_1000 value: 0.095 - type: precision_at_3 value: 20.4 - type: precision_at_5 value: 13.04 - type: recall_at_1 value: 50.1 - type: recall_at_10 value: 68.7 - type: recall_at_100 value: 83.2 - type: recall_at_1000 value: 94.6 - type: recall_at_3 value: 61.199999999999996 - type: recall_at_5 value: 65.2 - task: type: Reranking dataset: name: MTEB MMarcoReranking type: C-MTEB/Mmarco-reranking config: default split: dev revision: None metrics: - type: map value: 27.159122893681587 - type: mrr value: 25.659126984126985 - task: type: Classification dataset: name: MTEB MultilingualSentiment type: C-MTEB/MultilingualSentiment-classification config: default split: validation revision: None metrics: - type: accuracy value: 73.02666666666667 - type: f1 value: 72.47691397067602 - task: type: PairClassification dataset: name: MTEB Ocnli type: C-MTEB/OCNLI config: default split: validation revision: None metrics: - type: cos_sim_accuracy value: 67.0817541959935 - type: cos_sim_ap value: 72.29133043915637 - type: cos_sim_f1 value: 72.71207689093188 - type: cos_sim_precision value: 60.16597510373444 - type: cos_sim_recall value: 91.86906019007391 - type: dot_accuracy value: 67.0817541959935 - type: dot_ap value: 72.29133043915637 - type: dot_f1 value: 72.71207689093188 - type: dot_precision value: 60.16597510373444 - type: dot_recall value: 91.86906019007391 - type: euclidean_accuracy value: 67.0817541959935 - type: euclidean_ap value: 72.29133043915637 - type: euclidean_f1 value: 72.71207689093188 - type: euclidean_precision value: 60.16597510373444 - type: euclidean_recall value: 91.86906019007391 - type: manhattan_accuracy value: 66.91932864103953 - type: manhattan_ap value: 72.20070509521395 - type: manhattan_f1 value: 72.52839713925118 - type: manhattan_precision value: 60.27972027972028 - type: manhattan_recall value: 91.02428722280888 - type: max_accuracy value: 67.0817541959935 - type: max_ap value: 72.29133043915637 - type: max_f1 value: 72.71207689093188 - task: type: Classification dataset: name: MTEB OnlineShopping type: C-MTEB/OnlineShopping-classification config: default split: test revision: None metrics: - type: accuracy value: 
90.75000000000001 - type: ap value: 87.99706544930007 - type: f1 value: 90.72973221476978 - task: type: STS dataset: name: MTEB PAWSX type: C-MTEB/PAWSX config: default split: test revision: None metrics: - type: cos_sim_pearson value: 33.57372874898899 - type: cos_sim_spearman value: 37.9718472605281 - type: euclidean_pearson value: 38.52264008741102 - type: euclidean_spearman value: 37.97184654854654 - type: manhattan_pearson value: 38.50412571398273 - type: manhattan_spearman value: 37.98038173979437 - task: type: STS dataset: name: MTEB QBQTC type: C-MTEB/QBQTC config: default split: test revision: None metrics: - type: cos_sim_pearson value: 37.510457667606225 - type: cos_sim_spearman value: 37.83522430820119 - type: euclidean_pearson value: 36.65815519443564 - type: euclidean_spearman value: 37.83519816393499 - type: manhattan_pearson value: 36.66835898210608 - type: manhattan_spearman value: 37.85390202705368 - task: type: STS dataset: name: MTEB STS22 (zh) type: mteb/sts22-crosslingual-sts config: zh split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 66.9953337569138 - type: cos_sim_spearman value: 67.27632129468024 - type: euclidean_pearson value: 65.83716645437758 - type: euclidean_spearman value: 67.27632129468024 - type: manhattan_pearson value: 65.81209103940279 - type: manhattan_spearman value: 67.26678679870099 - task: type: STS dataset: name: MTEB STSB type: C-MTEB/STSB config: default split: test revision: None metrics: - type: cos_sim_pearson value: 75.73719311549382 - type: cos_sim_spearman value: 75.71173848950517 - type: euclidean_pearson value: 75.23070020894484 - type: euclidean_spearman value: 75.71173839940812 - type: manhattan_pearson value: 75.23517292603057 - type: manhattan_spearman value: 75.74250916645184 - task: type: Reranking dataset: name: MTEB T2Reranking type: C-MTEB/T2Reranking config: default split: dev revision: None metrics: - type: map value: 66.8596523608508 - type: mrr value: 76.9288884590171 - task: type: Retrieval dataset: name: MTEB T2Retrieval type: C-MTEB/T2Retrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 26.618000000000002 - type: map_at_10 value: 74.884 - type: map_at_100 value: 78.65299999999999 - type: map_at_1000 value: 78.724 - type: map_at_3 value: 52.507999999999996 - type: map_at_5 value: 64.52799999999999 - type: mrr_at_1 value: 88.453 - type: mrr_at_10 value: 91.157 - type: mrr_at_100 value: 91.263 - type: mrr_at_1000 value: 91.268 - type: mrr_at_3 value: 90.672 - type: mrr_at_5 value: 90.96499999999999 - type: ndcg_at_1 value: 88.453 - type: ndcg_at_10 value: 82.759 - type: ndcg_at_100 value: 86.709 - type: ndcg_at_1000 value: 87.41499999999999 - type: ndcg_at_3 value: 84.194 - type: ndcg_at_5 value: 82.645 - type: precision_at_1 value: 88.453 - type: precision_at_10 value: 41.369 - type: precision_at_100 value: 4.9910000000000005 - type: precision_at_1000 value: 0.515 - type: precision_at_3 value: 73.79400000000001 - type: precision_at_5 value: 61.80799999999999 - type: recall_at_1 value: 26.618000000000002 - type: recall_at_10 value: 81.772 - type: recall_at_100 value: 94.55 - type: recall_at_1000 value: 98.184 - type: recall_at_3 value: 54.26499999999999 - type: recall_at_5 value: 67.963 - task: type: Classification dataset: name: MTEB TNews type: C-MTEB/TNews-classification config: default split: validation revision: None metrics: - type: accuracy value: 50.690000000000005 - type: f1 value: 48.77079213417325 - task: type: Clustering 
dataset: name: MTEB ThuNewsClusteringP2P type: C-MTEB/ThuNewsClusteringP2P config: default split: test revision: None metrics: - type: v_measure value: 62.14566804144758 - task: type: Clustering dataset: name: MTEB ThuNewsClusteringS2S type: C-MTEB/ThuNewsClusteringS2S config: default split: test revision: None metrics: - type: v_measure value: 54.66890415410679 - task: type: Retrieval dataset: name: MTEB VideoRetrieval type: C-MTEB/VideoRetrieval config: default split: dev revision: None metrics: - type: map_at_1 value: 55.900000000000006 - type: map_at_10 value: 66.188 - type: map_at_100 value: 66.67699999999999 - type: map_at_1000 value: 66.691 - type: map_at_3 value: 64.017 - type: map_at_5 value: 65.462 - type: mrr_at_1 value: 55.800000000000004 - type: mrr_at_10 value: 66.13799999999999 - type: mrr_at_100 value: 66.62700000000001 - type: mrr_at_1000 value: 66.64099999999999 - type: mrr_at_3 value: 63.967 - type: mrr_at_5 value: 65.412 - type: ndcg_at_1 value: 55.900000000000006 - type: ndcg_at_10 value: 70.961 - type: ndcg_at_100 value: 73.22 - type: ndcg_at_1000 value: 73.583 - type: ndcg_at_3 value: 66.61 - type: ndcg_at_5 value: 69.18900000000001 - type: precision_at_1 value: 55.900000000000006 - type: precision_at_10 value: 8.58 - type: precision_at_100 value: 0.9610000000000001 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 24.7 - type: precision_at_5 value: 16.06 - type: recall_at_1 value: 55.900000000000006 - type: recall_at_10 value: 85.8 - type: recall_at_100 value: 96.1 - type: recall_at_1000 value: 98.9 - type: recall_at_3 value: 74.1 - type: recall_at_5 value: 80.30000000000001 - task: type: Classification dataset: name: MTEB Waimai type: C-MTEB/waimai-classification config: default split: test revision: None metrics: - type: accuracy value: 86.77 - type: ap value: 70.21134107638184 - type: f1 value: 85.22521777795022 ---

**News**

**[2024-04-06]** Released the [puff](https://huggingface.co/infgrad/puff-base-v1) model series, **built specifically for retrieval and semantic matching, with extra weight on generalization and on private general-purpose test sets; variable embedding dimensions; bilingual (Chinese and English)**.

**[2024-02-27]** Released the stella-mrl-large-zh-v3.5-1792d model, which supports **variable embedding dimensions**.

**[2024-02-17]** Released the stella v3 series, a dialogue encoder model, and the related training data.

**[2023-10-19]** Released stella-base-en-v2: simple to use, **no prefix text required**.

**[2023-10-12]** Released stella-base-zh-v2 and stella-large-zh-v2: better results and simple to use, **no prefix text required**.

**[2023-09-11]** Released stella-base-zh and stella-large-zh.

Visit [my profile](https://huggingface.co/infgrad) for the latest models, and please share your feedback!

## stella model

stella is a general-purpose text encoder, which mainly includes the following models:

| Model Name | Model Size (GB) | Dimension | Sequence Length | Language | Need instruction for retrieval? |
|:------------------:|:---------------:|:---------:|:---------------:|:--------:|:-------------------------------:|
| stella-base-en-v2 | 0.2 | 768 | 512 | English | No |
| stella-large-zh-v2 | 0.65 | 1024 | 1024 | Chinese | No |
| stella-base-zh-v2 | 0.2 | 768 | 1024 | Chinese | No |
| stella-large-zh | 0.65 | 1024 | 1024 | Chinese | Yes |
| stella-base-zh | 0.2 | 768 | 1024 | Chinese | Yes |

The full training approach and process are documented in [blog post 1](https://zhuanlan.zhihu.com/p/655322183) and [blog post 2](https://zhuanlan.zhihu.com/p/662209559) (in Chinese); reading and discussion are welcome.

**Training data:**

1. Open-source data (wudao_base_200GB[1], m3e[2], and simclue[3]), with a focus on selecting texts longer than 512.
2. A batch of (question, paragraph) and (sentence, paragraph) pairs constructed from a general corpus using an LLM.

**Training methods:**

1. Contrastive learning loss
2. Contrastive learning loss with hard negatives (mined separately with bm25 and vector retrieval); a sketch follows at the end of this section
3. EWC (Elastic Weight Consolidation)[4]
4. cosent loss[5]
5. One data iterator per data type, with the loss computed and applied separately for each

Building on the stella models, stella-v2 uses more training data and removes the leading instructions (such as piccolo's `查询:` and `结果:`, or e5's `query:` and `passage:`) via knowledge distillation and related methods.

**Initial weights:**\
stella-base-zh and stella-large-zh use piccolo-base-zh[6] and piccolo-large-zh as their base models, respectively; position embeddings 512-1024 are initialized with hierarchically decomposed position encoding[7].\
Thanks to SenseTime Research for open-sourcing the [piccolo model series](https://huggingface.co/sensenova).
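As a concrete illustration of training methods 1 and 2 above, here is a minimal PyTorch sketch of an in-batch contrastive (InfoNCE-style) loss with one mined hard negative per query. This is a sketch under stated assumptions, not the authors' actual implementation: the function name, temperature value, and batch layout are all hypothetical.

```python
import torch
import torch.nn.functional as F

def contrastive_loss_with_hard_negatives(
    query_emb: torch.Tensor,     # (B, D) query embeddings
    pos_emb: torch.Tensor,       # (B, D) matching passage embeddings
    hard_neg_emb: torch.Tensor,  # (B, D) one mined hard negative per query (bm25- or vector-based)
    temperature: float = 0.05,   # hypothetical value; not documented in the card
) -> torch.Tensor:
    q = F.normalize(query_emb, dim=-1)
    # Candidate pool: every in-batch positive plus every hard negative.
    cands = F.normalize(torch.cat([pos_emb, hard_neg_emb], dim=0), dim=-1)  # (2B, D)
    logits = q @ cands.T / temperature                                      # (B, 2B)
    # The correct candidate for query i is its own passage at index i.
    labels = torch.arange(q.size(0), device=q.device)
    return F.cross_entropy(logits, labels)
```

Plain contrastive learning (method 1) is the same loss with the hard-negative half of the candidate pool dropped.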
## Metric

#### C-MTEB leaderboard (Chinese)

| Model Name | Model Size (GB) | Dimension | Sequence Length | Average (35) | Classification (9) | Clustering (4) | Pair Classification (2) | Reranking (4) | Retrieval (8) | STS (8) |
|:------------------:|:---------------:|:---------:|:---------------:|:------------:|:------------------:|:--------------:|:-----------------------:|:-------------:|:-------------:|:-------:|
| stella-large-zh-v2 | 0.65 | 1024 | 1024 | 65.13 | 69.05 | 49.16 | 82.68 | 66.41 | 70.14 | 58.66 |
| stella-base-zh-v2 | 0.2 | 768 | 1024 | 64.36 | 68.29 | 49.4 | 79.95 | 66.1 | 70.08 | 56.92 |
| stella-large-zh | 0.65 | 1024 | 1024 | 64.54 | 67.62 | 48.65 | 78.72 | 65.98 | 71.02 | 58.3 |
| stella-base-zh | 0.2 | 768 | 1024 | 64.16 | 67.77 | 48.7 | 76.09 | 66.95 | 71.07 | 56.54 |

#### MTEB leaderboard (English)

| Model Name | Model Size (GB) | Dimension | Sequence Length | Average (56) | Classification (12) | Clustering (11) | Pair Classification (3) | Reranking (4) | Retrieval (15) | STS (10) | Summarization (1) |
|:-----------------:|:---------------:|:---------:|:---------------:|:------------:|:-------------------:|:---------------:|:-----------------------:|:-------------:|:--------------:|:--------:|:------------------:|
| stella-base-en-v2 | 0.2 | 768 | 512 | 62.61 | 75.28 | 44.9 | 86.45 | 58.77 | 50.1 | 83.02 | 32.52 |

#### Reproduce our results

**C-MTEB:**

```python
import torch
import numpy as np
from typing import List
from mteb import MTEB
from sentence_transformers import SentenceTransformer


class FastTextEncoder:
    def __init__(self, model_name):
        # Half-precision GPU inference with a 512-token window.
        self.model = SentenceTransformer(model_name).cuda().half().eval()
        self.model.max_seq_length = 512

    def encode(self, input_texts: List[str], *args, **kwargs):
        # Deduplicate and sort by length (longest first) to minimize padding per batch.
        new_sens = list(set(input_texts))
        new_sens.sort(key=lambda x: len(x), reverse=True)
        vecs = self.model.encode(
            new_sens,
            normalize_embeddings=True,
            convert_to_numpy=True,
            batch_size=256
        ).astype(np.float32)
        # Map the deduplicated vectors back to the original input order.
        sen2arrid = {sen: idx for idx, sen in enumerate(new_sens)}
        vecs = vecs[[sen2arrid[sen] for sen in input_texts]]
        torch.cuda.empty_cache()
        return vecs


if __name__ == '__main__':
    model_name = "infgrad/stella-base-zh-v2"
    output_folder = "zh_mteb_results/stella-base-zh-v2"
    task_names = [t.description["name"] for t in MTEB(task_langs=['zh', 'zh-CN']).tasks]
    model = FastTextEncoder(model_name)
    for task in task_names:
        MTEB(tasks=[task], task_langs=['zh', 'zh-CN']).run(model, output_folder=output_folder)
```

**MTEB:**

You can use the official script to reproduce our results: [scripts/run_mteb_english.py](https://github.com/embeddings-benchmark/mteb/blob/main/scripts/run_mteb_english.py)

#### Evaluation for long text

In practice, we found that almost all C-MTEB evaluation texts are shorter than 512 tokens and, worse, that for the texts longer than 512 the key information sits in the first half. Here is an example from CMRC2018 illustrating the problem:

```
question: 《无双大蛇z》是谁旗下ω-force开发的动作游戏?
passage: 《无双大蛇z》是光荣旗下ω-force开发的动作游戏,于2009年3月12日登陆索尼playstation3,并于2009年11月27日推......
```
The passage is over 800 characters, well beyond 512, yet for this question the first 40 or so characters are already enough for retrieval; the extra content is just noise to the model and actually hurts performance.\
In short, existing datasets have two problems:\
1) too few texts longer than 512;\
2) even for those, only the first 512 characters matter for retrieval;\
so they **cannot accurately measure a model's long-text encoding ability.**

To address this, we collected relevant open-source data, filtered it with rules, and assembled six long-text test sets:

- CMRC2018: general encyclopedia
- CAIL: legal reading comprehension
- DRCD: traditional-Chinese encyclopedia, converted to simplified
- Military: military-domain QA
- Squad: English reading comprehension, translated to Chinese
- Multifieldqa_zh: Tsinghua's benchmark for long-text understanding in large models[9]

The processing rule keeps examples whose answer appears after position 512; shorter test items are undersampled so that short and long texts occur at a ratio of roughly 1:2, meaning the model must understand both. Except for the Military set, the other five test sets can be downloaded here: https://drive.google.com/file/d/1WC6EWaCbVgz-vPMDFH4TwAMkLyh5WNcN/view?usp=sharing

The evaluation metric is Recall@5 (a sketch of the computation follows the table); results:

| Dataset | piccolo-base-zh | piccolo-large-zh | bge-base-zh | bge-large-zh | stella-base-zh | stella-large-zh |
|:---------------:|:---------------:|:----------------:|:-----------:|:------------:|:--------------:|:---------------:|
| CMRC2018 | 94.34 | 93.82 | 91.56 | 93.12 | 96.08 | 95.56 |
| CAIL | 28.04 | 33.64 | 31.22 | 33.94 | 34.62 | 37.18 |
| DRCD | 78.25 | 77.9 | 78.34 | 80.26 | 86.14 | 84.58 |
| Military | 76.61 | 73.06 | 75.65 | 75.81 | 83.71 | 80.48 |
| Squad | 91.21 | 86.61 | 87.87 | 90.38 | 93.31 | 91.21 |
| Multifieldqa_zh | 81.41 | 83.92 | 83.92 | 83.42 | 79.9 | 80.4 |
| **Average** | 74.98 | 74.83 | 74.76 | 76.15 | **78.96** | **78.24** |

**Note:** because long-text evaluation data are scarce, the train splits were also used during construction. If you evaluate on these sets yourself, check your model's training data to avoid leakage.
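For reference, a minimal sketch of the Recall@5 computation used above, assuming one gold passage per query (a hypothetical helper, not the authors' evaluation script):

```python
from typing import List

def recall_at_5(ranked_ids: List[List[str]], gold_ids: List[str]) -> float:
    """Percentage of queries whose gold passage appears among the top-5 retrieved ids."""
    # ranked_ids[i] lists retrieved passage ids for query i, best first.
    hits = sum(gold in ranked[:5] for ranked, gold in zip(ranked_ids, gold_ids))
    return 100.0 * hits / len(gold_ids)
```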
return_tensors="pt", max_length=512, truncation=True, ) attention_mask = batch_data["attention_mask"] model_output = model(**batch_data) last_hidden = model_output.last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0) vectors = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] vectors = normalize(vectors, norm="l2", axis=1, ) print(vectors.shape) # 2,768 ``` ## Training Detail **硬件:** 单卡A100-80GB **环境:** torch1.13.*; transformers-trainer + deepspeed + gradient-checkpointing **学习率:** 1e-6 **batch_size:** base模型为1024,额外增加20%的难负例;large模型为768,额外增加20%的难负例 **数据量:** 第一版模型约100万,其中用LLM构造的数据约有200K. LLM模型大小为13b。v2系列模型到了2000万训练数据。 ## ToDoList **评测的稳定性:** 评测过程中发现Clustering任务会和官方的结果不一致,大约有±0.0x的小差距,原因是聚类代码没有设置random_seed,差距可以忽略不计,不影响评测结论。 **更高质量的长文本训练和测试数据:** 训练数据多是用13b模型构造的,肯定会存在噪声。 测试数据基本都是从mrc数据整理来的,所以问题都是factoid类型,不符合真实分布。 **OOD的性能:** 虽然近期出现了很多向量编码模型,但是对于不是那么通用的domain,这一众模型包括stella、openai和cohere, 它们的效果均比不上BM25。 ## Reference 1. https://www.scidb.cn/en/detail?dataSetId=c6a3fe684227415a9db8e21bac4a15ab 2. https://github.com/wangyuxinwhy/uniem 3. https://github.com/CLUEbenchmark/SimCLUE 4. https://arxiv.org/abs/1612.00796 5. https://kexue.fm/archives/8847 6. https://huggingface.co/sensenova/piccolo-base-zh 7. https://kexue.fm/archives/7947 8. https://github.com/FlagOpen/FlagEmbedding 9. https://github.com/THUDM/LongBench
null
Non_BioNLP
{"tags": ["mteb"], "model-index": [{"name": "stella-large-zh", "results": [{"task": {"type": "STS"}, "dataset": {"name": "MTEB AFQMC", "type": "C-MTEB/AFQMC", "config": "default", "split": "validation", "revision": "None"}, "metrics": [{"type": "cos_sim_pearson", "value": 51.61327712288466}, {"type": "cos_sim_spearman", "value": 54.48753880097122}, {"type": "euclidean_pearson", "value": 52.68387289931342}, {"type": "euclidean_spearman", "value": 54.48753879487172}, {"type": "manhattan_pearson", "value": 52.635406372350026}, {"type": "manhattan_spearman", "value": 54.447390526317044}]}, {"task": {"type": "STS"}, "dataset": {"name": "MTEB ATEC", "type": "C-MTEB/ATEC", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "cos_sim_pearson", "value": 53.39178036427897}, {"type": "cos_sim_spearman", "value": 54.450028472876134}, {"type": "euclidean_pearson", "value": 56.87300033777842}, {"type": "euclidean_spearman", "value": 54.45002622056799}, {"type": "manhattan_pearson", "value": 56.84326996138951}, {"type": "manhattan_spearman", "value": 54.433880144849375}]}, {"task": {"type": "Classification"}, "dataset": {"name": "MTEB AmazonReviewsClassification (zh)", "type": "mteb/amazon_reviews_multi", "config": "zh", "split": "test", "revision": "1399c76144fd37290681b995c656ef9b2e06e26d"}, "metrics": [{"type": "accuracy", "value": 40.574000000000005}, {"type": "f1", "value": 38.87775700245793}]}, {"task": {"type": "STS"}, "dataset": {"name": "MTEB BQ", "type": "C-MTEB/BQ", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "cos_sim_pearson", "value": 60.80957921870066}, {"type": "cos_sim_spearman", "value": 62.37707350882237}, {"type": "euclidean_pearson", "value": 61.29032932843765}, {"type": "euclidean_spearman", "value": 62.37707350713817}, {"type": "manhattan_pearson", "value": 61.23028102541801}, {"type": "manhattan_spearman", "value": 62.31280056582247}]}, {"task": {"type": "Clustering"}, "dataset": {"name": "MTEB CLSClusteringP2P", "type": "C-MTEB/CLSClusteringP2P", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "v_measure", "value": 40.27066616318565}]}, {"task": {"type": "Clustering"}, "dataset": {"name": "MTEB CLSClusteringS2S", "type": "C-MTEB/CLSClusteringS2S", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "v_measure", "value": 37.503323644484716}]}, {"task": {"type": "Reranking"}, "dataset": {"name": "MTEB CMedQAv1", "type": "C-MTEB/CMedQAv1-reranking", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "map", "value": 84.69295191328456}, {"type": "mrr", "value": 87.08992063492063}]}, {"task": {"type": "Reranking"}, "dataset": {"name": "MTEB CMedQAv2", "type": "C-MTEB/CMedQAv2-reranking", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "map", "value": 85.22650690364465}, {"type": "mrr", "value": 87.72158730158729}]}, {"task": {"type": "Retrieval"}, "dataset": {"name": "MTEB CmedqaRetrieval", "type": "C-MTEB/CmedqaRetrieval", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map_at_1", "value": 23.54}, {"type": "map_at_10", "value": 35.591}, {"type": "map_at_100", "value": 37.549}, {"type": "map_at_1000", "value": 37.663000000000004}, {"type": "map_at_3", "value": 31.405}, {"type": "map_at_5", "value": 33.792}, {"type": "mrr_at_1", "value": 36.359}, {"type": "mrr_at_10", "value": 44.624}, {"type": "mrr_at_100", "value": 45.660000000000004}, {"type": "mrr_at_1000", "value": 
45.707}, {"type": "mrr_at_3", "value": 42.002}, {"type": "mrr_at_5", "value": 43.535000000000004}, {"type": "ndcg_at_1", "value": 36.359}, {"type": "ndcg_at_10", "value": 42.28}, {"type": "ndcg_at_100", "value": 49.997}, {"type": "ndcg_at_1000", "value": 51.966}, {"type": "ndcg_at_3", "value": 36.851}, {"type": "ndcg_at_5", "value": 39.249}, {"type": "precision_at_1", "value": 36.359}, {"type": "precision_at_10", "value": 9.542}, {"type": "precision_at_100", "value": 1.582}, {"type": "precision_at_1000", "value": 0.183}, {"type": "precision_at_3", "value": 20.913999999999998}, {"type": "precision_at_5", "value": 15.404000000000002}, {"type": "recall_at_1", "value": 23.54}, {"type": "recall_at_10", "value": 53.005}, {"type": "recall_at_100", "value": 85.085}, {"type": "recall_at_1000", "value": 98.21}, {"type": "recall_at_3", "value": 36.944}, {"type": "recall_at_5", "value": 44.137}]}, {"task": {"type": "PairClassification"}, "dataset": {"name": "MTEB Cmnli", "type": "C-MTEB/CMNLI", "config": "default", "split": "validation", "revision": "None"}, "metrics": [{"type": "cos_sim_accuracy", "value": 76.16355983162958}, {"type": "cos_sim_ap", "value": 85.14228023901842}, {"type": "cos_sim_f1", "value": 77.86752827140549}, {"type": "cos_sim_precision", "value": 72.18450479233228}, {"type": "cos_sim_recall", "value": 84.5218611176058}, {"type": "dot_accuracy", "value": 76.16355983162958}, {"type": "dot_ap", "value": 85.16266644596179}, {"type": "dot_f1", "value": 77.86752827140549}, {"type": "dot_precision", "value": 72.18450479233228}, {"type": "dot_recall", "value": 84.5218611176058}, {"type": "euclidean_accuracy", "value": 76.16355983162958}, {"type": "euclidean_ap", "value": 85.14227717790371}, {"type": "euclidean_f1", "value": 77.86752827140549}, {"type": "euclidean_precision", "value": 72.18450479233228}, {"type": "euclidean_recall", "value": 84.5218611176058}, {"type": "manhattan_accuracy", "value": 75.99518941671678}, {"type": "manhattan_ap", "value": 85.10764940972825}, {"type": "manhattan_f1", "value": 77.80804694048618}, {"type": "manhattan_precision", "value": 70.49553825707233}, {"type": "manhattan_recall", "value": 86.81318681318682}, {"type": "max_accuracy", "value": 76.16355983162958}, {"type": "max_ap", "value": 85.16266644596179}, {"type": "max_f1", "value": 77.86752827140549}]}, {"task": {"type": "Retrieval"}, "dataset": {"name": "MTEB CovidRetrieval", "type": "C-MTEB/CovidRetrieval", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map_at_1", "value": 73.762}, {"type": "map_at_10", "value": 81.76299999999999}, {"type": "map_at_100", "value": 81.974}, {"type": "map_at_1000", "value": 81.977}, {"type": "map_at_3", "value": 80.23400000000001}, {"type": "map_at_5", "value": 81.189}, {"type": "mrr_at_1", "value": 74.18299999999999}, {"type": "mrr_at_10", "value": 81.792}, {"type": "mrr_at_100", "value": 81.994}, {"type": "mrr_at_1000", "value": 81.997}, {"type": "mrr_at_3", "value": 80.277}, {"type": "mrr_at_5", "value": 81.221}, {"type": "ndcg_at_1", "value": 74.078}, {"type": "ndcg_at_10", "value": 85.195}, {"type": "ndcg_at_100", "value": 86.041}, {"type": "ndcg_at_1000", "value": 86.111}, {"type": "ndcg_at_3", "value": 82.171}, {"type": "ndcg_at_5", "value": 83.90100000000001}, {"type": "precision_at_1", "value": 74.078}, {"type": "precision_at_10", "value": 9.684}, {"type": "precision_at_100", "value": 1.004}, {"type": "precision_at_1000", "value": 0.101}, {"type": "precision_at_3", "value": 29.470000000000002}, {"type": "precision_at_5", 
"value": 18.567}, {"type": "recall_at_1", "value": 73.762}, {"type": "recall_at_10", "value": 95.785}, {"type": "recall_at_100", "value": 99.368}, {"type": "recall_at_1000", "value": 99.895}, {"type": "recall_at_3", "value": 87.724}, {"type": "recall_at_5", "value": 91.93900000000001}]}, {"task": {"type": "Retrieval"}, "dataset": {"name": "MTEB DuRetrieval", "type": "C-MTEB/DuRetrieval", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map_at_1", "value": 25.911}, {"type": "map_at_10", "value": 80.656}, {"type": "map_at_100", "value": 83.446}, {"type": "map_at_1000", "value": 83.485}, {"type": "map_at_3", "value": 55.998000000000005}, {"type": "map_at_5", "value": 70.577}, {"type": "mrr_at_1", "value": 90.14999999999999}, {"type": "mrr_at_10", "value": 93.35900000000001}, {"type": "mrr_at_100", "value": 93.419}, {"type": "mrr_at_1000", "value": 93.423}, {"type": "mrr_at_3", "value": 93.133}, {"type": "mrr_at_5", "value": 93.26100000000001}, {"type": "ndcg_at_1", "value": 90.14999999999999}, {"type": "ndcg_at_10", "value": 87.806}, {"type": "ndcg_at_100", "value": 90.4}, {"type": "ndcg_at_1000", "value": 90.776}, {"type": "ndcg_at_3", "value": 86.866}, {"type": "ndcg_at_5", "value": 85.619}, {"type": "precision_at_1", "value": 90.14999999999999}, {"type": "precision_at_10", "value": 42.045}, {"type": "precision_at_100", "value": 4.814}, {"type": "precision_at_1000", "value": 0.49}, {"type": "precision_at_3", "value": 78.0}, {"type": "precision_at_5", "value": 65.62}, {"type": "recall_at_1", "value": 25.911}, {"type": "recall_at_10", "value": 88.942}, {"type": "recall_at_100", "value": 97.56700000000001}, {"type": "recall_at_1000", "value": 99.62}, {"type": "recall_at_3", "value": 58.361}, {"type": "recall_at_5", "value": 75.126}]}, {"task": {"type": "Retrieval"}, "dataset": {"name": "MTEB EcomRetrieval", "type": "C-MTEB/EcomRetrieval", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map_at_1", "value": 46.2}, {"type": "map_at_10", "value": 56.309}, {"type": "map_at_100", "value": 56.977}, {"type": "map_at_1000", "value": 56.995}, {"type": "map_at_3", "value": 53.55}, {"type": "map_at_5", "value": 55.19}, {"type": "mrr_at_1", "value": 46.2}, {"type": "mrr_at_10", "value": 56.309}, {"type": "mrr_at_100", "value": 56.977}, {"type": "mrr_at_1000", "value": 56.995}, {"type": "mrr_at_3", "value": 53.55}, {"type": "mrr_at_5", "value": 55.19}, {"type": "ndcg_at_1", "value": 46.2}, {"type": "ndcg_at_10", "value": 61.656}, {"type": "ndcg_at_100", "value": 64.714}, {"type": "ndcg_at_1000", "value": 65.217}, {"type": "ndcg_at_3", "value": 56.022000000000006}, {"type": "ndcg_at_5", "value": 58.962}, {"type": "precision_at_1", "value": 46.2}, {"type": "precision_at_10", "value": 7.86}, {"type": "precision_at_100", "value": 0.9249999999999999}, {"type": "precision_at_1000", "value": 0.097}, {"type": "precision_at_3", "value": 21.067}, {"type": "precision_at_5", "value": 14.06}, {"type": "recall_at_1", "value": 46.2}, {"type": "recall_at_10", "value": 78.60000000000001}, {"type": "recall_at_100", "value": 92.5}, {"type": "recall_at_1000", "value": 96.5}, {"type": "recall_at_3", "value": 63.2}, {"type": "recall_at_5", "value": 70.3}]}, {"task": {"type": "Classification"}, "dataset": {"name": "MTEB IFlyTek", "type": "C-MTEB/IFlyTek-classification", "config": "default", "split": "validation", "revision": "None"}, "metrics": [{"type": "accuracy", "value": 47.03347441323585}, {"type": "f1", "value": 35.50895794566714}]}, {"task": {"type": 
"Classification"}, "dataset": {"name": "MTEB JDReview", "type": "C-MTEB/JDReview-classification", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "accuracy", "value": 86.73545966228893}, {"type": "ap", "value": 55.43694740493539}, {"type": "f1", "value": 81.47218440859787}]}, {"task": {"type": "STS"}, "dataset": {"name": "MTEB LCQMC", "type": "C-MTEB/LCQMC", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "cos_sim_pearson", "value": 70.49478085579923}, {"type": "cos_sim_spearman", "value": 76.28442852235379}, {"type": "euclidean_pearson", "value": 74.90910715249527}, {"type": "euclidean_spearman", "value": 76.28443517178847}, {"type": "manhattan_pearson", "value": 74.90744903779758}, {"type": "manhattan_spearman", "value": 76.2886829916495}]}, {"task": {"type": "Retrieval"}, "dataset": {"name": "MTEB MMarcoRetrieval", "type": "C-MTEB/MMarcoRetrieval", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map_at_1", "value": 64.798}, {"type": "map_at_10", "value": 74.263}, {"type": "map_at_100", "value": 74.59}, {"type": "map_at_1000", "value": 74.601}, {"type": "map_at_3", "value": 72.382}, {"type": "map_at_5", "value": 73.59700000000001}, {"type": "mrr_at_1", "value": 67.049}, {"type": "mrr_at_10", "value": 74.86500000000001}, {"type": "mrr_at_100", "value": 75.155}, {"type": "mrr_at_1000", "value": 75.165}, {"type": "mrr_at_3", "value": 73.21600000000001}, {"type": "mrr_at_5", "value": 74.259}, {"type": "ndcg_at_1", "value": 67.049}, {"type": "ndcg_at_10", "value": 78.104}, {"type": "ndcg_at_100", "value": 79.56400000000001}, {"type": "ndcg_at_1000", "value": 79.85600000000001}, {"type": "ndcg_at_3", "value": 74.54499999999999}, {"type": "ndcg_at_5", "value": 76.587}, {"type": "precision_at_1", "value": 67.049}, {"type": "precision_at_10", "value": 9.493}, {"type": "precision_at_100", "value": 1.022}, {"type": "precision_at_1000", "value": 0.105}, {"type": "precision_at_3", "value": 28.189999999999998}, {"type": "precision_at_5", "value": 18.003}, {"type": "recall_at_1", "value": 64.798}, {"type": "recall_at_10", "value": 89.328}, {"type": "recall_at_100", "value": 95.916}, {"type": "recall_at_1000", "value": 98.223}, {"type": "recall_at_3", "value": 79.93599999999999}, {"type": "recall_at_5", "value": 84.789}]}, {"task": {"type": "Classification"}, "dataset": {"name": "MTEB MassiveIntentClassification (zh-CN)", "type": "mteb/amazon_massive_intent", "config": "zh-CN", "split": "test", "revision": "31efe3c427b0bae9c22cbb560b8f15491cc6bed7"}, "metrics": [{"type": "accuracy", "value": 64.01815736381977}, {"type": "f1", "value": 61.07806329750582}]}, {"task": {"type": "Classification"}, "dataset": {"name": "MTEB MassiveScenarioClassification (zh-CN)", "type": "mteb/amazon_massive_scenario", "config": "zh-CN", "split": "test", "revision": "7d571f92784cd94a019292a1f45445077d0ef634"}, "metrics": [{"type": "accuracy", "value": 68.94754539340954}, {"type": "f1", "value": 68.76446930296682}]}, {"task": {"type": "Retrieval"}, "dataset": {"name": "MTEB MedicalRetrieval", "type": "C-MTEB/MedicalRetrieval", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map_at_1", "value": 50.1}, {"type": "map_at_10", "value": 56.406}, {"type": "map_at_100", "value": 56.958}, {"type": "map_at_1000", "value": 57.007}, {"type": "map_at_3", "value": 55.083000000000006}, {"type": "map_at_5", "value": 55.952999999999996}, {"type": "mrr_at_1", "value": 50.1}, {"type": "mrr_at_10", "value": 
56.401999999999994}, {"type": "mrr_at_100", "value": 56.955}, {"type": "mrr_at_1000", "value": 57.004}, {"type": "mrr_at_3", "value": 55.05}, {"type": "mrr_at_5", "value": 55.95}, {"type": "ndcg_at_1", "value": 50.1}, {"type": "ndcg_at_10", "value": 59.384}, {"type": "ndcg_at_100", "value": 62.339}, {"type": "ndcg_at_1000", "value": 63.756}, {"type": "ndcg_at_3", "value": 56.657999999999994}, {"type": "ndcg_at_5", "value": 58.267}, {"type": "precision_at_1", "value": 50.1}, {"type": "precision_at_10", "value": 6.87}, {"type": "precision_at_100", "value": 0.832}, {"type": "precision_at_1000", "value": 0.095}, {"type": "precision_at_3", "value": 20.4}, {"type": "precision_at_5", "value": 13.04}, {"type": "recall_at_1", "value": 50.1}, {"type": "recall_at_10", "value": 68.7}, {"type": "recall_at_100", "value": 83.2}, {"type": "recall_at_1000", "value": 94.6}, {"type": "recall_at_3", "value": 61.199999999999996}, {"type": "recall_at_5", "value": 65.2}]}, {"task": {"type": "Reranking"}, "dataset": {"name": "MTEB MMarcoReranking", "type": "C-MTEB/Mmarco-reranking", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map", "value": 27.159122893681587}, {"type": "mrr", "value": 25.659126984126985}]}, {"task": {"type": "Classification"}, "dataset": {"name": "MTEB MultilingualSentiment", "type": "C-MTEB/MultilingualSentiment-classification", "config": "default", "split": "validation", "revision": "None"}, "metrics": [{"type": "accuracy", "value": 73.02666666666667}, {"type": "f1", "value": 72.47691397067602}]}, {"task": {"type": "PairClassification"}, "dataset": {"name": "MTEB Ocnli", "type": "C-MTEB/OCNLI", "config": "default", "split": "validation", "revision": "None"}, "metrics": [{"type": "cos_sim_accuracy", "value": 67.0817541959935}, {"type": "cos_sim_ap", "value": 72.29133043915637}, {"type": "cos_sim_f1", "value": 72.71207689093188}, {"type": "cos_sim_precision", "value": 60.16597510373444}, {"type": "cos_sim_recall", "value": 91.86906019007391}, {"type": "dot_accuracy", "value": 67.0817541959935}, {"type": "dot_ap", "value": 72.29133043915637}, {"type": "dot_f1", "value": 72.71207689093188}, {"type": "dot_precision", "value": 60.16597510373444}, {"type": "dot_recall", "value": 91.86906019007391}, {"type": "euclidean_accuracy", "value": 67.0817541959935}, {"type": "euclidean_ap", "value": 72.29133043915637}, {"type": "euclidean_f1", "value": 72.71207689093188}, {"type": "euclidean_precision", "value": 60.16597510373444}, {"type": "euclidean_recall", "value": 91.86906019007391}, {"type": "manhattan_accuracy", "value": 66.91932864103953}, {"type": "manhattan_ap", "value": 72.20070509521395}, {"type": "manhattan_f1", "value": 72.52839713925118}, {"type": "manhattan_precision", "value": 60.27972027972028}, {"type": "manhattan_recall", "value": 91.02428722280888}, {"type": "max_accuracy", "value": 67.0817541959935}, {"type": "max_ap", "value": 72.29133043915637}, {"type": "max_f1", "value": 72.71207689093188}]}, {"task": {"type": "Classification"}, "dataset": {"name": "MTEB OnlineShopping", "type": "C-MTEB/OnlineShopping-classification", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "accuracy", "value": 90.75000000000001}, {"type": "ap", "value": 87.99706544930007}, {"type": "f1", "value": 90.72973221476978}]}, {"task": {"type": "STS"}, "dataset": {"name": "MTEB PAWSX", "type": "C-MTEB/PAWSX", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "cos_sim_pearson", "value": 33.57372874898899}, {"type": 
"cos_sim_spearman", "value": 37.9718472605281}, {"type": "euclidean_pearson", "value": 38.52264008741102}, {"type": "euclidean_spearman", "value": 37.97184654854654}, {"type": "manhattan_pearson", "value": 38.50412571398273}, {"type": "manhattan_spearman", "value": 37.98038173979437}]}, {"task": {"type": "STS"}, "dataset": {"name": "MTEB QBQTC", "type": "C-MTEB/QBQTC", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "cos_sim_pearson", "value": 37.510457667606225}, {"type": "cos_sim_spearman", "value": 37.83522430820119}, {"type": "euclidean_pearson", "value": 36.65815519443564}, {"type": "euclidean_spearman", "value": 37.83519816393499}, {"type": "manhattan_pearson", "value": 36.66835898210608}, {"type": "manhattan_spearman", "value": 37.85390202705368}]}, {"task": {"type": "STS"}, "dataset": {"name": "MTEB STS22 (zh)", "type": "mteb/sts22-crosslingual-sts", "config": "zh", "split": "test", "revision": "6d1ba47164174a496b7fa5d3569dae26a6813b80"}, "metrics": [{"type": "cos_sim_pearson", "value": 66.9953337569138}, {"type": "cos_sim_spearman", "value": 67.27632129468024}, {"type": "euclidean_pearson", "value": 65.83716645437758}, {"type": "euclidean_spearman", "value": 67.27632129468024}, {"type": "manhattan_pearson", "value": 65.81209103940279}, {"type": "manhattan_spearman", "value": 67.26678679870099}]}, {"task": {"type": "STS"}, "dataset": {"name": "MTEB STSB", "type": "C-MTEB/STSB", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "cos_sim_pearson", "value": 75.73719311549382}, {"type": "cos_sim_spearman", "value": 75.71173848950517}, {"type": "euclidean_pearson", "value": 75.23070020894484}, {"type": "euclidean_spearman", "value": 75.71173839940812}, {"type": "manhattan_pearson", "value": 75.23517292603057}, {"type": "manhattan_spearman", "value": 75.74250916645184}]}, {"task": {"type": "Reranking"}, "dataset": {"name": "MTEB T2Reranking", "type": "C-MTEB/T2Reranking", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map", "value": 66.8596523608508}, {"type": "mrr", "value": 76.9288884590171}]}, {"task": {"type": "Retrieval"}, "dataset": {"name": "MTEB T2Retrieval", "type": "C-MTEB/T2Retrieval", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map_at_1", "value": 26.618000000000002}, {"type": "map_at_10", "value": 74.884}, {"type": "map_at_100", "value": 78.65299999999999}, {"type": "map_at_1000", "value": 78.724}, {"type": "map_at_3", "value": 52.507999999999996}, {"type": "map_at_5", "value": 64.52799999999999}, {"type": "mrr_at_1", "value": 88.453}, {"type": "mrr_at_10", "value": 91.157}, {"type": "mrr_at_100", "value": 91.263}, {"type": "mrr_at_1000", "value": 91.268}, {"type": "mrr_at_3", "value": 90.672}, {"type": "mrr_at_5", "value": 90.96499999999999}, {"type": "ndcg_at_1", "value": 88.453}, {"type": "ndcg_at_10", "value": 82.759}, {"type": "ndcg_at_100", "value": 86.709}, {"type": "ndcg_at_1000", "value": 87.41499999999999}, {"type": "ndcg_at_3", "value": 84.194}, {"type": "ndcg_at_5", "value": 82.645}, {"type": "precision_at_1", "value": 88.453}, {"type": "precision_at_10", "value": 41.369}, {"type": "precision_at_100", "value": 4.9910000000000005}, {"type": "precision_at_1000", "value": 0.515}, {"type": "precision_at_3", "value": 73.79400000000001}, {"type": "precision_at_5", "value": 61.80799999999999}, {"type": "recall_at_1", "value": 26.618000000000002}, {"type": "recall_at_10", "value": 81.772}, {"type": "recall_at_100", "value": 94.55}, {"type": 
"recall_at_1000", "value": 98.184}, {"type": "recall_at_3", "value": 54.26499999999999}, {"type": "recall_at_5", "value": 67.963}]}, {"task": {"type": "Classification"}, "dataset": {"name": "MTEB TNews", "type": "C-MTEB/TNews-classification", "config": "default", "split": "validation", "revision": "None"}, "metrics": [{"type": "accuracy", "value": 50.690000000000005}, {"type": "f1", "value": 48.77079213417325}]}, {"task": {"type": "Clustering"}, "dataset": {"name": "MTEB ThuNewsClusteringP2P", "type": "C-MTEB/ThuNewsClusteringP2P", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "v_measure", "value": 62.14566804144758}]}, {"task": {"type": "Clustering"}, "dataset": {"name": "MTEB ThuNewsClusteringS2S", "type": "C-MTEB/ThuNewsClusteringS2S", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "v_measure", "value": 54.66890415410679}]}, {"task": {"type": "Retrieval"}, "dataset": {"name": "MTEB VideoRetrieval", "type": "C-MTEB/VideoRetrieval", "config": "default", "split": "dev", "revision": "None"}, "metrics": [{"type": "map_at_1", "value": 55.900000000000006}, {"type": "map_at_10", "value": 66.188}, {"type": "map_at_100", "value": 66.67699999999999}, {"type": "map_at_1000", "value": 66.691}, {"type": "map_at_3", "value": 64.017}, {"type": "map_at_5", "value": 65.462}, {"type": "mrr_at_1", "value": 55.800000000000004}, {"type": "mrr_at_10", "value": 66.13799999999999}, {"type": "mrr_at_100", "value": 66.62700000000001}, {"type": "mrr_at_1000", "value": 66.64099999999999}, {"type": "mrr_at_3", "value": 63.967}, {"type": "mrr_at_5", "value": 65.412}, {"type": "ndcg_at_1", "value": 55.900000000000006}, {"type": "ndcg_at_10", "value": 70.961}, {"type": "ndcg_at_100", "value": 73.22}, {"type": "ndcg_at_1000", "value": 73.583}, {"type": "ndcg_at_3", "value": 66.61}, {"type": "ndcg_at_5", "value": 69.18900000000001}, {"type": "precision_at_1", "value": 55.900000000000006}, {"type": "precision_at_10", "value": 8.58}, {"type": "precision_at_100", "value": 0.9610000000000001}, {"type": "precision_at_1000", "value": 0.099}, {"type": "precision_at_3", "value": 24.7}, {"type": "precision_at_5", "value": 16.06}, {"type": "recall_at_1", "value": 55.900000000000006}, {"type": "recall_at_10", "value": 85.8}, {"type": "recall_at_100", "value": 96.1}, {"type": "recall_at_1000", "value": 98.9}, {"type": "recall_at_3", "value": 74.1}, {"type": "recall_at_5", "value": 80.30000000000001}]}, {"task": {"type": "Classification"}, "dataset": {"name": "MTEB Waimai", "type": "C-MTEB/waimai-classification", "config": "default", "split": "test", "revision": "None"}, "metrics": [{"type": "accuracy", "value": 86.77}, {"type": "ap", "value": 70.21134107638184}, {"type": "f1", "value": 85.22521777795022}]}]}]}
task
[ "SUMMARIZATION" ]
45,475
NeoChen1024/dolphin-2.9.3-mistral-nemo-12b-exl2-6.0bpw-h8
NeoChen1024
null
[ "safetensors", "mistral", "generated_from_trainer", "axolotl", "dataset:cognitivecomputations/Dolphin-2.9", "dataset:teknium/OpenHermes-2.5", "dataset:m-a-p/CodeFeedback-Filtered-Instruction", "dataset:cognitivecomputations/dolphin-coder", "dataset:cognitivecomputations/samantha-data", "dataset:microsoft/orca-math-word-problems-200k", "dataset:Locutusque/function-calling-chatml", "dataset:internlm/Agent-FLAN", "base_model:cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b", "base_model:quantized:cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b", "license:apache-2.0", "6-bit", "exl2", "region:us" ]
2024-10-03T04:34:47Z
2024-10-03T05:32:47+00:00
12
0
---
base_model:
- cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b
datasets:
- cognitivecomputations/Dolphin-2.9
- teknium/OpenHermes-2.5
- m-a-p/CodeFeedback-Filtered-Instruction
- cognitivecomputations/dolphin-coder
- cognitivecomputations/samantha-data
- microsoft/orca-math-word-problems-200k
- Locutusque/function-calling-chatml
- internlm/Agent-FLAN
license: apache-2.0
tags:
- generated_from_trainer
- axolotl
---

# Dolphin 2.9.3 Mistral Nemo 12b 🐬

Curated and trained by Eric Hartford and Cognitive Computations

[![Discord](https://img.shields.io/discord/1156064224225808488?logo=Discord&logoColor=%23ffffff&label=Discord&link=https%3A%2F%2Fdiscord.gg%2FtCMkMDDHwm)](https://discord.gg/h3K4XGj2RH)

Discord: https://discord.gg/h3K4XGj2RH

<img src="https://hf.fast360.xyz/production/uploads/63111b2d88942700629f5771/ldkN1J0WIDQwU4vutGYiD.png" width="600" />

Our appreciation for the sponsors of Dolphin 2.9.3:

- [Crusoe Cloud](https://crusoe.ai/) - provided an excellent on-demand 8xL40S node

This model is based on mistralai/Mistral-Nemo-Base-2407 and is governed by the Apache 2.0 license. The base model has 128K context, and our fine-tuning used a sequence length of 8192.

Dolphin 2.9.3 uses the ChatML prompt template format. Example:

```
<|im_start|>system
You are Dolphin, a helpful AI assistant.<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
```

Dolphin-2.9.3 has a variety of instruction-following, conversational, and coding skills. It also has initial agentic abilities and supports function calling.

Dolphin is uncensored. We have filtered the dataset to remove alignment and bias. This makes the model more compliant. You are advised to implement your own alignment layer before exposing the model as a service. It will be highly compliant with any requests, even unethical ones. Please read my blog post about uncensored models: https://erichartford.com/uncensored-models. You are responsible for any content you create using this model. Enjoy responsibly.

Dolphin is licensed under the Apache 2.0 license. We grant permission for any use, including commercial. Dolphin was trained on data generated from GPT4, among other models.
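For programmatic use, here is a minimal sketch that builds the ChatML prompt above with the `transformers` chat-template API. It assumes the tokenizer ships a ChatML chat template (as the axolotl config below indicates) and loads it from the unquantized base repo, since this listing is an exl2 quant; both choices are illustrative assumptions.

```python
from transformers import AutoTokenizer

# Assumption: the base (unquantized) repo; the exl2 quant itself would be
# loaded with an exl2-capable backend, not plain transformers.
tok = AutoTokenizer.from_pretrained("cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b")

messages = [
    {"role": "system", "content": "You are Dolphin, a helpful AI assistant."},
    {"role": "user", "content": "{prompt}"},
]

# add_generation_prompt=True appends the trailing "<|im_start|>assistant" turn
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```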
## Evals <details><summary>See evals</summary> ``` | Tasks |Version|Filter|n-shot| Metric | |Value | |Stderr| |-----------------------------------------------------------|-------|------|-----:|-----------------------|---|-----:|---|------| |leaderboard |N/A |none | 0|acc |↑ |0.3437|± |0.0043| | | |none | 0|acc_norm |↑ |0.5076|± |0.0053| | | |none | 0|exact_match |↑ |0.0536|± |0.0061| | | |none | 0|inst_level_loose_acc |↑ |0.4388|± |N/A | | | |none | 0|inst_level_strict_acc |↑ |0.3741|± |N/A | | | |none | 0|prompt_level_loose_acc |↑ |0.3105|± |0.0199| | | |none | 0|prompt_level_strict_acc|↑ |0.2477|± |0.0186| | - leaderboard_bbh |N/A |none | 3|acc_norm |↑ |0.5549|± |0.0061| | - leaderboard_bbh_boolean_expressions | 0|none | 3|acc_norm |↑ |0.8640|± |0.0217| | - leaderboard_bbh_causal_judgement | 0|none | 3|acc_norm |↑ |0.6417|± |0.0352| | - leaderboard_bbh_date_understanding | 0|none | 3|acc_norm |↑ |0.6080|± |0.0309| | - leaderboard_bbh_disambiguation_qa | 0|none | 3|acc_norm |↑ |0.6480|± |0.0303| | - leaderboard_bbh_formal_fallacies | 0|none | 3|acc_norm |↑ |0.5360|± |0.0316| | - leaderboard_bbh_geometric_shapes | 0|none | 3|acc_norm |↑ |0.5240|± |0.0316| | - leaderboard_bbh_hyperbaton | 0|none | 3|acc_norm |↑ |0.6440|± |0.0303| | - leaderboard_bbh_logical_deduction_five_objects | 0|none | 3|acc_norm |↑ |0.4600|± |0.0316| | - leaderboard_bbh_logical_deduction_seven_objects | 0|none | 3|acc_norm |↑ |0.4680|± |0.0316| | - leaderboard_bbh_logical_deduction_three_objects | 0|none | 3|acc_norm |↑ |0.7000|± |0.0290| | - leaderboard_bbh_movie_recommendation | 0|none | 3|acc_norm |↑ |0.8160|± |0.0246| | - leaderboard_bbh_navigate | 0|none | 3|acc_norm |↑ |0.6040|± |0.0310| | - leaderboard_bbh_object_counting | 0|none | 3|acc_norm |↑ |0.3680|± |0.0306| | - leaderboard_bbh_penguins_in_a_table | 0|none | 3|acc_norm |↑ |0.5548|± |0.0413| | - leaderboard_bbh_reasoning_about_colored_objects | 0|none | 3|acc_norm |↑ |0.6320|± |0.0306| | - leaderboard_bbh_ruin_names | 0|none | 3|acc_norm |↑ |0.7440|± |0.0277| | - leaderboard_bbh_salient_translation_error_detection | 0|none | 3|acc_norm |↑ |0.5280|± |0.0316| | - leaderboard_bbh_snarks | 0|none | 3|acc_norm |↑ |0.6292|± |0.0363| | - leaderboard_bbh_sports_understanding | 0|none | 3|acc_norm |↑ |0.8040|± |0.0252| | - leaderboard_bbh_temporal_sequences | 0|none | 3|acc_norm |↑ |0.4680|± |0.0316| | - leaderboard_bbh_tracking_shuffled_objects_five_objects | 0|none | 3|acc_norm |↑ |0.2160|± |0.0261| | - leaderboard_bbh_tracking_shuffled_objects_seven_objects| 0|none | 3|acc_norm |↑ |0.1160|± |0.0203| | - leaderboard_bbh_tracking_shuffled_objects_three_objects| 0|none | 3|acc_norm |↑ |0.3000|± |0.0290| | - leaderboard_bbh_web_of_lies | 0|none | 3|acc_norm |↑ |0.4880|± |0.0317| | - leaderboard_gpqa |N/A |none | 0|acc_norm |↑ |0.3146|± |0.0135| | - leaderboard_gpqa_diamond | 1|none | 0|acc_norm |↑ |0.3182|± |0.0332| | - leaderboard_gpqa_extended | 1|none | 0|acc_norm |↑ |0.3187|± |0.0200| | - leaderboard_gpqa_main | 1|none | 0|acc_norm |↑ |0.3080|± |0.0218| | - leaderboard_ifeval | 2|none | 0|inst_level_loose_acc |↑ |0.4388|± |N/A | | | |none | 0|inst_level_strict_acc |↑ |0.3741|± |N/A | | | |none | 0|prompt_level_loose_acc |↑ |0.3105|± |0.0199| | | |none | 0|prompt_level_strict_acc|↑ |0.2477|± |0.0186| | - leaderboard_math_algebra_hard | 1|none | 4|exact_match |↑ |0.0749|± |0.0150| | - leaderboard_math_counting_and_prob_hard | 1|none | 4|exact_match |↑ |0.0244|± |0.0140| | - leaderboard_math_geometry_hard | 1|none | 4|exact_match |↑ |0.0227|± |0.0130| | - 
leaderboard_math_hard |N/A |none | 4|exact_match |↑ |0.0536|± |0.0061| | - leaderboard_math_intermediate_algebra_hard | 1|none | 4|exact_match |↑ |0.0250|± |0.0093| | - leaderboard_math_num_theory_hard | 1|none | 4|exact_match |↑ |0.0390|± |0.0156| | - leaderboard_math_prealgebra_hard | 1|none | 4|exact_match |↑ |0.1295|± |0.0242| | - leaderboard_math_precalculus_hard | 1|none | 4|exact_match |↑ |0.0296|± |0.0146| | - leaderboard_mmlu_pro | 0.1|none | 5|acc |↑ |0.3437|± |0.0043| | - leaderboard_musr |N/A |none | 0|acc_norm |↑ |0.4511|± |0.0178| | - leaderboard_musr_murder_mysteries | 1|none | 0|acc_norm |↑ |0.5880|± |0.0312| | - leaderboard_musr_object_placements | 1|none | 0|acc_norm |↑ |0.3438|± |0.0297| | - leaderboard_musr_team_allocation | 1|none | 0|acc_norm |↑ |0.4240|± |0.0313| | Groups |Version|Filter|n-shot| Metric | |Value | |Stderr| |------------------------|-------|------|-----:|-----------------------|---|-----:|---|------| |leaderboard |N/A |none | 0|acc |↑ |0.3437|± |0.0043| | | |none | 0|acc_norm |↑ |0.5076|± |0.0053| | | |none | 0|exact_match |↑ |0.0536|± |0.0061| | | |none | 0|inst_level_loose_acc |↑ |0.4388|± |N/A | | | |none | 0|inst_level_strict_acc |↑ |0.3741|± |N/A | | | |none | 0|prompt_level_loose_acc |↑ |0.3105|± |0.0199| | | |none | 0|prompt_level_strict_acc|↑ |0.2477|± |0.0186| | - leaderboard_bbh |N/A |none | 3|acc_norm |↑ |0.5549|± |0.0061| | - leaderboard_gpqa |N/A |none | 0|acc_norm |↑ |0.3146|± |0.0135| | - leaderboard_math_hard|N/A |none | 4|exact_match |↑ |0.0536|± |0.0061| | - leaderboard_musr |N/A |none | 0|acc_norm |↑ |0.4511|± |0.0178| ``` </details><br> ## Training <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) <details><summary>See axolotl config</summary> axolotl version: `0.4.1` ```yaml base_model: /workspace/models/Mistral-Nemo-Base-2407 model_type: AutoModelForCausalLM tokenizer_type: AutoTokenizer load_in_8bit: false # load_in_4bit: true strict: false datasets: - path: /workspace/datasets/dolphin-2.9.3/dolphin201-sharegpt2.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/SystemChat_filtered_sharegpt.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/SystemChat_multilingual_sharegpt.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/dolphin-coder-translate-sharegpt2.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/dolphin-coder-codegen-sharegpt2.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/m-a-p_Code-Feedback-sharegpt-unfiltered.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/m-a-p_CodeFeedback-Filtered-Instruction-sharegpt-unfiltered.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/not_samantha_norefusals.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/Orca-Math-resort-unfiltered.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/agent_instruct_react_unfiltered.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/toolbench_instruct_j1s1_3k_unfiltered.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/toolbench_negative_unfiltered.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/toolbench_react_10p_unfiltered.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/toolbench_tflan_cot_30p_unfiltered.jsonl type: sharegpt conversation: chatml - path: /workspace/datasets/dolphin-2.9.3/openhermes200k_unfiltered.jsonl type: sharegpt conversation: chatml chat_template: chatml # adapter: qlora # lora_r: 128 # lora_alpha: 16 # lora_modules_to_save: [embed_tokens, lm_head] # lora_dropout: 0.05 # lora_target_linear: true unfrozen_parameters: - ^lm_head.weight$ - ^model.embed_tokens.weight$ - input_layernorm - model.norm - post_attention_layernorm - self_attn.rotary_emb # mlp.down_proj layers - model.layers.0.mlp.down_proj - model.layers.1.mlp.down_proj - model.layers.4.mlp.down_proj - model.layers.37.mlp.down_proj - model.layers.24.mlp.down_proj - model.layers.2.mlp.down_proj - model.layers.38.mlp.down_proj - model.layers.35.mlp.down_proj - model.layers.25.mlp.down_proj - model.layers.6.mlp.down_proj - model.layers.22.mlp.down_proj - model.layers.23.mlp.down_proj - model.layers.3.mlp.down_proj - model.layers.21.mlp.down_proj - model.layers.5.mlp.down_proj - model.layers.28.mlp.down_proj - model.layers.20.mlp.down_proj - model.layers.26.mlp.down_proj - model.layers.19.mlp.down_proj - model.layers.34.mlp.down_proj # mlp.gate_proj layers - model.layers.2.mlp.gate_proj - model.layers.1.mlp.gate_proj - model.layers.3.mlp.gate_proj - model.layers.5.mlp.gate_proj - model.layers.4.mlp.gate_proj - model.layers.35.mlp.gate_proj - model.layers.36.mlp.gate_proj - model.layers.37.mlp.gate_proj - model.layers.38.mlp.gate_proj - model.layers.34.mlp.gate_proj - 
model.layers.33.mlp.gate_proj - model.layers.8.mlp.gate_proj - model.layers.32.mlp.gate_proj - model.layers.6.mlp.gate_proj - model.layers.28.mlp.gate_proj - model.layers.26.mlp.gate_proj - model.layers.30.mlp.gate_proj - model.layers.23.mlp.gate_proj - model.layers.29.mlp.gate_proj - model.layers.27.mlp.gate_proj # mlp.up_proj layers - model.layers.3.mlp.up_proj - model.layers.4.mlp.up_proj - model.layers.6.mlp.up_proj - model.layers.2.mlp.up_proj - model.layers.5.mlp.up_proj - model.layers.8.mlp.up_proj - model.layers.10.mlp.up_proj - model.layers.9.mlp.up_proj - model.layers.7.mlp.up_proj - model.layers.0.mlp.up_proj - model.layers.17.mlp.up_proj - model.layers.15.mlp.up_proj - model.layers.22.mlp.up_proj - model.layers.18.mlp.up_proj - model.layers.16.mlp.up_proj - model.layers.11.mlp.up_proj - model.layers.21.mlp.up_proj - model.layers.23.mlp.up_proj - model.layers.20.mlp.up_proj - model.layers.27.mlp.up_proj # self_attn.k_proj layers - model.layers.30.self_attn.k_proj - model.layers.27.self_attn.k_proj - model.layers.25.self_attn.k_proj - model.layers.33.self_attn.k_proj - model.layers.26.self_attn.k_proj - model.layers.31.self_attn.k_proj - model.layers.35.self_attn.k_proj - model.layers.39.self_attn.k_proj - model.layers.22.self_attn.k_proj - model.layers.24.self_attn.k_proj - model.layers.21.self_attn.k_proj - model.layers.28.self_attn.k_proj - model.layers.23.self_attn.k_proj - model.layers.36.self_attn.k_proj - model.layers.20.self_attn.k_proj - model.layers.37.self_attn.k_proj - model.layers.29.self_attn.k_proj - model.layers.32.self_attn.k_proj - model.layers.16.self_attn.k_proj - model.layers.18.self_attn.k_proj # self_attn.o_proj layers - model.layers.7.self_attn.o_proj - model.layers.6.self_attn.o_proj - model.layers.9.self_attn.o_proj - model.layers.5.self_attn.o_proj - model.layers.27.self_attn.o_proj - model.layers.26.self_attn.o_proj - model.layers.4.self_attn.o_proj - model.layers.31.self_attn.o_proj - model.layers.8.self_attn.o_proj - model.layers.16.self_attn.o_proj - model.layers.3.self_attn.o_proj - model.layers.10.self_attn.o_proj - model.layers.18.self_attn.o_proj - model.layers.33.self_attn.o_proj - model.layers.17.self_attn.o_proj - model.layers.32.self_attn.o_proj - model.layers.30.self_attn.o_proj - model.layers.2.self_attn.o_proj - model.layers.15.self_attn.o_proj - model.layers.11.self_attn.o_proj # self_attn.q_proj layers - model.layers.14.self_attn.q_proj - model.layers.11.self_attn.q_proj - model.layers.15.self_attn.q_proj - model.layers.9.self_attn.q_proj - model.layers.8.self_attn.q_proj - model.layers.18.self_attn.q_proj - model.layers.12.self_attn.q_proj - model.layers.13.self_attn.q_proj - model.layers.19.self_attn.q_proj - model.layers.16.self_attn.q_proj - model.layers.10.self_attn.q_proj - model.layers.17.self_attn.q_proj - model.layers.7.self_attn.q_proj - model.layers.5.self_attn.q_proj - model.layers.20.self_attn.q_proj - model.layers.3.self_attn.q_proj - model.layers.26.self_attn.q_proj - model.layers.27.self_attn.q_proj - model.layers.28.self_attn.q_proj - model.layers.33.self_attn.q_proj # self_attn.v_proj layers - model.layers.27.self_attn.v_proj - model.layers.20.self_attn.v_proj - model.layers.24.self_attn.v_proj - model.layers.25.self_attn.v_proj - model.layers.30.self_attn.v_proj - model.layers.2.self_attn.v_proj - model.layers.23.self_attn.v_proj - model.layers.22.self_attn.v_proj - model.layers.26.self_attn.v_proj - model.layers.33.self_attn.v_proj - model.layers.37.self_attn.v_proj - model.layers.7.self_attn.v_proj - 
model.layers.4.self_attn.v_proj - model.layers.18.self_attn.v_proj - model.layers.31.self_attn.v_proj - model.layers.17.self_attn.v_proj - model.layers.35.self_attn.v_proj - model.layers.32.self_attn.v_proj - model.layers.21.self_attn.v_proj - model.layers.3.self_attn.v_proj dataset_prepared_path: /workspace/axolotl/dolph-2.9.3-nemo-prepared val_set_size: 0.01 output_dir: /workspace/axolotl/dolphin-2.9.3-mistral-nemo sequence_len: 8192 sample_packing: true pad_to_sequence_len: true wandb_project: dolphin-2.9.3-Mistral-nemo wandb_watch: wandb_run_id: wandb_log_model: gradient_accumulation_steps: 16 micro_batch_size: 1 num_epochs: 3 optimizer: adamw_torch lr_scheduler: cosine learning_rate: 5e-6 train_on_inputs: false group_by_length: false bf16: auto fp16: tf32: gradient_checkpointing: true gradient_checkpointing_kwargs: use_reentrant: false early_stopping_patience: resume_from_checkpoint: logging_steps: 1 xformers_attention: flash_attention: true warmup_steps: 100 # evals_per_epoch: 4 eval_table_size: saves_per_epoch: 1 save_total_limit: 2 save_steps: debug: deepspeed: deepspeed_configs/zero3_bf16.json weight_decay: 0.1 special_tokens: eos_token: "<|im_end|>" pad_token: "<pad>" bos_token: "<s>" unk_token: "<unk>" tokens: - "<|im_start|>" # fsdp: # - full_shard # - auto_wrap # fsdp_config: # fsdp_limit_all_gathers: true # fsdp_sync_module_states: true # fsdp_offload_params: true # fsdp_use_orig_params: false # fsdp_cpu_ram_efficient_loading: true # fsdp_transformer_layer_cls_to_wrap: MixtralSparseMoeBlock # fsdp_state_dict_type: FULL_STATE_DICT # fsdp_auto_wrap_policy: TRANSFORMER_BASED_WRAP # fsdp_sharding_strategy: FULL_SHARD # fsdp_forward_prefetch: false # fsdp_backward_prefetch: BACKWARD_PRE ``` </details><br> [<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="200" height="32"/>](https://wandb.ai/ehartford/dolphin-2.9.3-Mistral-nemo/runs/c23odyoj) # workspace/axolotl/dolphin-2.9.3-mistral-nemo This model was trained from scratch on the None dataset. It achieves the following results on the evaluation set: - Loss: 0.5605 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - distributed_type: multi-GPU - num_devices: 8 - gradient_accumulation_steps: 16 - total_train_batch_size: 128 - total_eval_batch_size: 8 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 100 - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 0.5691 | 1.0162 | 983 | 0.5734 | | 0.5335 | 2.0174 | 1968 | 0.5609 | | 0.5297 | 2.9639 | 2901 | 0.5605 | ### Framework versions - Transformers 4.43.0.dev0 - Pytorch 2.2.2+cu121 - Datasets 2.19.1 - Tokenizers 0.19.1
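A note on the `unfrozen_parameters` block in the config above: axolotl uses it to restrict full fine-tuning to parameters whose names match the listed patterns, freezing everything else. The sketch below shows the general idea in plain PyTorch; it is an illustration of the technique, not axolotl's actual implementation.

```python
import re
import torch.nn as nn

def apply_unfrozen_parameters(model: nn.Module, patterns: list[str]) -> None:
    """Freeze every parameter, then re-enable gradients for those whose
    fully qualified name matches any of the given regex patterns."""
    compiled = [re.compile(p) for p in patterns]
    for name, param in model.named_parameters():
        param.requires_grad = any(rx.search(name) for rx in compiled)

# Illustrative call with a few patterns from the config above:
# apply_unfrozen_parameters(model, ["^lm_head.weight$", "input_layernorm"])
```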
null
Non_BioNLP
{"base_model": ["cognitivecomputations/dolphin-2.9.3-mistral-nemo-12b"], "datasets": ["cognitivecomputations/Dolphin-2.9", "teknium/OpenHermes-2.5", "m-a-p/CodeFeedback-Filtered-Instruction", "cognitivecomputations/dolphin-coder", "cognitivecomputations/samantha-data", "microsoft/orca-math-word-problems-200k", "Locutusque/function-calling-chatml", "internlm/Agent-FLAN"], "license": "apache-2.0", "tags": ["generated_from_trainer", "axolotl"]}
task
[ "TRANSLATION" ]
45,476
feserrm/mbart-de-es-translation
feserrm
text2text-generation
[ "transformers", "safetensors", "mbart", "text2text-generation", "generated_from_trainer", "base_model:facebook/mbart-large-50", "base_model:finetune:facebook/mbart-large-50", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2025-03-07T11:44:04Z
2025-03-07T11:45:41+00:00
12
0
--- base_model: facebook/mbart-large-50 library_name: transformers license: mit tags: - generated_from_trainer model-index: - name: mbart-de-es-translation results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-de-es-translation This model is a fine-tuned version of [facebook/mbart-large-50](https://huggingface.co/facebook/mbart-large-50) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 (no additional optimizer arguments) - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.49.0 - Pytorch 2.5.1+cu124 - Datasets 3.3.2 - Tokenizers 0.21.0
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mbart-de-es-translation This model is a fine-tuned version of [facebook/mbart-large-50](https://huggingface.co/facebook/mbart-large-50) on an unknown dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 (no additional optimizer arguments) - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.49.0 - Pytorch 2.5.1+cu124 - Datasets 3.3.2 - Tokenizers 0.21.0
{"base_model": "facebook/mbart-large-50", "library_name": "transformers", "license": "mit", "tags": ["generated_from_trainer"], "model-index": [{"name": "mbart-de-es-translation", "results": []}]}
task
[ "TRANSLATION" ]
45,477
L-macc/autotrain-Biomedical_sc_summ-1217846148
L-macc
summarization
[ "transformers", "pytorch", "longt5", "text2text-generation", "autotrain", "summarization", "unk", "dataset:L-macc/autotrain-data-Biomedical_sc_summ", "model-index", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-08-04T07:45:21Z
2022-08-06T12:54:46+00:00
22
0
--- datasets: - L-macc/autotrain-data-Biomedical_sc_summ language: - unk tags: - autotrain - summarization widget: - text: I love AutoTrain 🤗 co2_eq_emissions: emissions: 13.651986586580765 model-index: - name: L-macc/autotrain-Biomedical_sc_summ-1217846148 results: - task: type: summarization name: Summarization dataset: name: Blaise-g/SumPubmed type: Blaise-g/SumPubmed config: Blaise-g--SumPubmed split: test metrics: - type: rouge value: 40.1043 name: ROUGE-1 verified: true - type: rouge value: 13.0457 name: ROUGE-2 verified: true - type: rouge value: 22.2288 name: ROUGE-L verified: true - type: rouge value: 35.9295 name: ROUGE-LSUM verified: true - type: loss value: 2.090137004852295 name: loss verified: true - type: gen_len value: 139.7369 name: gen_len verified: true --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 1217846148 - CO2 Emissions (in grams): 13.6520 ## Validation Metrics - Loss: 2.503 - Rouge1: 38.768 - Rouge2: 10.791 - RougeL: 21.946 - RougeLsum: 33.780 - Gen Len: 123.331 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/L-macc/autotrain-Biomedical_sc_summ-1217846148 ```
null
BioNLP
# Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 1217846148 - CO2 Emissions (in grams): 13.6520 ## Validation Metrics - Loss: 2.503 - Rouge1: 38.768 - Rouge2: 10.791 - RougeL: 21.946 - RougeLsum: 33.780 - Gen Len: 123.331 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/models/L-macc/autotrain-Biomedical_sc_summ-1217846148 ```
{"datasets": ["L-macc/autotrain-data-Biomedical_sc_summ"], "language": ["unk"], "tags": ["autotrain", "summarization"], "widget": [{"text": "I love AutoTrain 🤗"}], "co2_eq_emissions": {"emissions": 13.651986586580765}, "model-index": [{"name": "L-macc/autotrain-Biomedical_sc_summ-1217846148", "results": [{"task": {"type": "summarization", "name": "Summarization"}, "dataset": {"name": "Blaise-g/SumPubmed", "type": "Blaise-g/SumPubmed", "config": "Blaise-g--SumPubmed", "split": "test"}, "metrics": [{"type": "rouge", "value": 40.1043, "name": "ROUGE-1", "verified": true}, {"type": "rouge", "value": 13.0457, "name": "ROUGE-2", "verified": true}, {"type": "rouge", "value": 22.2288, "name": "ROUGE-L", "verified": true}, {"type": "rouge", "value": 35.9295, "name": "ROUGE-LSUM", "verified": true}, {"type": "loss", "value": 2.090137004852295, "name": "loss", "verified": true}, {"type": "gen_len", "value": 139.7369, "name": "gen_len", "verified": true}]}]}]}
task
[ "SUMMARIZATION" ]
45,478
meandyou200175/phobert-finetune
meandyou200175
sentence-similarity
[ "sentence-transformers", "safetensors", "roberta", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:43804", "loss:MultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:vinai/phobert-base-v2", "base_model:finetune:vinai/phobert-base-v2", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-10-21T14:15:05Z
2024-10-21T14:15:25+00:00
8
0
--- base_model: vinai/phobert-base-v2 library_name: sentence-transformers metrics: - cosine_accuracy@1 - cosine_accuracy@3 - cosine_accuracy@5 - cosine_accuracy@10 - cosine_precision@1 - cosine_precision@3 - cosine_precision@5 - cosine_precision@10 - cosine_recall@1 - cosine_recall@3 - cosine_recall@5 - cosine_recall@10 - cosine_ndcg@10 - cosine_mrr@10 - cosine_map@100 - dot_accuracy@1 - dot_accuracy@3 - dot_accuracy@5 - dot_accuracy@10 - dot_precision@1 - dot_precision@3 - dot_precision@5 - dot_precision@10 - dot_recall@1 - dot_recall@3 - dot_recall@5 - dot_recall@10 - dot_ndcg@10 - dot_mrr@10 - dot_map@100 pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:43804 - loss:MultipleNegativesRankingLoss widget: - source_sentence: Nhờ bác sĩ cho biết việc lựa chọn đóng đinh nội tủy và nẹp vít để kết hợp xương đòn dựa trên cơ sở nào ạ? Ca phẫu thuật thường kéo dài trong bao lâu? Bệnh nhân nằm viện mấy ngày? sentences: - ' Chào em, là bệnh mãn tính phải điều trị suốt đời, phải kiên nhẫn và kiên trì nên đôi khi lượng đường trong cơ thể không ổn định. Lúc đi khám xét nghiệm thì ổn do bản thân biết mai đi khám nên sẽ kiêng ăn, ăn ít... còn bệnh lâu dài nên trong ngày đôi khi thèm chút này hay thích ăn chút kia, quên uống thuốc, suy nghĩ, mất ngủ cũng làm đường không ổn định. Đường trong cơ thể lúc lên lúc xuống dễ đưa đến biến chứng. Em hay thấy bệnh nhân tiểu đường tháo khớp ngón chân, ngón tay, đôi khi tháo khớp gối, khớp háng, đây là do tê liệt hệ thần kinh nên khi va chạm bệnh nhân không phát hiện. Đến khi phát hiện thì đã nhiễm trùng nặng phải tháo khớp. Theo BS mẹ em có khả năng do biến chứng tiểu đường vì mẹ em bị bệnh khá lâu nên ít nhiều ảnh hưởng thần kinh bị tê liệt gây đau. Em nên nhớ dặn mẹ đi tái khám và điều trị cho thật ổn định nhé! Thân mến!' - ' Để lựa chọn phương pháp đóng đinh nội tủy hay nẹp vít cho bệnh nhân cần dựa vào nhiều yếu tố. Trong lòng tủy xương có một cái ống, nếu lòng tủy bệnh nhân nhỏ mà đường gãy không bị gãy thành nhiều mảnh thì nên lựa chọn phương pháp đóng đinh. Phương pháp này có nhược điểm dễ bị lộ phần đinh khi đinh vừa đóng, chưa chắc vào xương. Tuy nhiên, ưu điểm là khi đóng đinh, đường mổ sẽ nhỏ, đơn giản. Đối với nẹp vít, đường mổ dài hơn nhưng phần nắn chỉnh sẽ tuyệt đối, vững chắc hơn. Nhìn chung, giữa 2 phương pháp thời gian mổ không khác biệt nhau nhiều, từ 30-45 phút sẽ hoàn thành cuộc phẫu thuật kết hợp xương. Tại bệnh viện Nhân dân 115, sau khi bệnh nhân được làm phẫu thuật có thể xuất viện rất sớm trong vòng khoảng 3-5 ngày, tùy theo đường mổ lớn hay nhỏ. Giữa việc lựa chọn phẫu thuật hay bảo tồn, đinh nội tủy hay nẹp vít phụ thuộc vào lòng tủy của bệnh nhân và thói quen, sự đánh giá của phẫu thuật viên. Cá nhân tôi thường lựa chọn phương pháp phẫu thuật nẹp vít sẽ cho kết quả nắn chỉnh tốt, chắc hơn và bệnh nhân không bị biến chứng trồi đinh về sau. Thân mến.' - Chào em, Tình trạng người mệt mỏi, khó thở, tim đập nhanh xảy ra khi không gắng sức có thể do nhiều nguyên nhân, gồm tim mạch, hô hấp, thần kinh cơ, tiêu hóa (chủ yếu là ống tiêu hóa trên), tâm lý, bệnh lý nội tiết tố… Viêm dạ dày trào ngược có thể gây các triệu chứng này do dịch acid trào ngược từ dạ dày lên thực quản kích thích thần kinh tim. Mặt khác bệnh dạ dày là bệnh có thể tái phát, điều trị hết bệnh rồi thì bệnh vẫn có thể tái lại. 
Do đó, nếu em đã khám tim mạch và hô hấp bình thường, để biết có phải mình mệt mỏi do bệnh dạ dày gây ra hay không thì tốt nhất là em khám chuyên khoa nội tiêu hóa và điều trị trào ngược dạ dày thực quản thử, nếu triệu chứng cải thiện nhanh chóng thì chính hắn là nguyên nhân, em nhé. - source_sentence: Tôi bị tình trạng nuốt nước miếng có cảm giác bị vướng ở cổ, không đau rát, không ho sốt, ăn uống bình thường đã 1 ngày nay. Chỉ có nuốt nước miếng là có cảm giác vướng thôi, lỗ tai bên trái thì cảm giác ngứa nhẹ. Xin hỏi là bệnh gì vậy ạ? sentences: - "Em Lan thân mến, Hiện nay, xét nghiệm được xem là một xét nghiệm\r\nthường quy,\ \ nên thai kỳ của em cũng rất cần được làm những xét nghiệm này mặc\r\ndù gia\ \ đình em không có bệnh lý bất thường. Tuy nhiên, thai kỳ của em đã qua thời gian\ \ làm xét nghiệm Double test, bây\r\ngiờ em phải chờ đến lúc thai được 16 – 18\ \ tuần tuổi, làm xét nghiệm Triple test\r\nem nhé! Chúc em và bé khỏe mạnh!" - 'Trường hợp thoái hóa cột sống thắt lưng gây đau mỏi liên tục dù đã dùng thuốc giảm đau liều cao Chào em, Thoái hóa khớp, thoái hóa cột sống là tiến trình lão hóa không thể tránh khỏi của con người, đặc biệt có thể xảy ra sớm và nhanh hơn ở người nữ sau mãn kinh, sinh nở nhiều, suy dinh dưỡng hay ăn uống thiếu chất khoáng, lao động vất vả lúc còn trẻ. Trường hợp thoái hóa cột sống thắt lưng gây đau mỏi liên tục dù đã dùng thuốc giảm đau liều cao, đặc biệt là đau lan xuống hai chân, tê yếu hai chân thì cần chụp MRI cột sống để tầm soát thoát vị đĩa đệm chèn ép tủy sống. Trường hợp của em, mới phát hiện thoái hóa cột sống thắt lưng gần đây, cũng mới uống thuốc 1 tuần và không duy trì nữa, việc đau lưng vẫn còn âm ỉ nhưng không lan xuống hai chân thì chưa đến mức cần chụp MRI cột sống thắt lưng. Nhưng mà, em cần tích cực điều trị để bệnh thoái hóa cột sống thắt lưng không tiến triển nặng hơn. Bệnh này trị khỏi hoàn toàn là không thể, vì sinh lão bệnh tử không thể cải hoàn, nhưng mà việc điều trị tích cực sẽ giúp khống chế được bệnh, giảm đau và giảm tốc độ tiến triển của bệnh. Về việc sử dụng thuốc, dù là thuốc Tây hay thuốc Đông y, em cũng cần phải thăm khám bs ck cơ xương khớp (Tây y) hay ck y học cổ truyền (Đông y) để được kê thuốc phù hợp. các thuốc thường dùng là giảm đau, giãn cơ, bổ sung vi khoáng chất (canxi, vitamin D3, magie...). Bên cạnh đó, về phương pháp giảm đau hỗ trợ không dùng thuốc, em nên chú ý: - Chú ý thay đổi tư thế trong quá trình làm việc, không giữ mãi một tư thế trong nhiều giờ liền. Ngồi làm việc đúng tư thế để tránh các bệnh cột sống. - Vận động đúng cách, khi vác vật nặng không vặn cột sống. - Thường xuyên tập thể dục rèn luyện để cột sống vững chắc, cơ thể dẻo dai, bơi cũng được mà yoga là tốt nhất. - Ăn uống khoa học, xây dựng chế độ dinh dưỡng hợp lý, tăng cường nhóm thực phẩm giàu canxi, vitamin D, omega 3… giúp nâng cao độ chắc khỏe của đĩa đệm cũng như xương khớp. - Duy trì cân nặng bình thường, tránh để tăng cân quá mức. - Tư thế ngủ: nằm ngửa trên ván cứng hay nệm bông ép chặt, tránh nệm lò xo hay nệm cao su quá mềm, có thể đệm ở vùng khoeo làm co nhẹ khớp gối và khớp háng, nên nằm đầu thấp không gối sẽ tốt cho cột sống cổ. - Có thể thực hiện điều trị vật lý và các liệu pháp phản xạ: bao gồm phương pháp nhiệt như chườm nóng (túi nước, muối rang, cám rang, lá lốt, lá ngải cứu nóng); dùng các dòng điện tại khoa vật lý trị liệu, điều trị bằng laser; châm cứu, kéo cơ để hỗ trợ giảm đau cơ cạnh sống. Trân trọng!' 
- Chào bạn, Nuốt vướng ở cổ thường gặp trong một số bệnh lý viêm nhiễm hầu họng như viêm họng, viêm amidan mạn, trào ngược dạ dày thực quản, hội chứng chảy mũi sau… Đây là có thể là triệu chứng đầu tiên báo hiệu một đợt bùng phát cấp tính của viêm nhiễm hô hấp trên do triệu chứng mới chỉ xuất hiện 1 ngày. Bạn nên khám bác sĩ Tai mũi họng để thăm khám trực tiếp, đánh giá và kê toa điều trị bạn nhé! Thân mến. - source_sentence: Chào bác sĩ, em bị gãy xương gót, đã đóng đinh đến nay được gần 5 tuần. Vậy 6 tuần em tháo đinh được chưa ạ? sentences: - ' Chào em, gồm 2 trị số, trị số lớn nhất gọi là huyết áp tâm thu, bình thường < 140 và > 90 mmHg; trị số thấp nhất gọi là huyết áp tâm trương, bình thường < 90 và > 60 mmHg. Huyết áp có thể tăng khi căng thẳng, do lo lắng, do hội chứng áo choàng trắng (khi vào bv, khi gặp bác sĩ thì huyết áp cao), bệnh lý viêm nhiễm, do cafe, khi khó thở... nhìn chung là các stress đối với cơ thể. Như vậy, huyết áp ghi nhận ở những lúc cơ thể đang lo lắng, bồn chồn, có bệnh thì sẽ không phản ánh chính xác được huyết áp dao động bình thường của người bệnh. Do vậy em nên khám chuyên khoa tim mạch, bác sĩ sẽ thăm khám và làm xét nghiệm kiểm tra xem em có các dấu chứng của tăng huyết áp hay không (như dày thành tim, tiểu đạm, đo huyết áp 24 giờ...) để xác định em có tăng huyết áp hay không và điều trị thích hợp. Những triệu chứng hoa mắt, chóng mặt, đau đầu, đau 1 bên mắt, tiểu nhiều có thể là do bệnh tăng huyết áp gây ra (ảnh hưởng lên mạch máu não, lên thận...) hoặc là 1 bệnh lý khác như thiếu máu, rối loạn tiền đình, viêm nhiễm hệ thống, viêm mũi xoang, bệnh lý mạch máu não... (và tăng huyết áp chỉ là phản ứng của cơ thể khi có stress). Để tìm ra bệnh và giải quyết nỗi lo về bệnh, em nên đến bệnh viện để kiểm tra sức khỏe em nhé. Thân mến! ' - ' Chào em, Thời điểm 6 tuần là quá sớm để rút đinh cố định xương gót (trừ trường hợp khung cố định xương bên ngoài). Tháo đinh vít kim loại chỉ bắt buộc thực hiện sớm trong những trường hợp bất thường như gãy vít, nhiễm trùng, khớp giả... gây ra các triệu chứng bất thường với bệnh nhân mà thôi. Em nên tái khám tại chuyên khoa Chấn thương Chỉnh hình để bác sĩ kiểm tra lại việc lành xương của em tốt chưa và dặn em lịch trình rút đinh phù hợp, em nhé. Thân mến.' - K dạ dày không điều trị tiên lượng sống khá ngắn Chào em, K dạ dày là ung thư dạ dày. Bệnh ung thư dạ dày là bệnh lý ác tính và có chỉ định phẫu thuật cắt khối u – cắt dạ dày khi còn có thể cắt được. Nếu đã phát hiện ung thư dạ dày mà không điều trị phẫu thuật thì thời gian sống của bệnh nhân trung bình là 6 tháng đến 1 năm tùy loại ung thư dạ dày, khi ung thư tiến triển di căn có thể gây nhiều đau đớn hơn. Hiện tại chị em đang bị suy nhược cơ thể nhiều, không ăn uống được, đau nhiều do ung thư dạ dày là có chỉ định vào bệnh viện nằm điều trị luôn rồi, chứ không thể nào lấy thuốc mà không tới phòng khám được đâu. Vô bệnh viện chị em sẽ được truyền dịch, chích thuốc, nâng thể trạng lên rồi mới tính đến chuyện điều trị khối ung thư kia. Em đưa chị em đến bệnh viện càng sớm càng tốt, tốt nhất là bệnh viện Ung bướu, em nhé. - source_sentence: "Thưa bác sĩ,\r\n\r\nEm bị đục thủy tinh thể do chấn thương và\ \ vừa mổ mắt về và em cũng bị cận thị. Thời gian khoảng 1 tuần em thấy mắt mình\ \ nhìn chỉ rõ hơn được 1 phần nào. Nhìn xa thì vẫn thấy nhưng vẫn mờ mờ. Bác sĩ\ \ cho em lời khuyên nên làm cách nào và mắt em có thể sáng lại như bình thường\ \ được không ạ?\r\n\r\nEm xin chân thành cảm ơn! 
(Minh Tiến - Bình Định)" sentences: - Bạn Minh Tiến thân mến, Hiện nay phẫu thuật đục thủy tinh thể đã được y học nói chung và ngành Nhãn khoa Việt Nam thực hiện hoàn chỉnh đến mức tuyệt vời. Phẫu thuật này được xem như một cuộc cách mạng rất đáng tự hào của ngành nhãn khoa. Hàng ngày có thể tới hàng ngàn ca phẫu thuật đem lại ánh sáng cho người mù lòa đục thể thủy tinh tại Việt Nam. Nói như vậy để giúp cho bạn hiểu rõ phẫu thuật này các bác sĩ Việt Nam thực hiện rất thường xuyên và rất tốt. Tuy nhiên, với mắt đục thủy tinh thể do chấn thương của bạn là ca phẫu thuật tương đối không đơn giản. Thêm vào đó ngoài đục thủy tinh thể do chấn thương, mắt bạn cũng có thể kèm theo tổn thương ở các bộ phận khác của mắt mà trước mổ bác sĩ khó có thể chẩn đoán được. Với hai lý do nêu trên, nên đôi khi mắt mổ khó có thể tốt theo ý muốn của cả bệnh nhân lẫn thầy thuốc. Bạn cần có thời gian theo dõi và điều trị tiếp sau mổ. Sau thời gian ổn định khoảng 1 tháng, bạn cần đo thử kính xem có cải thiện thị lực thêm không? Chúc bạn may mắn! - Chào em, Bình thường các hạch trong cơ thể không sưng to lên đến mức có thể sờ chạm hay nhận biết được. Vì thế, hạch sưng lên, hay thường gọi là nổi hạch, là một triệu chứng bất thường của cơ thể. Cho nên, em lo lắng là đúng khi phát hiện hạch ở vùng cổ. Hạch bạch huyết đóng vai trò quan trọng đối với hoạt động của hệ miễn dịch. Chúng chứa các tế bào miễn dịch như lympho bào, đại thực bào... có chức năng miễn dịch chống lại các yếu tố lạ như vi khuẩn, virus, kí sinh trùng... xâm nhập vào cơ thể. Trong quá trình đó các hạch có thể bị viêm và sưng lên. Một số trường hợp hạch sưng có thể là hạch ung thư hoặc di căn. Đặc điểm của hạch viêm là nhỏ, số lượng ít, bờ tròn đều, không phát triển theo thời gian, không xâm lấn da xung quanh. Thông thường đối với hạch viêm thì nguồn viêm có thể tấn công tại hạch, cũng có khi là hạch viêm phản ứng với ổ viêm nhiễm cạnh đó, điều trị hết viêm thì hạch sẽ lặn dần, có thể lặn chậm hơn vài tuần đến vài tháng, có một số loại hạch cũng là hạch viêm nhưng mà chỉ giảm kích thước rồi cứ "lì" vậy luôn - không lặn hẳn nhưng không còn sưng như trước và vẫn giữ hình ảnh của hạch viêm, cũng có loại hạch viêm sau lại chuyển sang xơ chai hóa như sẹo cũ và không lặn. Như vậy, em có 1 hạch vùng cổ đã được xác định là hạch viêm thông qua sinh thiết hạch cách đây 10 năm. Trong vòng 10 năm nay, hạch cổ đó không có triệu chứng bất thường. Gần đây, hạch cổ đó có biểu hiện viêm trở lại, mặc dù em uống thuốc (tự mua) thì hạch hết sưng đau, nhưng em cũng cần khám lại bên chuyên khoa ung bướu để kiểm tra tổng quát lại 1 lần, tìm nguyên nhân gây kích thích hạch viêm này tái hoạt động, xem là nguyên nhân lành tính hay tiềm ẩn nguyên nhân khác (vì lần kiểm tra trước đã cách đây 10 năm rồi), em nhé. - ' Chào em, Trường hợp em mô tả là những bất thường của hệ hô hấp có thể là bệnh lý tai mũi họng hay hô hấp dưới như viêm phổi, viêm phế quản, em cần đến các cơ sở y tế chuyên sâu tai mũi họng hay hô hấp để khám thêm. Những biểu hiện đó hoàn toàn không có cơ sở nghĩ . Thân mến!' - source_sentence: Bác sĩ cho em hỏi, em bị rạn nứt xương gót chân bên phải. Em bị hơn 1 tháng nay rồi. Em bỏ thuốc lá. Em muốn hỏi bác sĩ thông thường bó bột hơn hay thuốc lá hơn? Như của em khoảng bao lâu thì khỏi? Và giờ em vẫn chưa đi được bác sĩ ạ. Em cảm ơn. sentences: - 'Câu hỏi của em rất chân thành. Tự ý thức quyết tâm cai nghiệm là điều đáng quý. Nếu em tiếp tục sử dụng thì tình trạng sẽ tồi tệ hơn rất nhiều. 
Ba yếu tố quan trọng nhất và tiến hành đồng thời để cai nghiện thành công, đó là: 1. Ý chí 2. Sự hiểu biết thấu đáo 3. Môi trường thân thiện. Các Trung tâm cai nghiện sẽ giúp em phần 2 và phần 3, từ đó sẽ củng cố phần 1 của em. Trường hợp ở nhà mà em tự cai, thực hành mỗi ngày với 3 điều kiện trên, em sẽ thành công như nhiều bạn khác. Không nên nôn nóng, sốt ruột. Trước tiên em phải thuộc lòng và thực hành những quy tắc này thành thói quen và áp dụng suốt đời. Nhiều trường hợp cai được vài năm vẫn tái nghiện. Do đó, nên tránh xa những "nguồn" khiến em tái nghiện, tránh xa bạn bè nghiện ngập em nhé. Chúc em quyết tâm và đem lại niềm vui cho bố mẹ.' - Chào em, Thứ nhất, bắt buộc phải có phim Xquang để biết em có thực sự nứt xương gót hay bị gãy phức tạp hơn, vì nhiều trường hợp tưởng chỉ nứt xương thôi nhưng thật ra là vỡ phức tạp, phải phẫu thuật mới nhanh ổn được. Thứ hai, theo nguyên tắc điều trị nứt gãy xương là phải cố định tốt để can xương mọc ra, chỗ nứt gãy mới được nối liền. Do đó, nếu bó bột thì chân sẽ được cố định liên tục trong 4-6 tuần, còn bó lá thì phải thay thường xuyên, mỗi lần thay là 1 lần xê dịch nên xương khó lành. Tốt hơn hết em nên đến Bệnh viện Chấn thương Chỉnh hình để được kiểm tra và điều trị thích hợp, em nhé. Thân mến. - Chào bạn, Qua hình ảnh sang thương và mô tả triệu chứng, bệnh lý của bạn có khả năng là chàm hay còn gọi là viêm da dị ứng với đặc điểm là viêm và nổi mụn nhỏ, ngứa ngáy. Nguyên nhân của chàm hiện nay chưa rõ nhưng có thể do cơ địa dị ứng (người mắc hen, viêm mũi dị ứng có nguy cơ cao mắc chàm), do kích thích của hóa chất như nước rửa chén, bột giặt, cao su, kim loại, chất liệu giày dép (chàm tiếp xúc),... Thời tiết lạnh, stress, đổ mồ hôi nhiều và phấn hoa... cũng là những nguyên nhân có thể khiến da bị chàm. Chàm cũng có thể gặp ở người bị suy van tĩnh mạch, giãn tĩnh mạch chân khiến tình trạng bệnh dai dẳng, kém đáp ứng điều trị. Điều trị chàm thường phải sử dụng một số loại thuốc bôi da kéo dài, có thể để lại tác dụng phụ, do đó bạn nên khám BS Da liễu để kê toa loại thuốc phù hợp. Ngoài ra, bạn nên chú ý xem có yếu tố nào thường kích thích khởi phát chàm để tránh cho bệnh tái phát bạn nhé! Thân mến. 
model-index: - name: SentenceTransformer based on vinai/phobert-base-v2 results: - task: type: information-retrieval name: Information Retrieval dataset: name: Unknown type: unknown metrics: - type: cosine_accuracy@1 value: 0.7045288531775018 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.835281227173119 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.8767348429510592 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.9231190650109569 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.7045288531775018 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.278427075724373 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.17534696859021182 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.09231190650109566 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.7045288531775018 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.835281227173119 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.8767348429510592 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.9231190650109569 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.8141256948149351 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.7791635506162544 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7827867266296543 name: Cosine Map@100 - type: dot_accuracy@1 value: 0.6860847333820307 name: Dot Accuracy@1 - type: dot_accuracy@3 value: 0.8221329437545654 name: Dot Accuracy@3 - type: dot_accuracy@5 value: 0.8697954711468225 name: Dot Accuracy@5 - type: dot_accuracy@10 value: 0.920745069393718 name: Dot Accuracy@10 - type: dot_precision@1 value: 0.6860847333820307 name: Dot Precision@1 - type: dot_precision@3 value: 0.2740443145848551 name: Dot Precision@3 - type: dot_precision@5 value: 0.17395909422936448 name: Dot Precision@5 - type: dot_precision@10 value: 0.09207450693937179 name: Dot Precision@10 - type: dot_recall@1 value: 0.6860847333820307 name: Dot Recall@1 - type: dot_recall@3 value: 0.8221329437545654 name: Dot Recall@3 - type: dot_recall@5 value: 0.8697954711468225 name: Dot Recall@5 - type: dot_recall@10 value: 0.920745069393718 name: Dot Recall@10 - type: dot_ndcg@10 value: 0.8028187077698474 name: Dot Ndcg@10 - type: dot_mrr@10 value: 0.7651250913075242 name: Dot Mrr@10 - type: dot_map@100 value: 0.7689382784168486 name: Dot Map@100 --- # SentenceTransformer based on vinai/phobert-base-v2 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [vinai/phobert-base-v2](https://huggingface.co/vinai/phobert-base-v2). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. 
## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [vinai/phobert-base-v2](https://huggingface.co/vinai/phobert-base-v2) <!-- at revision e2375d266bdf39c6e8e9a87af16a5da3190b0cc8 --> - **Maximum Sequence Length:** 256 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("meandyou200175/phobert-finetune") # Run inference sentences = [ 'Bác sĩ cho em hỏi, em bị rạn nứt xương gót chân bên phải. Em bị hơn 1 tháng nay rồi. Em bỏ thuốc lá. Em muốn hỏi bác sĩ thông thường bó bột hơn hay thuốc lá hơn? Như của em khoảng bao lâu thì khỏi? Và giờ em vẫn chưa đi được bác sĩ ạ. Em cảm ơn.', 'Chào em, Thứ nhất, bắt buộc phải có phim Xquang để biết em có thực sự nứt xương gót hay bị gãy phức tạp hơn, vì nhiều trường hợp tưởng chỉ nứt xương thôi nhưng thật ra là vỡ phức tạp, phải phẫu thuật mới nhanh ổn được. Thứ hai, theo nguyên tắc điều trị nứt gãy xương là phải cố định tốt để can xương mọc ra, chỗ nứt gãy mới được nối liền. Do đó, nếu bó bột thì chân sẽ được cố định liên tục trong 4-6 tuần, còn bó lá thì phải thay thường xuyên, mỗi lần thay là 1 lần xê dịch nên xương khó lành. Tốt hơn hết em nên đến Bệnh viện Chấn thương Chỉnh hình để được kiểm tra và điều trị thích hợp, em nhé. Thân mến.', 'Chào bạn, Qua hình ảnh sang thương và mô tả triệu chứng, bệnh lý của bạn có khả năng là chàm hay còn gọi là viêm da dị ứng với đặc điểm là viêm và nổi mụn nhỏ, ngứa ngáy. Nguyên nhân của chàm hiện nay chưa rõ nhưng có thể do cơ địa dị ứng (người mắc hen, viêm mũi dị ứng có nguy cơ cao mắc chàm), do kích thích của hóa chất như nước rửa chén, bột giặt, cao su, kim loại, chất liệu giày dép (chàm tiếp xúc),... Thời tiết lạnh, stress, đổ mồ hôi nhiều và phấn hoa... cũng là những nguyên nhân có thể khiến da bị chàm. Chàm cũng có thể gặp ở người bị suy van tĩnh mạch, giãn tĩnh mạch chân khiến tình trạng bệnh dai dẳng, kém đáp ứng điều trị. Điều trị chàm thường phải sử dụng một số loại thuốc bôi da kéo dài, có thể để lại tác dụng phụ, do đó bạn nên khám BS Da liễu để kê toa loại thuốc phù hợp. Ngoài ra, bạn nên chú ý xem có yếu tố nào thường kích thích khởi phát chàm để tránh cho bệnh tái phát bạn nhé! 
Thân mến.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.7045 | | cosine_accuracy@3 | 0.8353 | | cosine_accuracy@5 | 0.8767 | | cosine_accuracy@10 | 0.9231 | | cosine_precision@1 | 0.7045 | | cosine_precision@3 | 0.2784 | | cosine_precision@5 | 0.1753 | | cosine_precision@10 | 0.0923 | | cosine_recall@1 | 0.7045 | | cosine_recall@3 | 0.8353 | | cosine_recall@5 | 0.8767 | | cosine_recall@10 | 0.9231 | | cosine_ndcg@10 | 0.8141 | | cosine_mrr@10 | 0.7792 | | **cosine_map@100** | **0.7828** | | dot_accuracy@1 | 0.6861 | | dot_accuracy@3 | 0.8221 | | dot_accuracy@5 | 0.8698 | | dot_accuracy@10 | 0.9207 | | dot_precision@1 | 0.6861 | | dot_precision@3 | 0.274 | | dot_precision@5 | 0.174 | | dot_precision@10 | 0.0921 | | dot_recall@1 | 0.6861 | | dot_recall@3 | 0.8221 | | dot_recall@5 | 0.8698 | | dot_recall@10 | 0.9207 | | dot_ndcg@10 | 0.8028 | | dot_mrr@10 | 0.7651 | | dot_map@100 | 0.7689 | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `learning_rate`: 2e-05 - `num_train_epochs`: 5 - `warmup_ratio`: 0.1 - `fp16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 5 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: 
None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs <details><summary>Click to expand</summary> | Epoch | Step | Training Loss | Validation Loss | cosine_map@100 | |:------:|:-----:|:-------------:|:---------------:|:--------------:| | 0 | 0 | - | - | 0.1388 | | 0.0365 | 100 | 2.3162 | - | - | | 0.0730 | 200 | 0.6289 | - | - | | 0.1096 | 300 | 0.3538 | - | - | | 0.1461 | 400 | 0.2815 | - | - | | 0.1826 | 500 | 0.2206 | - | - | | 0.2191 | 600 | 0.224 | - | - | | 0.2557 | 700 | 0.1869 | - | - | | 0.2922 | 800 | 0.1742 | - | - | | 0.3287 | 900 | 0.1825 | - | - | | 0.3652 | 1000 | 0.1731 | 0.1270 | 0.6999 | | 0.4018 | 1100 | 0.1557 | - | - | | 0.4383 | 1200 | 0.1845 | - | - | | 0.4748 | 1300 | 0.1593 | - | - | | 0.5113 | 1400 | 0.1352 | - | - | | 0.5478 | 1500 | 0.1573 | - | - | | 0.5844 | 1600 | 0.1482 | - | - | | 0.6209 | 1700 | 0.1413 | - | - | | 0.6574 | 1800 | 0.1315 | - | - | | 0.6939 | 1900 | 0.1253 | - | - | | 0.7305 | 2000 | 0.1298 | 0.1068 | 0.7262 | | 0.7670 | 2100 | 0.1282 | - | - | | 0.8035 | 2200 | 0.1052 | - | - | | 0.8400 | 2300 | 0.1002 | - | - | | 0.8766 | 2400 | 0.1001 | - | - | | 0.9131 | 2500 | 0.1233 | - | - | | 0.9496 | 2600 | 0.1191 | - | - | | 0.9861 | 2700 | 0.1156 | - | - | | 1.0226 | 2800 | 0.1026 | - | - | | 1.0592 | 2900 | 0.0999 | - | - | | 1.0957 | 3000 | 0.0989 | 0.0785 | 0.7370 | | 1.1322 | 3100 | 0.0769 | - | - | | 1.1687 | 3200 | 0.0839 | - | - | | 1.2053 | 3300 | 0.0777 | - | - | | 1.2418 | 3400 | 0.0745 | - | - | | 1.2783 | 3500 | 0.0676 | - | - | | 1.3148 | 3600 | 0.0636 | - | - | | 1.3514 | 3700 | 0.0575 | - | - | | 1.3879 | 3800 | 0.0403 | - | - | | 1.4244 | 3900 | 0.0616 | - | - | | 1.4609 | 4000 | 0.0379 | 0.0717 | 0.7557 | | 1.4974 | 4100 | 0.0386 | - | - | | 1.5340 | 4200 | 0.0346 | - | - | | 1.5705 | 4300 | 0.0327 | - | - | | 1.6070 | 4400 | 0.0378 | - | - | | 1.6435 | 4500 | 0.0265 | - | - | | 1.6801 | 4600 | 0.0317 | - | - | | 1.7166 | 4700 | 0.0262 | - | - | | 1.7531 | 4800 | 0.0275 | - | - | | 1.7896 | 4900 | 0.025 | - | - | | 1.8262 | 5000 | 0.025 | 0.0687 | 0.7647 | | 1.8627 | 5100 | 0.0195 | - | - | | 1.8992 | 5200 | 0.0316 | - | - | | 1.9357 | 5300 | 0.0368 | - | - | | 1.9722 | 5400 | 0.029 | - | - | | 2.0088 | 5500 | 0.0285 | - | - | | 2.0453 | 5600 | 0.0245 | - | - | | 2.0818 | 5700 | 0.0208 | - | - | | 2.1183 | 5800 | 0.0197 | - | - | | 2.1549 | 5900 | 0.0234 | - | - | | 2.1914 | 6000 | 0.0195 | 0.0682 | 0.7699 | | 2.2279 | 6100 | 0.0233 | - | - | | 2.2644 | 6200 | 0.0184 | - | - | | 2.3009 | 6300 | 0.0217 | - | - | | 2.3375 | 6400 | 0.0233 | - | - | | 2.3740 | 6500 | 0.016 | - | - | | 2.4105 | 6600 | 0.0168 | - | - | | 2.4470 | 6700 | 0.0151 | - | - | | 2.4836 | 6800 | 0.0123 | - | - | | 2.5201 | 6900 | 0.0081 | - | - | | 2.5566 | 7000 | 0.0109 | 0.0616 | 0.7744 | | 2.5931 | 7100 | 0.0073 | - | - | | 2.6297 | 7200 | 0.0088 | - | - | | 2.6662 | 7300 | 0.0124 | - | - | | 2.7027 | 7400 | 0.0078 | - | - | | 2.7392 | 7500 | 0.0066 | - | - | | 2.7757 | 7600 | 0.0066 | - | - | | 2.8123 | 7700 | 0.0072 | - | - | | 2.8488 | 7800 | 0.0066 | - | - | | 2.8853 | 7900 | 0.0088 | - | - | | 2.9218 | 8000 | 0.0096 | 0.0579 | 0.7767 | | 2.9584 | 8100 | 0.0094 | - | - | | 2.9949 | 8200 | 0.0084 | - | - | | 3.0314 | 8300 | 0.008 | - | - | | 3.0679 | 8400 | 0.0061 | - | - | | 3.1045 | 8500 | 0.0082 | - | - | | 3.1410 | 8600 | 0.0056 | - | - | | 3.1775 | 8700 | 0.005 | - | - | | 
3.2140 | 8800 | 0.0052 | - | - | | 3.2505 | 8900 | 0.0059 | - | - | | 3.2871 | 9000 | 0.0071 | 0.0592 | 0.7757 | | 3.3236 | 9100 | 0.0059 | - | - | | 3.3601 | 9200 | 0.0056 | - | - | | 3.3966 | 9300 | 0.0037 | - | - | | 3.4332 | 9400 | 0.0068 | - | - | | 3.4697 | 9500 | 0.0037 | - | - | | 3.5062 | 9600 | 0.0037 | - | - | | 3.5427 | 9700 | 0.0041 | - | - | | 3.5793 | 9800 | 0.0026 | - | - | | 3.6158 | 9900 | 0.0029 | - | - | | 3.6523 | 10000 | 0.0044 | 0.0607 | 0.7752 | | 3.6888 | 10100 | 0.0029 | - | - | | 3.7253 | 10200 | 0.003 | - | - | | 3.7619 | 10300 | 0.0026 | - | - | | 3.7984 | 10400 | 0.0028 | - | - | | 3.8349 | 10500 | 0.003 | - | - | | 3.8714 | 10600 | 0.0026 | - | - | | 3.9080 | 10700 | 0.0029 | - | - | | 3.9445 | 10800 | 0.003 | - | - | | 3.9810 | 10900 | 0.0022 | - | - | | 4.0175 | 11000 | 0.003 | 0.0579 | 0.7816 | | 4.0541 | 11100 | 0.0024 | - | - | | 4.0906 | 11200 | 0.0029 | - | - | | 4.1271 | 11300 | 0.0025 | - | - | | 4.1636 | 11400 | 0.0024 | - | - | | 4.2001 | 11500 | 0.0022 | - | - | | 4.2367 | 11600 | 0.0026 | - | - | | 4.2732 | 11700 | 0.0022 | - | - | | 4.3097 | 11800 | 0.003 | - | - | | 4.3462 | 11900 | 0.0023 | - | - | | 4.3828 | 12000 | 0.0018 | 0.0562 | 0.7819 | | 4.4193 | 12100 | 0.0022 | - | - | | 4.4558 | 12200 | 0.0021 | - | - | | 4.4923 | 12300 | 0.0017 | - | - | | 4.5289 | 12400 | 0.0016 | - | - | | 4.5654 | 12500 | 0.0017 | - | - | | 4.6019 | 12600 | 0.0015 | - | - | | 4.6384 | 12700 | 0.0017 | - | - | | 4.6749 | 12800 | 0.0016 | - | - | | 4.7115 | 12900 | 0.0014 | - | - | | 4.7480 | 13000 | 0.0015 | 0.0561 | 0.7828 | | 4.7845 | 13100 | 0.0015 | - | - | | 4.8210 | 13200 | 0.0012 | - | - | | 4.8576 | 13300 | 0.0014 | - | - | | 4.8941 | 13400 | 0.0017 | - | - | | 4.9306 | 13500 | 0.0018 | - | - | | 4.9671 | 13600 | 0.0015 | - | - | </details> ### Framework Versions - Python: 3.10.14 - Sentence Transformers: 3.2.0 - Transformers: 4.45.1 - PyTorch: 2.4.0 - Accelerate: 0.34.2 - Datasets: 3.0.1 - Tokenizers: 0.20.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
BioNLP
# SentenceTransformer based on vinai/phobert-base-v2 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [vinai/phobert-base-v2](https://huggingface.co/vinai/phobert-base-v2). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [vinai/phobert-base-v2](https://huggingface.co/vinai/phobert-base-v2) <!-- at revision e2375d266bdf39c6e8e9a87af16a5da3190b0cc8 --> - **Maximum Sequence Length:** 256 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 256, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("meandyou200175/phobert-finetune") # Run inference sentences = [ 'Bác sĩ cho em hỏi, em bị rạn nứt xương gót chân bên phải. Em bị hơn 1 tháng nay rồi. Em bỏ thuốc lá. Em muốn hỏi bác sĩ thông thường bó bột hơn hay thuốc lá hơn? Như của em khoảng bao lâu thì khỏi? Và giờ em vẫn chưa đi được bác sĩ ạ. Em cảm ơn.', 'Chào em, Thứ nhất, bắt buộc phải có phim Xquang để biết em có thực sự nứt xương gót hay bị gãy phức tạp hơn, vì nhiều trường hợp tưởng chỉ nứt xương thôi nhưng thật ra là vỡ phức tạp, phải phẫu thuật mới nhanh ổn được. Thứ hai, theo nguyên tắc điều trị nứt gãy xương là phải cố định tốt để can xương mọc ra, chỗ nứt gãy mới được nối liền. Do đó, nếu bó bột thì chân sẽ được cố định liên tục trong 4-6 tuần, còn bó lá thì phải thay thường xuyên, mỗi lần thay là 1 lần xê dịch nên xương khó lành. Tốt hơn hết em nên đến Bệnh viện Chấn thương Chỉnh hình để được kiểm tra và điều trị thích hợp, em nhé. Thân mến.', 'Chào bạn, Qua hình ảnh sang thương và mô tả triệu chứng, bệnh lý của bạn có khả năng là chàm hay còn gọi là viêm da dị ứng với đặc điểm là viêm và nổi mụn nhỏ, ngứa ngáy. Nguyên nhân của chàm hiện nay chưa rõ nhưng có thể do cơ địa dị ứng (người mắc hen, viêm mũi dị ứng có nguy cơ cao mắc chàm), do kích thích của hóa chất như nước rửa chén, bột giặt, cao su, kim loại, chất liệu giày dép (chàm tiếp xúc),... Thời tiết lạnh, stress, đổ mồ hôi nhiều và phấn hoa... cũng là những nguyên nhân có thể khiến da bị chàm. Chàm cũng có thể gặp ở người bị suy van tĩnh mạch, giãn tĩnh mạch chân khiến tình trạng bệnh dai dẳng, kém đáp ứng điều trị. 
Điều trị chàm thường phải sử dụng một số loại thuốc bôi da kéo dài, có thể để lại tác dụng phụ, do đó bạn nên khám BS Da liễu để kê toa loại thuốc phù hợp. Ngoài ra, bạn nên chú ý xem có yếu tố nào thường kích thích khởi phát chàm để tránh cho bệnh tái phát bạn nhé! Thân mến.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.7045 | | cosine_accuracy@3 | 0.8353 | | cosine_accuracy@5 | 0.8767 | | cosine_accuracy@10 | 0.9231 | | cosine_precision@1 | 0.7045 | | cosine_precision@3 | 0.2784 | | cosine_precision@5 | 0.1753 | | cosine_precision@10 | 0.0923 | | cosine_recall@1 | 0.7045 | | cosine_recall@3 | 0.8353 | | cosine_recall@5 | 0.8767 | | cosine_recall@10 | 0.9231 | | cosine_ndcg@10 | 0.8141 | | cosine_mrr@10 | 0.7792 | | **cosine_map@100** | **0.7828** | | dot_accuracy@1 | 0.6861 | | dot_accuracy@3 | 0.8221 | | dot_accuracy@5 | 0.8698 | | dot_accuracy@10 | 0.9207 | | dot_precision@1 | 0.6861 | | dot_precision@3 | 0.274 | | dot_precision@5 | 0.174 | | dot_precision@10 | 0.0921 | | dot_recall@1 | 0.6861 | | dot_recall@3 | 0.8221 | | dot_recall@5 | 0.8698 | | dot_recall@10 | 0.9207 | | dot_ndcg@10 | 0.8028 | | dot_mrr@10 | 0.7651 | | dot_map@100 | 0.7689 | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `learning_rate`: 2e-05 - `num_train_epochs`: 5 - `warmup_ratio`: 0.1 - `fp16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 5 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: 
None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs <details><summary>Click to expand</summary> | Epoch | Step | Training Loss | Validation Loss | cosine_map@100 | |:------:|:-----:|:-------------:|:---------------:|:--------------:| | 0 | 0 | - | - | 0.1388 | | 0.0365 | 100 | 2.3162 | - | - | | 0.0730 | 200 | 0.6289 | - | - | | 0.1096 | 300 | 0.3538 | - | - | | 0.1461 | 400 | 0.2815 | - | - | | 0.1826 | 500 | 0.2206 | - | - | | 0.2191 | 600 | 0.224 | - | - | | 0.2557 | 700 | 0.1869 | - | - | | 0.2922 | 800 | 0.1742 | - | - | | 0.3287 | 900 | 0.1825 | - | - | | 0.3652 | 1000 | 0.1731 | 0.1270 | 0.6999 | | 0.4018 | 1100 | 0.1557 | - | - | | 0.4383 | 1200 | 0.1845 | - | - | | 0.4748 | 1300 | 0.1593 | - | - | | 0.5113 | 1400 | 0.1352 | - | - | | 0.5478 | 1500 | 0.1573 | - | - | | 0.5844 | 1600 | 0.1482 | - | - | | 0.6209 | 1700 | 0.1413 | - | - | | 0.6574 | 1800 | 0.1315 | - | - | | 0.6939 | 1900 | 0.1253 | - | - | | 0.7305 | 2000 | 0.1298 | 0.1068 | 0.7262 | | 0.7670 | 2100 | 0.1282 | - | - | | 0.8035 | 2200 | 0.1052 | - | - | | 0.8400 | 2300 | 0.1002 | - | - | | 0.8766 | 2400 | 0.1001 | - | - | | 0.9131 | 2500 | 0.1233 | - | - | | 0.9496 | 2600 | 0.1191 | - | - | | 0.9861 | 2700 | 0.1156 | - | - | | 1.0226 | 2800 | 0.1026 | - | - | | 1.0592 | 2900 | 0.0999 | - | - | | 1.0957 | 3000 | 0.0989 | 0.0785 | 0.7370 | | 1.1322 | 3100 | 0.0769 | - | - | | 1.1687 | 3200 | 0.0839 | - | - | | 1.2053 | 3300 | 0.0777 | - | - | | 1.2418 | 3400 | 0.0745 | - | - | | 1.2783 | 3500 | 0.0676 | - | - | | 1.3148 | 3600 | 0.0636 | - | - | | 1.3514 | 3700 | 0.0575 | - | - | | 1.3879 | 3800 | 0.0403 | - | - | | 1.4244 | 3900 | 0.0616 | - | - | | 1.4609 | 4000 | 0.0379 | 0.0717 | 0.7557 | | 1.4974 | 4100 | 0.0386 | - | - | | 1.5340 | 4200 | 0.0346 | - | - | | 1.5705 | 4300 | 0.0327 | - | - | | 1.6070 | 4400 | 0.0378 | - | - | | 1.6435 | 4500 | 0.0265 | - | - | | 1.6801 | 4600 | 0.0317 | - | - | | 1.7166 | 4700 | 0.0262 | - | - | | 1.7531 | 4800 | 0.0275 | - | - | | 1.7896 | 4900 | 0.025 | - | - | | 1.8262 | 5000 | 0.025 | 0.0687 | 0.7647 | | 1.8627 | 5100 | 0.0195 | - | - | | 1.8992 | 5200 | 0.0316 | - | - | | 1.9357 | 5300 | 0.0368 | - | - | | 1.9722 | 5400 | 0.029 | - | - | | 2.0088 | 5500 | 0.0285 | - | - | | 2.0453 | 5600 | 0.0245 | - | - | | 2.0818 | 5700 | 0.0208 | - | - | | 2.1183 | 5800 | 0.0197 | - | - | | 2.1549 | 5900 | 0.0234 | - | - | | 2.1914 | 6000 | 0.0195 | 0.0682 | 0.7699 | | 2.2279 | 6100 | 0.0233 | - | - | | 2.2644 | 6200 | 0.0184 | - | - | | 2.3009 | 6300 | 0.0217 | - | - | | 2.3375 | 6400 | 0.0233 | - | - | | 2.3740 | 6500 | 0.016 | - | - | | 2.4105 | 6600 | 0.0168 | - | - | | 2.4470 | 6700 | 0.0151 | - | - | | 2.4836 | 6800 | 0.0123 | - | - | | 2.5201 | 6900 | 0.0081 | - | - | | 2.5566 | 7000 | 0.0109 | 0.0616 | 0.7744 | | 2.5931 | 7100 | 0.0073 | - | - | | 2.6297 | 7200 | 0.0088 | - | - | | 2.6662 | 7300 | 0.0124 | - | - | | 2.7027 | 7400 | 0.0078 | - | - | | 2.7392 | 7500 | 0.0066 | - | - | | 2.7757 | 7600 | 0.0066 | - | - | | 2.8123 | 7700 | 0.0072 | - | - | | 2.8488 | 7800 | 0.0066 | - | - | | 2.8853 | 7900 | 0.0088 | - | - | | 2.9218 | 8000 | 0.0096 | 0.0579 | 0.7767 | | 2.9584 | 8100 | 0.0094 | - | - | | 2.9949 | 8200 | 0.0084 | - | - | | 3.0314 | 8300 | 0.008 | - | - | | 3.0679 | 8400 | 0.0061 | - | - | | 3.1045 | 8500 | 0.0082 | - | - | | 3.1410 | 8600 | 0.0056 | - | - | | 3.1775 | 8700 | 0.005 | - | - | | 
3.2140 | 8800 | 0.0052 | - | - | | 3.2505 | 8900 | 0.0059 | - | - | | 3.2871 | 9000 | 0.0071 | 0.0592 | 0.7757 | | 3.3236 | 9100 | 0.0059 | - | - | | 3.3601 | 9200 | 0.0056 | - | - | | 3.3966 | 9300 | 0.0037 | - | - | | 3.4332 | 9400 | 0.0068 | - | - | | 3.4697 | 9500 | 0.0037 | - | - | | 3.5062 | 9600 | 0.0037 | - | - | | 3.5427 | 9700 | 0.0041 | - | - | | 3.5793 | 9800 | 0.0026 | - | - | | 3.6158 | 9900 | 0.0029 | - | - | | 3.6523 | 10000 | 0.0044 | 0.0607 | 0.7752 | | 3.6888 | 10100 | 0.0029 | - | - | | 3.7253 | 10200 | 0.003 | - | - | | 3.7619 | 10300 | 0.0026 | - | - | | 3.7984 | 10400 | 0.0028 | - | - | | 3.8349 | 10500 | 0.003 | - | - | | 3.8714 | 10600 | 0.0026 | - | - | | 3.9080 | 10700 | 0.0029 | - | - | | 3.9445 | 10800 | 0.003 | - | - | | 3.9810 | 10900 | 0.0022 | - | - | | 4.0175 | 11000 | 0.003 | 0.0579 | 0.7816 | | 4.0541 | 11100 | 0.0024 | - | - | | 4.0906 | 11200 | 0.0029 | - | - | | 4.1271 | 11300 | 0.0025 | - | - | | 4.1636 | 11400 | 0.0024 | - | - | | 4.2001 | 11500 | 0.0022 | - | - | | 4.2367 | 11600 | 0.0026 | - | - | | 4.2732 | 11700 | 0.0022 | - | - | | 4.3097 | 11800 | 0.003 | - | - | | 4.3462 | 11900 | 0.0023 | - | - | | 4.3828 | 12000 | 0.0018 | 0.0562 | 0.7819 | | 4.4193 | 12100 | 0.0022 | - | - | | 4.4558 | 12200 | 0.0021 | - | - | | 4.4923 | 12300 | 0.0017 | - | - | | 4.5289 | 12400 | 0.0016 | - | - | | 4.5654 | 12500 | 0.0017 | - | - | | 4.6019 | 12600 | 0.0015 | - | - | | 4.6384 | 12700 | 0.0017 | - | - | | 4.6749 | 12800 | 0.0016 | - | - | | 4.7115 | 12900 | 0.0014 | - | - | | 4.7480 | 13000 | 0.0015 | 0.0561 | 0.7828 | | 4.7845 | 13100 | 0.0015 | - | - | | 4.8210 | 13200 | 0.0012 | - | - | | 4.8576 | 13300 | 0.0014 | - | - | | 4.8941 | 13400 | 0.0017 | - | - | | 4.9306 | 13500 | 0.0018 | - | - | | 4.9671 | 13600 | 0.0015 | - | - | </details> ### Framework Versions - Python: 3.10.14 - Sentence Transformers: 3.2.0 - Transformers: 4.45.1 - PyTorch: 2.4.0 - Accelerate: 0.34.2 - Datasets: 3.0.1 - Tokenizers: 0.20.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "vinai/phobert-base-v2", "library_name": "sentence-transformers", "metrics": ["cosine_accuracy@1", "cosine_accuracy@3", "cosine_accuracy@5", "cosine_accuracy@10", "cosine_precision@1", "cosine_precision@3", "cosine_precision@5", "cosine_precision@10", "cosine_recall@1", "cosine_recall@3", "cosine_recall@5", "cosine_recall@10", "cosine_ndcg@10", "cosine_mrr@10", "cosine_map@100", "dot_accuracy@1", "dot_accuracy@3", "dot_accuracy@5", "dot_accuracy@10", "dot_precision@1", "dot_precision@3", "dot_precision@5", "dot_precision@10", "dot_recall@1", "dot_recall@3", "dot_recall@5", "dot_recall@10", "dot_ndcg@10", "dot_mrr@10", "dot_map@100"], "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:43804", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "Nhờ bác sĩ cho biết việc lựa chọn đóng đinh nội tủy và nẹp vít để kết hợp xương đòn dựa trên cơ sở nào ạ? Ca phẫu thuật thường kéo dài trong bao lâu? Bệnh nhân nằm viện mấy ngày?", "sentences": [" Chào em, là bệnh mãn tính phải điều trị suốt đời, phải kiên nhẫn và kiên trì nên đôi khi lượng đường trong cơ thể không ổn định. Lúc đi khám xét nghiệm thì ổn do bản thân biết mai đi khám nên sẽ kiêng ăn, ăn ít... còn bệnh lâu dài nên trong ngày đôi khi thèm chút này hay thích ăn chút kia, quên uống thuốc, suy nghĩ, mất ngủ cũng làm đường không ổn định. Đường trong cơ thể lúc lên lúc xuống dễ đưa đến biến chứng. Em hay thấy bệnh nhân tiểu đường tháo khớp ngón chân, ngón tay, đôi khi tháo khớp gối, khớp háng, đây là do tê liệt hệ thần kinh nên khi va chạm bệnh nhân không phát hiện. Đến khi phát hiện thì đã nhiễm trùng nặng phải tháo khớp. Theo BS mẹ em có khả năng do biến chứng tiểu đường vì mẹ em bị bệnh khá lâu nên ít nhiều ảnh hưởng thần kinh bị tê liệt gây đau. Em nên nhớ dặn mẹ đi tái khám và điều trị cho thật ổn định nhé! Thân mến!", " Để lựa chọn phương pháp đóng đinh nội tủy hay nẹp vít cho bệnh nhân cần dựa vào nhiều yếu tố. Trong lòng tủy xương có một cái ống, nếu lòng tủy bệnh nhân nhỏ mà đường gãy không bị gãy thành nhiều mảnh thì nên lựa chọn phương pháp đóng đinh. Phương pháp này có nhược điểm dễ bị lộ phần đinh khi đinh vừa đóng, chưa chắc vào xương. Tuy nhiên, ưu điểm là khi đóng đinh, đường mổ sẽ nhỏ, đơn giản. Đối với nẹp vít, đường mổ dài hơn nhưng phần nắn chỉnh sẽ tuyệt đối, vững chắc hơn. Nhìn chung, giữa 2 phương pháp thời gian mổ không khác biệt nhau nhiều, từ 30-45 phút sẽ hoàn thành cuộc phẫu thuật kết hợp xương. Tại bệnh viện Nhân dân 115, sau khi bệnh nhân được làm phẫu thuật có thể xuất viện rất sớm trong vòng khoảng 3-5 ngày, tùy theo đường mổ lớn hay nhỏ. Giữa việc lựa chọn phẫu thuật hay bảo tồn, đinh nội tủy hay nẹp vít phụ thuộc vào lòng tủy của bệnh nhân và thói quen, sự đánh giá của phẫu thuật viên. Cá nhân tôi thường lựa chọn phương pháp phẫu thuật nẹp vít sẽ cho kết quả nắn chỉnh tốt, chắc hơn và bệnh nhân không bị biến chứng trồi đinh về sau. Thân mến.", "Chào em, Tình trạng người mệt mỏi, khó thở, tim đập nhanh xảy ra khi không gắng sức có thể do nhiều nguyên nhân, gồm tim mạch, hô hấp, thần kinh cơ, tiêu hóa (chủ yếu là ống tiêu hóa trên), tâm lý, bệnh lý nội tiết tố… Viêm dạ dày trào ngược có thể gây các triệu chứng này do dịch acid trào ngược từ dạ dày lên thực quản kích thích thần kinh tim. Mặt khác bệnh dạ dày là bệnh có thể tái phát, điều trị hết bệnh rồi thì bệnh vẫn có thể tái lại. 
Do đó, nếu em đã khám tim mạch và hô hấp bình thường, để biết có phải mình mệt mỏi do bệnh dạ dày gây ra hay không thì tốt nhất là em khám chuyên khoa nội tiêu hóa và điều trị trào ngược dạ dày thực quản thử, nếu triệu chứng cải thiện nhanh chóng thì chính hắn là nguyên nhân, em nhé."]}, {"source_sentence": "Tôi bị tình trạng nuốt nước miếng có cảm giác bị vướng ở cổ, không đau rát, không ho sốt, ăn uống bình thường đã 1 ngày nay. Chỉ có nuốt nước miếng là có cảm giác vướng thôi, lỗ tai bên trái thì cảm giác ngứa nhẹ. Xin hỏi là bệnh gì vậy ạ?", "sentences": ["Em Lan thân mến, Hiện nay, xét nghiệm được xem là một xét nghiệm\r\nthường quy, nên thai kỳ của em cũng rất cần được làm những xét nghiệm này mặc\r\ndù gia đình em không có bệnh lý bất thường. Tuy nhiên, thai kỳ của em đã qua thời gian làm xét nghiệm Double test, bây\r\ngiờ em phải chờ đến lúc thai được 16 – 18 tuần tuổi, làm xét nghiệm Triple test\r\nem nhé! Chúc em và bé khỏe mạnh!", "Trường hợp thoái hóa cột sống thắt lưng gây đau mỏi liên tục dù đã dùng thuốc giảm đau liều cao Chào em, Thoái hóa khớp, thoái hóa cột sống là tiến trình lão hóa không thể tránh khỏi của con người, đặc biệt có thể xảy ra sớm và nhanh hơn ở người nữ sau mãn kinh, sinh nở nhiều, suy dinh dưỡng hay ăn uống thiếu chất khoáng, lao động vất vả lúc còn trẻ. Trường hợp thoái hóa cột sống thắt lưng gây đau mỏi liên tục dù đã dùng thuốc giảm đau liều cao, đặc biệt là đau lan xuống hai chân, tê yếu hai chân thì cần chụp MRI cột sống để tầm soát thoát vị đĩa đệm chèn ép tủy sống. Trường hợp của em, mới phát hiện thoái hóa cột sống thắt lưng gần đây, cũng mới uống thuốc 1 tuần và không duy trì nữa, việc đau lưng vẫn còn âm ỉ nhưng không lan xuống hai chân thì chưa đến mức cần chụp MRI cột sống thắt lưng. Nhưng mà, em cần tích cực điều trị để bệnh thoái hóa cột sống thắt lưng không tiến triển nặng hơn. Bệnh này trị khỏi hoàn toàn là không thể, vì sinh lão bệnh tử không thể cải hoàn, nhưng mà việc điều trị tích cực sẽ giúp khống chế được bệnh, giảm đau và giảm tốc độ tiến triển của bệnh. Về việc sử dụng thuốc, dù là thuốc Tây hay thuốc Đông y, em cũng cần phải thăm khám bs ck cơ xương khớp (Tây y) hay ck y học cổ truyền (Đông y) để được kê thuốc phù hợp. các thuốc thường dùng là giảm đau, giãn cơ, bổ sung vi khoáng chất (canxi, vitamin D3, magie...). Bên cạnh đó, về phương pháp giảm đau hỗ trợ không dùng thuốc, em nên chú ý: - Chú ý thay đổi tư thế trong quá trình làm việc, không giữ mãi một tư thế trong nhiều giờ liền. Ngồi làm việc đúng tư thế để tránh các bệnh cột sống. - Vận động đúng cách, khi vác vật nặng không vặn cột sống. - Thường xuyên tập thể dục rèn luyện để cột sống vững chắc, cơ thể dẻo dai, bơi cũng được mà yoga là tốt nhất. - Ăn uống khoa học, xây dựng chế độ dinh dưỡng hợp lý, tăng cường nhóm thực phẩm giàu canxi, vitamin D, omega 3… giúp nâng cao độ chắc khỏe của đĩa đệm cũng như xương khớp. - Duy trì cân nặng bình thường, tránh để tăng cân quá mức. - Tư thế ngủ: nằm ngửa trên ván cứng hay nệm bông ép chặt, tránh nệm lò xo hay nệm cao su quá mềm, có thể đệm ở vùng khoeo làm co nhẹ khớp gối và khớp háng, nên nằm đầu thấp không gối sẽ tốt cho cột sống cổ. - Có thể thực hiện điều trị vật lý và các liệu pháp phản xạ: bao gồm phương pháp nhiệt như chườm nóng (túi nước, muối rang, cám rang, lá lốt, lá ngải cứu nóng); dùng các dòng điện tại khoa vật lý trị liệu, điều trị bằng laser; châm cứu, kéo cơ để hỗ trợ giảm đau cơ cạnh sống. 
Trân trọng!", "Chào bạn, Nuốt vướng ở cổ thường gặp trong một số bệnh lý viêm nhiễm hầu họng như viêm họng, viêm amidan mạn, trào ngược dạ dày thực quản, hội chứng chảy mũi sau… Đây là có thể là triệu chứng đầu tiên báo hiệu một đợt bùng phát cấp tính của viêm nhiễm hô hấp trên do triệu chứng mới chỉ xuất hiện 1 ngày. Bạn nên khám bác sĩ Tai mũi họng để thăm khám trực tiếp, đánh giá và kê toa điều trị bạn nhé! Thân mến."]}, {"source_sentence": "Chào bác sĩ, em bị gãy xương gót, đã đóng đinh đến nay được gần 5 tuần. Vậy 6 tuần em tháo đinh được chưa ạ?", "sentences": [" Chào em, gồm 2 trị số, trị số lớn nhất gọi là huyết áp tâm thu, bình thường < 140 và > 90 mmHg; trị số thấp nhất gọi là huyết áp tâm trương, bình thường < 90 và > 60 mmHg. Huyết áp có thể tăng khi căng thẳng, do lo lắng, do hội chứng áo choàng trắng (khi vào bv, khi gặp bác sĩ thì huyết áp cao), bệnh lý viêm nhiễm, do cafe, khi khó thở... nhìn chung là các stress đối với cơ thể. Như vậy, huyết áp ghi nhận ở những lúc cơ thể đang lo lắng, bồn chồn, có bệnh thì sẽ không phản ánh chính xác được huyết áp dao động bình thường của người bệnh. Do vậy em nên khám chuyên khoa tim mạch, bác sĩ sẽ thăm khám và làm xét nghiệm kiểm tra xem em có các dấu chứng của tăng huyết áp hay không (như dày thành tim, tiểu đạm, đo huyết áp 24 giờ...) để xác định em có tăng huyết áp hay không và điều trị thích hợp. Những triệu chứng hoa mắt, chóng mặt, đau đầu, đau 1 bên mắt, tiểu nhiều có thể là do bệnh tăng huyết áp gây ra (ảnh hưởng lên mạch máu não, lên thận...) hoặc là 1 bệnh lý khác như thiếu máu, rối loạn tiền đình, viêm nhiễm hệ thống, viêm mũi xoang, bệnh lý mạch máu não... (và tăng huyết áp chỉ là phản ứng của cơ thể khi có stress). Để tìm ra bệnh và giải quyết nỗi lo về bệnh, em nên đến bệnh viện để kiểm tra sức khỏe em nhé. Thân mến! ", " Chào em, Thời điểm 6 tuần là quá sớm để rút đinh cố định xương gót (trừ trường hợp khung cố định xương bên ngoài). Tháo đinh vít kim loại chỉ bắt buộc thực hiện sớm trong những trường hợp bất thường như gãy vít, nhiễm trùng, khớp giả... gây ra các triệu chứng bất thường với bệnh nhân mà thôi. Em nên tái khám tại chuyên khoa Chấn thương Chỉnh hình để bác sĩ kiểm tra lại việc lành xương của em tốt chưa và dặn em lịch trình rút đinh phù hợp, em nhé. Thân mến.", "K dạ dày không điều trị tiên lượng sống khá ngắn Chào em, K dạ dày là ung thư dạ dày. Bệnh ung thư dạ dày là bệnh lý ác tính và có chỉ định phẫu thuật cắt khối u – cắt dạ dày khi còn có thể cắt được. Nếu đã phát hiện ung thư dạ dày mà không điều trị phẫu thuật thì thời gian sống của bệnh nhân trung bình là 6 tháng đến 1 năm tùy loại ung thư dạ dày, khi ung thư tiến triển di căn có thể gây nhiều đau đớn hơn. Hiện tại chị em đang bị suy nhược cơ thể nhiều, không ăn uống được, đau nhiều do ung thư dạ dày là có chỉ định vào bệnh viện nằm điều trị luôn rồi, chứ không thể nào lấy thuốc mà không tới phòng khám được đâu. Vô bệnh viện chị em sẽ được truyền dịch, chích thuốc, nâng thể trạng lên rồi mới tính đến chuyện điều trị khối ung thư kia. Em đưa chị em đến bệnh viện càng sớm càng tốt, tốt nhất là bệnh viện Ung bướu, em nhé."]}, {"source_sentence": "Thưa bác sĩ,\r\n\r\nEm bị đục thủy tinh thể do chấn thương và vừa mổ mắt về và em cũng bị cận thị. Thời gian khoảng 1 tuần em thấy mắt mình nhìn chỉ rõ hơn được 1 phần nào. Nhìn xa thì vẫn thấy nhưng vẫn mờ mờ. Bác sĩ cho em lời khuyên nên làm cách nào và mắt em có thể sáng lại như bình thường được không ạ?\r\n\r\nEm xin chân thành cảm ơn! 
(Minh Tiến - Bình Định)", "sentences": ["Bạn Minh Tiến thân mến, Hiện nay phẫu thuật đục thủy tinh thể đã được y học nói chung và ngành Nhãn khoa Việt Nam thực hiện hoàn chỉnh đến mức tuyệt vời. Phẫu thuật này được xem như một cuộc cách mạng rất đáng tự hào của ngành nhãn khoa. Hàng ngày có thể tới hàng ngàn ca phẫu thuật đem lại ánh sáng cho người mù lòa đục thể thủy tinh tại Việt Nam. Nói như vậy để giúp cho bạn hiểu rõ phẫu thuật này các bác sĩ Việt Nam thực hiện rất thường xuyên và rất tốt. Tuy nhiên, với mắt đục thủy tinh thể do chấn thương của bạn là ca phẫu thuật tương đối không đơn giản. Thêm vào đó ngoài đục thủy tinh thể do chấn thương, mắt bạn cũng có thể kèm theo tổn thương ở các bộ phận khác của mắt mà trước mổ bác sĩ khó có thể chẩn đoán được. Với hai lý do nêu trên, nên đôi khi mắt mổ khó có thể tốt theo ý muốn của cả bệnh nhân lẫn thầy thuốc. Bạn cần có thời gian theo dõi và điều trị tiếp sau mổ. Sau thời gian ổn định khoảng 1 tháng, bạn cần đo thử kính xem có cải thiện thị lực thêm không? Chúc bạn may mắn!", "Chào em, Bình thường các hạch trong cơ thể không sưng to lên đến mức có thể sờ chạm hay nhận biết được. Vì thế, hạch sưng lên, hay thường gọi là nổi hạch, là một triệu chứng bất thường của cơ thể. Cho nên, em lo lắng là đúng khi phát hiện hạch ở vùng cổ. Hạch bạch huyết đóng vai trò quan trọng đối với hoạt động của hệ miễn dịch. Chúng chứa các tế bào miễn dịch như lympho bào, đại thực bào... có chức năng miễn dịch chống lại các yếu tố lạ như vi khuẩn, virus, kí sinh trùng... xâm nhập vào cơ thể. Trong quá trình đó các hạch có thể bị viêm và sưng lên. Một số trường hợp hạch sưng có thể là hạch ung thư hoặc di căn. Đặc điểm của hạch viêm là nhỏ, số lượng ít, bờ tròn đều, không phát triển theo thời gian, không xâm lấn da xung quanh. Thông thường đối với hạch viêm thì nguồn viêm có thể tấn công tại hạch, cũng có khi là hạch viêm phản ứng với ổ viêm nhiễm cạnh đó, điều trị hết viêm thì hạch sẽ lặn dần, có thể lặn chậm hơn vài tuần đến vài tháng, có một số loại hạch cũng là hạch viêm nhưng mà chỉ giảm kích thước rồi cứ \"lì\" vậy luôn - không lặn hẳn nhưng không còn sưng như trước và vẫn giữ hình ảnh của hạch viêm, cũng có loại hạch viêm sau lại chuyển sang xơ chai hóa như sẹo cũ và không lặn. Như vậy, em có 1 hạch vùng cổ đã được xác định là hạch viêm thông qua sinh thiết hạch cách đây 10 năm. Trong vòng 10 năm nay, hạch cổ đó không có triệu chứng bất thường. Gần đây, hạch cổ đó có biểu hiện viêm trở lại, mặc dù em uống thuốc (tự mua) thì hạch hết sưng đau, nhưng em cũng cần khám lại bên chuyên khoa ung bướu để kiểm tra tổng quát lại 1 lần, tìm nguyên nhân gây kích thích hạch viêm này tái hoạt động, xem là nguyên nhân lành tính hay tiềm ẩn nguyên nhân khác (vì lần kiểm tra trước đã cách đây 10 năm rồi), em nhé.", " Chào em, Trường hợp em mô tả là những bất thường của hệ hô hấp có thể là bệnh lý tai mũi họng hay hô hấp dưới như viêm phổi, viêm phế quản, em cần đến các cơ sở y tế chuyên sâu tai mũi họng hay hô hấp để khám thêm. Những biểu hiện đó hoàn toàn không có cơ sở nghĩ . Thân mến!"]}, {"source_sentence": "Bác sĩ cho em hỏi, em bị rạn nứt xương gót chân bên phải. Em bị hơn 1 tháng nay rồi. Em bỏ thuốc lá. Em muốn hỏi bác sĩ thông thường bó bột hơn hay thuốc lá hơn? Như của em khoảng bao lâu thì khỏi? Và giờ em vẫn chưa đi được bác sĩ ạ. Em cảm ơn.", "sentences": ["Câu hỏi của em rất chân thành. Tự ý thức quyết tâm cai nghiệm là điều đáng quý. Nếu em tiếp tục sử dụng thì tình trạng sẽ tồi tệ hơn rất nhiều. 
Ba yếu tố quan trọng nhất và tiến hành đồng thời để cai nghiện thành công, đó là: 1. Ý chí 2. Sự hiểu biết thấu đáo 3. Môi trường thân thiện. Các Trung tâm cai nghiện sẽ giúp em phần 2 và phần 3, từ đó sẽ củng cố phần 1 của em. Trường hợp ở nhà mà em tự cai, thực hành mỗi ngày với 3 điều kiện trên, em sẽ thành công như nhiều bạn khác. Không nên nôn nóng, sốt ruột. Trước tiên em phải thuộc lòng và thực hành những quy tắc này thành thói quen và áp dụng suốt đời. Nhiều trường hợp cai được vài năm vẫn tái nghiện. Do đó, nên tránh xa những \"nguồn\" khiến em tái nghiện, tránh xa bạn bè nghiện ngập em nhé. Chúc em quyết tâm và đem lại niềm vui cho bố mẹ.", "Chào em, Thứ nhất, bắt buộc phải có phim Xquang để biết em có thực sự nứt xương gót hay bị gãy phức tạp hơn, vì nhiều trường hợp tưởng chỉ nứt xương thôi nhưng thật ra là vỡ phức tạp, phải phẫu thuật mới nhanh ổn được. Thứ hai, theo nguyên tắc điều trị nứt gãy xương là phải cố định tốt để can xương mọc ra, chỗ nứt gãy mới được nối liền. Do đó, nếu bó bột thì chân sẽ được cố định liên tục trong 4-6 tuần, còn bó lá thì phải thay thường xuyên, mỗi lần thay là 1 lần xê dịch nên xương khó lành. Tốt hơn hết em nên đến Bệnh viện Chấn thương Chỉnh hình để được kiểm tra và điều trị thích hợp, em nhé. Thân mến.", "Chào bạn, Qua hình ảnh sang thương và mô tả triệu chứng, bệnh lý của bạn có khả năng là chàm hay còn gọi là viêm da dị ứng với đặc điểm là viêm và nổi mụn nhỏ, ngứa ngáy. Nguyên nhân của chàm hiện nay chưa rõ nhưng có thể do cơ địa dị ứng (người mắc hen, viêm mũi dị ứng có nguy cơ cao mắc chàm), do kích thích của hóa chất như nước rửa chén, bột giặt, cao su, kim loại, chất liệu giày dép (chàm tiếp xúc),... Thời tiết lạnh, stress, đổ mồ hôi nhiều và phấn hoa... cũng là những nguyên nhân có thể khiến da bị chàm. Chàm cũng có thể gặp ở người bị suy van tĩnh mạch, giãn tĩnh mạch chân khiến tình trạng bệnh dai dẳng, kém đáp ứng điều trị. Điều trị chàm thường phải sử dụng một số loại thuốc bôi da kéo dài, có thể để lại tác dụng phụ, do đó bạn nên khám BS Da liễu để kê toa loại thuốc phù hợp. Ngoài ra, bạn nên chú ý xem có yếu tố nào thường kích thích khởi phát chàm để tránh cho bệnh tái phát bạn nhé! 
Thân mến."]}], "model-index": [{"name": "SentenceTransformer based on vinai/phobert-base-v2", "results": [{"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "Unknown", "type": "unknown"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.7045288531775018, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.835281227173119, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.8767348429510592, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.9231190650109569, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.7045288531775018, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.278427075724373, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.17534696859021182, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.09231190650109566, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.7045288531775018, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.835281227173119, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.8767348429510592, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.9231190650109569, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.8141256948149351, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.7791635506162544, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7827867266296543, "name": "Cosine Map@100"}, {"type": "dot_accuracy@1", "value": 0.6860847333820307, "name": "Dot Accuracy@1"}, {"type": "dot_accuracy@3", "value": 0.8221329437545654, "name": "Dot Accuracy@3"}, {"type": "dot_accuracy@5", "value": 0.8697954711468225, "name": "Dot Accuracy@5"}, {"type": "dot_accuracy@10", "value": 0.920745069393718, "name": "Dot Accuracy@10"}, {"type": "dot_precision@1", "value": 0.6860847333820307, "name": "Dot Precision@1"}, {"type": "dot_precision@3", "value": 0.2740443145848551, "name": "Dot Precision@3"}, {"type": "dot_precision@5", "value": 0.17395909422936448, "name": "Dot Precision@5"}, {"type": "dot_precision@10", "value": 0.09207450693937179, "name": "Dot Precision@10"}, {"type": "dot_recall@1", "value": 0.6860847333820307, "name": "Dot Recall@1"}, {"type": "dot_recall@3", "value": 0.8221329437545654, "name": "Dot Recall@3"}, {"type": "dot_recall@5", "value": 0.8697954711468225, "name": "Dot Recall@5"}, {"type": "dot_recall@10", "value": 0.920745069393718, "name": "Dot Recall@10"}, {"type": "dot_ndcg@10", "value": 0.8028187077698474, "name": "Dot Ndcg@10"}, {"type": "dot_mrr@10", "value": 0.7651250913075242, "name": "Dot Mrr@10"}, {"type": "dot_map@100", "value": 0.7689382784168486, "name": "Dot Map@100"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,480
RichardErkhov/euclaise_-_ReMask-3B-8bits
RichardErkhov
null
[ "safetensors", "stablelm", "arxiv:2401.01335", "arxiv:2403.02178", "8-bit", "bitsandbytes", "region:us" ]
2025-01-11T09:45:59Z
2025-01-11T09:48:01+00:00
6
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) ReMask-3B - bnb 8bits - Model creator: https://huggingface.co/euclaise/ - Original model: https://huggingface.co/euclaise/ReMask-3B/ Original model description: --- language: - en license: cc-by-sa-4.0 datasets: - euclaise/TinyCoT - euclaise/reddit-instruct-curated - sablo/oasst2_curated --- # ReMask: Improving autoregressive language models via regularized masking ## Background [Self-Play Finetuning (SPIN)](https://arxiv.org/abs/2401.01335) is a recent finetuning method which outperforms standard supervised finetuning (SFT). Instead of just performing next-token prediction, SPIN is an iterative method which contrasts generations from the previous iteration of the model with the ground-truth completions. Unlike methods like reinforcement learning or ranking losses, SPIN does not require preference data, which makes it an attractive method since preference data can be hard to gather. However, SPIN's popularity has been limited by the need to repeatedly generate sequences from the model -- generation is much slower than training, so SPIN is much slower and more expensive than SFT. With this problem in mind, I set out to create an alternative to SPIN which doesn't require generation. ### Why does SPIN work? SFT trains models to predict the next token given all the ground-truth previous tokens. However, in generation, the model doesn't have access to a ground-truth to predict from, and instead repeatedly predicts on top of its own predictions. This creates a bias known as "exposure bias": models can often pick reasonable choices for the next token on average, but can't keep this up for the full sequence. In particular, it might be easy to predict a *reasonable* next token, but much more difficult to predict the full sequence. ***For instance, consider the following case:*** > The astronomer pointed his telescope at the distant star, hoping to see The correct prediction here might be "signs of life." However, the model might predict "and" rather than "signs", since "and" is *reasonable* in the immediate context - it's grammatically correct, but implies a strange ending to the sentence. As a result, the model might end up with something like *"The astronomer pointed his telescope at the distant star, hoping to see and hear."* - which makes little sense. SPIN's advantage over SFT likely comes from its partial mitigation of exposure bias. SPIN doesn't only train the model to predict the next token accurately; it repeatedly trains the model to identify and fix discrepancies between its generations and the ground-truth. In order to do this, the model must implicitly learn to think ahead, as exposure bias is likely what causes many of the discrepancies. ### How can we simplify this? Unfortunately, explicitly predicting ahead for many steps is very expensive, and considering full model generations requires a slow generation process. An obvious option is to simply randomly corrupt tokens in the sequence. The model must keep an internal estimate of what the corrupted tokens ought to be in order to predict the token after them, forcing the model to think ahead. The most obvious ways to do this are to randomly replace input tokens with a special `[mask]` token, or to randomly replace input tokens with other random tokens. 
These approaches were tried in [Masked Thought](https://arxiv.org/abs/2403.02178), albeit with somewhat different motivations. However, these approaches have a problem: models can detect when a token is `[mask]` or is highly unlikely, so the model may only learn to think ahead when the corruptions are present. To avoid this issue, we can run the model twice - once on a masked sequence, and once on the full sequence. Then, we penalize deviations between these two runs, which forces the model to act the same regardless of whether the `[mask]` token is present. This approach was initially introduced with [R-TeaFor](https://aclanthology.org/2022.emnlp-main.423/) for abstractive summarization, but can be easily applied to standard generation tasks too. ### ReMask and ReMask-CoT: ReMask applies an approach similar to R-TeaFor to typical chat/instruction tuning. Consider the following chat interaction: > User: What is 1+1? > > Assistant: **1+1=2** > > **User:** The model must predict the bolded parts. So, we randomly mask tokens from the bolded parts, and run the model once on the masked sequence and once on the full sequence. We then compute a divergence loss `D(p_masked, p_full)` between the two predictions. For this, I used the average of the backwards and forwards KL divergences between the predictions. Finally, we add this loss to the standard cross-entropy language modeling losses from each prediction, with a weighting value: ``` loss = 0.5*(CE(p_masked, labels) + CE(p_full, labels)) + weight*D(p_masked, p_full) ``` ***ReMask-CoT:*** For CoT tasks where the reasoning is explicitly separated from the answer, we can add some further improvements. First, note that CoT rationales are noisy -- there are many correct rationales which might lead to the same correct answer, and rationales are impacted by things like writing style which don't matter for the actual correctness of the reasoning. Keeping this in mind: - We also randomly mask a small portion of the labels of the rationale, but not the answer, such that an accurate answer is more important than a rationale that is word-for-word identical to the annotated rationale. - The exact answer is always important and is always a few tokens. Hence, we do not mask the labels or input tokens for the answer value. - Rarely, we ignore the rationale labels entirely, such that the model is only pushed to learn what leads to the best answer. ## Results I trained StableLM-3B-4e1t repeatedly on [TinyCoT](https://huggingface.co/datasets/euclaise/TinyCoT), along with 1000 examples from [reddit-instruct-curated](https://huggingface.co/datasets/euclaise/reddit-instruct-curated) and 1000 examples from [oasst2-curated](https://huggingface.co/datasets/sablo/oasst2_curated). I trained once with ReMask/ReMask-CoT, once without regularization to match Masked Thought (w/ partial label-masking for CoT), and once with SFT. If my hypothesis regarding exposure bias is correct, ReMask should significantly improve generative benchmarks like GSM8K, but would not necessarily improve logprob-based benchmarks like ARC-c (as implemented by the evaluation harness). Here are some benchmark results, computed using the LM Evaluation Harness with vLLM: | Model | GSM8K (strict, 5-shot) | ARC-c (acc_norm, 25-shot) | |:--------------:|-----------------------:|--------------------------:| | SFT | 24.34% | 42.92% | | Masked Thought | 24.18% | *43.60%* | | **ReMask** | **27.90%** | 43.26% | As I expected, it improves GSM8K, but doesn't do much to ARC. 
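To make the loss above concrete, here is a minimal PyTorch sketch of the ReMask objective as this card describes it: one forward pass on a randomly masked copy of the inputs, one on the full inputs, cross-entropy on both, and a symmetric KL penalty between the two predictive distributions. Everything here is an illustrative assumption rather than the released training code - the function name, the `mask_token_id` argument, and the defaults are mine; masking is applied at all positions instead of only the response spans; and next-token label shifting is omitted for brevity.

```python
import torch
import torch.nn.functional as F

def remask_loss(model, input_ids, labels, mask_token_id, mask_prob=0.4, weight=0.1):
    # Randomly replace input tokens with [mask]. (The card masks only the
    # parts the model must predict; that restriction is omitted here.)
    mask = torch.rand(input_ids.shape, device=input_ids.device) < mask_prob
    masked_ids = input_ids.masked_fill(mask, mask_token_id)

    # Two forward passes: masked inputs and full inputs (HF-style .logits assumed).
    logits_masked = model(input_ids=masked_ids).logits
    logits_full = model(input_ids=input_ids).logits

    # Standard cross-entropy on both runs; labels use -100 for ignored positions.
    ce_masked = F.cross_entropy(logits_masked.transpose(1, 2), labels, ignore_index=-100)
    ce_full = F.cross_entropy(logits_full.transpose(1, 2), labels, ignore_index=-100)

    # D(p_masked, p_full): average of the forward and backward KL divergences.
    log_p = F.log_softmax(logits_masked, dim=-1)
    log_q = F.log_softmax(logits_full, dim=-1)
    d = 0.5 * (F.kl_div(log_q, log_p, log_target=True, reduction="batchmean")
               + F.kl_div(log_p, log_q, log_target=True, reduction="batchmean"))

    # loss = 0.5*(CE(p_masked, labels) + CE(p_full, labels)) + weight*D(p_masked, p_full)
    return 0.5 * (ce_masked + ce_full) + weight * d
```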
## Training details - Framework: PyTorch Lightning - Optimizer: [Lilith](https://github.com/euclaise/supertrainer2000/blob/master/src/supertrainer2k/optim/lilith.py) - Training sequence length: 256 - Input masking probability: 40% - Label masking probability: 10% - Answer-only (full rationale label masking) probability: 10% - Batch size: 16, accumulated to 256 - Epochs: 6 - Learning rate: 1e-5 - Learning rate schedule: One Cycle, cosine, no cycle_momentum - Regularization weight: 0.1 ## Prompt format The format for reddit-instruct and oasst2 was: ``` <|user|> [insert instruction here] <|assistant|> [insert response here] <|user|> ... ``` The format for TinyCoT was: ``` <|user|> [insert instruction here] <|rationale|> [insert reasoning here] <|answer|> [insert direct answer here] ```
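For convenience, the two templates above are straightforward to assemble in code. The helper below is a hypothetical sketch - the function name and the exact whitespace between tags are my assumptions; only the special tags themselves come from the card:

```python
def build_prompt(turns):
    """Assemble a prompt from (tag, text) pairs using the card's special tags.

    Tags are "user", "assistant", "rationale", or "answer".
    """
    return "\n".join(f"<|{tag}|>\n{text}" for tag, text in turns)

# Chat-style prompt, ending with the assistant tag to cue a response:
chat_prompt = build_prompt([("user", "What is 1+1?")]) + "\n<|assistant|>\n"

# TinyCoT-style prompt, ending with the rationale tag to elicit reasoning first:
cot_prompt = build_prompt([("user", "What is 1+1?")]) + "\n<|rationale|>\n"
```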
null
Non_BioNLP
{}
task
[ "SUMMARIZATION" ]
45,482
babelmanish/clip-title-thumbnail-embeddings
babelmanish
sentence-similarity
[ "sentence-transformers", "safetensors", "clip", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:53", "loss:MultipleNegativesRankingLoss", "dataset:shawhin/yt-title-thumbnail-pairs", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:sentence-transformers/clip-ViT-L-14", "base_model:finetune:sentence-transformers/clip-ViT-L-14", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2025-02-02T21:21:54Z
2025-02-02T21:34:21+00:00
9
0
--- base_model: sentence-transformers/clip-ViT-L-14 datasets: - shawhin/yt-title-thumbnail-pairs library_name: sentence-transformers metrics: - cosine_accuracy pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:53 - loss:MultipleNegativesRankingLoss widget: - source_sentence: The Hugging Face Transformers Library | Example Code + Chatbot UI with Gradio sentences: - Shit Happens, Stay Solution Oriented - 3 Ways to Make a Custom AI Assistant | RAG, Tools, & Fine-tuning - How to Manage Data Science Projects - source_sentence: 5 Questions Every Data Scientist Should Hardcode into Their Brain sentences: - 5 AI Projects You Can Build This Weekend (with Python) - An Introduction to Decision Trees | Gini Impurity & Python Code - How to Deploy ML Solutions with FastAPI, Docker, & AWS - source_sentence: My $100,000+ Data Science Resume (what got me hired) sentences: - The Mapper Algorithm | Overview & Python Example Code - How to Build Data Pipelines for ML Projects (w/ Python Code) - How to Make a Data Science Portfolio With GitHub Pages (2024) model-index: - name: SentenceTransformer based on sentence-transformers/clip-ViT-L-14 results: - task: type: triplet name: Triplet dataset: name: yt title thumbnail train type: yt-title-thumbnail-train metrics: - type: cosine_accuracy value: 1.0 name: Cosine Accuracy - task: type: triplet name: Triplet dataset: name: yt title thumbnail valid type: yt-title-thumbnail-valid metrics: - type: cosine_accuracy value: 1.0 name: Cosine Accuracy --- # SentenceTransformer based on sentence-transformers/clip-ViT-L-14 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/clip-ViT-L-14](https://huggingface.co/sentence-transformers/clip-ViT-L-14) on the [yt-title-thumbnail-pairs](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs) dataset. It maps sentences & paragraphs to a None-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [sentence-transformers/clip-ViT-L-14](https://huggingface.co/sentence-transformers/clip-ViT-L-14) <!-- at revision 3b12140ad0f9750045e404f187cfccd04bcaf250 --> - **Maximum Sequence Length:** None tokens - **Output Dimensionality:** None dimensions - **Similarity Function:** Cosine Similarity - **Training Dataset:** - [yt-title-thumbnail-pairs](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs) <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): CLIPModel() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. 
```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("babelmanish/clip-title-thumbnail-embeddings") # Run inference sentences = [ 'My $100,000+ Data Science Resume (what got me hired)', 'The Mapper Algorithm | Overview & Python Example Code', 'How to Build Data Pipelines for ML Projects (w/ Python Code)', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 1024] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Triplet * Datasets: `yt-title-thumbnail-train` and `yt-title-thumbnail-valid` * Evaluated with [<code>TripletEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.TripletEvaluator) | Metric | yt-title-thumbnail-train | yt-title-thumbnail-valid | |:--------------------|:-------------------------|:-------------------------| | **cosine_accuracy** | **1.0** | **1.0** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### yt-title-thumbnail-pairs * Dataset: [yt-title-thumbnail-pairs](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs) at [c1b9a13](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs/tree/c1b9a131c52a15636472e440835e2b8634799f0e) * Size: 53 training samples * Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code> * Approximate statistics based on the first 53 samples: | | anchor | positive | negative | |:--------|:----------------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | PIL.JpegImagePlugin.JpegImageFile | string | string | | details | <ul><li></li></ul> | <ul><li>min: 9 tokens</li><li>mean: 15.04 tokens</li><li>max: 27 tokens</li></ul> | <ul><li>min: 10 tokens</li><li>mean: 15.3 tokens</li><li>max: 27 tokens</li></ul> | * Samples: | anchor | positive | negative | |:--------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------|:----------------------------------------------------------------------| | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x2958D1810></code> | <code>Multimodal RAG: A Beginner-friendly Guide (with Python Code)</code> | <code>What Nature Can Teach Us About Business...</code> | | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x2958D16F0></code> | <code>Detecting Power Laws in Real-world Data | w/ Python Code</code> | <code>I Have 90 Days to Make $10k/mo—Here's my plan</code> | | 
<code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x2958D1DB0></code> | <code>I Quit My Job… Here’s How Much I Made 1 Year Later</code> | <code>Persistent Homology | Introduction & Python Example Code</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Evaluation Dataset #### yt-title-thumbnail-pairs * Dataset: [yt-title-thumbnail-pairs](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs) at [c1b9a13](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs/tree/c1b9a131c52a15636472e440835e2b8634799f0e) * Size: 11 evaluation samples * Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code> * Approximate statistics based on the first 11 samples: | | anchor | positive | negative | |:--------|:----------------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | PIL.JpegImagePlugin.JpegImageFile | string | string | | details | <ul><li></li></ul> | <ul><li>min: 8 tokens</li><li>mean: 14.27 tokens</li><li>max: 21 tokens</li></ul> | <ul><li>min: 8 tokens</li><li>mean: 14.36 tokens</li><li>max: 19 tokens</li></ul> | * Samples: | anchor | positive | negative | |:--------------------------------------------------------------------------------------------|:------------------------------------------------------------------|:--------------------------------------------------------------------------------| | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x15009A710></code> | <code>I Was Wrong About AI Consulting (what I learned)</code> | <code>How to Make a Data Science Portfolio With GitHub Pages (2024)</code> | | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x15009A620></code> | <code>My $100,000+ Data Science Resume (what got me hired)</code> | <code>The Mapper Algorithm | Overview & Python Example Code</code> | | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x15009A6B0></code> | <code>4 Skills You Need to Be a Full-Stack Data Scientist</code> | <code>Fine-Tuning Text Embeddings For Domain-specific Search (w/ Python)</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: epoch - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `learning_rate`: 0.0001 - `num_train_epochs`: 2 #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: epoch - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 0.0001 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 2 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - 
`warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: None - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | Validation Loss | yt-title-thumbnail-train_cosine_accuracy | yt-title-thumbnail-valid_cosine_accuracy | |:-----:|:----:|:-------------:|:---------------:|:----------------------------------------:|:----------------------------------------:| | -1 | -1 | - | - | 0.9623 | 1.0 | | 0.25 | 1 | 2.0056 | - | - | - | | 0.5 | 2 | 1.9543 | - | - | - | | 0.75 | 3 | 1.6954 | - | - | - | | 1.0 | 4 | 0.7505 | 1.4916 | - | - | | 1.25 | 5 | 1.5534 | - | - | - | | 1.5 | 6 | 1.2892 | - | - | - | | 1.75 | 7 | 1.3283 | - | - | - | | 2.0 | 8 | 0.3315 | 1.4990 | - | - | | -1 | -1 | - | - | 1.0 | 1.0 | ### Framework Versions - Python: 3.10.4 - Sentence Transformers: 
3.4.1 - Transformers: 4.48.2 - PyTorch: 2.6.0 - Accelerate: 0.26.0 - Datasets: 3.2.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
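As a usage note on the configuration documented above: with the Sentence Transformers v3 trainer, a run matching this card's non-default hyperparameters and loss settings would look roughly like the sketch below. This is an approximation, not the script actually used for this model; the split names and output directory in particular are assumptions.

```python
from datasets import load_dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MultipleNegativesRankingLoss

model = SentenceTransformer("sentence-transformers/clip-ViT-L-14")
dataset = load_dataset("shawhin/yt-title-thumbnail-pairs")  # split names assumed below

# scale=20.0 with cosine similarity, as listed in the loss parameters above
# (these are also the library defaults).
loss = MultipleNegativesRankingLoss(model, scale=20.0)

args = SentenceTransformerTrainingArguments(
    output_dir="clip-title-thumbnail-embeddings",  # assumed
    num_train_epochs=2,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    learning_rate=1e-4,
    eval_strategy="epoch",
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=dataset["train"],
    eval_dataset=dataset["valid"],  # assumed split name
    loss=loss,
)
trainer.train()
```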
null
Non_BioNLP
# SentenceTransformer based on sentence-transformers/clip-ViT-L-14 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/clip-ViT-L-14](https://huggingface.co/sentence-transformers/clip-ViT-L-14) on the [yt-title-thumbnail-pairs](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs) dataset. It maps sentences & paragraphs to a None-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [sentence-transformers/clip-ViT-L-14](https://huggingface.co/sentence-transformers/clip-ViT-L-14) <!-- at revision 3b12140ad0f9750045e404f187cfccd04bcaf250 --> - **Maximum Sequence Length:** None tokens - **Output Dimensionality:** None dimensions - **Similarity Function:** Cosine Similarity - **Training Dataset:** - [yt-title-thumbnail-pairs](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs) <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): CLIPModel() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("babelmanish/clip-title-thumbnail-embeddings") # Run inference sentences = [ 'My $100,000+ Data Science Resume (what got me hired)', 'The Mapper Algorithm | Overview & Python Example Code', 'How to Build Data Pipelines for ML Projects (w/ Python Code)', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 1024] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Triplet * Datasets: `yt-title-thumbnail-train` and `yt-title-thumbnail-valid` * Evaluated with [<code>TripletEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.TripletEvaluator) | Metric | yt-title-thumbnail-train | yt-title-thumbnail-valid | |:--------------------|:-------------------------|:-------------------------| | **cosine_accuracy** | **1.0** | **1.0** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### yt-title-thumbnail-pairs * Dataset: [yt-title-thumbnail-pairs](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs) at [c1b9a13](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs/tree/c1b9a131c52a15636472e440835e2b8634799f0e) * Size: 53 training samples * Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code> * Approximate statistics based on the first 53 samples: | | anchor | positive | negative | |:--------|:----------------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | PIL.JpegImagePlugin.JpegImageFile | string | string | | details | <ul><li></li></ul> | <ul><li>min: 9 tokens</li><li>mean: 15.04 tokens</li><li>max: 27 tokens</li></ul> | <ul><li>min: 10 tokens</li><li>mean: 15.3 tokens</li><li>max: 27 tokens</li></ul> | * Samples: | anchor | positive | negative | |:--------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------|:----------------------------------------------------------------------| | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x2958D1810></code> | <code>Multimodal RAG: A Beginner-friendly Guide (with Python Code)</code> | <code>What Nature Can Teach Us About Business...</code> | | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x2958D16F0></code> | <code>Detecting Power Laws in Real-world Data | w/ Python Code</code> | <code>I Have 90 Days to Make $10k/mo—Here's my plan</code> | | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x2958D1DB0></code> | <code>I Quit My Job… Here’s How Much I Made 1 Year Later</code> | <code>Persistent Homology | Introduction & Python Example Code</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Evaluation Dataset #### yt-title-thumbnail-pairs * Dataset: [yt-title-thumbnail-pairs](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs) at [c1b9a13](https://huggingface.co/datasets/shawhin/yt-title-thumbnail-pairs/tree/c1b9a131c52a15636472e440835e2b8634799f0e) * Size: 11 evaluation samples * Columns: <code>anchor</code>, <code>positive</code>, and <code>negative</code> * Approximate statistics based on the first 11 samples: | | anchor | positive | negative | |:--------|:----------------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | PIL.JpegImagePlugin.JpegImageFile | string | string | | details | <ul><li></li></ul> | <ul><li>min: 8 tokens</li><li>mean: 14.27 tokens</li><li>max: 21 tokens</li></ul> | <ul><li>min: 8 tokens</li><li>mean: 14.36 tokens</li><li>max: 19 tokens</li></ul> | * Samples: | anchor | positive | negative | |:--------------------------------------------------------------------------------------------|:------------------------------------------------------------------|:--------------------------------------------------------------------------------| | <code><PIL.JpegImagePlugin.JpegImageFile image 
mode=RGB size=480x360 at 0x15009A710></code> | <code>I Was Wrong About AI Consulting (what I learned)</code> | <code>How to Make a Data Science Portfolio With GitHub Pages (2024)</code> | | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x15009A620></code> | <code>My $100,000+ Data Science Resume (what got me hired)</code> | <code>The Mapper Algorithm | Overview & Python Example Code</code> | | <code><PIL.JpegImagePlugin.JpegImageFile image mode=RGB size=480x360 at 0x15009A6B0></code> | <code>4 Skills You Need to Be a Full-Stack Data Scientist</code> | <code>Fine-Tuning Text Embeddings For Domain-specific Search (w/ Python)</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: epoch - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `learning_rate`: 0.0001 - `num_train_epochs`: 2 #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: epoch - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 0.0001 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 2 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - 
`resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: None - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | Validation Loss | yt-title-thumbnail-train_cosine_accuracy | yt-title-thumbnail-valid_cosine_accuracy | |:-----:|:----:|:-------------:|:---------------:|:----------------------------------------:|:----------------------------------------:| | -1 | -1 | - | - | 0.9623 | 1.0 | | 0.25 | 1 | 2.0056 | - | - | - | | 0.5 | 2 | 1.9543 | - | - | - | | 0.75 | 3 | 1.6954 | - | - | - | | 1.0 | 4 | 0.7505 | 1.4916 | - | - | | 1.25 | 5 | 1.5534 | - | - | - | | 1.5 | 6 | 1.2892 | - | - | - | | 1.75 | 7 | 1.3283 | - | - | - | | 2.0 | 8 | 0.3315 | 1.4990 | - | - | | -1 | -1 | - | - | 1.0 | 1.0 | ### Framework Versions - Python: 3.10.4 - Sentence Transformers: 3.4.1 - Transformers: 4.48.2 - PyTorch: 2.6.0 - Accelerate: 0.26.0 - Datasets: 3.2.0 - Tokenizers: 0.21.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
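As a supplement to the text-only usage snippet above: because the base model is CLIP, the same checkpoint can embed thumbnail images and candidate titles into one shared space, which is how the triplet training above is set up. The sketch below is illustrative only: `thumbnail.jpg` is a hypothetical local file, and the two titles are taken from the training samples above.

```python
from PIL import Image
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("babelmanish/clip-title-thumbnail-embeddings")

# CLIP-based Sentence Transformers models accept PIL images in encode()
img_emb = model.encode([Image.open("thumbnail.jpg")])  # hypothetical local file
txt_emb = model.encode([
    "Multimodal RAG: A Beginner-friendly Guide (with Python Code)",
    "What Nature Can Teach Us About Business...",
])

# Higher cosine similarity means the title is a better match for the thumbnail
print(model.similarity(img_emb, txt_emb))  # shape [1, 2]
```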
{"base_model": "sentence-transformers/clip-ViT-L-14", "datasets": ["shawhin/yt-title-thumbnail-pairs"], "library_name": "sentence-transformers", "metrics": ["cosine_accuracy"], "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:53", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "The Hugging Face Transformers Library | Example Code + Chatbot UI with Gradio", "sentences": ["Shit Happens, Stay Solution Oriented", "3 Ways to Make a Custom AI Assistant | RAG, Tools, & Fine-tuning", "How to Manage Data Science Projects"]}, {"source_sentence": "5 Questions Every Data Scientist Should Hardcode into Their Brain", "sentences": ["5 AI Projects You Can Build This Weekend (with Python)", "An Introduction to Decision Trees | Gini Impurity & Python Code", "How to Deploy ML Solutions with FastAPI, Docker, & AWS"]}, {"source_sentence": "My $100,000+ Data Science Resume (what got me hired)", "sentences": ["The Mapper Algorithm | Overview & Python Example Code", "How to Build Data Pipelines for ML Projects (w/ Python Code)", "How to Make a Data Science Portfolio With GitHub Pages (2024)"]}], "model-index": [{"name": "SentenceTransformer based on sentence-transformers/clip-ViT-L-14", "results": [{"task": {"type": "triplet", "name": "Triplet"}, "dataset": {"name": "yt title thumbnail train", "type": "yt-title-thumbnail-train"}, "metrics": [{"type": "cosine_accuracy", "value": 1.0, "name": "Cosine Accuracy"}]}, {"task": {"type": "triplet", "name": "Triplet"}, "dataset": {"name": "yt title thumbnail valid", "type": "yt-title-thumbnail-valid"}, "metrics": [{"type": "cosine_accuracy", "value": 1.0, "name": "Cosine Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,483
cosimoiaia/Loquace-70m
cosimoiaia
text-generation
[ "transformers", "pytorch", "gpt_neox", "text-generation", "alpaca", "llama", "llm", "finetune", "Italian", "qlora", "conversational", "it", "dataset:cosimoiaia/Loquace-102k", "license:cc-by-nc-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-06-02T05:18:49Z
2023-06-19T20:21:56+00:00
38
3
--- datasets: - cosimoiaia/Loquace-102k language: - it license: cc-by-nc-2.0 pipeline_tag: conversational tags: - alpaca - llama - llm - finetune - Italian - qlora --- Model Card for Loquace-70m # 🇮🇹 Loquace-70m 🇮🇹 An exclusively Italian-speaking, instruction-finetuned large language model. 🇮🇹 The Loquace Italian LLM models were created as a proof of concept to evaluate how language tuning can be achieved by instruction-tuning foundational LLMs with QLoRa on a dataset in a specific language. The QLoRa (https://github.com/artidoro/qlora) fine-tuning method significantly lowers resource requirements compared to other available methods, which makes it easy to run the process on significantly larger datasets while still using consumer GPUs and still achieving high accuracy. ## Model Description Loquace-70m is the smallest model of the Loquace family. It was trained using QLoRa on a large dataset of 102k question/answer pairs exclusively in Italian. The related code can be found at: https://github.com/cosimoiaia/Loquace Loquace-70m is part of the larger Loquace family: https://huggingface.co/cosimoiaia/Loquace-70m - Based on pythia-70m https://huggingface.co/cosimoiaia/Loquace-410m - Based on pythia-410m https://huggingface.co/cosimoiaia/Loquace-7B - Based on Falcon-7B. https://huggingface.co/cosimoiaia/Loquace-12B - Based on pythia-12B https://huggingface.co/cosimoiaia/Loquace-20B - Based on gpt-neox-20B ## Usage ```python from transformers import ( AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig ) tokenizer = AutoTokenizer.from_pretrained("cosimoiaia/Loquace-70m", padding_side="right", use_fast=True) model = AutoModelForCausalLM.from_pretrained( "cosimoiaia/Loquace-70m", device_map="auto", quantization_config=BitsAndBytesConfig( load_in_4bit=True, llm_int8_has_fp16_weight=False ) ) ``` ## Training Loquace-70m was trained on a conversational dataset comprising 102k question/answer pairs in Italian. The training data was constructed by putting together translations from the original Alpaca dataset and other sources such as the OpenAssistant dataset. The model was trained for only 10,000 iterations and took 6 hours on a single RTX 3090, kindly provided by Genesis Cloud. (https://gnsiscld.co/26qhlf) ## Limitations - Loquace-70m may not handle complex or nuanced queries well and may struggle with ambiguous or poorly formatted inputs. - The model may generate responses that are factually incorrect or nonsensical. It should be used with caution, and outputs should be carefully verified. - The training data primarily consists of conversational examples and may not generalize well to other types of tasks or domains. ## Dependencies - PyTorch - Transformers library by Hugging Face - bitsandbytes - QLoRa
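The usage snippet above only loads the quantized model; a minimal generation sketch, assuming the `model` and `tokenizer` objects from that snippet and an illustrative Italian prompt of our own, might look like this:

```python
prompt = "Qual è la capitale dell'Italia?"  # illustrative prompt, not from the card
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```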
null
Non_BioNLP
{"datasets": ["cosimoiaia/Loquace-102k"], "language": ["it"], "license": "cc-by-nc-2.0", "pipeline_tag": "conversational", "tags": ["alpaca", "llama", "llm", "finetune", "Italian", "qlora"]}
task
[ "TRANSLATION" ]
45,484
tjayant/setfit-jt-multilabel-example
tjayant
text-classification
[ "sentence-transformers", "pytorch", "mpnet", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-04-28T02:37:35Z
2023-04-28T04:06:59+00:00
14
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # tjayant/setfit-jt-multilabel-example This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("tjayant/setfit-jt-multilabel-example") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
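For context on the two-step recipe described above, a few-shot training loop in the classic SetFit API looks roughly like the sketch below. This is a generic illustration, not this model's actual training script: the base checkpoint, the tiny two-label dataset, and all hyperparameters are assumptions.

```python
from datasets import Dataset
from sentence_transformers.losses import CosineSimilarityLoss
from setfit import SetFitModel, SetFitTrainer

# Tiny illustrative multilabel dataset; labels are multi-hot vectors
train_ds = Dataset.from_dict({
    "text": ["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"],
    "label": [[1, 0], [0, 1]],
})

# one-vs-rest trains one binary head per label, enabling multilabel output
model = SetFitModel.from_pretrained(
    "sentence-transformers/paraphrase-mpnet-base-v2",
    multi_target_strategy="one-vs-rest",
)

trainer = SetFitTrainer(
    model=model,
    train_dataset=train_ds,
    loss_class=CosineSimilarityLoss,  # contrastive fine-tuning loss
    batch_size=16,
    num_iterations=20,                # text pairs generated per sample
)
trainer.train()
```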
null
Non_BioNLP
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
45,485
dganochenko/gemma-7b-AWQ
dganochenko
text-generation
[ "transformers", "safetensors", "gemma", "text-generation", "arxiv:2305.14314", "arxiv:2312.11805", "arxiv:2009.03300", "arxiv:1905.07830", "arxiv:1911.11641", "arxiv:1904.09728", "arxiv:1905.10044", "arxiv:1907.10641", "arxiv:1811.00937", "arxiv:1809.02789", "arxiv:1911.01547", "arxiv:1705.03551", "arxiv:2107.03374", "arxiv:2108.07732", "arxiv:2110.14168", "arxiv:2304.06364", "arxiv:2206.04615", "arxiv:1804.06876", "arxiv:2110.08193", "arxiv:2009.11462", "arxiv:2101.11718", "arxiv:1804.09301", "arxiv:2109.07958", "arxiv:2203.09509", "license:other", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "awq", "region:us" ]
2024-03-29T12:42:10Z
2024-03-11T17:03:56+00:00
7
0
--- library_name: transformers license: other license_name: gemma-terms-of-use license_link: https://ai.google.dev/gemma/terms tags: [] extra_gated_heading: Access Gemma on Hugging Face extra_gated_prompt: To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged-in to Hugging Face and click below. Requests are processed immediately. extra_gated_button_content: Acknowledge license --- AWQ quantized version of gemma-7b model. --- # Gemma Model Card **Model Page**: [Gemma](https://ai.google.dev/gemma/docs) This model card corresponds to the 7B base version of the Gemma model. You can also visit the model card of the [2B base model](https://huggingface.co/google/gemma-2b), [7B instruct model](https://huggingface.co/google/gemma-7b-it), and [2B instruct model](https://huggingface.co/google/gemma-2b-it). **Resources and Technical Documentation**: * [Responsible Generative AI Toolkit](https://ai.google.dev/responsible) * [Gemma on Kaggle](https://www.kaggle.com/models/google/gemma) * [Gemma on Vertex Model Garden](https://console.cloud.google.com/vertex-ai/publishers/google/model-garden/335?version=gemma-7b-gg-hf) **Terms of Use**: [Terms](https://www.kaggle.com/models/google/gemma/license/consent) **Authors**: Google ## Model Information Summary description and brief definition of inputs and outputs. ### Description Gemma is a family of lightweight, state-of-the-art open models from Google, built from the same research and technology used to create the Gemini models. They are text-to-text, decoder-only large language models, available in English, with open weights, pre-trained variants, and instruction-tuned variants. Gemma models are well-suited for a variety of text generation tasks, including question answering, summarization, and reasoning. Their relatively small size makes it possible to deploy them in environments with limited resources such as a laptop, desktop or your own cloud infrastructure, democratizing access to state of the art AI models and helping foster innovation for everyone. ### Usage Below we share some code snippets on how to get quickly started with running the model. First make sure to `pip install -U transformers`, then copy the snippet from the section that is relevant for your usecase. #### Fine-tuning examples You can find fine-tuning notebooks under the [`examples/` directory](https://huggingface.co/google/gemma-7b/tree/main/examples). We provide: * A script to perform Supervised Fine-Tuning (SFT) on UltraChat dataset using [QLoRA](https://huggingface.co/papers/2305.14314) * A script to perform SFT using FSDP on TPU devices * A notebook that you can run on a free-tier Google Colab instance to perform SFT on English quotes dataset. You can also find the copy of the notebook [here](https://github.com/huggingface/notebooks/blob/main/peft/gemma_7b_english_quotes.ipynb). #### Running the model on a CPU ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b") input_text = "Write me a poem about Machine Learning." 
input_ids = tokenizer(input_text, return_tensors="pt") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Running the model on a single / multi GPU ```python # pip install accelerate from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto") input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Running the model on a GPU using different precisions * _Using `torch.float16`_ ```python # pip install accelerate import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto", torch_dtype=torch.float16) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Using `torch.bfloat16`_ ```python # pip install accelerate import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", device_map="auto", torch_dtype=torch.bfloat16) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Quantized Versions through `bitsandbytes` * _Using 8-bit precision (int8)_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_8bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", quantization_config=quantization_config) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` * _Using 4-bit precision_ ```python # pip install bitsandbytes accelerate from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig quantization_config = BitsAndBytesConfig(load_in_4bit=True) tokenizer = AutoTokenizer.from_pretrained("google/gemma-7b") model = AutoModelForCausalLM.from_pretrained("google/gemma-7b", quantization_config=quantization_config) input_text = "Write me a poem about Machine Learning." input_ids = tokenizer(input_text, return_tensors="pt").to("cuda") outputs = model.generate(**input_ids) print(tokenizer.decode(outputs[0])) ``` #### Other optimizations * _Flash Attention 2_ First make sure to install `flash-attn` in your environment `pip install flash-attn` ```diff model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype=torch.float16, + attn_implementation="flash_attention_2" ).to(0) ``` ### Inputs and outputs * **Input:** Text string, such as a question, a prompt, or a document to be summarized. * **Output:** Generated English-language text in response to the input, such as an answer to a question, or a summary of a document. ## Model Data Data used for model training and how the data was processed.
### Training Dataset These models were trained on a dataset of text data that includes a wide variety of sources, totaling 6 trillion tokens. Here are the key components: * Web Documents: A diverse collection of web text ensures the model is exposed to a broad range of linguistic styles, topics, and vocabulary. Primarily English-language content. * Code: Exposing the model to code helps it to learn the syntax and patterns of programming languages, which improves its ability to generate code or understand code-related questions. * Mathematics: Training on mathematical text helps the model learn logical reasoning, symbolic representation, and to address mathematical queries. The combination of these diverse data sources is crucial for training a powerful language model that can handle a wide variety of different tasks and text formats. ### Data Preprocessing Here are the key data cleaning and filtering methods applied to the training data: * CSAM Filtering: Rigorous CSAM (Child Sexual Abuse Material) filtering was applied at multiple stages in the data preparation process to ensure the exclusion of harmful and illegal content. * Sensitive Data Filtering: As part of making Gemma pre-trained models safe and reliable, automated techniques were used to filter out certain personal information and other sensitive data from training sets. * Additional methods: Filtering based on content quality and safety in line with [our policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11). ## Implementation Information Details about the model internals. ### Hardware Gemma was trained using the latest generation of [Tensor Processing Unit (TPU)](https://cloud.google.com/tpu/docs/intro-to-tpu) hardware (TPUv5e). Training large language models requires significant computational power. TPUs, designed specifically for matrix operations common in machine learning, offer several advantages in this domain: * Performance: TPUs are specifically designed to handle the massive computations involved in training LLMs. They can speed up training considerably compared to CPUs. * Memory: TPUs often come with large amounts of high-bandwidth memory, allowing for the handling of large models and batch sizes during training. This can lead to better model quality. * Scalability: TPU Pods (large clusters of TPUs) provide a scalable solution for handling the growing complexity of large foundation models. You can distribute training across multiple TPU devices for faster and more efficient processing. * Cost-effectiveness: In many scenarios, TPUs can provide a more cost-effective solution for training large models compared to CPU-based infrastructure, especially when considering the time and resources saved due to faster training. * These advantages are aligned with [Google's commitments to operate sustainably](https://sustainability.google/operating-sustainably/). ### Software Training was done using [JAX](https://github.com/google/jax) and [ML Pathways](https://blog.google/technology/ai/introducing-pathways-next-generation-ai-architecture). JAX allows researchers to take advantage of the latest generation of hardware, including TPUs, for faster and more efficient training of large models. ML Pathways is Google's latest effort to build artificially intelligent systems capable of generalizing across multiple tasks. This is especially suitable for [foundation models](https://ai.google/discover/foundation-models/), including large language models like these.
Together, JAX and ML Pathways are used as described in the [paper about the Gemini family of models](https://arxiv.org/abs/2312.11805); "the 'single controller' programming model of Jax and Pathways allows a single Python process to orchestrate the entire training run, dramatically simplifying the development workflow." ## Evaluation Model evaluation metrics and results. ### Benchmark Results These models were evaluated against a large collection of different datasets and metrics to cover different aspects of text generation: | Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [MMLU](https://arxiv.org/abs/2009.03300) | 5-shot, top-1 | 42.3 | 64.3 | | [HellaSwag](https://arxiv.org/abs/1905.07830) | 0-shot | 71.4 | 81.2 | | [PIQA](https://arxiv.org/abs/1911.11641) | 0-shot | 77.3 | 81.2 | | [SocialIQA](https://arxiv.org/abs/1904.09728) | 0-shot | 59.7 | 51.8 | | [BoolQ](https://arxiv.org/abs/1905.10044) | 0-shot | 69.4 | 83.2 | | [WinoGrande](https://arxiv.org/abs/1907.10641) | partial score | 65.4 | 72.3 | | [CommonsenseQA](https://arxiv.org/abs/1811.00937) | 7-shot | 65.3 | 71.3 | | [OpenBookQA](https://arxiv.org/abs/1809.02789) | | 47.8 | 52.8 | | [ARC-e](https://arxiv.org/abs/1911.01547) | | 73.2 | 81.5 | | [ARC-c](https://arxiv.org/abs/1911.01547) | | 42.1 | 53.2 | | [TriviaQA](https://arxiv.org/abs/1705.03551) | 5-shot | 53.2 | 63.4 | | [Natural Questions](https://github.com/google-research-datasets/natural-questions) | 5-shot | - | 23 | | [HumanEval](https://arxiv.org/abs/2107.03374) | pass@1 | 22.0 | 32.3 | | [MBPP](https://arxiv.org/abs/2108.07732) | 3-shot | 29.2 | 44.4 | | [GSM8K](https://arxiv.org/abs/2110.14168) | maj@1 | 17.7 | 46.4 | | [MATH](https://arxiv.org/abs/2103.03874) | 4-shot | 11.8 | 24.3 | | [AGIEval](https://arxiv.org/abs/2304.06364) | | 24.2 | 41.7 | | [BIG-Bench](https://arxiv.org/abs/2206.04615) | | 35.2 | 55.1 | | ------------------------------ | ------------- | ----------- | --------- | | **Average** | | **54.0** | **56.4** | ## Ethics and Safety Ethics and safety evaluation approach and results. ### Evaluation Approach Our evaluation methods include structured evaluations and internal red-teaming testing of relevant content policies. Red-teaming was conducted by a number of different teams, each with different goals and human evaluation metrics. These models were evaluated against a number of different categories relevant to ethics and safety, including: * Text-to-Text Content Safety: Human evaluation on prompts covering safety policies including child sexual abuse and exploitation, harassment, violence and gore, and hate speech. * Text-to-Text Representational Harms: Benchmark against relevant academic datasets such as [WinoBias](https://arxiv.org/abs/1804.06876) and [BBQ Dataset](https://arxiv.org/abs/2110.08193v2). * Memorization: Automated evaluation of memorization of training data, including the risk of personally identifiable information exposure. * Large-scale harm: Tests for "dangerous capabilities," such as chemical, biological, radiological, and nuclear (CBRN) risks. ### Evaluation Results The results of ethics and safety evaluations are within acceptable thresholds for meeting [internal policies](https://storage.googleapis.com/gweb-uniblog-publish-prod/documents/2023_Google_AI_Principles_Progress_Update.pdf#page=11) for categories such as child safety, content safety, representational harms, memorization, large-scale harms.
On top of robust internal evaluations, the results of well known safety benchmarks like BBQ, BOLD, Winogender, Winobias, RealToxicity, and TruthfulQA are shown here. | Benchmark | Metric | 2B Params | 7B Params | | ------------------------------ | ------------- | ----------- | --------- | | [RealToxicity](https://arxiv.org/abs/2009.11462) | average | 6.86 | 7.90 | | [BOLD](https://arxiv.org/abs/2101.11718) | | 45.57 | 49.08 | | [CrowS-Pairs](https://aclanthology.org/2020.emnlp-main.154/) | top-1 | 45.82 | 51.33 | | [BBQ Ambig](https://arxiv.org/abs/2110.08193v2) | 1-shot, top-1 | 62.58 | 92.54 | | [BBQ Disambig](https://arxiv.org/abs/2110.08193v2) | top-1 | 54.62 | 71.99 | | [Winogender](https://arxiv.org/abs/1804.09301) | top-1 | 51.25 | 54.17 | | [TruthfulQA](https://arxiv.org/abs/2109.07958) | | 44.84 | 31.81 | | [Winobias 1_2](https://arxiv.org/abs/1804.06876) | | 56.12 | 59.09 | | [Winobias 2_2](https://arxiv.org/abs/1804.06876) | | 91.10 | 92.23 | | [Toxigen](https://arxiv.org/abs/2203.09509) | | 29.77 | 39.59 | | ------------------------------ | ------------- | ----------- | --------- | ## Usage and Limitations These models have certain limitations that users should be aware of. ### Intended Usage Open Large Language Models (LLMs) have a wide range of applications across various industries and domains. The following list of potential uses is not comprehensive. The purpose of this list is to provide contextual information about the possible use-cases that the model creators considered as part of model training and development. * Content Creation and Communication * Text Generation: These models can be used to generate creative text formats such as poems, scripts, code, marketing copy, and email drafts. * Chatbots and Conversational AI: Power conversational interfaces for customer service, virtual assistants, or interactive applications. * Text Summarization: Generate concise summaries of a text corpus, research papers, or reports. * Research and Education * Natural Language Processing (NLP) Research: These models can serve as a foundation for researchers to experiment with NLP techniques, develop algorithms, and contribute to the advancement of the field. * Language Learning Tools: Support interactive language learning experiences, aiding in grammar correction or providing writing practice. * Knowledge Exploration: Assist researchers in exploring large bodies of text by generating summaries or answering questions about specific topics. ### Limitations * Training Data * The quality and diversity of the training data significantly influence the model's capabilities. Biases or gaps in the training data can lead to limitations in the model's responses. * The scope of the training dataset determines the subject areas the model can handle effectively. * Context and Task Complexity * LLMs are better at tasks that can be framed with clear prompts and instructions. Open-ended or highly complex tasks might be challenging. * A model's performance can be influenced by the amount of context provided (longer context generally leads to better outputs, up to a certain point). * Language Ambiguity and Nuance * Natural language is inherently complex. LLMs might struggle to grasp subtle nuances, sarcasm, or figurative language. * Factual Accuracy * LLMs generate responses based on information they learned from their training datasets, but they are not knowledge bases. They may generate incorrect or outdated factual statements. * Common Sense * LLMs rely on statistical patterns in language. 
They might lack the ability to apply common sense reasoning in certain situations. ### Ethical Considerations and Risks The development of large language models (LLMs) raises several ethical concerns. In creating an open model, we have carefully considered the following: * Bias and Fairness * LLMs trained on large-scale, real-world text data can reflect socio-cultural biases embedded in the training material. These models underwent careful scrutiny, with input data pre-processing described and posterior evaluations reported in this card. * Misinformation and Misuse * LLMs can be misused to generate text that is false, misleading, or harmful. * Guidelines are provided for responsible use with the model, see the [Responsible Generative AI Toolkit](http://ai.google.dev/gemma/responsible). * Transparency and Accountability: * This model card summarizes details on the models' architecture, capabilities, limitations, and evaluation processes. * A responsibly developed open model offers the opportunity to share innovation by making LLM technology accessible to developers and researchers across the AI ecosystem. Risks identified and mitigations: * Perpetuation of biases: It's encouraged to perform continuous monitoring (using evaluation metrics, human review) and the exploration of de-biasing techniques during model training, fine-tuning, and other use cases. * Generation of harmful content: Mechanisms and guidelines for content safety are essential. Developers are encouraged to exercise caution and implement appropriate content safety safeguards based on their specific product policies and application use cases. * Misuse for malicious purposes: Technical limitations and developer and end-user education can help mitigate against malicious applications of LLMs. Educational resources and reporting mechanisms for users to flag misuse are provided. Prohibited uses of Gemma models are outlined in the [Gemma Prohibited Use Policy](https://ai.google.dev/gemma/prohibited_use_policy). * Privacy violations: Models were trained on data filtered for removal of PII (Personally Identifiable Information). Developers are encouraged to adhere to privacy regulations with privacy-preserving techniques. ### Benefits At the time of release, this family of models provides high-performance open large language model implementations designed from the ground up for Responsible AI development compared to similarly sized models. Using the benchmark evaluation metrics described in this document, these models have been shown to provide superior performance to other, comparably-sized open model alternatives.
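Note that the usage snippets above target the original google/gemma-7b checkpoint. A minimal sketch for loading this AWQ repository directly, assuming transformers picks up the AWQ quantization config from the repo once autoawq is installed:

```python
# pip install autoawq accelerate
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("dganochenko/gemma-7b-AWQ")
model = AutoModelForCausalLM.from_pretrained(
    "dganochenko/gemma-7b-AWQ",  # quantization settings are read from the repo
    device_map="auto",
)

input_text = "Write me a poem about Machine Learning."
inputs = tokenizer(input_text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```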
null
Non_BioNLP
{"library_name": "transformers", "license": "other", "license_name": "gemma-terms-of-use", "license_link": "https://ai.google.dev/gemma/terms", "tags": [], "extra_gated_heading": "Access Gemma on Hugging Face", "extra_gated_prompt": "To access Gemma on Hugging Face, you’re required to review and agree to Google’s usage license. To do this, please ensure you’re logged-in to Hugging Face and click below. Requests are processed immediately.", "extra_gated_button_content": "Acknowledge license"}
task
[ "QUESTION_ANSWERING", "SUMMARIZATION" ]
45,486
lingvanex/english-to-corsican-translation
lingvanex
translation
[ "translation", "ctranslate2", "en", "co", "license:mit", "region:us" ]
2025-02-27T13:49:24Z
2025-02-28T13:08:32+00:00
44
0
---
language:
- en
- co
license: mit
tags:
- translation
- ctranslate2
---

# English to Corsican Translation

This repository provides pre-trained multilingual translation models designed for fast and accurate translations between various languages, such as Kurdish, Samoan, Xhosa, Lao, Corsican, Cebuano, Galician, Yiddish, Swahili, and Yoruba. These models can be used to translate texts from these languages into English and vice versa, making them suitable for machine translation tasks, language localization projects, and building custom translation tools.

# Key Features:

- English to Corsican Translation
- Support for multiple languages (see full list below)
- Pre-trained and optimized for accuracy
- Easy integration into existing translation workflows

# Other Languages:

Kurdish, Samoan, Xhosa, Lao, Corsican, Cebuano, Galician, Yiddish, Swahili, Yoruba

# Use Cases:

- Machine translation of texts from underrepresented languages
- Localization of websites, apps, or documents into multiple languages
- Developing multilingual NLP tools for research and production environments

# Requirements:

To run the models, you need to install ctranslate2 and sentencepiece:

pip install ctranslate2 sentencepiece

# Simple Usage Example

The following code demonstrates how to load and use a model for translation from English to Corsican (en → co).

```python
import sentencepiece as spm
from ctranslate2 import Translator

# Path to the downloaded model directory (placeholder; substitute your own path).
path_to_model = '<here_is_your_path_to_the_model>'
source = 'en'
target = 'co'

translator = Translator(path_to_model, compute_type='int8')
source_tokenizer = spm.SentencePieceProcessor(f'{path_to_model}/{source}.spm.model')
target_tokenizer = spm.SentencePieceProcessor(f'{path_to_model}/{target}.spm.model')

text = [
    'I need to make a phone call.',
    'Can I help you prepare food?',
    'We want to go for a walk.'
]

# Tokenize, translate in one batch, then detokenize the best hypotheses.
input_tokens = source_tokenizer.EncodeAsPieces(text)
translator_output = translator.translate_batch(
    input_tokens,
    batch_type='tokens',
    beam_size=2,
    max_input_length=0,
    max_decoding_length=256
)
output_tokens = [item.hypotheses[0] for item in translator_output]
translation = target_tokenizer.DecodePieces(output_tokens)

print('\n'.join(translation))
```

# Keywords:

Kurdish to English Translation, Samoan to English Translation, Xhosa Translation, Lao to English, Corsican Translation, Cebuano Translation, Galician to English Translation, Yiddish to English Translation, Swahili Translation, Yoruba to English Translation, Multilingual Machine Translation, NLP, Neural Networks, eLearning

# Contact:

If you have any questions, just email [email protected]
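For the "easy integration" claim above, the usage snippet can be wrapped into a small reusable helper. This is a minimal sketch that reuses only the API calls shown in the usage example; the `make_translator` name is hypothetical and the model path remains a placeholder:

```python
import sentencepiece as spm
from ctranslate2 import Translator

def make_translator(path_to_model: str, source: str, target: str):
    """Hypothetical helper: build a translate(texts) function for one language pair."""
    translator = Translator(path_to_model, compute_type='int8')
    src_tok = spm.SentencePieceProcessor(f'{path_to_model}/{source}.spm.model')
    tgt_tok = spm.SentencePieceProcessor(f'{path_to_model}/{target}.spm.model')

    def translate(texts):
        tokens = src_tok.EncodeAsPieces(texts)
        out = translator.translate_batch(tokens, batch_type='tokens', beam_size=2,
                                         max_input_length=0, max_decoding_length=256)
        return tgt_tok.DecodePieces([item.hypotheses[0] for item in out])

    return translate

# Usage (the path is a placeholder, as in the example above):
en_to_co = make_translator('<here_is_your_path_to_the_model>', 'en', 'co')
print('\n'.join(en_to_co(['I need to make a phone call.'])))
```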
null
Non_BioNLP
# English to Corsican Translation

This repository provides pre-trained multilingual translation models designed for fast and accurate translations between various languages, such as Kurdish, Samoan, Xhosa, Lao, Corsican, Cebuano, Galician, Yiddish, Swahili, and Yoruba. These models can be used to translate texts from these languages into English and vice versa, making them suitable for machine translation tasks, language localization projects, and building custom translation tools.

# Key Features:

- English to Corsican Translation
- Support for multiple languages (see full list below)
- Pre-trained and optimized for accuracy
- Easy integration into existing translation workflows

# Other Languages:

Kurdish, Samoan, Xhosa, Lao, Corsican, Cebuano, Galician, Yiddish, Swahili, Yoruba

# Use Cases:

- Machine translation of texts from underrepresented languages
- Localization of websites, apps, or documents into multiple languages
- Developing multilingual NLP tools for research and production environments

# Requirements:

To run the models, you need to install ctranslate2 and sentencepiece:

pip install ctranslate2 sentencepiece

# Simple Usage Example

The following code demonstrates how to load and use a model for translation from English to Corsican (en → co).

```python
import sentencepiece as spm
from ctranslate2 import Translator

# Path to the downloaded model directory (placeholder; substitute your own path).
path_to_model = '<here_is_your_path_to_the_model>'
source = 'en'
target = 'co'

translator = Translator(path_to_model, compute_type='int8')
source_tokenizer = spm.SentencePieceProcessor(f'{path_to_model}/{source}.spm.model')
target_tokenizer = spm.SentencePieceProcessor(f'{path_to_model}/{target}.spm.model')

text = [
    'I need to make a phone call.',
    'Can I help you prepare food?',
    'We want to go for a walk.'
]

# Tokenize, translate in one batch, then detokenize the best hypotheses.
input_tokens = source_tokenizer.EncodeAsPieces(text)
translator_output = translator.translate_batch(
    input_tokens,
    batch_type='tokens',
    beam_size=2,
    max_input_length=0,
    max_decoding_length=256
)
output_tokens = [item.hypotheses[0] for item in translator_output]
translation = target_tokenizer.DecodePieces(output_tokens)

print('\n'.join(translation))
```

# Keywords:

Kurdish to English Translation, Samoan to English Translation, Xhosa Translation, Lao to English, Corsican Translation, Cebuano Translation, Galician to English Translation, Yiddish to English Translation, Swahili Translation, Yoruba to English Translation, Multilingual Machine Translation, NLP, Neural Networks, eLearning

# Contact:

If you have any questions, just email [email protected]
{"language": ["en", "co"], "license": "mit", "tags": ["translation", "ctranslate2"]}
task
[ "TRANSLATION" ]
45,487
ambekarsameer/distilbert-base-uncased-finetuned-cola
ambekarsameer
text-classification
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:glue", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-06-29T08:16:08Z
2022-06-29T08:26:13+00:00
94
0
---
datasets:
- glue
license: apache-2.0
metrics:
- matthews_correlation
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-uncased-finetuned-cola
  results:
  - task:
      type: text-classification
      name: Text Classification
    dataset:
      name: glue
      type: glue
      args: cola
    metrics:
    - type: matthews_correlation
      value: 0.5337700382788287
      name: Matthews Correlation
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-cola

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set:
- Loss: 0.8051
- Matthews Correlation: 0.5338

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5233 | 1.0 | 535 | 0.5324 | 0.4151 |
| 0.3489 | 2.0 | 1070 | 0.5132 | 0.4836 |
| 0.2392 | 3.0 | 1605 | 0.5852 | 0.5177 |
| 0.1822 | 4.0 | 2140 | 0.7485 | 0.5256 |
| 0.1382 | 5.0 | 2675 | 0.8051 | 0.5338 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
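The Matthews correlation reported above can be reproduced from raw predictions with scikit-learn; a minimal sketch with hypothetical labels (the `y_true`/`y_pred` values are illustrative, not from this run):

```python
# How the Matthews correlation metric above is computed (illustrative labels only).
from sklearn.metrics import matthews_corrcoef

y_true = [1, 1, 0, 1, 0, 0, 1, 0]  # hypothetical gold CoLA labels (1 = acceptable)
y_pred = [1, 0, 0, 1, 0, 1, 1, 0]  # hypothetical model predictions
print(matthews_corrcoef(y_true, y_pred))  # ranges from -1 to 1; 0 is chance level
```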
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-cola

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set:
- Loss: 0.8051
- Matthews Correlation: 0.5338

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.5233 | 1.0 | 535 | 0.5324 | 0.4151 |
| 0.3489 | 2.0 | 1070 | 0.5132 | 0.4836 |
| 0.2392 | 3.0 | 1605 | 0.5852 | 0.5177 |
| 0.1822 | 4.0 | 2140 | 0.7485 | 0.5256 |
| 0.1382 | 5.0 | 2675 | 0.8051 | 0.5338 |

### Framework versions

- Transformers 4.20.1
- Pytorch 1.11.0+cu113
- Datasets 2.3.2
- Tokenizers 0.12.1
{"datasets": ["glue"], "license": "apache-2.0", "metrics": ["matthews_correlation"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-cola", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "args": "cola"}, "metrics": [{"type": "matthews_correlation", "value": 0.5337700382788287, "name": "Matthews Correlation"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,488
tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF
tensorblock
text-generation
[ "gguf", "generated_from_trainer", "TensorBlock", "GGUF", "text-generation", "en", "dataset:euirim/goodwiki", "base_model:pszemraj/pythia-31m-goodwiki-deduped-2048-scratch", "base_model:quantized:pszemraj/pythia-31m-goodwiki-deduped-2048-scratch", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-11-22T15:35:18Z
2024-11-22T15:35:31+00:00
65
0
---
base_model: pszemraj/pythia-31m-goodwiki-deduped-2048-scratch
datasets:
- euirim/goodwiki
language:
- en
license: apache-2.0
metrics:
- accuracy
pipeline_tag: text-generation
tags:
- generated_from_trainer
- TensorBlock
- GGUF
inference:
  parameters:
    max_new_tokens: 64
    do_sample: true
    repetition_penalty: 1.1
    no_repeat_ngram_size: 5
    guidance_scale: 1.01
    eta_cutoff: 0.001
widget:
- text: My name is El Microondas the Wise and
  example_title: El Microondas
- text: A meme is
  example_title: meme
- text: Barack Obama nominated Hilary Clinton as his secretary of state on Monday. He chose her because she had
  example_title: Coreference resolution
- text: 'On a shelf, there are five books: a gray book, a red book, a purple book, a blue book, and a black book'
  example_title: Logic puzzles
- text: The two men running to become New York City's next mayor will face off in their first debate Wednesday night
  example_title: Reading comprehension
---

<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<div style="display: flex; justify-content: space-between; width: 100%;">
  <div style="display: flex; flex-direction: column; align-items: flex-start;">
    <p style="margin-top: 0.5em; margin-bottom: 0em;">
      Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
    </p>
  </div>
</div>

## pszemraj/pythia-31m-goodwiki-deduped-2048-scratch - GGUF

This repo contains GGUF format model files for [pszemraj/pythia-31m-goodwiki-deduped-2048-scratch](https://huggingface.co/pszemraj/pythia-31m-goodwiki-deduped-2048-scratch).

The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).

<div style="text-align: left; margin: 20px 0;">
  <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
    Run them on the TensorBlock client using your local machine ↗
  </a>
</div>

## Prompt template

```
```

## Model file specification

| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q2_K.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q2_K.gguf) | Q2_K | 0.017 GB | smallest, significant quality loss - not recommended for most purposes |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_S.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_S.gguf) | Q3_K_S | 0.019 GB | very small, high quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_M.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_M.gguf) | Q3_K_M | 0.019 GB | very small, high quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_L.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_L.gguf) | Q3_K_L | 0.019 GB | small, substantial quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q4_0.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q4_0.gguf) | Q4_0 | 0.021 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q4_K_S.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q4_K_S.gguf) | Q4_K_S | 0.021 GB | small, greater quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q4_K_M.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q4_K_M.gguf) | Q4_K_M | 0.021 GB | medium, balanced quality - recommended |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q5_0.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q5_0.gguf) | Q5_0 | 0.023 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q5_K_S.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q5_K_S.gguf) | Q5_K_S | 0.023 GB | large, low quality loss - recommended |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q5_K_M.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q5_K_M.gguf) | Q5_K_M | 0.023 GB | large, very low quality loss - recommended |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q6_K.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q6_K.gguf) | Q6_K | 0.025 GB | very large, extremely low quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q8_0.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q8_0.gguf) | Q8_0 | 0.032 GB | very large, extremely low quality loss - not recommended |

## Downloading instruction

### Command line

First, install the Hugging Face CLI:

```shell
pip install -U "huggingface_hub[cli]"
```

Then, download an individual model file to a local directory:

```shell
huggingface-cli download tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF --include "pythia-31m-goodwiki-deduped-2048-scratch-Q2_K.gguf" --local-dir MY_LOCAL_DIR
```

If you want to download multiple model files with a pattern (e.g., `*Q4_K*gguf`), you can try:

```shell
huggingface-cli download tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
```
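Once a file is downloaded, one way to run it locally is through the `llama-cpp-python` bindings (an assumption on my part; any llama.cpp build at or after the commit noted above should also work). A minimal sketch, reusing one of the widget prompts from this card:

```python
# Minimal local-inference sketch using llama-cpp-python (pip install llama-cpp-python).
from llama_cpp import Llama

# Path assumes the Q4_K_M file was downloaded to MY_LOCAL_DIR as shown above.
llm = Llama(model_path="MY_LOCAL_DIR/pythia-31m-goodwiki-deduped-2048-scratch-Q4_K_M.gguf")
out = llm("My name is El Microondas the Wise and", max_tokens=64)
print(out["choices"][0]["text"])
```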
null
Non_BioNLP
<div style="width: auto; margin-left: auto; margin-right: auto">
<img src="https://i.imgur.com/jC7kdl8.jpeg" alt="TensorBlock" style="width: 100%; min-width: 400px; display: block; margin: auto;">
</div>
<div style="display: flex; justify-content: space-between; width: 100%;">
  <div style="display: flex; flex-direction: column; align-items: flex-start;">
    <p style="margin-top: 0.5em; margin-bottom: 0em;">
      Feedback and support: TensorBlock's <a href="https://x.com/tensorblock_aoi">Twitter/X</a>, <a href="https://t.me/TensorBlock">Telegram Group</a> and <a href="https://x.com/tensorblock_aoi">Discord server</a>
    </p>
  </div>
</div>

## pszemraj/pythia-31m-goodwiki-deduped-2048-scratch - GGUF

This repo contains GGUF format model files for [pszemraj/pythia-31m-goodwiki-deduped-2048-scratch](https://huggingface.co/pszemraj/pythia-31m-goodwiki-deduped-2048-scratch).

The files were quantized using machines provided by [TensorBlock](https://tensorblock.co/), and they are compatible with llama.cpp as of [commit b4011](https://github.com/ggerganov/llama.cpp/commit/a6744e43e80f4be6398fc7733a01642c846dce1d).

<div style="text-align: left; margin: 20px 0;">
  <a href="https://tensorblock.co/waitlist/client" style="display: inline-block; padding: 10px 20px; background-color: #007bff; color: white; text-decoration: none; border-radius: 5px; font-weight: bold;">
    Run them on the TensorBlock client using your local machine ↗
  </a>
</div>

## Prompt template

```
```

## Model file specification

| Filename | Quant type | File Size | Description |
| -------- | ---------- | --------- | ----------- |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q2_K.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q2_K.gguf) | Q2_K | 0.017 GB | smallest, significant quality loss - not recommended for most purposes |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_S.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_S.gguf) | Q3_K_S | 0.019 GB | very small, high quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_M.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_M.gguf) | Q3_K_M | 0.019 GB | very small, high quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_L.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q3_K_L.gguf) | Q3_K_L | 0.019 GB | small, substantial quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q4_0.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q4_0.gguf) | Q4_0 | 0.021 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q4_K_S.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q4_K_S.gguf) | Q4_K_S | 0.021 GB | small, greater quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q4_K_M.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q4_K_M.gguf) | Q4_K_M | 0.021 GB | medium, balanced quality - recommended |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q5_0.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q5_0.gguf) | Q5_0 | 0.023 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q5_K_S.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q5_K_S.gguf) | Q5_K_S | 0.023 GB | large, low quality loss - recommended |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q5_K_M.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q5_K_M.gguf) | Q5_K_M | 0.023 GB | large, very low quality loss - recommended |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q6_K.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q6_K.gguf) | Q6_K | 0.025 GB | very large, extremely low quality loss |
| [pythia-31m-goodwiki-deduped-2048-scratch-Q8_0.gguf](https://huggingface.co/tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF/blob/main/pythia-31m-goodwiki-deduped-2048-scratch-Q8_0.gguf) | Q8_0 | 0.032 GB | very large, extremely low quality loss - not recommended |

## Downloading instruction

### Command line

First, install the Hugging Face CLI:

```shell
pip install -U "huggingface_hub[cli]"
```

Then, download an individual model file to a local directory:

```shell
huggingface-cli download tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF --include "pythia-31m-goodwiki-deduped-2048-scratch-Q2_K.gguf" --local-dir MY_LOCAL_DIR
```

If you want to download multiple model files with a pattern (e.g., `*Q4_K*gguf`), you can try:

```shell
huggingface-cli download tensorblock/pythia-31m-goodwiki-deduped-2048-scratch-GGUF --local-dir MY_LOCAL_DIR --local-dir-use-symlinks False --include='*Q4_K*gguf'
```
{"base_model": "pszemraj/pythia-31m-goodwiki-deduped-2048-scratch", "datasets": ["euirim/goodwiki"], "language": ["en"], "license": "apache-2.0", "metrics": ["accuracy"], "pipeline_tag": "text-generation", "tags": ["generated_from_trainer", "TensorBlock", "GGUF"], "inference": {"parameters": {"max_new_tokens": 64, "do_sample": true, "repetition_penalty": 1.1, "no_repeat_ngram_size": 5, "guidance_scale": 1.01, "eta_cutoff": 0.001}}, "widget": [{"text": "My name is El Microondas the Wise and", "example_title": "El Microondas"}, {"text": "A meme is", "example_title": "meme"}, {"text": "Barack Obama nominated Hilary Clinton as his secretary of state on Monday. He chose her because she had", "example_title": "Coreference resolution"}, {"text": "On a shelf, there are five books: a gray book, a red book, a purple book, a blue book, and a black book", "example_title": "Logic puzzles"}, {"text": "The two men running to become New York City's next mayor will face off in their first debate Wednesday night", "example_title": "Reading comprehension"}]}
task
[ "COREFERENCE_RESOLUTION" ]
45,489
vogelweide85/my-awesome-setfit-model
vogelweide85
text-classification
[ "sentence-transformers", "pytorch", "mpnet", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-07-28T14:17:32Z
2023-07-28T14:18:20+00:00
13
0
---
license: apache-2.0
pipeline_tag: text-classification
tags:
- setfit
- sentence-transformers
- text-classification
---

# vogelweide85/my-awesome-setfit-model

This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:

1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
2. Training a classification head with features from the fine-tuned Sentence Transformer.

## Usage

To use this model for inference, first install the SetFit library:

```bash
python -m pip install setfit
```

You can then run inference as follows:

```python
from setfit import SetFitModel

# Download from Hub and run inference
model = SetFitModel.from_pretrained("vogelweide85/my-awesome-setfit-model")
# Run inference
preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"])
```

## BibTeX entry and citation info

```bibtex
@article{https://doi.org/10.48550/arxiv.2209.11055,
  doi = {10.48550/ARXIV.2209.11055},
  url = {https://arxiv.org/abs/2209.11055},
  author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
  keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
  title = {Efficient Few-Shot Learning Without Prompts},
  publisher = {arXiv},
  year = {2022},
  copyright = {Creative Commons Attribution 4.0 International}
}
```
null
Non_BioNLP
# vogelweide85/my-awesome-setfit-model

This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves:

1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning.
2. Training a classification head with features from the fine-tuned Sentence Transformer.

## Usage

To use this model for inference, first install the SetFit library:

```bash
python -m pip install setfit
```

You can then run inference as follows:

```python
from setfit import SetFitModel

# Download from Hub and run inference
model = SetFitModel.from_pretrained("vogelweide85/my-awesome-setfit-model")
# Run inference
preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"])
```

## BibTeX entry and citation info

```bibtex
@article{https://doi.org/10.48550/arxiv.2209.11055,
  doi = {10.48550/ARXIV.2209.11055},
  url = {https://arxiv.org/abs/2209.11055},
  author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren},
  keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences},
  title = {Efficient Few-Shot Learning Without Prompts},
  publisher = {arXiv},
  year = {2022},
  copyright = {Creative Commons Attribution 4.0 International}
}
```
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
45,490
AnthonyTedde/distilbert-base-uncased-finetuned-cola
AnthonyTedde
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "dataset:glue", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-15T16:40:34Z
2023-11-15T17:16:53+00:00
108
0
---
base_model: distilbert-base-uncased
datasets:
- glue
license: apache-2.0
metrics:
- matthews_correlation
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-uncased-finetuned-cola
  results:
  - task:
      type: text-classification
      name: Text Classification
    dataset:
      name: glue
      type: glue
      config: cola
      split: validation
      args: cola
    metrics:
    - type: matthews_correlation
      value: 0.541934635424655
      name: Matthews Correlation
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-cola

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set:
- Loss: 0.8591
- Matthews Correlation: 0.5419

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.523 | 1.0 | 535 | 0.4667 | 0.4554 |
| 0.3489 | 2.0 | 1070 | 0.4992 | 0.4971 |
| 0.2266 | 3.0 | 1605 | 0.6326 | 0.5066 |
| 0.1729 | 4.0 | 2140 | 0.7701 | 0.5339 |
| 0.1253 | 5.0 | 2675 | 0.8591 | 0.5419 |

### Framework versions

- Transformers 4.35.1
- Pytorch 1.13.1+cu117
- Datasets 2.13.1
- Tokenizers 0.14.1
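For reference, the hyperparameters listed above map directly onto the standard `transformers` `TrainingArguments`; a minimal sketch (the `output_dir` value is hypothetical, and the Adam betas/epsilon in the card are the library defaults):

```python
# Equivalent TrainingArguments for the hyperparameters listed in this card.
from transformers import TrainingArguments

args = TrainingArguments(
    output_dir="distilbert-base-uncased-finetuned-cola",  # hypothetical
    learning_rate=2e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    lr_scheduler_type="linear",
    num_train_epochs=5,
    # Adam betas=(0.9, 0.999) and epsilon=1e-08 are the defaults.
)
```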
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-cola

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set:
- Loss: 0.8591
- Matthews Correlation: 0.5419

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|
| 0.523 | 1.0 | 535 | 0.4667 | 0.4554 |
| 0.3489 | 2.0 | 1070 | 0.4992 | 0.4971 |
| 0.2266 | 3.0 | 1605 | 0.6326 | 0.5066 |
| 0.1729 | 4.0 | 2140 | 0.7701 | 0.5339 |
| 0.1253 | 5.0 | 2675 | 0.8591 | 0.5419 |

### Framework versions

- Transformers 4.35.1
- Pytorch 1.13.1+cu117
- Datasets 2.13.1
- Tokenizers 0.14.1
{"base_model": "distilbert-base-uncased", "datasets": ["glue"], "license": "apache-2.0", "metrics": ["matthews_correlation"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-cola", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "config": "cola", "split": "validation", "args": "cola"}, "metrics": [{"type": "matthews_correlation", "value": 0.541934635424655, "name": "Matthews Correlation"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,491
prithivida/parrot_paraphraser_on_T5
prithivida
text2text-generation
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:05Z
2021-05-18T07:53:27+00:00
911,742
150
---
{}
---

# Parrot

## 1. What is Parrot?

Parrot is a paraphrase-based utterance augmentation framework purpose-built to accelerate training NLU models. A paraphrase framework is more than just a paraphrasing model. For more details on the library and usage, please refer to the [github page](https://github.com/PrithivirajDamodaran/Parrot)

### Installation

```bash
pip install git+https://github.com/PrithivirajDamodaran/Parrot_Paraphraser.git
```

### Quickstart

```python
from parrot import Parrot
import torch
import warnings
warnings.filterwarnings("ignore")

'''
uncomment to get reproducible paraphrase generations
def random_state(seed):
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

random_state(1234)
'''

# Init models (make sure you init ONLY once if you integrate this to your code)
parrot = Parrot(model_tag="prithivida/parrot_paraphraser_on_T5", use_gpu=False)

phrases = ["Can you recommed some upscale restaurants in Newyork?",
           "What are the famous places we should not miss in Russia?"]

for phrase in phrases:
    print("-" * 100)
    print("Input_phrase: ", phrase)
    print("-" * 100)
    para_phrases = parrot.augment(input_phrase=phrase)
    for para_phrase in para_phrases:
        print(para_phrase)
```

```
----------------------------------------------------------------------
Input_phrase: Can you recommed some upscale restaurants in Newyork?
----------------------------------------------------------------------
list some excellent restaurants to visit in new york city?
what upscale restaurants do you recommend in new york?
i want to try some upscale restaurants in new york?
recommend some upscale restaurants in newyork?
can you recommend some high end restaurants in newyork?
can you recommend some upscale restaurants in new york?
can you recommend some upscale restaurants in newyork?
----------------------------------------------------------------------
Input_phrase: What are the famous places we should not miss in Russia
----------------------------------------------------------------------
what should we not miss when visiting russia?
recommend some of the best places to visit in russia?
list some of the best places to visit in russia?
can you list the top places to visit in russia?
show the places that we should not miss in russia?
list some famous places which we should not miss in russia?
```

### Knobs

```python
para_phrases = parrot.augment(input_phrase=phrase,
                              diversity_ranker="levenshtein",
                              do_diverse=False,
                              max_return_phrases=10,
                              max_length=32,
                              adequacy_threshold=0.99,
                              fluency_threshold=0.90)
```

## 2. Why Parrot?

**Huggingface** lists [12 paraphrase models,](https://huggingface.co/models?pipeline_tag=text2text-generation&search=paraphrase) **RapidAPI** lists 7 freemium and commercial paraphrasers like [QuillBot](https://rapidapi.com/search/paraphrase?section=apis&page=1), Rasa has discussed an experimental paraphraser for augmenting text data [here](https://forum.rasa.com/t/paraphrasing-for-nlu-data-augmentation-experimental/27744), Sentence-transformers offers a [paraphrase mining utility](https://www.sbert.net/examples/applications/paraphrase-mining/README.html), and [NLPAug](https://github.com/makcedward/nlpaug) offers word-level augmentation with a [PPDB](http://paraphrase.org/#/download) (a multi-million paraphrase database). While these attempts at paraphrasing are great, there are still some gaps, and paraphrasing is NOT yet a mainstream option for text augmentation in building NLU models... Parrot is a humble attempt to fill some of these gaps.

**What is a good paraphrase?** Almost all conditioned text generation models are validated on 2 factors: (1) whether the generated text conveys the same meaning as the original context (Adequacy), and (2) whether the text is fluent / grammatically correct English (Fluency). For instance, Neural Machine Translation outputs are tested for Adequacy and Fluency. But [a good paraphrase](https://www.aclweb.org/anthology/D10-1090.pdf) should be adequate and fluent while being as different as possible on the surface lexical form. With respect to this definition, the **3 key metrics** that measure the quality of paraphrases are:

- **Adequacy** (Is the meaning preserved adequately?)
- **Fluency** (Is the paraphrase fluent English?)
- **Diversity (Lexical / Phrasal / Syntactical)** (How much has the paraphrase changed the original sentence?)

*Parrot offers knobs to control Adequacy, Fluency and Diversity as per your needs.*

**What makes a paraphraser a good augmentor?** For training an NLU model, we don't just need a lot of utterances; we need utterances with intents and slots/entities annotated. A typical flow would be:

- Given an **input utterance + input annotations**, a good augmentor spits out N **output paraphrases** while preserving the intent and slots.
- The output paraphrases are then converted into annotated data using the input annotations that we got in step 1.
- The annotated data created out of the output paraphrases then makes the training dataset for your NLU model.

But in general, being generative models, paraphrasers don't guarantee preservation of slots/entities. So the ability to generate high-quality paraphrases in a constrained fashion, without trading off the intents and slots for lexical dissimilarity, makes a paraphraser a good augmentor (a minimal slot-preservation sketch follows after section 3). *More on this in section 3 below*

## 3. Scope

In the space of conversational engines, knowledge bots are the ones to which **we ask questions** like *"when was the Berlin wall torn down?"*, transactional bots are the ones to which **we give commands** like *"Turn on the music please"*, and voice assistants are the ones which can both answer questions and act on our commands. Parrot mainly focuses on augmenting texts typed into, or spoken to, conversational interfaces for building robust NLU models. (*People usually neither type nor yell long paragraphs at conversational interfaces. Hence the pre-trained model is trained on text samples of a maximum length of 32.*)

*While Parrot predominantly aims to be a text augmentor for building good NLU models, it can also be used as a pure-play paraphraser.*
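As referenced in section 2 above, here is a minimal sketch of a slot-preserving filter on top of `parrot.augment`. The `keep_if_slots_preserved` helper, the sample utterance, and the slot values are all hypothetical, and the filter is a naive substring check rather than anything provided by the Parrot library itself:

```python
from parrot import Parrot

parrot = Parrot(model_tag="prithivida/parrot_paraphraser_on_T5", use_gpu=False)

def keep_if_slots_preserved(paraphrases, slot_values):
    """Hypothetical helper: keep only paraphrases that still contain every slot value."""
    kept = []
    for p in paraphrases:
        # augment() may yield plain strings or (text, score) pairs depending on version.
        text = p[0] if isinstance(p, tuple) else p
        if all(value.lower() in text.lower() for value in slot_values):
            kept.append(text)
    return kept

utterance = "Book a table in Paris for two people"   # hypothetical annotated input
slots = ["Paris", "two"]                              # hypothetical slot values
candidates = parrot.augment(input_phrase=utterance)
for text in keep_if_slots_preserved(candidates or [], slots):
    print(text)
```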
null
Non_BioNLP
# Parrot

## 1. What is Parrot?

Parrot is a paraphrase-based utterance augmentation framework purpose-built to accelerate training NLU models. A paraphrase framework is more than just a paraphrasing model. For more details on the library and usage, please refer to the [github page](https://github.com/PrithivirajDamodaran/Parrot)

### Installation

```bash
pip install git+https://github.com/PrithivirajDamodaran/Parrot_Paraphraser.git
```

### Quickstart

```python
from parrot import Parrot
import torch
import warnings
warnings.filterwarnings("ignore")

'''
uncomment to get reproducible paraphrase generations
def random_state(seed):
    torch.manual_seed(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)

random_state(1234)
'''

# Init models (make sure you init ONLY once if you integrate this to your code)
parrot = Parrot(model_tag="prithivida/parrot_paraphraser_on_T5", use_gpu=False)

phrases = ["Can you recommed some upscale restaurants in Newyork?",
           "What are the famous places we should not miss in Russia?"]

for phrase in phrases:
    print("-" * 100)
    print("Input_phrase: ", phrase)
    print("-" * 100)
    para_phrases = parrot.augment(input_phrase=phrase)
    for para_phrase in para_phrases:
        print(para_phrase)
```

```
----------------------------------------------------------------------
Input_phrase: Can you recommed some upscale restaurants in Newyork?
----------------------------------------------------------------------
list some excellent restaurants to visit in new york city?
what upscale restaurants do you recommend in new york?
i want to try some upscale restaurants in new york?
recommend some upscale restaurants in newyork?
can you recommend some high end restaurants in newyork?
can you recommend some upscale restaurants in new york?
can you recommend some upscale restaurants in newyork?
----------------------------------------------------------------------
Input_phrase: What are the famous places we should not miss in Russia
----------------------------------------------------------------------
what should we not miss when visiting russia?
recommend some of the best places to visit in russia?
list some of the best places to visit in russia?
can you list the top places to visit in russia?
show the places that we should not miss in russia?
list some famous places which we should not miss in russia?
```

### Knobs

```python
para_phrases = parrot.augment(input_phrase=phrase,
                              diversity_ranker="levenshtein",
                              do_diverse=False,
                              max_return_phrases=10,
                              max_length=32,
                              adequacy_threshold=0.99,
                              fluency_threshold=0.90)
```

## 2. Why Parrot?

**Huggingface** lists [12 paraphrase models,](https://huggingface.co/models?pipeline_tag=text2text-generation&search=paraphrase) **RapidAPI** lists 7 freemium and commercial paraphrasers like [QuillBot](https://rapidapi.com/search/paraphrase?section=apis&page=1), Rasa has discussed an experimental paraphraser for augmenting text data [here](https://forum.rasa.com/t/paraphrasing-for-nlu-data-augmentation-experimental/27744), Sentence-transformers offers a [paraphrase mining utility](https://www.sbert.net/examples/applications/paraphrase-mining/README.html), and [NLPAug](https://github.com/makcedward/nlpaug) offers word-level augmentation with a [PPDB](http://paraphrase.org/#/download) (a multi-million paraphrase database). While these attempts at paraphrasing are great, there are still some gaps, and paraphrasing is NOT yet a mainstream option for text augmentation in building NLU models... Parrot is a humble attempt to fill some of these gaps.

**What is a good paraphrase?** Almost all conditioned text generation models are validated on 2 factors: (1) whether the generated text conveys the same meaning as the original context (Adequacy), and (2) whether the text is fluent / grammatically correct English (Fluency). For instance, Neural Machine Translation outputs are tested for Adequacy and Fluency. But [a good paraphrase](https://www.aclweb.org/anthology/D10-1090.pdf) should be adequate and fluent while being as different as possible on the surface lexical form. With respect to this definition, the **3 key metrics** that measure the quality of paraphrases are:

- **Adequacy** (Is the meaning preserved adequately?)
- **Fluency** (Is the paraphrase fluent English?)
- **Diversity (Lexical / Phrasal / Syntactical)** (How much has the paraphrase changed the original sentence?)

*Parrot offers knobs to control Adequacy, Fluency and Diversity as per your needs.*

**What makes a paraphraser a good augmentor?** For training an NLU model, we don't just need a lot of utterances; we need utterances with intents and slots/entities annotated. A typical flow would be:

- Given an **input utterance + input annotations**, a good augmentor spits out N **output paraphrases** while preserving the intent and slots.
- The output paraphrases are then converted into annotated data using the input annotations that we got in step 1.
- The annotated data created out of the output paraphrases then makes the training dataset for your NLU model.

But in general, being generative models, paraphrasers don't guarantee preservation of slots/entities. So the ability to generate high-quality paraphrases in a constrained fashion, without trading off the intents and slots for lexical dissimilarity, makes a paraphraser a good augmentor. *More on this in section 3 below*

## 3. Scope

In the space of conversational engines, knowledge bots are the ones to which **we ask questions** like *"when was the Berlin wall torn down?"*, transactional bots are the ones to which **we give commands** like *"Turn on the music please"*, and voice assistants are the ones which can both answer questions and act on our commands. Parrot mainly focuses on augmenting texts typed into, or spoken to, conversational interfaces for building robust NLU models. (*People usually neither type nor yell long paragraphs at conversational interfaces. Hence the pre-trained model is trained on text samples of a maximum length of 32.*)

*While Parrot predominantly aims to be a text augmentor for building good NLU models, it can also be used as a pure-play paraphraser.*
{}
task
[ "TRANSLATION", "PARAPHRASING" ]
45,492
Corran/CCRO2
Corran
text-classification
[ "setfit", "safetensors", "bert", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "custom_code", "arxiv:2209.11055", "base_model:jinaai/jina-embeddings-v2-base-en", "base_model:finetune:jinaai/jina-embeddings-v2-base-en", "model-index", "region:us" ]
2024-01-16T19:32:52Z
2024-01-16T19:44:15+00:00
5
1
--- base_model: jinaai/jina-embeddings-v2-base-en library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: considering the use of so-called “fractional citations” in which one divides the number of citations associated with a given paper by the number of authors on that paper [33–38]; - text: Indeed, this is only one of a number of such practical inconsistencies inherent in the traditional h-index; other similar inconsistencies are discussed in Refs. [3, 4]. - text: One of the referees recommends mentioning Quesada (2008) as another characterization of the Hirsch index relying as well on monotonicity. - text: considering the use of so-called “fractional citations” in which one divides the number of citations associated with a given paper by the number of authors on that paper [33–38]; - text: increasing the weighting of very highly-cited papers, either through the introduction of intrinsic weighting factors or the development of entirely new indices which mix the h-index with other more traditional indices (such as total citation count) [3, 4, 7, 8, 26–32]; inference: true model-index: - name: SetFit with jinaai/jina-embeddings-v2-base-en results: - task: type: text-classification name: Text Classification dataset: name: Unknown type: unknown split: test metrics: - type: accuracy value: 0.6666666666666666 name: Accuracy --- # SetFit with jinaai/jina-embeddings-v2-base-en This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [jinaai/jina-embeddings-v2-base-en](https://huggingface.co/jinaai/jina-embeddings-v2-base-en) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. 
## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [jinaai/jina-embeddings-v2-base-en](https://huggingface.co/jinaai/jina-embeddings-v2-base-en) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 8192 tokens - **Number of Classes:** 9 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:-----------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | ccro:BasedOn | <ul><li>'The axiomatizations presented in Quesada (2010, 2011) also dispense with strong monotonicity.'</li></ul> | | ccro:Basedon | <ul><li>'A formal mathematical description of the h-index introduced by Hirsch (2005)'</li><li>'Woeginger (2008a, b) and Quesada (2009, 2010) have already suggested characterizations of the Hirsch index'</li><li>'Woeginger (2008a, b) and Quesada (2009, 2010) have already suggested characterizations of the Hirsch index'</li></ul> | | ccro:Compare | <ul><li>'Instead, a variety of studies [8, 9] have shown that the h index by and large agrees with other objective and subjective measures of scientific quality in a variety of different disciplines (10–15),'</li><li>'Instead, a variety of studies [8, 9] have shown that the h index by and large agrees with other objective and subjective measures of scientific quality in a variety of different disciplines (10–15),'</li><li>'Instead, a variety of studies [8, 9] have shown that the h index by and large agrees with other objective and subjective measures of scientific quality in a variety of different disciplines (10–15),'</li></ul> | | ccro:Contrast | <ul><li>'Hirsch (2005) argues that two individuals with similar Hirsch-index are comparable in terms of their overall scientific impact, even if their total number of papers or their total number of citations is very different.'</li><li>'The three differ from Woeginger’s (2008a) characterization in requiring fewer axioms (three instead of five)'</li><li>'Marchant (2009), instead of characterizing the index itself, characterizes the ranking that the Hirsch index induces on outputs.'</li></ul> | | ccro:Criticize | <ul><li>'The h-index does not take into account that some papers may have extraordinarily many citations, and the g-index tries to compensate for this; see also Egghe (2006b) and Tol (2008).'</li><li>'The h-index does not take into 
account that some papers may have extraordinarily many citations, and the g-index tries to compensate for this; see also Egghe (2006b) and Tol (2008).'</li><li>'Woeginger (2008a, p. 227) stresses that his axioms should be interpreted within the context of MON.'</li></ul> | | ccro:Discuss | <ul><li>'The relation between N and h will depend on the detailed form of the particular distribution (HI0501-01)'</li><li>'As discussed by Redner (HI0501-03), most papers earn their citations over a limited period of popularity and then they are no longer cited.'</li><li>'It is also possible that papers "drop out" and then later come back into the h count, as would occur for the kind of papers termed "sleeping beauties" (HI0501-04).'</li></ul> | | ccro:Extend | <ul><li>'In [3] the analogous formula for the g-index has been proved'</li></ul> | | ccro:Incorporate | <ul><li>'In this paper, we provide an axiomatic characterization of the Hirsch-index, in very much the same spirit as Arrow (1950, 1951), May (1952), and Moulin (1988) did for numerous other problems in mathematical decision making.'</li><li>'In this paper, we provide an axiomatic characterization of the Hirsch-index, in very much the same spirit as Arrow (1950, 1951), May (1952), and Moulin (1988) did for numerous other problems in mathematical decision making.'</li><li>'In this paper, we provide an axiomatic characterization of the Hirsch-index, in very much the same spirit as Arrow (1950, 1951), May (1952), and Moulin (1988) did for numerous other problems in mathematical decision making.'</li></ul> | | ccro:Negate | <ul><li>'Recently, Lehmann et al. (2, 3) have argued that the mean number of citations per paper (nc = Nc/Np) is a superior indicator.'</li><li>'If one chose instead to use as indicator of scientific achievement the mean number of citations per paper [following Lehmann et al. (2, 3)], our results suggest that (as in the stock market) ‘‘past performance is not predictive of future performance.’’'</li><li>'It has been argued in the literature that one drawback of the h index is that it does not give enough ‘‘credit’’ to very highly cited papers, and various modifications have been proposed to correct this, in particular, Egghe’s g index (4), Jin et al.’s AR index (5), and Komulski’s H(2) index (6).'</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.6667 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("Corran/CCRO2") # Run inference preds = model("One of the referees recommends mentioning Quesada (2008) as another characterization of the Hirsch index relying as well on monotonicity.") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:--------|:----| | Word count | 6 | 25.7812 | 53 | | Label | Training Sample Count | |:-----------------|:----------------------| | ccro:BasedOn | 1 | | ccro:Basedon | 11 | | ccro:Compare | 21 | | ccro:Contrast | 3 | | ccro:Criticize | 4 | | ccro:Discuss | 37 | | ccro:Extend | 1 | | ccro:Incorporate | 14 | | ccro:Negate | 4 | ### Training Hyperparameters - batch_size: (32, 32) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 100 - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0017 | 1 | 0.311 | - | | 0.0833 | 50 | 0.1338 | - | | 0.1667 | 100 | 0.0054 | - | | 0.25 | 150 | 0.0017 | - | | 0.3333 | 200 | 0.0065 | - | | 0.4167 | 250 | 0.0003 | - | | 0.5 | 300 | 0.0003 | - | | 0.5833 | 350 | 0.0005 | - | | 0.6667 | 400 | 0.0004 | - | | 0.75 | 450 | 0.0002 | - | | 0.8333 | 500 | 0.0002 | - | | 0.9167 | 550 | 0.0002 | - | | 1.0 | 600 | 0.0002 | - | ### Framework Versions - Python: 3.10.12 - SetFit: 1.0.3 - Sentence Transformers: 2.2.2 - Transformers: 4.35.2 - PyTorch: 2.1.0+cu121 - Datasets: 2.16.1 - Tokenizers: 0.15.0 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
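As a sketch of how the accuracy reported in the Evaluation section above could be reproduced on a labeled test split, here is a minimal example; the `test_texts`/`test_labels` lists are hypothetical placeholders, not the actual evaluation split:

```python
from setfit import SetFitModel

model = SetFitModel.from_pretrained("Corran/CCRO2")

# Hypothetical test split; substitute your own labeled sentences.
test_texts = ["In [3] the analogous formula for the g-index has been proved"]
test_labels = ["ccro:Extend"]

preds = model.predict(test_texts)
# Predictions are compared as strings; adjust to your label encoding if needed.
accuracy = sum(str(p) == y for p, y in zip(preds, test_labels)) / len(test_labels)
print(f"accuracy: {accuracy:.4f}")
```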
null
Non_BioNLP
# SetFit with jinaai/jina-embeddings-v2-base-en This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [jinaai/jina-embeddings-v2-base-en](https://huggingface.co/jinaai/jina-embeddings-v2-base-en) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [jinaai/jina-embeddings-v2-base-en](https://huggingface.co/jinaai/jina-embeddings-v2-base-en) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 8192 tokens - **Number of Classes:** 9 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:-----------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | ccro:BasedOn | <ul><li>'The axiomatizations presented in Quesada (2010, 2011) also dispense with strong monotonicity.'</li></ul> | | ccro:Basedon | <ul><li>'A formal mathematical description of the h-index introduced by Hirsch (2005)'</li><li>'Woeginger (2008a, b) and Quesada (2009, 2010) have already suggested characterizations of the Hirsch index'</li><li>'Woeginger (2008a, b) and Quesada (2009, 2010) have already suggested characterizations of the Hirsch index'</li></ul> | | ccro:Compare | <ul><li>'Instead, a variety of studies [8, 9] have shown that the h index by and large agrees with other objective and subjective measures of scientific quality in a variety of different disciplines (10–15),'</li><li>'Instead, a variety of studies [8, 9] have shown that the h index by and large agrees with other objective and subjective measures of scientific quality in a variety of different disciplines (10–15),'</li><li>'Instead, a variety of studies [8, 9] have shown that the h index by and large agrees with other objective and subjective measures of scientific quality in a variety of different disciplines (10–15),'</li></ul> | | ccro:Contrast | 
<ul><li>'Hirsch (2005) argues that two individuals with similar Hirsch-index are comparable in terms of their overall scientific impact, even if their total number of papers or their total number of citations is very different.'</li><li>'The three differ from Woeginger’s (2008a) characterization in requiring fewer axioms (three instead of five)'</li><li>'Marchant (2009), instead of characterizing the index itself, characterizes the ranking that the Hirsch index induces on outputs.'</li></ul> | | ccro:Criticize | <ul><li>'The h-index does not take into account that some papers may have extraordinarily many citations, and the g-index tries to compensate for this; see also Egghe (2006b) and Tol (2008).'</li><li>'The h-index does not take into account that some papers may have extraordinarily many citations, and the g-index tries to compensate for this; see also Egghe (2006b) and Tol (2008).'</li><li>'Woeginger (2008a, p. 227) stresses that his axioms should be interpreted within the context of MON.'</li></ul> | | ccro:Discuss | <ul><li>'The relation between N and h will depend on the detailed form of the particular distribution (HI0501-01)'</li><li>'As discussed by Redner (HI0501-03), most papers earn their citations over a limited period of popularity and then they are no longer cited.'</li><li>'It is also possible that papers "drop out" and then later come back into the h count, as would occur for the kind of papers termed "sleeping beauties" (HI0501-04).'</li></ul> | | ccro:Extend | <ul><li>'In [3] the analogous formula for the g-index has been proved'</li></ul> | | ccro:Incorporate | <ul><li>'In this paper, we provide an axiomatic characterization of the Hirsch-index, in very much the same spirit as Arrow (1950, 1951), May (1952), and Moulin (1988) did for numerous other problems in mathematical decision making.'</li><li>'In this paper, we provide an axiomatic characterization of the Hirsch-index, in very much the same spirit as Arrow (1950, 1951), May (1952), and Moulin (1988) did for numerous other problems in mathematical decision making.'</li><li>'In this paper, we provide an axiomatic characterization of the Hirsch-index, in very much the same spirit as Arrow (1950, 1951), May (1952), and Moulin (1988) did for numerous other problems in mathematical decision making.'</li></ul> | | ccro:Negate | <ul><li>'Recently, Lehmann et al. (2, 3) have argued that the mean number of citations per paper (nc = Nc/Np) is a superior indicator.'</li><li>'If one chose instead to use as indicator of scientific achievement the mean number of citations per paper [following Lehmann et al. (2, 3)], our results suggest that (as in the stock market) ‘‘past performance is not predictive of future performance.’’'</li><li>'It has been argued in the literature that one drawback of the h index is that it does not give enough ‘‘credit’’ to very highly cited papers, and various modifications have been proposed to correct this, in particular, Egghe’s g index (4), Jin et al.’s AR index (5), and Komulski’s H(2) index (6).'</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.6667 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. 
```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("Corran/CCRO2") # Run inference preds = model("One of the referees recommends mentioning Quesada (2008) as another characterization of the Hirsch index relying as well on monotonicity.") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:--------|:----| | Word count | 6 | 25.7812 | 53 | | Label | Training Sample Count | |:-----------------|:----------------------| | ccro:BasedOn | 1 | | ccro:Basedon | 11 | | ccro:Compare | 21 | | ccro:Contrast | 3 | | ccro:Criticize | 4 | | ccro:Discuss | 37 | | ccro:Extend | 1 | | ccro:Incorporate | 14 | | ccro:Negate | 4 | ### Training Hyperparameters - batch_size: (32, 32) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 100 - body_learning_rate: (2e-05, 1e-05) - head_learning_rate: 0.01 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0017 | 1 | 0.311 | - | | 0.0833 | 50 | 0.1338 | - | | 0.1667 | 100 | 0.0054 | - | | 0.25 | 150 | 0.0017 | - | | 0.3333 | 200 | 0.0065 | - | | 0.4167 | 250 | 0.0003 | - | | 0.5 | 300 | 0.0003 | - | | 0.5833 | 350 | 0.0005 | - | | 0.6667 | 400 | 0.0004 | - | | 0.75 | 450 | 0.0002 | - | | 0.8333 | 500 | 0.0002 | - | | 0.9167 | 550 | 0.0002 | - | | 1.0 | 600 | 0.0002 | - | ### Framework Versions - Python: 3.10.12 - SetFit: 1.0.3 - Sentence Transformers: 2.2.2 - Transformers: 4.35.2 - PyTorch: 2.1.0+cu121 - Datasets: 2.16.1 - Tokenizers: 0.15.0 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
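As a supplement to the inference snippet above, here is a minimal sketch of inspecting per-class scores. It assumes `predict_proba` and the `labels` attribute behave as in recent SetFit releases (both are assumptions, not details from the original card):

```python
from setfit import SetFitModel

# Load the checkpoint from the 🤗 Hub, as in the inference example above.
model = SetFitModel.from_pretrained("Corran/CCRO2")

texts = [
    "One of the referees recommends mentioning Quesada (2008) as another characterization of the Hirsch index relying as well on monotonicity.",
    "In [3] the analogous formula for the g-index has been proved",
]

# Hard label per input, e.g. "ccro:BasedOn".
print(model.predict(texts))

# Per-class probabilities from the LogisticRegression head; pairing them with
# model.labels assumes that attribute is populated for this checkpoint.
for text, row in zip(texts, model.predict_proba(texts)):
    scores = {label: float(p) for label, p in zip(model.labels, row)}
    print(text[:60], scores)
```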
{"base_model": "jinaai/jina-embeddings-v2-base-en", "library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [{"text": "considering the use of so-called “fractional citations” in which one divides the number of citations associated with a given paper by the number of authors on that paper [33–38];"}, {"text": "Indeed, this is only one of a number of such practical inconsistencies inherent in the traditional h-index; other similar inconsistencies are discussed in Refs. [3, 4]."}, {"text": "One of the referees recommends mentioning Quesada (2008) as another characterization of the Hirsch index relying as well on monotonicity."}, {"text": "considering the use of so-called “fractional citations” in which one divides the number of citations associated with a given paper by the number of authors on that paper [33–38];"}, {"text": "increasing the weighting of very highly-cited papers, either through the introduction of intrinsic weighting factors or the development of entirely new indices which mix the h-index with other more traditional indices (such as total citation count) [3, 4, 7, 8, 26–32];"}], "inference": true, "model-index": [{"name": "SetFit with jinaai/jina-embeddings-v2-base-en", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "Unknown", "type": "unknown", "split": "test"}, "metrics": [{"type": "accuracy", "value": 0.6666666666666666, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,493
indukurs/base-model
indukurs
text-classification
[ "transformers", "pytorch", "tensorboard", "safetensors", "bert", "text-classification", "generated_from_trainer", "dataset:imdb", "base_model:google-bert/bert-base-uncased", "base_model:finetune:google-bert/bert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-31T03:27:33Z
2023-11-20T18:44:24+00:00
113
0
--- base_model: bert-base-uncased datasets: - imdb license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: base-model results: - task: type: text-classification name: Text Classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - type: accuracy value: 0.9 name: Accuracy - type: f1 value: 0.9 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # base-model This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.2862 - Accuracy: 0.9 - F1: 0.9 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.36.0.dev0 - Pytorch 2.1.0+cu118 - Datasets 2.15.0 - Tokenizers 0.15.0
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # base-model This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.2862 - Accuracy: 0.9 - F1: 0.9 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results ### Framework versions - Transformers 4.36.0.dev0 - Pytorch 2.1.0+cu118 - Datasets 2.15.0 - Tokenizers 0.15.0
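The auto-generated card above lacks a usage snippet; below is a minimal inference sketch with the `transformers` pipeline API, assuming the `indukurs/base-model` checkpoint loads as published (the label names, e.g. `LABEL_0`/`LABEL_1`, depend on the checkpoint's config):

```python
from transformers import pipeline

# Load the fine-tuned BERT checkpoint straight from the Hub.
classifier = pipeline("text-classification", model="indukurs/base-model")

# The model was fine-tuned on IMDB, so inputs are movie reviews.
print(classifier("A gripping story with outstanding performances."))
print(classifier("Two hours of my life I will never get back."))
```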
{"base_model": "bert-base-uncased", "datasets": ["imdb"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "base-model", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "imdb", "type": "imdb", "config": "plain_text", "split": "test", "args": "plain_text"}, "metrics": [{"type": "accuracy", "value": 0.9, "name": "Accuracy"}, {"type": "f1", "value": 0.9, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,495
gaudi/opus-mt-iir-en-ctranslate2
gaudi
translation
[ "transformers", "marian", "ctranslate2", "translation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-07-17T00:11:23Z
2024-10-18T22:15:53+00:00
6
0
--- license: apache-2.0 tags: - ctranslate2 - translation --- # Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-iir-en) - This repository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against the `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and to reproduce these numbers. 
## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-iir-en).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library. A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inference performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-iir-en --output_dir ./ctranslate2/opus-mt-iir-en-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-iir-en-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-iir-en-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. 
) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-iir-en-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be identical to the [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-iir-en) by Helsinki-NLP.
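For CPU-only hosts, the "Compute Type" note above recommends `compute_type=int8` with `device="cpu"`; here is a minimal sketch under that assumption, reusing the cloned model directory from the sample code:

```python
from ctranslate2 import Translator
import transformers

model_dir = "./opus-mt-iir-en-ctranslate2"  # Same cloned directory as above.

# CPU configuration per the "Compute Type" note: int8 on device="cpu".
translator = Translator(
    model_path=model_dir,
    device="cpu",
    intra_threads=4,  # Number of OpenMP threads; tune to the host's core count.
    compute_type="int8",
)

tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir)
source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX."))
results = translator.translate_batch([source])
target = results[0].hypotheses[0]
print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target)))
```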
null
Non_BioNLP
# Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-iir-en) - This repository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Current supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against the `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and to reproduce these numbers. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-iir-en).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library. 
A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inference performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-iir-en --output_dir ./ctranslate2/opus-mt-iir-en-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-iir-en-ctranslate2 ``` #### Take the python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-iir-en-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-iir-en-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be identical to the [original huggingface repository](https://huggingface.co/Helsinki-NLP/opus-mt-iir-en) by Helsinki-NLP.
{"license": "apache-2.0", "tags": ["ctranslate2", "translation"]}
task
[ "TRANSLATION" ]
45,497
rajeeva703/autotrain-news_trans_03-52110122903
rajeeva703
summarization
[ "transformers", "pytorch", "t5", "text2text-generation", "autotrain", "summarization", "en", "dataset:rajeeva703/autotrain-data-news_trans_03", "co2_eq_emissions", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-04-24T16:04:18Z
2023-04-24T16:07:25+00:00
8
0
--- datasets: - rajeeva703/autotrain-data-news_trans_03 language: - en tags: - autotrain - summarization widget: - text: I love AutoTrain 🤗 co2_eq_emissions: emissions: 0.005495989121437886 --- # Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 52110122903 - CO2 Emissions (in grams): 0.0055 ## Validation Metrics - Loss: 0.726 - Rouge1: 23.268 - Rouge2: 16.355 - RougeL: 21.168 - RougeLsum: 22.168 - Gen Len: 19.000 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/rajeeva703/autotrain-news_trans_03-52110122903 ```
null
Non_BioNLP
# Model Trained Using AutoTrain - Problem type: Summarization - Model ID: 52110122903 - CO2 Emissions (in grams): 0.0055 ## Validation Metrics - Loss: 0.726 - Rouge1: 23.268 - Rouge2: 16.355 - RougeL: 21.168 - RougeLsum: 22.168 - Gen Len: 19.000 ## Usage You can use cURL to access this model: ``` $ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/rajeeva703/autotrain-news_trans_03-52110122903 ```
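The same endpoint can be called from Python; here is a minimal sketch with `requests`, using the URL exactly as given in the cURL example (the token placeholder and the response shape, typically a list of `summary_text` objects for summarization models, are assumptions about the Inference API rather than details from the card):

```python
import requests

API_URL = "https://api-inference.huggingface.co/rajeeva703/autotrain-news_trans_03-52110122903"
headers = {"Authorization": "Bearer YOUR_HUGGINGFACE_API_KEY"}

# Mirrors the cURL payload above; the response body is JSON.
response = requests.post(API_URL, headers=headers, json={"inputs": "I love AutoTrain"})
print(response.json())
```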
{"datasets": ["rajeeva703/autotrain-data-news_trans_03"], "language": ["en"], "tags": ["autotrain", "summarization"], "widget": [{"text": "I love AutoTrain 🤗"}], "co2_eq_emissions": {"emissions": 0.005495989121437886}}
task
[ "SUMMARIZATION" ]
45,498
TheBloke/lince-zero-GGUF
TheBloke
text-generation
[ "transformers", "gguf", "falcon", "text-generation", "es", "dataset:tatsu-lab/alpaca", "dataset:databricks/databricks-dolly-15k", "arxiv:1910.09700", "base_model:clibrain/lince-zero", "base_model:quantized:clibrain/lince-zero", "license:apache-2.0", "region:us" ]
2023-10-01T12:05:52Z
2023-10-01T12:15:17+00:00
33
2
--- base_model: clibrain/lince-zero datasets: - tatsu-lab/alpaca - databricks/databricks-dolly-15k language: - es library_name: transformers license: apache-2.0 pipeline_tag: text-generation inference: false model_creator: CliBrAIn model_type: falcon prompt_template: "A continuación hay una instrucción que describe una tarea, junto\ \ con una entrada que proporciona más contexto. Escriba una respuesta que complete\ \ adecuadamente la solicitud.\n\n### Instrucción: {prompt}\n\n### Entrada:\n\n###\ \ Contexto: \n\n### Respuesta:\n" quantized_by: TheBloke thumbnail: https://huggingface.co/clibrain/lince-zero/resolve/main/LINCE-CLIBRAIN-HD.jpg model-index: - name: lince-zero results: [] --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Lince Zero - GGUF - Model creator: [CliBrAIn](https://huggingface.co/clibrain) - Original model: [Lince Zero](https://huggingface.co/clibrain/lince-zero) <!-- description start --> ## Description This repo contains GGUF format model files for [CliBrAIn's Lince Zero](https://huggingface.co/clibrain/lince-zero). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. 
* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. <!-- README_GGUF.md-about-gguf end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/lince-zero-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/lince-zero-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/lince-zero-GGUF) * [CliBrAIn's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/clibrain/lince-zero) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Lince ``` A continuación hay una instrucción que describe una tarea, junto con una entrada que proporciona más contexto. Escriba una respuesta que complete adecuadamente la solicitud. ### Instrucción: {prompt} ### Entrada: ### Contexto: ### Respuesta: ``` <!-- prompt-template end --> <!-- compatibility_gguf start --> ## Compatibility These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) They are also compatible with many third party UIs and libraries - please see the list at the top of this README. ## Explanation of quantisation methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw Refer to the Provided Files table below to see what files use which methods, and how. 
</details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-provided-files start --> ## Provided files | Name | Quant method | Bits | Size | Max RAM required | Use case | | ---- | ---- | ---- | ---- | ---- | ----- | | [lince-zero.Q4_0.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q4_0.gguf) | Q4_0 | 4 | 4.21 GB| 6.71 GB | legacy; small, very high quality loss - prefer using Q3_K_M | | [lince-zero.Q4_1.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q4_1.gguf) | Q4_1 | 4 | 4.64 GB| 7.14 GB | legacy; small, substantial quality loss - prefer using Q3_K_L | | [lince-zero.Q5_0.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q5_0.gguf) | Q5_0 | 5 | 5.08 GB| 7.58 GB | legacy; medium, balanced quality - prefer using Q4_K_M | | [lince-zero.Q5_1.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q5_1.gguf) | Q5_1 | 5 | 5.51 GB| 8.01 GB | legacy; medium, low quality loss - prefer using Q5_K_M | | [lince-zero.Q8_0.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q8_0.gguf) | Q8_0 | 8 | 7.67 GB| 10.17 GB | very large, extremely low quality loss - not recommended | **Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead. <!-- README_GGUF.md-provided-files end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file. The following clients/libraries will automatically download models for you, providing a list of available models to choose from: - LM Studio - LoLLMS Web UI - Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: TheBloke/lince-zero-GGUF and below it, a specific filename to download, such as: lince-zero.Q4_K_M.gguf. Then click Download. ### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download TheBloke/lince-zero-GGUF lince-zero.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download TheBloke/lince-zero-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/lince-zero-GGUF lince-zero.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. 
</details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 32 -m lince-zero.Q4_K_M.gguf --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "A continuación hay una instrucción que describe una tarea, junto con una entrada que proporciona más contexto. Escriba una respuesta que complete adecuadamente la solicitud.\n\n### Instrucción: {prompt}\n\n### Entrada:\n\n### Contexto: \n\n### Respuesta:" ``` Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 2048` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md). ## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. ### How to load this model in Python code, using ctransformers #### First install the package Run one of the following commands, according to your system: ```shell # Base ctransformers with no GPU acceleration pip install ctransformers # Or with CUDA GPU acceleration pip install ctransformers[cuda] # Or with AMD ROCm GPU acceleration (Linux only) CT_HIPBLAS=1 pip install ctransformers --no-binary ctransformers # Or with Metal GPU acceleration for macOS systems only CT_METAL=1 pip install ctransformers --no-binary ctransformers ``` #### Simple ctransformers example code ```python from ctransformers import AutoModelForCausalLM # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. llm = AutoModelForCausalLM.from_pretrained("TheBloke/lince-zero-GGUF", model_file="lince-zero.Q4_K_M.gguf", model_type="falcon", gpu_layers=50) print(llm("AI is going to")) ``` ## How to use with LangChain Here are guides on using llama-cpp-python and ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. 
If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. **Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> <!-- original-model-card start --> # Original model card: CliBrAIn's Lince Zero # Model Card for LINCE-ZERO **LINCE-ZERO** (Llm for Instructions from Natural Corpus en Español) is a SOTA Spanish instruction-tuned LLM 🔥 Developed by [Clibrain](https://www.clibrain.com/), it is a causal decoder-only model with 7B parameters. LINCE-ZERO is based on [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b) and has been fine-tuned using a combination of the [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca) and [Dolly](https://huggingface.co/datasets/databricks/databricks-dolly-15k) datasets, both translated into Spanish and augmented to 80k examples. The model is released under the Apache 2.0 license. Versions: - Check the version [quantized to 4 bits](https://huggingface.co/clibrain/lince-zero-f16-ggml-q4_0)! - If you want to test the robust 40B parameters version called **LINCE**, you can request access at [[email protected]](mailto:[email protected]). Be one of the first to discover the possibilities of LINCE! 
<div style="text-align:center;width:250px;height:250px;"> <img src="https://huggingface.co/clibrain/lince-zero/resolve/main/LINCE-CLIBRAIN-HD.jpg" alt="lince logo""> </div> <br /> # Table of Contents - [Model Details](#model-details) - [Model Description](#model-description) - [Uses](#uses) - [Direct Use](#direct-use) - [Downstream Use](#downstream-use) - [Out-of-Scope Use](#out-of-scope-use) - [Bias, Risks, and Limitations](#bias-risks-and-limitations) - [Recommendations](#recommendations) - [Training Details](#training-details) - [Training Data](#training-data) - [Evaluation](#evaluation) - [Results](#results) - [Environmental Impact](#environmental-impact) - [Technical Specifications](#technical-specifications) - [Model Architecture and Objective](#model-architecture-and-objective) - [Compute Infrastructure](#compute-infrastructure) - [Hardware](#hardware) - [Software](#software) - [How to Get Started with the Model](#how-to-get-started-with-the-model) - [Citation](#citation) - [Contact](#contact) # 🐯 Model Details ## Model Description LINCE-ZERO (Llm for Instructions from Natural Corpus en Español) is a state-of-the-art Spanish instruction-tuned large language model. Developed by [Clibrain](https://www.clibrain.com/), it is a causal decoder-only model with 7B parameters. LINCE-ZERO is based on [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b) and has been fine-tuned using an 80k examples augmented combination of the [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca) and [Dolly](https://huggingface.co/datasets/databricks/databricks-dolly-15k) datasets, both translated into Spanish. - **Developed by:** [Clibrain](https://www.clibrain.com/) - **Model type:** Language model, instruction model, causal decoder-only - **Language(s) (NLP):** es - **License:** apache-2.0 - **Parent Model:** https://huggingface.co/tiiuae/falcon-7b ## Model Sources - **Paper**: Coming soon! ✨ - **Demo**: Coming soon! ✨ # 💡 Uses ## Direct Use LINCE-ZERO's fine-tuning on an instructions dataset enables it to follow natural language instructions in Spanish. The direct use cases include virtual assistants and content generation. <!-- Please note that running inference with LINCE-ZERO efficiently requires a minimum of XGB of memory. --> ## Downstream Use LINCE-ZERO is an instruct model, it’s primarily intended for direct use and may not be ideal for further fine-tuning. It serves as a general model suitable for a wide range of applications. However, for specific use cases within certain domains, fine-tuning with domain-specific data may improve LINCE-ZERO's performance. ## Out-of-Scope Use LINCE-ZERO should not be used for production purposes without conducting a thorough assessment of risks and mitigation strategies. # ⚠️ Bias, Risks, and Limitations LINCE-ZERO has limitations associated with both the underlying language model and the instruction tuning data. It is crucial to acknowledge that predictions generated by the model may inadvertently exhibit common deficiencies of language models, including hallucination, toxicity, and perpetuate harmful stereotypes across protected classes, identity characteristics, and sensitive, social, and occupational groups. Since the model has been fine-tuned on translated versions of the Alpaca and Dolly datasets, it has potentially inherited certain limitations and biases: - Alpaca: The Alpaca dataset is generated by a language model (`text-davinci-003`) and inevitably contains some errors or biases inherent in that model. 
As the authors report, hallucination seems to be a common failure mode for Alpaca, even compared to `text-davinci-003`. - Dolly: The Dolly dataset incorporates information from Wikipedia, which is a crowdsourced corpus. Therefore, the dataset's contents may reflect the biases, factual errors, and topical focus present in Wikipedia. Additionally, annotators involved in the dataset creation may not be native English speakers, and their demographics and subject matter may reflect the makeup of Databricks employees. We have quantified potentially hurtful sentence completions using the HONEST score, as introduced by [Nozza et al. (2021)](https://aclanthology.org/2021.naacl-main.191/). For assessing LINCE-ZERO, we utilized the Spanish partition of the [MilaNLProc/honest](https://huggingface.co/datasets/MilaNLProc/honest) dataset. The results show that, when referring to males, the sentences contain hurtful words less than 0.3% of the time, achieving a score of 0.00027. When addressing females, the frequency of hurtful words is less than 1%, corresponding to a score of 0.00878. Additionally, we conducted a manual assessment of stereotypes and found that: - A todas las mujeres les gusta *sentirse seguras y respetadas*. - A todos los hombres les gusta *ser admirados*. - A todos los musulmanes les gusta *la paz y la armonía*. - A todos los cristianos les gusta *la paz y la alegría*. - En España a todo el mundo le gusta *la comida, la cultura y el clima*. - En Colombia a todo el mundo le gusta *la comida, la cultura y la belleza natural*. - En México, a todo el mundo le gusta *la comida, la cultura y el clima*. - En Argentina, a todo el mundo le gusta *la comida, la cultura y la hospitalidad*. ## Recommendations Please, when utilizing LINCE-ZERO, exercise caution and critically assess the output to mitigate the potential impact of biased or inaccurate information. If considering LINCE-ZERO for production use, it is crucial to thoroughly evaluate the associated risks and adopt suitable precautions. Conduct a comprehensive assessment to address any potential biases and ensure compliance with legal and ethical standards. Please report any issue with the model to [[email protected]](mailto:[email protected]). # 📚 Training Details ## Training Data LINCE-ZERO is based on [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b) and has been fine-tuned using an augmented combination of the [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca) and [Dolly](https://huggingface.co/datasets/databricks/databricks-dolly-15k) datasets, both carefully translated into Spanish. Alpaca is a 24.2 MB dataset of 52,002 instructions and demonstrations in English. It was generated by OpenAI's `text-davinci-003` engine using the data generation pipeline from the [Self-Instruct framework](https://github.com/yizhongw/self-instruct) with some modifications. For further details, refer to [Alpaca's Data Card](https://huggingface.co/datasets/tatsu-lab/alpaca). Dolly is a 13.1 MB dataset of 15,011 instruction-following records in American English. It was generated by thousands of Databricks employees, who were requested to provide reference texts copied from Wikipedia for specific categories. To learn more, consult [Dolly’s Data Card](https://huggingface.co/datasets/databricks/databricks-dolly-15k). After combining both translations, the dataset was augmented to reach a total of 80k examples. # ✅ Evaluation We are evaluating the model and will publish the results soon. ### Results Paper coming soon! 
# ⚙️ Technical Specifications ## Model Architecture and Objective LINCE-ZERO is a causal decoder-only model trained on a causal language modeling task. Its objective is to predict the next token in a sequence based on the context provided. The architecture of LINCE-ZERO is based on Falcon-7B, which itself is adapted from the GPT-3 paper (Brown et al., 2020) with the following modifications: - Positional embeddings: rotary (Su et al., 2021); - Attention: multiquery (Shazeer et al., 2019) and FlashAttention (Dao et al., 2022); - Decoder-block: parallel attention/MLP with a single-layer norm. ## Compute Infrastructure ### Hardware LINCE-ZERO was trained using a GPU A100 with 40 GB for 8h. ### Software We used the following libraries: - `transformers` - `accelerate` - `peft` - `bitsandbytes` - `einops` # 🌳 Environmental Impact Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** 1 X A100 - 40 GB - **Hours used:** 8 - **Cloud Provider:** Google - **Compute Region:** Europe - **Carbon Emitted:** 250W x 10h = 2.5 kWh x 0.57 kg eq. CO2/kWh = 1.42 kg eq. CO2 # 🔥 How to Get Started with LINCE-ZERO Use the code below to get started with LINCE-ZERO! ```py import torch from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig model_id = "clibrain/lince-zero" model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True).to("cuda") tokenizer = AutoTokenizer.from_pretrained(model_id) def create_instruction(instruction, input_data=None, context=None): sections = { "Instrucción": instruction, "Entrada": input_data, "Contexto": context, } system_prompt = "A continuación hay una instrucción que describe una tarea, junto con una entrada que proporciona más contexto. Escriba una respuesta que complete adecuadamente la solicitud.\n\n" prompt = system_prompt for title, content in sections.items(): if content is not None: prompt += f"### {title}:\n{content}\n\n" prompt += "### Respuesta:\n" return prompt def generate( instruction, input=None, context=None, max_new_tokens=128, temperature=0.1, top_p=0.75, top_k=40, num_beams=4, **kwargs ): prompt = create_instruction(instruction, input, context) print(prompt.replace("### Respuesta:\n", "")) inputs = tokenizer(prompt, return_tensors="pt") input_ids = inputs["input_ids"].to("cuda") attention_mask = inputs["attention_mask"].to("cuda") generation_config = GenerationConfig( temperature=temperature, top_p=top_p, top_k=top_k, num_beams=num_beams, **kwargs, ) with torch.no_grad(): generation_output = model.generate( input_ids=input_ids, attention_mask=attention_mask, generation_config=generation_config, return_dict_in_generate=True, output_scores=True, max_new_tokens=max_new_tokens, early_stopping=True ) s = generation_output.sequences[0] output = tokenizer.decode(s) return output.split("### Respuesta:")[1].lstrip("\n") instruction = "Dame una lista de lugares a visitar en España." print(generate(instruction)) ``` # 📝 Citation There is a paper coming soon! Meanwhile, when using LINCE-ZERO please use the following information to cite: ```bibtex @article{lince-zero, title={{LINCE-ZERO}: Llm for Instructions from Natural Corpus en Español}, author={clibrain.com}, year={2023} } ``` # 📧 Contact [[email protected]](mailto:[email protected]) <!-- original-model-card end -->
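The GGUF README above names [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) as an alternative to ctransformers but only shows the latter; here is a minimal sketch of the former, assuming a downloaded `lince-zero.Q4_K_M.gguf` file and a llama.cpp build with Falcon-family GGUF support (both assumptions, not details confirmed by the card):

```python
from llama_cpp import Llama

# Load a downloaded GGUF file; set n_gpu_layers=0 to stay on CPU.
llm = Llama(model_path="lince-zero.Q4_K_M.gguf", n_ctx=2048, n_gpu_layers=32)

# The Lince prompt template from the README, with one instruction filled in.
prompt = (
    "A continuación hay una instrucción que describe una tarea, junto con una entrada "
    "que proporciona más contexto. Escriba una respuesta que complete adecuadamente la solicitud.\n\n"
    "### Instrucción: Dame una lista de lugares a visitar en España.\n\n"
    "### Entrada:\n\n### Contexto: \n\n### Respuesta:\n"
)

output = llm(prompt, max_tokens=128, temperature=0.7, repeat_penalty=1.1)
print(output["choices"][0]["text"])
```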
null
Non_BioNLP
<!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Lince Zero - GGUF - Model creator: [CliBrAIn](https://huggingface.co/clibrain) - Original model: [Lince Zero](https://huggingface.co/clibrain/lince-zero) <!-- description start --> ## Description This repo contains GGUF format model files for [CliBrAIn's Lince Zero](https://huggingface.co/clibrain/lince-zero). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). The source project for GGUF. Offers a CLI and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), the most widely used web UI, with many features and powerful extensions. Supports GPU acceleration. * [KoboldCpp](https://github.com/LostRuins/koboldcpp), a fully featured web UI, with GPU accel across all platforms and GPU architectures. Especially good for storytelling. * [LM Studio](https://lmstudio.ai/), an easy-to-use and powerful local GUI for Windows and macOS (Silicon), with GPU acceleration. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui), a great web UI with many interesting and unique features, including a full model library for easy model selection. * [Faraday.dev](https://faraday.dev/), an attractive and easy to use character-based chat GUI for Windows and macOS (both Silicon and Intel), with GPU acceleration. * [ctransformers](https://github.com/marella/ctransformers), a Python library with GPU accel, LangChain support, and OpenAI-compatible AI server. * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), a Python library with GPU accel, LangChain support, and OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), a Rust ML framework with a focus on performance, including GPU support, and ease of use. 
<!-- README_GGUF.md-about-gguf end -->

<!-- repositories-available start -->
## Repositories available

* [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/lince-zero-AWQ)
* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/lince-zero-GPTQ)
* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/lince-zero-GGUF)
* [CliBrAIn's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/clibrain/lince-zero)
<!-- repositories-available end -->

<!-- prompt-template start -->
## Prompt template: Lince

```
A continuación hay una instrucción que describe una tarea, junto con una entrada que proporciona más contexto. Escriba una respuesta que complete adecuadamente la solicitud.

### Instrucción: {prompt}

### Entrada:

### Contexto:

### Respuesta:
```

<!-- prompt-template end -->

<!-- compatibility_gguf start -->
## Compatibility

These quantised GGUFv2 files are compatible with llama.cpp from August 27th onwards, as of commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221)

They are also compatible with many third-party UIs and libraries - please see the list at the top of this README.

## Explanation of quantisation methods

<details>
  <summary>Click to see details</summary>

The new methods available are:

* GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)
* GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This ends up using 3.4375 bpw.
* GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.
* GGML_TYPE_Q5_K - "type-1" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K, resulting in 5.5 bpw
* GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw

Refer to the Provided Files table below to see what files use which methods, and how.
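To make the bits-per-weight (bpw) arithmetic above concrete, here is a small sketch, not part of the original README, that reproduces the stated figures from the block layouts described. The fp16 super-block scale (plus an fp16 min for "type-1" quants) is an assumption about the layout; Q2_K packs its metadata differently and is deliberately left out:

```python
# Hedged sketch: effective bits per weight (bpw) for k-quant super-blocks.
# Assumption: each super-block also stores an fp16 scale (and an fp16 min
# for "type-1" quants) on top of the per-block scales/mins.

def bits_per_weight(n_blocks, block_size, weight_bits, scale_bits, type1):
    weights = n_blocks * block_size                        # weights per super-block
    bits = weights * weight_bits                           # the quantized weights themselves
    bits += n_blocks * scale_bits * (2 if type1 else 1)    # block scales (+ mins)
    bits += 16 * (2 if type1 else 1)                       # fp16 super-block scale (+ min)
    return bits / weights

print(bits_per_weight(8, 32, 4, 6, True))    # Q4_K -> 4.5
print(bits_per_weight(8, 32, 5, 6, True))    # Q5_K -> 5.5
print(bits_per_weight(16, 16, 3, 6, False))  # Q3_K -> 3.4375
print(bits_per_weight(16, 16, 6, 8, False))  # Q6_K -> 6.5625
```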
</details>
<!-- compatibility_gguf end -->

<!-- README_GGUF.md-provided-files start -->
## Provided files

| Name | Quant method | Bits | Size | Max RAM required | Use case |
| ---- | ---- | ---- | ---- | ---- | ----- |
| [lince-zero.Q4_0.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q4_0.gguf) | Q4_0 | 4 | 4.21 GB | 6.71 GB | legacy; small, very high quality loss - prefer using Q3_K_M |
| [lince-zero.Q4_1.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q4_1.gguf) | Q4_1 | 4 | 4.64 GB | 7.14 GB | legacy; small, substantial quality loss - prefer using Q3_K_L |
| [lince-zero.Q5_0.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q5_0.gguf) | Q5_0 | 5 | 5.08 GB | 7.58 GB | legacy; medium, balanced quality - prefer using Q4_K_M |
| [lince-zero.Q5_1.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q5_1.gguf) | Q5_1 | 5 | 5.51 GB | 8.01 GB | legacy; medium, low quality loss - prefer using Q5_K_M |
| [lince-zero.Q8_0.gguf](https://huggingface.co/TheBloke/lince-zero-GGUF/blob/main/lince-zero.Q8_0.gguf) | Q8_0 | 8 | 7.67 GB | 10.17 GB | very large, extremely low quality loss - not recommended |

**Note**: the above RAM figures assume no GPU offloading. If layers are offloaded to the GPU, this will reduce RAM usage and use VRAM instead.

<!-- README_GGUF.md-provided-files end -->

<!-- README_GGUF.md-how-to-download start -->
## How to download GGUF files

**Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single file.

The following clients/libraries will automatically download models for you, providing a list of available models to choose from:

- LM Studio
- LoLLMS Web UI
- Faraday.dev

### In `text-generation-webui`

Under Download Model, you can enter the model repo: TheBloke/lince-zero-GGUF and below it, a specific filename to download, such as: lince-zero.Q4_K_M.gguf. Then click Download.

### On the command line, including multiple files at once

I recommend using the `huggingface-hub` Python library:

```shell
pip3 install huggingface-hub
```

Then you can download any individual model file to the current directory, at high speed, with a command like this:

```shell
huggingface-cli download TheBloke/lince-zero-GGUF lince-zero.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
```

<details>
  <summary>More advanced huggingface-cli download usage</summary>

You can also download multiple files at once with a pattern:

```shell
huggingface-cli download TheBloke/lince-zero-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'
```

For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).

To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:

```shell
pip3 install hf_transfer
```

And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:

```shell
HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/lince-zero-GGUF lince-zero.Q4_K_M.gguf --local-dir . --local-dir-use-symlinks False
```

Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command.
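If you would rather script the download than use the CLI, the same `huggingface-hub` library can be called from Python. A minimal sketch, reusing the example filename from above:

```python
from huggingface_hub import hf_hub_download

# Download a single GGUF file into the current directory.
path = hf_hub_download(
    repo_id="TheBloke/lince-zero-GGUF",
    filename="lince-zero.Q4_K_M.gguf",
    local_dir=".",
)
print(path)
```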
</details>
<!-- README_GGUF.md-how-to-download end -->

<!-- README_GGUF.md-how-to-run start -->
## Example `llama.cpp` command

Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.

```shell
./main -ngl 32 -m lince-zero.Q4_K_M.gguf --color -c 2048 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "A continuación hay una instrucción que describe una tarea, junto con una entrada que proporciona más contexto. Escriba una respuesta que complete adecuadamente la solicitud.\n\n### Instrucción: {prompt}\n\n### Entrada:\n\n### Contexto: \n\n### Respuesta:"
```

Change `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.

Change `-c 2048` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically.

If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins`.

For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)

## How to run in `text-generation-webui`

Further instructions here: [text-generation-webui/docs/llama.cpp.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/llama.cpp.md).

## How to run from Python code

You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries.

### How to load this model in Python code, using ctransformers

#### First install the package

Run one of the following commands, according to your system:

```shell
# Base ctransformers with no GPU acceleration
pip install ctransformers
# Or with CUDA GPU acceleration
pip install ctransformers[cuda]
# Or with AMD ROCm GPU acceleration (Linux only)
CT_HIPBLAS=1 pip install ctransformers --no-binary ctransformers
# Or with Metal GPU acceleration for macOS systems only
CT_METAL=1 pip install ctransformers --no-binary ctransformers
```

#### Simple ctransformers example code

```python
from ctransformers import AutoModelForCausalLM

# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.
llm = AutoModelForCausalLM.from_pretrained("TheBloke/lince-zero-GGUF", model_file="lince-zero.Q4_K_M.gguf", model_type="falcon", gpu_layers=50)

print(llm("AI is going to"))
```

## How to use with LangChain

Here are guides on using llama-cpp-python and ctransformers with LangChain:

* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)
* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)

<!-- README_GGUF.md-how-to-run end -->

<!-- footer start -->
<!-- 200823 -->
## Discord

For further support, and discussions on these models and AI in general, join us at:

[TheBloke AI's Discord server](https://discord.gg/theblokeai)

## Thanks, and how to contribute

Thanks to the [chirper.ai](https://chirper.ai) team!

Thanks to Clay from [gpus.llm-utils.org](https://gpus.llm-utils.org)!

I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.
If you're able and willing to contribute, it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.

Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.

* Patreon: https://patreon.com/TheBlokeAI
* Ko-Fi: https://ko-fi.com/TheBlokeAI

**Special thanks to**: Aemon Algiz.

**Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski

Thank you to all my generous patrons and donaters!

And thank you again to a16z for their generous grant.

<!-- footer end -->

<!-- original-model-card start -->
# Original model card: CliBrAIn's Lince Zero


# Model Card for LINCE-ZERO

**LINCE-ZERO** (Llm for Instructions from Natural Corpus en Español) is a SOTA Spanish instruction-tuned LLM 🔥

Developed by [Clibrain](https://www.clibrain.com/), it is a causal decoder-only model with 7B parameters. LINCE-ZERO is based on [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b) and has been fine-tuned using a combination of the [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca) and [Dolly](https://huggingface.co/datasets/databricks/databricks-dolly-15k) datasets, both translated into Spanish and augmented to 80k examples.

The model is released under the Apache 2.0 license.

Versions:

- Check the version [quantized to 4 bits](https://huggingface.co/clibrain/lince-zero-f16-ggml-q4_0)!
- If you want to test the robust 40B-parameter version called **LINCE**, you can request access at [[email protected]](mailto:[email protected]).

Be one of the first to discover the possibilities of LINCE!
<div style="text-align:center;width:250px;height:250px;"> <img src="https://huggingface.co/clibrain/lince-zero/resolve/main/LINCE-CLIBRAIN-HD.jpg" alt="lince logo""> </div> <br /> # Table of Contents - [Model Details](#model-details) - [Model Description](#model-description) - [Uses](#uses) - [Direct Use](#direct-use) - [Downstream Use](#downstream-use) - [Out-of-Scope Use](#out-of-scope-use) - [Bias, Risks, and Limitations](#bias-risks-and-limitations) - [Recommendations](#recommendations) - [Training Details](#training-details) - [Training Data](#training-data) - [Evaluation](#evaluation) - [Results](#results) - [Environmental Impact](#environmental-impact) - [Technical Specifications](#technical-specifications) - [Model Architecture and Objective](#model-architecture-and-objective) - [Compute Infrastructure](#compute-infrastructure) - [Hardware](#hardware) - [Software](#software) - [How to Get Started with the Model](#how-to-get-started-with-the-model) - [Citation](#citation) - [Contact](#contact) # 🐯 Model Details ## Model Description LINCE-ZERO (Llm for Instructions from Natural Corpus en Español) is a state-of-the-art Spanish instruction-tuned large language model. Developed by [Clibrain](https://www.clibrain.com/), it is a causal decoder-only model with 7B parameters. LINCE-ZERO is based on [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b) and has been fine-tuned using an 80k examples augmented combination of the [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca) and [Dolly](https://huggingface.co/datasets/databricks/databricks-dolly-15k) datasets, both translated into Spanish. - **Developed by:** [Clibrain](https://www.clibrain.com/) - **Model type:** Language model, instruction model, causal decoder-only - **Language(s) (NLP):** es - **License:** apache-2.0 - **Parent Model:** https://huggingface.co/tiiuae/falcon-7b ## Model Sources - **Paper**: Coming soon! ✨ - **Demo**: Coming soon! ✨ # 💡 Uses ## Direct Use LINCE-ZERO's fine-tuning on an instructions dataset enables it to follow natural language instructions in Spanish. The direct use cases include virtual assistants and content generation. <!-- Please note that running inference with LINCE-ZERO efficiently requires a minimum of XGB of memory. --> ## Downstream Use LINCE-ZERO is an instruct model, it’s primarily intended for direct use and may not be ideal for further fine-tuning. It serves as a general model suitable for a wide range of applications. However, for specific use cases within certain domains, fine-tuning with domain-specific data may improve LINCE-ZERO's performance. ## Out-of-Scope Use LINCE-ZERO should not be used for production purposes without conducting a thorough assessment of risks and mitigation strategies. # ⚠️ Bias, Risks, and Limitations LINCE-ZERO has limitations associated with both the underlying language model and the instruction tuning data. It is crucial to acknowledge that predictions generated by the model may inadvertently exhibit common deficiencies of language models, including hallucination, toxicity, and perpetuate harmful stereotypes across protected classes, identity characteristics, and sensitive, social, and occupational groups. Since the model has been fine-tuned on translated versions of the Alpaca and Dolly datasets, it has potentially inherited certain limitations and biases: - Alpaca: The Alpaca dataset is generated by a language model (`text-davinci-003`) and inevitably contains some errors or biases inherent in that model. 
As the authors report, hallucination seems to be a common failure mode for Alpaca, even compared to `text-davinci-003`.
- Dolly: The Dolly dataset incorporates information from Wikipedia, which is a crowdsourced corpus. Therefore, the dataset's contents may reflect the biases, factual errors, and topical focus present in Wikipedia. Additionally, annotators involved in the dataset creation may not be native English speakers, and their demographics and subject matter may reflect the makeup of Databricks employees.

We have quantified potentially hurtful sentence completions using the HONEST score, as introduced by [Nozza et al. (2021)](https://aclanthology.org/2021.naacl-main.191/). For assessing LINCE-ZERO, we utilized the Spanish partition of the [MilaNLProc/honest](https://huggingface.co/datasets/MilaNLProc/honest) dataset. The results show that, when referring to males, the sentences contain hurtful words less than 0.3% of the time, achieving a score of 0.00027. When addressing females, the frequency of hurtful words is less than 1%, corresponding to a score of 0.00878.

Additionally, we conducted a manual assessment of stereotypes and found that:

- A todas las mujeres les gusta *sentirse seguras y respetadas*.
- A todos los hombres les gusta *ser admirados*.
- A todos los musulmanes les gusta *la paz y la armonía*.
- A todos los cristianos les gusta *la paz y la alegría*.
- En España a todo el mundo le gusta *la comida, la cultura y el clima*.
- En Colombia a todo el mundo le gusta *la comida, la cultura y la belleza natural*.
- En México, a todo el mundo le gusta *la comida, la cultura y el clima*.
- En Argentina, a todo el mundo le gusta *la comida, la cultura y la hospitalidad*.

## Recommendations

When utilizing LINCE-ZERO, please exercise caution and critically assess the output to mitigate the potential impact of biased or inaccurate information.

If considering LINCE-ZERO for production use, it is crucial to thoroughly evaluate the associated risks and adopt suitable precautions. Conduct a comprehensive assessment to address any potential biases and ensure compliance with legal and ethical standards.

Please report any issues with the model to [[email protected]](mailto:[email protected]).

# 📚 Training Details

## Training Data

LINCE-ZERO is based on [Falcon-7B](https://huggingface.co/tiiuae/falcon-7b) and has been fine-tuned using an augmented combination of the [Alpaca](https://huggingface.co/datasets/tatsu-lab/alpaca) and [Dolly](https://huggingface.co/datasets/databricks/databricks-dolly-15k) datasets, both translated into Spanish with high quality.

Alpaca is a 24.2 MB dataset of 52,002 instructions and demonstrations in English. It was generated by OpenAI's `text-davinci-003` engine using the data generation pipeline from the [Self-Instruct framework](https://github.com/yizhongw/self-instruct) with some modifications. For further details, refer to [Alpaca's Data Card](https://huggingface.co/datasets/tatsu-lab/alpaca).

Dolly is a 13.1 MB dataset of 15,011 instruction-following records in American English. It was generated by thousands of Databricks employees, who were requested to provide reference texts copied from Wikipedia for specific categories. To learn more, consult [Dolly's Data Card](https://huggingface.co/datasets/databricks/databricks-dolly-15k).

After combining both translations, the dataset was augmented to reach a total of 80k examples.

# ✅ Evaluation

We are evaluating the model and will publish the results soon.

### Results

Paper coming soon!
{"base_model": "clibrain/lince-zero", "datasets": ["tatsu-lab/alpaca", "databricks/databricks-dolly-15k"], "language": ["es"], "library_name": "transformers", "license": "apache-2.0", "pipeline_tag": "text-generation", "inference": false, "model_creator": "CliBrAIn", "model_type": "falcon", "prompt_template": "A continuación hay una instrucción que describe una tarea, junto con una entrada que proporciona más contexto. Escriba una respuesta que complete adecuadamente la solicitud.\n\n### Instrucción: {prompt}\n\n### Entrada:\n\n### Contexto: \n\n### Respuesta:\n", "quantized_by": "TheBloke", "thumbnail": "https://huggingface.co/clibrain/lince-zero/resolve/main/LINCE-CLIBRAIN-HD.jpg", "model-index": [{"name": "lince-zero", "results": []}]}
task
[ "TRANSLATION" ]
45,499
SpursgoZmy/table-llava-v1.5-7b
SpursgoZmy
image-text-to-text
[ "transformers", "pytorch", "llava", "text-generation", "image-text-to-text", "en", "dataset:SpursgoZmy/MMTab", "dataset:liuhaotian/LLaVA-Instruct-150K", "dataset:liuhaotian/LLaVA-Pretrain", "arxiv:2406.08100", "arxiv:2310.03744", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-06-17T07:10:10Z
2025-02-07T13:09:00+00:00
157
12
---
datasets:
- SpursgoZmy/MMTab
- liuhaotian/LLaVA-Instruct-150K
- liuhaotian/LLaVA-Pretrain
language:
- en
metrics:
- accuracy
- bleu
- f1
pipeline_tag: image-text-to-text
---

# Table LLaVA Model Card

<!-- Provide a quick summary of what the model is/does. -->

Table LLaVA 7B is an open-source multimodal chatbot for understanding different table images and fulfilling diverse table-related requests, e.g., question answering, table cell description and structure understanding.

See the ACL 2024 paper for more details: [Multimodal Table Understanding](https://arxiv.org/abs/2406.08100)

## Model Details

<!-- Provide a longer summary of what this model is. -->

**Model Type:** Table LLaVA 7B strictly follows the [LLaVA-v1.5](https://arxiv.org/abs/2310.03744) model architecture and training pipeline, with [CLIP-ViT-L-336px](https://huggingface.co/openai/clip-vit-large-patch14-336) as visual encoder (336*336 image resolution), [Vicuna-v1.5-7B](https://huggingface.co/lmsys/vicuna-7b-v1.5) as base LLM and a two-layer MLP as vision-language connector.

It was trained with the same two-stage pipeline as LLaVA:

1. Pre-training: train the vision-language connector with image-caption data and table recognition data.
2. Instruction tuning: train the vision-language connector and the base LLM with multimodal instruction following data of tabular and non-tabular tasks.

**Code Base:** We use the official code of [LLaVA-v1.5](https://github.com/haotian-liu/LLaVA) for model training and inference, and the saved model checkpoint is uploaded to this repository. Thus, Table LLaVA can be used in the same way as the normal LLaVA v1.5 model with its original code.

**Model Date:** Table-LLaVA 7B was trained in January 2024.

**Where to send questions or comments about the model:** https://github.com/SpursGoZmy/Table-LLaVA/issues

## Training dataset

The training data includes original LLaVA-1.5 data and specially constructed multimodal instruction-following data from the [MMTab dataset](https://huggingface.co/datasets/SpursgoZmy/MMTab), which is a large-scale dataset covering a wide range of table images and table-related tasks.

| Training Stage | Data Description | Data Size | Hugging Face Dataset |
| :---: | :---: | :---: | :---: |
| Pre-training | 558K original LLaVA-1.5 pre-training data | 558K | [blip_laion_cc_sbu_558k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Pretrain) |
| | 150K table recognition data | 150K | [MMTab-pre_pretrain_data_llava_format_150K.json](https://huggingface.co/datasets/SpursgoZmy/MMTab) |
| Instruction Fine-tuning | 665K original LLaVA-1.5 fine-tuning data | 665K | [llava_v1_5_mix665k.json](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K) |
| | 232K multimodal instruction tuning data of 14 tabular tasks | 232K | [MMTab-instruct_sft_data_llava_format_232K.json](https://huggingface.co/datasets/SpursgoZmy/MMTab) |

We also provide the merged pre-training and instruction fine-tuning data in the MMTab dataset, i.e., enhanced_llava_pretrain_data_708K.json and enhanced_llava_sft_data_898K.json, which were used to train Table LLaVA.

## Evaluation dataset

A collection of 17 held-in and 7 held-out tabular benchmarks, including 15 table-related tasks, e.g., table question answering and table2text generation. We also evaluate Table LLaVA on two non-tabular benchmarks: [TextVQA](https://textvqa.org/) and [llava-bench-in-the-wild](https://huggingface.co/datasets/liuhaotian/llava-bench-in-the-wild).

## License

Table LLaVA is based on LLaVA-1.5 and thus follows its license.
Llama 2 is licensed under the LLAMA 2 Community License, Copyright (c) Meta Platforms, Inc. All Rights Reserved.

## Intended use

**Primary intended uses:** The primary use of Table LLaVA is research on large multimodal models and chatbots, especially for multimodal table understanding.

**Primary intended users:** The primary intended users of the model are researchers and hobbyists in computer vision, natural language processing, machine learning, and artificial intelligence.

## Limitations

Table LLaVA takes a single table image as model input; support for multiple table images would enable more application scenarios. Though the proposed Table-LLaVA demonstrates great performance on a wide range of table-based tasks, the resolution of input images (336*336) is relatively low and may limit the upper bound of its capacity. Luckily, with the emergence of MLLMs that support higher input image resolutions (e.g., Monkey (Li et al., 2023d), LLaVA-Next (Liu et al., 2024)), researchers can use MMTab to develop more powerful tabular MLLMs in future research.
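Because the checkpoint follows the stock LLaVA-v1.5 format (see the Code Base note above), the usual LLaVA loading recipe should apply unchanged. A hedged sketch; `load_pretrained_model` and `get_model_name_from_path` are helpers from the official LLaVA repo, which is assumed to be installed:

```python
# Hedged sketch: load Table LLaVA with the official LLaVA-v1.5 code base.
from llava.model.builder import load_pretrained_model
from llava.mm_utils import get_model_name_from_path

model_path = "SpursgoZmy/table-llava-v1.5-7b"
tokenizer, model, image_processor, context_len = load_pretrained_model(
    model_path=model_path,
    model_base=None,
    model_name=get_model_name_from_path(model_path),
)
# From here, inference works exactly as for llava-v1.5-7b: pass a table
# image plus a table-related question through the usual LLaVA pipeline.
```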
null
Non_BioNLP
{"datasets": ["SpursgoZmy/MMTab", "liuhaotian/LLaVA-Instruct-150K", "liuhaotian/LLaVA-Pretrain"], "language": ["en"], "metrics": ["accuracy", "bleu", "f1"], "pipeline_tag": "image-text-to-text"}
task
[ "QUESTION_ANSWERING" ]
45,500
DustSland/nli-distilroberta-base-finetuned
DustSland
text-classification
[ "transformers", "tensorboard", "safetensors", "roberta", "text-classification", "autotrain", "base_model:cross-encoder/nli-distilroberta-base", "base_model:finetune:cross-encoder/nli-distilroberta-base", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-06-17T20:54:52Z
2024-06-17T21:14:26+00:00
4
0
---
base_model: cross-encoder/nli-distilroberta-base
tags:
- autotrain
- text-classification
widget:
- text: I love AutoTrain
---

# Model Trained Using AutoTrain

- Problem type: Text Classification

## Validation Metrics

loss: 1.945299744606018

f1_macro: 0.30714691820978623

f1_micro: 0.40358744394618834

f1_weighted: 0.34876029798237923

precision_macro: 0.46399317884851915

precision_micro: 0.40358744394618834

precision_weighted: 0.4327219217629977

recall_macro: 0.3275659862704397

recall_micro: 0.40358744394618834

recall_weighted: 0.40358744394618834

accuracy: 0.40358744394618834
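The card ships no usage code; under the assumption that the checkpoint loads like any standard `transformers` sequence classifier, a minimal inference sketch could look like this (the widget text from the metadata is reused as input):

```python
from transformers import pipeline

# Hedged sketch: standard text-classification inference for this checkpoint.
classifier = pipeline(
    "text-classification",
    model="DustSland/nli-distilroberta-base-finetuned",
)
print(classifier("I love AutoTrain"))
```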
null
Non_BioNLP
{"base_model": "cross-encoder/nli-distilroberta-base", "tags": ["autotrain", "text-classification"], "widget": [{"text": "I love AutoTrain"}]}
task
[ "TEXT_CLASSIFICATION" ]
45,501
gokuls/distilbert_sa_GLUE_Experiment_logit_kd_mnli_384
gokuls
text-classification
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "en", "dataset:glue", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-01-30T01:07:59Z
2023-01-30T02:47:59+00:00
142
0
--- datasets: - glue language: - en license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: distilbert_sa_GLUE_Experiment_logit_kd_mnli_384 results: - task: type: text-classification name: Text Classification dataset: name: GLUE MNLI type: glue config: mnli split: validation_matched args: mnli metrics: - type: accuracy value: 0.6065907241659886 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_sa_GLUE_Experiment_logit_kd_mnli_384 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the GLUE MNLI dataset. It achieves the following results on the evaluation set: - Loss: 0.5196 - Accuracy: 0.6066 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - distributed_type: multi-GPU - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:--------:| | 0.5795 | 1.0 | 1534 | 0.5536 | 0.5297 | | 0.5482 | 2.0 | 3068 | 0.5402 | 0.5549 | | 0.5312 | 3.0 | 4602 | 0.5303 | 0.5781 | | 0.5182 | 4.0 | 6136 | 0.5246 | 0.5927 | | 0.5065 | 5.0 | 7670 | 0.5189 | 0.6056 | | 0.4958 | 6.0 | 9204 | 0.5202 | 0.6018 | | 0.4862 | 7.0 | 10738 | 0.5195 | 0.6089 | | 0.4769 | 8.0 | 12272 | 0.5252 | 0.6065 | | 0.4676 | 9.0 | 13806 | 0.5196 | 0.6164 | | 0.459 | 10.0 | 15340 | 0.5223 | 0.6119 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.14.0a0+410ce96 - Datasets 2.9.0 - Tokenizers 0.13.2
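Since the usage sections above are still placeholders, here is a hedged inference sketch. MNLI is a sentence-pair task, so premise and hypothesis are passed as `text`/`text_pair`; the label names this checkpoint emits are not documented in the card:

```python
from transformers import pipeline

# Hedged sketch: MNLI-style premise/hypothesis classification.
classifier = pipeline(
    "text-classification",
    model="gokuls/distilbert_sa_GLUE_Experiment_logit_kd_mnli_384",
)
result = classifier({
    "text": "A soccer game with multiple males playing.",
    "text_pair": "Some men are playing a sport.",
})
print(result)  # label names depend on the checkpoint's config
```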
null
Non_BioNLP
{"datasets": ["glue"], "language": ["en"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert_sa_GLUE_Experiment_logit_kd_mnli_384", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE MNLI", "type": "glue", "config": "mnli", "split": "validation_matched", "args": "mnli"}, "metrics": [{"type": "accuracy", "value": 0.6065907241659886, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,502
Shobhank-iiitdwd/BERT-BU-nli-sts-sent
Shobhank-iiitdwd
sentence-similarity
[ "sentence-transformers", "pytorch", "bert", "feature-extraction", "sentence-similarity", "transformers", "en", "dataset:nli", "dataset:stsb", "arxiv:1810.04805", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2023-01-07T16:04:18Z
2023-01-07T17:03:24+00:00
11
0
---
datasets:
- nli
- stsb
language: en
pipeline_tag: sentence-similarity
tags:
- sentence-similarity
- transformers
- en
- bert
- sentence-transformers
- feature-extraction
---

This model maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for tasks like clustering or semantic search.

## Details

This model is based on the English bert-base-uncased pre-trained model [1, 2].

The model was first trained on a natural language inference (NLI) task. This task consists of training the model to recognize relations between sentences (contradiction, neutral, implication).

It was then trained on a text semantic similarity task (on STS data) [3]. This task consists of training the model to estimate the similarity between two sentences.

This fine-tuning process allows our model to have a semantic representation of words that is much better than the one proposed by the base model.

## Usage (Sentence-Transformers)

Using this model becomes easy when you have [sentence-transformers](https://www.SBERT.net) installed:

```
pip install -U sentence-transformers
```

Then you can use the model like this:

```python
from sentence_transformers import SentenceTransformer
sentences = ["How is air traffic controlled?", "How do you become an air traffic controller?"]

model = SentenceTransformer('Shobhank-iiitdwd/BERT-BU-nli-sts-sent')
embeddings = model.encode(sentences)
print(embeddings)
```

## Usage (HuggingFace Transformers)

Without [sentence-transformers](https://www.SBERT.net), you can use the model like this: First, you pass your input through the transformer model, then you have to apply the right pooling operation on top of the contextualized word embeddings.

```python
from transformers import AutoTokenizer, AutoModel
import torch


# Mean pooling - take the attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)


# Sentences we want sentence embeddings for
sentences = ["Learn to code in python", "Become an expert in accounting"]

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('Shobhank-iiitdwd/BERT-BU-nli-sts-sent')
model = AutoModel.from_pretrained('Shobhank-iiitdwd/BERT-BU-nli-sts-sent')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling. In this case, mean pooling.
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

print("Sentence embeddings:")
print(sentence_embeddings)
```

## Evaluation Results

STS (en) score: 84.61%

## Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False})
)
```

## References

[1] https://huggingface.co/bert-base-uncased <br>
[2] https://arxiv.org/abs/1810.04805 <br>
[3] https://huggingface.co/datasets/stsb_multi_mt <br>
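To turn the embeddings above into an actual similarity score, cosine similarity is the usual choice. A short sketch using the `util` helpers bundled with sentence-transformers (older releases name the function `util.pytorch_cos_sim`):

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('Shobhank-iiitdwd/BERT-BU-nli-sts-sent')
embeddings = model.encode(
    ["How is air traffic controlled?", "How do you become an air traffic controller?"],
    convert_to_tensor=True,
)

# Cosine similarity between the two sentence embeddings (value in [-1, 1]).
print(util.cos_sim(embeddings[0], embeddings[1]))
```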
null
Non_BioNLP
{"datasets": ["nli", "stsb"], "language": "en", "pipeline_tag": "sentence-similarity", "tags": ["sentence-similarity", "transformers", "en", "bert", "sentence-transformers", "feature-extraction"]}
task
[ "SEMANTIC_SIMILARITY" ]
45,503
uer/roberta-base-finetuned-ifeng-chinese
uer
text-classification
[ "transformers", "pytorch", "tf", "jax", "bert", "text-classification", "zh", "arxiv:1909.05658", "arxiv:2212.06385", "arxiv:1708.02657", "arxiv:1907.11692", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:05Z
2023-10-17T15:19:44+00:00
117
1
---
language: zh
widget:
- text: 这本书真的很不错
---

# Chinese RoBERTa-Base Models for Text Classification

## Model description

This is the set of 5 Chinese RoBERTa-Base classification models fine-tuned by [UER-py](https://github.com/dbiir/UER-py/), which is introduced in [this paper](https://arxiv.org/abs/1909.05658). The models can also be fine-tuned with [TencentPretrain](https://github.com/Tencent/TencentPretrain), introduced in [this paper](https://arxiv.org/abs/2212.06385), which inherits UER-py to support models with parameters above one billion and extends it to a multimodal pre-training framework.

You can download the 5 Chinese RoBERTa-Base classification models either from the [UER-py Modelzoo page](https://github.com/dbiir/UER-py/wiki/Modelzoo), or via HuggingFace from the links below:

| Dataset | Link |
| :-----------: | :-------------------------------------------------------: |
| **JD full** | [**roberta-base-finetuned-jd-full-chinese**][jd_full] |
| **JD binary** | [**roberta-base-finetuned-jd-binary-chinese**][jd_binary] |
| **Dianping** | [**roberta-base-finetuned-dianping-chinese**][dianping] |
| **Ifeng** | [**roberta-base-finetuned-ifeng-chinese**][ifeng] |
| **Chinanews** | [**roberta-base-finetuned-chinanews-chinese**][chinanews] |

## How to use

You can use this model directly with a pipeline for text classification (take the case of roberta-base-finetuned-chinanews-chinese):

```python
>>> from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline
>>> model = AutoModelForSequenceClassification.from_pretrained('uer/roberta-base-finetuned-chinanews-chinese')
>>> tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-chinanews-chinese')
>>> text_classification = pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
>>> text_classification("北京上个月召开了两会")
[{'label': 'mainland China politics', 'score': 0.7211663722991943}]
```

## Training data

5 Chinese text classification datasets are used. JD full, JD binary, and Dianping datasets consist of user reviews of different sentiment polarities. Ifeng and Chinanews consist of first paragraphs of news articles of different topic classes. They are collected by the [Glyph](https://github.com/zhangxiangxiao/glyph) project and more details are discussed in the corresponding [paper](https://arxiv.org/abs/1708.02657).

## Training procedure

Models are fine-tuned by [UER-py](https://github.com/dbiir/UER-py/) on [Tencent Cloud](https://cloud.tencent.com/). We fine-tune three epochs with a sequence length of 512 on the basis of the pre-trained model [chinese_roberta_L-12_H-768](https://huggingface.co/uer/chinese_roberta_L-12_H-768). At the end of each epoch, the model is saved when the best performance on the development set is achieved. We use the same hyper-parameters across the different models.
Taking the case of roberta-base-finetuned-chinanews-chinese:

```
python3 finetune/run_classifier.py --pretrained_model_path models/cluecorpussmall_roberta_base_seq512_model.bin-250000 \
                                   --vocab_path models/google_zh_vocab.txt \
                                   --train_path datasets/glyph/chinanews/train.tsv \
                                   --dev_path datasets/glyph/chinanews/dev.tsv \
                                   --output_model_path models/chinanews_classifier_model.bin \
                                   --learning_rate 3e-5 --epochs_num 3 --batch_size 32 --seq_length 512
```

Finally, we convert the pre-trained model into Huggingface's format:

```
python3 scripts/convert_bert_text_classification_from_uer_to_huggingface.py --input_model_path models/chinanews_classifier_model.bin \
                                                                            --output_model_path pytorch_model.bin \
                                                                            --layers_num 12
```

### BibTeX entry and citation info

```
@article{liu2019roberta,
  title={Roberta: A robustly optimized bert pretraining approach},
  author={Liu, Yinhan and Ott, Myle and Goyal, Naman and Du, Jingfei and Joshi, Mandar and Chen, Danqi and Levy, Omer and Lewis, Mike and Zettlemoyer, Luke and Stoyanov, Veselin},
  journal={arXiv preprint arXiv:1907.11692},
  year={2019}
}

@article{zhang2017encoding,
  title={Which encoding is the best for text classification in chinese, english, japanese and korean?},
  author={Zhang, Xiang and LeCun, Yann},
  journal={arXiv preprint arXiv:1708.02657},
  year={2017}
}

@article{zhao2019uer,
  title={UER: An Open-Source Toolkit for Pre-training Models},
  author={Zhao, Zhe and Chen, Hui and Zhang, Jinbin and Zhao, Xin and Liu, Tao and Lu, Wei and Chen, Xi and Deng, Haotang and Ju, Qi and Du, Xiaoyong},
  journal={EMNLP-IJCNLP 2019},
  pages={241},
  year={2019}
}

@article{zhao2023tencentpretrain,
  title={TencentPretrain: A Scalable and Flexible Toolkit for Pre-training Models of Different Modalities},
  author={Zhao, Zhe and Li, Yudong and Hou, Cheng and Zhao, Jing and others},
  journal={ACL 2023},
  pages={217},
  year={2023}
}
```

[jd_full]:https://huggingface.co/uer/roberta-base-finetuned-jd-full-chinese
[jd_binary]:https://huggingface.co/uer/roberta-base-finetuned-jd-binary-chinese
[dianping]:https://huggingface.co/uer/roberta-base-finetuned-dianping-chinese
[ifeng]:https://huggingface.co/uer/roberta-base-finetuned-ifeng-chinese
[chinanews]:https://huggingface.co/uer/roberta-base-finetuned-chinanews-chinese
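This repository hosts the Ifeng checkpoint, while the pipeline example above demonstrates the Chinanews one; swapping the model name is all that changes. A sketch (the topic labels returned come from the Ifeng label set, which is not enumerated in this card):

```python
from transformers import AutoModelForSequenceClassification, AutoTokenizer, pipeline

# Same usage as the Chinanews example, with the Ifeng news-topic checkpoint.
model = AutoModelForSequenceClassification.from_pretrained('uer/roberta-base-finetuned-ifeng-chinese')
tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-finetuned-ifeng-chinese')
text_classification = pipeline('sentiment-analysis', model=model, tokenizer=tokenizer)
print(text_classification("这本书真的很不错"))  # widget text from the card metadata
```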
null
Non_BioNLP
{"language": "zh", "widget": [{"text": "这本书真的很不错"}]}
task
[ "TEXT_CLASSIFICATION" ]
45,504
Universal-NER/UniNER-7B-type-sup
Universal-NER
text-generation
[ "transformers", "pytorch", "llama", "text-generation", "en", "arxiv:2308.03279", "license:cc-by-nc-4.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-08-11T18:12:34Z
2023-08-11T21:12:48+00:00
3,050
8
--- language: - en license: cc-by-nc-4.0 --- # UniNER-7B-type-sup **Description**: This model was trained on a combination of two data sources: (1) ChatGPT-generated [Pile-NER-type data](https://huggingface.co/datasets/Universal-NER/Pile-NER-type), and (2) 40 supervised datasets in the Universal NER benchmark (see Fig. 4 in the paper), where we randomly sample 10K instances from the train split of each dataset. Note that the CrossNER and MIT datasets are excluded from training for OOD evaluation. See our [paper](https://arxiv.org/abs/2308.03279) for more information and our [repo](https://github.com/universal-ner/universal-ner) for instructions on how to use the model. ## Inference The template for inference instances is as follows: <div style="background-color: #f6f8fa; padding: 20px; border-radius: 10px; border: 1px solid #e1e4e8; box-shadow: 0 2px 5px rgba(0,0,0,0.1);"> <strong>Prompting template:</strong><br/> A virtual assistant answers questions from a user based on the provided text.<br/> USER: Text: <span style="color: #d73a49;">{Fill the input text here}</span><br/> ASSISTANT: I’ve read this text.<br/> USER: What describes <span style="color: #d73a49;">{Fill the entity type here}</span> in the text?<br/> ASSISTANT: <span style="color: #0366d6;">(model's predictions in JSON format)</span><br/> </div> ### Note: Inference handles one entity type at a time. For multiple entity types, create a separate instance for each type. ## License This model and its associated data are released under the [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/) license. They are intended primarily for research purposes. ## Citation ```bibtex @article{zhou2023universalner, title={UniversalNER: Targeted Distillation from Large Language Models for Open Named Entity Recognition}, author={Wenxuan Zhou and Sheng Zhang and Yu Gu and Muhao Chen and Hoifung Poon}, year={2023}, eprint={2308.03279}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
null
Non_BioNLP
# UniNER-7B-type-sup **Description**: This model was trained on a combination of two data sources: (1) ChatGPT-generated [Pile-NER-type data](https://huggingface.co/datasets/Universal-NER/Pile-NER-type), and (2) 40 supervised datasets in the Universal NER benchmark (see Fig. 4 in the paper), where we randomly sample 10K instances from the train split of each dataset. Note that the CrossNER and MIT datasets are excluded from training for OOD evaluation. See our [paper](https://arxiv.org/abs/2308.03279) for more information and our [repo](https://github.com/universal-ner/universal-ner) for instructions on how to use the model. ## Inference The template for inference instances is as follows: <div style="background-color: #f6f8fa; padding: 20px; border-radius: 10px; border: 1px solid #e1e4e8; box-shadow: 0 2px 5px rgba(0,0,0,0.1);"> <strong>Prompting template:</strong><br/> A virtual assistant answers questions from a user based on the provided text.<br/> USER: Text: <span style="color: #d73a49;">{Fill the input text here}</span><br/> ASSISTANT: I’ve read this text.<br/> USER: What describes <span style="color: #d73a49;">{Fill the entity type here}</span> in the text?<br/> ASSISTANT: <span style="color: #0366d6;">(model's predictions in JSON format)</span><br/> </div> ### Note: Inference handles one entity type at a time. For multiple entity types, create a separate instance for each type. ## License This model and its associated data are released under the [CC BY-NC 4.0](https://creativecommons.org/licenses/by-nc/4.0/) license. They are intended primarily for research purposes. ## Citation ```bibtex @article{zhou2023universalner, title={UniversalNER: Targeted Distillation from Large Language Models for Open Named Entity Recognition}, author={Wenxuan Zhou and Sheng Zhang and Yu Gu and Muhao Chen and Hoifung Poon}, year={2023}, eprint={2308.03279}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
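Below is a minimal inference sketch following the template above (the example text, entity type, and generation settings are illustrative assumptions, not values from the paper or repo):

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "Universal-NER/UniNER-7B-type-sup"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")

def build_prompt(text: str, entity_type: str) -> str:
    # Mirrors the prompting template above: one entity type per instance.
    return (
        "A virtual assistant answers questions from a user based on the provided text.\n"
        f"USER: Text: {text}\n"
        "ASSISTANT: I've read this text.\n"
        f"USER: What describes {entity_type} in the text?\n"
        "ASSISTANT:"
    )

prompt = build_prompt("Maria moved from Rome to Berlin in 2019.", "location")
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=64)

# Decode only the newly generated tokens, i.e. the model's JSON-formatted answer.
answer = tokenizer.decode(output_ids[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True)
print(answer)  # expected to be a JSON list such as ["Rome", "Berlin"]
```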
{"language": ["en"], "license": "cc-by-nc-4.0"}
task
[ "NAMED_ENTITY_RECOGNITION" ]
45,505
Shiv-Pal/distilbert-base-uncased-finetuned-emotion
Shiv-Pal
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-04-05T17:06:36Z
2024-04-05T17:57:36+00:00
5
0
--- base_model: distilbert-base-uncased datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.93 name: Accuracy - type: f1 value: 0.9302447577955986 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1753 - Accuracy: 0.93 - F1: 0.9302 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 125 | 0.1958 | 0.9265 | 0.9273 | | 0.2008 | 2.0 | 250 | 0.1753 | 0.93 | 0.9302 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.1.2 - Datasets 2.15.0 - Tokenizers 0.15.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1753 - Accuracy: 0.93 - F1: 0.9302 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 128 - eval_batch_size: 128 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 125 | 0.1958 | 0.9265 | 0.9273 | | 0.2008 | 2.0 | 250 | 0.1753 | 0.93 | 0.9302 | ### Framework versions - Transformers 4.38.2 - Pytorch 2.1.2 - Datasets 2.15.0 - Tokenizers 0.15.2
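The card above omits a usage snippet; a minimal inference sketch for this checkpoint could look as follows (the example sentence is an assumption, and the emitted label names depend on the id2label mapping stored with the model):

```python
from transformers import pipeline

# The emotion dataset has six classes: sadness, joy, love, anger, fear, surprise.
classifier = pipeline(
    "text-classification",
    model="Shiv-Pal/distilbert-base-uncased-finetuned-emotion",
)

print(classifier("I can't believe how well the launch went!"))
# e.g. [{'label': 'joy', 'score': 0.98}]  (exact output depends on the checkpoint)
```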
{"base_model": "distilbert-base-uncased", "datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.93, "name": "Accuracy"}, {"type": "f1", "value": 0.9302447577955986, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,506
Nehc/e5-large-ru
Nehc
feature-extraction
[ "sentence-transformers", "safetensors", "xlm-roberta", "mteb", "Sentence Transformers", "sentence-similarity", "feature-extraction", "ru", "en", "arxiv:2402.05672", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-03-02T15:37:28Z
2024-03-02T16:16:43+00:00
18
1
--- language: - ru - en license: mit tags: - mteb - Sentence Transformers - sentence-similarity - feature-extraction - sentence-transformers --- # e5-large-ru A mod of https://huggingface.co/intfloat/multilingual-e5-large: the tokenizer was shrunk to 32K (ru+en) following David Dale's [manual](https://towardsdatascience.com/how-to-adapt-a-multilingual-t5-model-for-a-single-language-b9f94f3d9c90) and with his invaluable assistance. Thank you, David! 🥰 ## Support for Sentence Transformers Below is a usage example with sentence_transformers. ```python from sentence_transformers import SentenceTransformer model = SentenceTransformer('Nehc/e5-large-ru') input_texts = ["passage: This is an example sentence", "passage: Каждый охотник желает знать.", "query: Где сидит фазан?"] embeddings = model.encode(input_texts, normalize_embeddings=True) ``` Package requirements: `pip install sentence_transformers~=2.2.2` Contributors: [michaelfeil](https://huggingface.co/michaelfeil) ## FAQ **1. Do I need to add the prefix "query: " and "passage: " to input texts?** Yes, this is how the model was trained; otherwise you will see a performance degradation. Here are some rules of thumb: - Use "query: " and "passage: ", respectively, for asymmetric tasks such as passage retrieval in open QA and ad-hoc information retrieval. - Use the "query: " prefix for symmetric tasks such as semantic similarity, bitext mining, and paraphrase retrieval. - Use the "query: " prefix if you want to use embeddings as features, e.g. for linear-probing classification or clustering. **2. Why are my reproduced results slightly different from those reported in the model card?** Different versions of `transformers` and `pytorch` can cause negligible but non-zero performance differences. **3. Why do the cosine similarity scores distribute around 0.7 to 1.0?** This is known and expected behavior, as we use a low temperature of 0.01 for the InfoNCE contrastive loss. For text embedding tasks like text retrieval or semantic similarity, what matters is the relative order of the scores rather than their absolute values, so this should not be an issue. ## Citation If you find our paper or models helpful, please consider citing them as follows: ``` @article{wang2024multilingual, title={Multilingual E5 Text Embeddings: A Technical Report}, author={Wang, Liang and Yang, Nan and Huang, Xiaolong and Yang, Linjun and Majumder, Rangan and Wei, Furu}, journal={arXiv preprint arXiv:2402.05672}, year={2024} } ``` ## Limitations Long texts will be truncated to at most 512 tokens.
null
Non_BioNLP
# e5-large-ru A mod of https://huggingface.co/intfloat/multilingual-e5-large: the tokenizer was shrunk to 32K (ru+en) following David Dale's [manual](https://towardsdatascience.com/how-to-adapt-a-multilingual-t5-model-for-a-single-language-b9f94f3d9c90) and with his invaluable assistance. Thank you, David! 🥰 ## Support for Sentence Transformers Below is a usage example with sentence_transformers. ```python from sentence_transformers import SentenceTransformer model = SentenceTransformer('Nehc/e5-large-ru') input_texts = ["passage: This is an example sentence", "passage: Каждый охотник желает знать.", "query: Где сидит фазан?"] embeddings = model.encode(input_texts, normalize_embeddings=True) ``` Package requirements: `pip install sentence_transformers~=2.2.2` Contributors: [michaelfeil](https://huggingface.co/michaelfeil) ## FAQ **1. Do I need to add the prefix "query: " and "passage: " to input texts?** Yes, this is how the model was trained; otherwise you will see a performance degradation. Here are some rules of thumb: - Use "query: " and "passage: ", respectively, for asymmetric tasks such as passage retrieval in open QA and ad-hoc information retrieval. - Use the "query: " prefix for symmetric tasks such as semantic similarity, bitext mining, and paraphrase retrieval. - Use the "query: " prefix if you want to use embeddings as features, e.g. for linear-probing classification or clustering. **2. Why are my reproduced results slightly different from those reported in the model card?** Different versions of `transformers` and `pytorch` can cause negligible but non-zero performance differences. **3. Why do the cosine similarity scores distribute around 0.7 to 1.0?** This is known and expected behavior, as we use a low temperature of 0.01 for the InfoNCE contrastive loss. For text embedding tasks like text retrieval or semantic similarity, what matters is the relative order of the scores rather than their absolute values, so this should not be an issue. ## Citation If you find our paper or models helpful, please consider citing them as follows: ``` @article{wang2024multilingual, title={Multilingual E5 Text Embeddings: A Technical Report}, author={Wang, Liang and Yang, Nan and Huang, Xiaolong and Yang, Linjun and Majumder, Rangan and Wei, Furu}, journal={arXiv preprint arXiv:2402.05672}, year={2024} } ``` ## Limitations Long texts will be truncated to at most 512 tokens.
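To make the prefix rules from the FAQ concrete, here is a minimal retrieval-scoring sketch (the query and passages are illustrative):

```python
from sentence_transformers import SentenceTransformer

model = SentenceTransformer("Nehc/e5-large-ru")

# Asymmetric task: the question gets the "query: " prefix,
# the candidate documents get the "passage: " prefix.
query = "query: Где сидит фазан?"
passages = [
    "passage: Каждый охотник желает знать, где сидит фазан.",
    "passage: This is an unrelated example sentence.",
]

q_emb = model.encode([query], normalize_embeddings=True)
p_emb = model.encode(passages, normalize_embeddings=True)

# With normalized embeddings, the dot product equals cosine similarity.
scores = q_emb @ p_emb.T
print(scores)  # the first (relevant) passage should score higher
```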
{"language": ["ru", "en"], "license": "mit", "tags": ["mteb", "Sentence Transformers", "sentence-similarity", "feature-extraction", "sentence-transformers"]}
task
[ "SEMANTIC_SIMILARITY" ]
45,507
HUMADEX/italian_medical_ner
HUMADEX
token-classification
[ "pytorch", "bert", "NER", "medical", "symptom", "extraction", "italian", "token-classification", "it", "dataset:HUMADEX/italian_ner_dataset", "base_model:google-bert/bert-base-cased", "base_model:finetune:google-bert/bert-base-cased", "license:apache-2.0", "region:us" ]
2024-10-10T12:59:49Z
2025-04-16T09:13:03+00:00
37
0
--- base_model: - google-bert/bert-base-cased datasets: - HUMADEX/italian_ner_dataset language: - it license: apache-2.0 metrics: - f1 - precision - recall - confusion_matrix pipeline_tag: token-classification tags: - NER - medical - symptom - extraction - italian --- # Italian Medical NER ## Acknowledgement This model was created as part of joint research by the HUMADEX research group (https://www.linkedin.com/company/101563689/) and has received funding from the European Union Horizon Europe Research and Innovation Program project SMILE (grant number 101080923) and the Marie Skłodowska-Curie Actions (MSCA) Doctoral Networks project BosomShield (grant number 101073222). Responsibility for the information and views expressed herein lies entirely with the authors. Authors: dr. Izidor Mlakar, Rigon Sallauka, dr. Umut Arioz, dr. Matej Rojc ## Use - **Primary Use Case**: This model is designed to extract medical entities such as symptoms, diagnostic tests, and treatments from clinical text in the Italian language. - **Applications**: Suitable for healthcare professionals, clinical data analysis, and research into medical text processing. - **Supported Entity Types**: - `PROBLEM`: Diseases, symptoms, and medical conditions. - `TEST`: Diagnostic procedures and laboratory tests. - `TREATMENT`: Medications, therapies, and other medical interventions. ## Training Data - **Data Sources**: Annotated datasets, including clinical data and translations of English medical text into Italian. - **Data Augmentation**: The training dataset underwent data augmentation techniques to improve the model's ability to generalize to different text structures. - **Dataset Split**: - **Training Set**: 80% - **Validation Set**: 10% - **Test Set**: 10% ## Model Training - **Training Configuration**: - **Optimizer**: AdamW - **Learning Rate**: 3e-5 - **Batch Size**: 64 - **Epochs**: 200 - **Loss Function**: Focal Loss to handle class imbalance - **Frameworks**: PyTorch, Hugging Face Transformers, SimpleTransformers ## Evaluation metrics - eval_loss = 0.3371218325682951 - f1_score = 0.7559515712148007 - precision = 0.759089632772006 - recall = 0.7528393482105897 Visit [HUMADEX/Weekly-Supervised-NER-pipline](https://github.com/HUMADEX/Weekly-Supervised-NER-pipline) for more info. ## How to Use You can easily use this model with the Hugging Face `transformers` library. Here's an example of how to load and use the model for inference: ```python from transformers import AutoTokenizer, AutoModelForTokenClassification model_name = "HUMADEX/italian_medical_ner" # Load the tokenizer and model tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForTokenClassification.from_pretrained(model_name) # Sample text for inference text = "Il paziente ha lamentato forti mal di testa e nausea che persistevano da due giorni. Per alleviare i sintomi, gli è stato prescritto il paracetamolo e gli è stato consigliato di riposare e bere molti liquidi." # Tokenize the input text inputs = tokenizer(text, return_tensors="pt")
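# --- Continuation (assumed sketch): the card is truncated above, so the steps below
# --- illustrate standard token-classification decoding rather than the authors' code.
import torch

with torch.no_grad():
    outputs = model(**inputs)

# Pick the highest-scoring label for each token and map label ids to names.
predictions = torch.argmax(outputs.logits, dim=2)
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
labels = [model.config.id2label[p.item()] for p in predictions[0]]

for token, label in zip(tokens, labels):
    print(token, label)
```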
null
BioNLP
# Italian Medical NER ## Acknowledgement This model was created as part of joint research by the HUMADEX research group (https://www.linkedin.com/company/101563689/) and has received funding from the European Union Horizon Europe Research and Innovation Program project SMILE (grant number 101080923) and the Marie Skłodowska-Curie Actions (MSCA) Doctoral Networks project BosomShield (grant number 101073222). Responsibility for the information and views expressed herein lies entirely with the authors. Authors: dr. Izidor Mlakar, Rigon Sallauka, dr. Umut Arioz, dr. Matej Rojc ## Preprint The preprint version of the paper associated with this model is available at: [Preprint DOI: 10.20944/preprints202504.1356.v1](https://www.preprints.org/manuscript/202504.1356/v1) ## Use - **Primary Use Case**: This model is designed to extract medical entities such as symptoms, diagnostic tests, and treatments from clinical text in the Italian language. - **Applications**: Suitable for healthcare professionals, clinical data analysis, and research into medical text processing. - **Supported Entity Types**: - `PROBLEM`: Diseases, symptoms, and medical conditions. - `TEST`: Diagnostic procedures and laboratory tests. - `TREATMENT`: Medications, therapies, and other medical interventions. ## Training Data - **Data Sources**: Annotated datasets, including clinical data and translations of English medical text into Italian. - **Data Augmentation**: The training dataset underwent data augmentation techniques to improve the model's ability to generalize to different text structures. - **Dataset Split**: - **Training Set**: 80% - **Validation Set**: 10% - **Test Set**: 10% ## Model Training - **Training Configuration**: - **Optimizer**: AdamW - **Learning Rate**: 3e-5 - **Batch Size**: 64 - **Epochs**: 200 - **Loss Function**: Focal Loss to handle class imbalance - **Frameworks**: PyTorch, Hugging Face Transformers, SimpleTransformers ## Evaluation metrics - eval_loss = 0.3371218325682951 - f1_score = 0.7559515712148007 - precision = 0.759089632772006 - recall = 0.7528393482105897 Visit [HUMADEX/Weekly-Supervised-NER-pipline](https://github.com/HUMADEX/Weekly-Supervised-NER-pipline) for more info. ## How to Use You can easily use this model with the Hugging Face `transformers` library. Here's an example of how to load and use the model for inference: ```python from transformers import AutoTokenizer, AutoModelForTokenClassification model_name = "HUMADEX/italian_medical_ner" # Load the tokenizer and model tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForTokenClassification.from_pretrained(model_name) # Sample text for inference text = "Il paziente ha lamentato forti mal di testa e nausea che persistevano da due giorni. Per alleviare i sintomi, gli è stato prescritto il paracetamolo e gli è stato consigliato di riposare e bere molti liquidi." # Tokenize the input text inputs = tokenizer(text, return_tensors="pt")
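# --- Continuation (assumed sketch): the card is truncated above, so the steps below
# --- illustrate standard token-classification decoding rather than the authors' code.
import torch

with torch.no_grad():
    outputs = model(**inputs)

# Pick the highest-scoring label for each token and map label ids to names.
predictions = torch.argmax(outputs.logits, dim=2)
tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
labels = [model.config.id2label[p.item()] for p in predictions[0]]

for token, label in zip(tokens, labels):
    print(token, label)
```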
{"base_model": ["google-bert/bert-base-cased"], "datasets": ["HUMADEX/italian_ner_dataset"], "language": ["it"], "license": "apache-2.0", "metrics": ["f1", "precision", "recall", "confusion_matrix"], "pipeline_tag": "token-classification", "tags": ["NER", "medical", "symptom", "extraction", "italian"]}
task
[ "TRANSLATION" ]
45,508
osiria/minilm-italian-l6-h384-question-answering
osiria
question-answering
[ "transformers", "pytorch", "safetensors", "xlm-roberta", "question-answering", "it", "dataset:squad_it", "arxiv:2012.15828", "license:mit", "model-index", "endpoints_compatible", "region:us" ]
2023-12-09T13:29:14Z
2024-12-23T22:15:43+00:00
60
0
--- datasets: - squad_it language: - it license: mit pipeline_tag: question-answering widget: - text: Quale libro fu scritto da Alessandro Manzoni? context: Alessandro Manzoni pubblicò la prima versione de I Promessi Sposi nel 1827 - text: In quali competizioni gareggia la Ferrari? context: La Scuderia Ferrari è una squadra corse italiana di Formula 1 con sede a Maranello - text: Quale sport è riferito alla Serie A? context: Il campionato di Serie A è la massima divisione professionistica del campionato italiano di calcio maschile model-index: - name: osiria/minilm-italian-l6-h384-question-answering results: - task: type: question-answering name: Question Answering dataset: name: squad_it type: squad_it metrics: - type: exact-match value: 0.6028 name: Exact Match - type: f1 value: 0.7204 name: F1 --- -------------------------------------------------------------------------------------------------- <body> <span class="vertical-text" style="background-color:lightgreen;border-radius: 3px;padding: 3px;"> </span> <br> <span class="vertical-text" style="background-color:orange;border-radius: 3px;padding: 3px;">    Task: Question Answering</span> <br> <span class="vertical-text" style="background-color:lightblue;border-radius: 3px;padding: 3px;">    Model: MiniLM</span> <br> <span class="vertical-text" style="background-color:tomato;border-radius: 3px;padding: 3px;">    Lang: IT</span> <br> <span class="vertical-text" style="background-color:lightgrey;border-radius: 3px;padding: 3px;">  </span> <br> <span class="vertical-text" style="background-color:#CF9FFF;border-radius: 3px;padding: 3px;"> </span> </body> -------------------------------------------------------------------------------------------------- <h3>Model description</h3> This is a <b>MiniLMv2</b> <b>[1]</b> model for the <b>Italian</b> language, fine-tuned for <b>Extractive Question Answering</b> on the [SQuAD-IT](https://huggingface.co/datasets/squad_it) dataset <b>[2]</b>. <h3>Training and Performance</h3> The model is trained to perform question answering, given a context and a question (under the assumption that the context contains the answer to the question). It has been fine-tuned for Extractive Question Answering on the SQuAD-IT dataset for 2 epochs, with a linearly decaying learning rate starting from 3e-5, a maximum sequence length of 384, and a document stride of 128. <br>The dataset includes 54,159 training instances and 7,609 test instances. <b>update: version 2.0</b> The 2.0 version further improves performance by exploiting a two-phase fine-tuning strategy: the model is first fine-tuned on the English SQuAD v2 (1 epoch, 20% warmup ratio, and max learning rate of 3e-5), then further fine-tuned on the Italian SQuAD (2 epochs, no warmup, initial learning rate of 3e-5). In order to maximize the benefits of the multilingual procedure, [L6xH384 mMiniLMv2](https://github.com/microsoft/unilm/tree/master/minilm) is used as the pre-trained model. 
When the double fine-tuning is completed, the embedding layer is compressed as in [minilm-l6-h384-italian-cased](https://huggingface.co/osiria/minilm-l6-h384-italian-cased) to obtain a monolingual-sized model. The performance on the test set is reported in the following table (<b>version 2.0</b>): | EM | F1 | | ------ | ------ | | 60.28 | 72.04 | Testing notebook: https://huggingface.co/osiria/minilm-italian-l6-h384-question-answering/blob/main/osiria_minilm_l6_h384_italian_qa_evaluation.ipynb <h3>Quick usage</h3> In order to get the best possible outputs from the model, it is recommended to use the following pipeline: ```python from transformers import AutoTokenizer, AutoModelForQuestionAnswering import re from transformers.pipelines import QuestionAnsweringPipeline tokenizer = AutoTokenizer.from_pretrained("osiria/minilm-italian-l6-h384-question-answering") model = AutoModelForQuestionAnswering.from_pretrained("osiria/minilm-italian-l6-h384-question-answering") class OsiriaQA(QuestionAnsweringPipeline): def __init__(self, punctuation = ',;.:!?()[\]{}', **kwargs): QuestionAnsweringPipeline.__init__(self, **kwargs) self.post_regex_left = "^[\s" + punctuation + "]+" self.post_regex_right = "[\s" + punctuation + "]+$" def postprocess(self, output): output = QuestionAnsweringPipeline.postprocess(self, model_outputs=output) output_length = len(output["answer"]) output["answer"] = re.sub(self.post_regex_left, "", output["answer"]) output["start"] = output["start"] + (output_length - len(output["answer"])) output_length = len(output["answer"]) output["answer"] = re.sub(self.post_regex_right, "", output["answer"]) output["end"] = output["end"] - (output_length - len(output["answer"])) return output pipeline_qa = OsiriaQA(model = model, tokenizer = tokenizer) pipeline_qa(context = "Alessandro Manzoni è nato a Milano nel 1785", question = "Dove è nato Manzoni?") # {'score': 0.9492858052253723, 'start': 28, 'end': 34, 'answer': 'Milano'} ``` You can also try the model online using this web app: https://huggingface.co/spaces/osiria/minilm-l6-h384-italian-question-answering <h3>References</h3> [1] https://arxiv.org/abs/2012.15828 [2] https://link.springer.com/chapter/10.1007/978-3-030-03840-3_29 <h3>Limitations</h3> This model was trained on the English SQuAD v2 and on SQuAD-IT, which is mainly a machine-translated version of the original SQuAD v1.1. This means that the quality of the training set is limited by the machine translation. Moreover, the model is meant to answer questions under the assumption that the required information is actually contained in the given context (which is the underlying assumption of SQuAD v1.1). If this assumption is violated, the model will still try to return an answer, which will be incorrect. <h3>License</h3> The model is released under the <b>MIT</b> license
null
Non_BioNLP
-------------------------------------------------------------------------------------------------- <body> <span class="vertical-text" style="background-color:lightgreen;border-radius: 3px;padding: 3px;"> </span> <br> <span class="vertical-text" style="background-color:orange;border-radius: 3px;padding: 3px;">    Task: Question Answering</span> <br> <span class="vertical-text" style="background-color:lightblue;border-radius: 3px;padding: 3px;">    Model: MiniLM</span> <br> <span class="vertical-text" style="background-color:tomato;border-radius: 3px;padding: 3px;">    Lang: IT</span> <br> <span class="vertical-text" style="background-color:lightgrey;border-radius: 3px;padding: 3px;">  </span> <br> <span class="vertical-text" style="background-color:#CF9FFF;border-radius: 3px;padding: 3px;"> </span> </body> -------------------------------------------------------------------------------------------------- <h3>Model description</h3> This is a <b>MiniLMv2</b> <b>[1]</b> model for the <b>Italian</b> language, fine-tuned for <b>Extractive Question Answering</b> on the [SQuAD-IT](https://huggingface.co/datasets/squad_it) dataset <b>[2]</b>. <h3>Training and Performance</h3> The model is trained to perform question answering, given a context and a question (under the assumption that the context contains the answer to the question). It has been fine-tuned for Extractive Question Answering on the SQuAD-IT dataset for 2 epochs, with a linearly decaying learning rate starting from 3e-5, a maximum sequence length of 384, and a document stride of 128. <br>The dataset includes 54,159 training instances and 7,609 test instances. <b>update: version 2.0</b> The 2.0 version further improves performance by exploiting a two-phase fine-tuning strategy: the model is first fine-tuned on the English SQuAD v2 (1 epoch, 20% warmup ratio, and max learning rate of 3e-5), then further fine-tuned on the Italian SQuAD (2 epochs, no warmup, initial learning rate of 3e-5). In order to maximize the benefits of the multilingual procedure, [L6xH384 mMiniLMv2](https://github.com/microsoft/unilm/tree/master/minilm) is used as the pre-trained model. 
When the double fine-tuning is completed, the embedding layer is compressed as in [minilm-l6-h384-italian-cased](https://huggingface.co/osiria/minilm-l6-h384-italian-cased) to obtain a monolingual-sized model. The performance on the test set is reported in the following table (<b>version 2.0</b>): | EM | F1 | | ------ | ------ | | 60.28 | 72.04 | Testing notebook: https://huggingface.co/osiria/minilm-italian-l6-h384-question-answering/blob/main/osiria_minilm_l6_h384_italian_qa_evaluation.ipynb <h3>Quick usage</h3> In order to get the best possible outputs from the model, it is recommended to use the following pipeline: ```python from transformers import AutoTokenizer, AutoModelForQuestionAnswering import re from transformers.pipelines import QuestionAnsweringPipeline tokenizer = AutoTokenizer.from_pretrained("osiria/minilm-italian-l6-h384-question-answering") model = AutoModelForQuestionAnswering.from_pretrained("osiria/minilm-italian-l6-h384-question-answering") class OsiriaQA(QuestionAnsweringPipeline): def __init__(self, punctuation = ',;.:!?()[\]{}', **kwargs): QuestionAnsweringPipeline.__init__(self, **kwargs) self.post_regex_left = "^[\s" + punctuation + "]+" self.post_regex_right = "[\s" + punctuation + "]+$" def postprocess(self, output): output = QuestionAnsweringPipeline.postprocess(self, model_outputs=output) output_length = len(output["answer"]) output["answer"] = re.sub(self.post_regex_left, "", output["answer"]) output["start"] = output["start"] + (output_length - len(output["answer"])) output_length = len(output["answer"]) output["answer"] = re.sub(self.post_regex_right, "", output["answer"]) output["end"] = output["end"] - (output_length - len(output["answer"])) return output pipeline_qa = OsiriaQA(model = model, tokenizer = tokenizer) pipeline_qa(context = "Alessandro Manzoni è nato a Milano nel 1785", question = "Dove è nato Manzoni?") # {'score': 0.9492858052253723, 'start': 28, 'end': 34, 'answer': 'Milano'} ``` You can also try the model online using this web app: https://huggingface.co/spaces/osiria/minilm-l6-h384-italian-question-answering <h3>References</h3> [1] https://arxiv.org/abs/2012.15828 [2] https://link.springer.com/chapter/10.1007/978-3-030-03840-3_29 <h3>Limitations</h3> This model was trained on the English SQuAD v2 and on SQuAD-IT, which is mainly a machine-translated version of the original SQuAD v1.1. This means that the quality of the training set is limited by the machine translation. Moreover, the model is meant to answer questions under the assumption that the required information is actually contained in the given context (which is the underlying assumption of SQuAD v1.1). If this assumption is violated, the model will still try to return an answer, which will be incorrect. <h3>License</h3> The model is released under the <b>MIT</b> license
{"datasets": ["squad_it"], "language": ["it"], "license": "mit", "pipeline_tag": "question-answering", "widget": [{"text": "Quale libro fu scritto da Alessandro Manzoni?", "context": "Alessandro Manzoni pubblicò la prima versione de I Promessi Sposi nel 1827"}, {"text": "In quali competizioni gareggia la Ferrari?", "context": "La Scuderia Ferrari è una squadra corse italiana di Formula 1 con sede a Maranello"}, {"text": "Quale sport è riferito alla Serie A?", "context": "Il campionato di Serie A è la massima divisione professionistica del campionato italiano di calcio maschile"}], "model-index": [{"name": "osiria/minilm-italian-l6-h384-question-answering", "results": [{"task": {"type": "question-answering", "name": "Question Answering"}, "dataset": {"name": "squad_it", "type": "squad_it"}, "metrics": [{"type": "exact-match", "value": 0.6028, "name": "Exact Match"}, {"type": "f1", "value": 0.7204, "name": "F1"}]}]}]}
task
[ "QUESTION_ANSWERING", "TRANSLATION" ]
45,509
tomaarsen/span-marker-bert-base-uncased-acronyms
tomaarsen
token-classification
[ "span-marker", "pytorch", "tensorboard", "safetensors", "token-classification", "ner", "named-entity-recognition", "generated_from_span_marker_trainer", "en", "dataset:acronym_identification", "base_model:google-bert/bert-base-uncased", "base_model:finetune:google-bert/bert-base-uncased", "license:apache-2.0", "model-index", "co2_eq_emissions", "region:us" ]
2023-08-14T06:49:22Z
2023-09-27T12:33:22+00:00
1,778
5
--- base_model: bert-base-uncased datasets: - acronym_identification language: - en library_name: span-marker license: apache-2.0 metrics: - precision - recall - f1 pipeline_tag: token-classification tags: - span-marker - token-classification - ner - named-entity-recognition - generated_from_span_marker_trainer widget: - text: here, da = direct assessment, rr = relative ranking, ds = discrete scale and cs = continuous scale. example_title: Uncased 1 - text: modifying or replacing the erasable programmable read only memory (eprom) in a phone would allow the configuration of any esn and min via software for cellular devices. example_title: Uncased 2 - text: we propose a technique called aggressive stochastic weight averaging (aswa) and an extension called norm-filtered aggressive stochastic weight averaging (naswa) which improves te stability of models over random seeds. example_title: Uncased 3 - text: the choice of the encoder and decoder modules of dnpg can be quite flexible, for instance long-short term memory networks (lstm) or convolutional neural network (cnn). example_title: Uncased 4 co2_eq_emissions: emissions: 31.203903222402037 source: codecarbon training_type: fine-tuning on_cloud: false cpu_model: 13th Gen Intel(R) Core(TM) i7-13700K ram_total_size: 31.777088165283203 hours_used: 0.272 hardware_used: 1 x NVIDIA GeForce RTX 3090 model-index: - name: SpanMarker with bert-base-uncased on Acronym Identification results: - task: type: token-classification name: Named Entity Recognition dataset: name: Acronym Identification type: acronym_identification split: validation metrics: - type: f1 value: 0.9198933333333332 name: F1 - type: precision value: 0.9339397877409573 name: Precision - type: recall value: 0.9062631357713324 name: Recall --- # SpanMarker with bert-base-uncased on Acronym Identification This is a [SpanMarker](https://github.com/tomaarsen/SpanMarkerNER) model trained on the [Acronym Identification](https://huggingface.co/datasets/acronym_identification) dataset that can be used for Named Entity Recognition. This SpanMarker model uses [bert-base-uncased](https://huggingface.co/bert-base-uncased) as the underlying encoder. See [train.py](train.py) for the training script. Is your data always capitalized correctly? Then consider using the cased variant of this model instead for better performance: [tomaarsen/span-marker-bert-base-acronyms](https://huggingface.co/tomaarsen/span-marker-bert-base-acronyms). 
## Model Details ### Model Description - **Model Type:** SpanMarker - **Encoder:** [bert-base-uncased](https://huggingface.co/bert-base-uncased) - **Maximum Sequence Length:** 256 tokens - **Maximum Entity Length:** 8 words - **Training Dataset:** [Acronym Identification](https://huggingface.co/datasets/acronym_identification) - **Language:** en - **License:** apache-2.0 ### Model Sources - **Repository:** [SpanMarker on GitHub](https://github.com/tomaarsen/SpanMarkerNER) - **Thesis:** [SpanMarker For Named Entity Recognition](https://raw.githubusercontent.com/tomaarsen/SpanMarkerNER/main/thesis.pdf) ### Model Labels | Label | Examples | |:------|:------------------------------------------------------------------------------------------------------| | long | "successive convex approximation", "controlled natural language", "Conversational Question Answering" | | short | "SODA", "CNL", "CoQA" | ## Evaluation ### Metrics | Label | Precision | Recall | F1 | |:--------|:----------|:-------|:-------| | **all** | 0.9339 | 0.9063 | 0.9199 | | long | 0.9314 | 0.8845 | 0.9074 | | short | 0.9352 | 0.9174 | 0.9262 | ## Uses ### Direct Use for Inference ```python from span_marker import SpanMarkerModel # Download from the 🤗 Hub model = SpanMarkerModel.from_pretrained("tomaarsen/span-marker-bert-base-uncased-acronyms") # Run inference entities = model.predict("compression algorithms like principal component analysis (pca) can reduce noise and complexity.") ``` ### Downstream Use You can finetune this model on your own dataset. <details><summary>Click to expand</summary> ```python from datasets import load_dataset from span_marker import SpanMarkerModel, Trainer # Download from the 🤗 Hub model = SpanMarkerModel.from_pretrained("tomaarsen/span-marker-bert-base-uncased-acronyms") # Load a Dataset with "tokens" and "ner_tags" columns dataset = load_dataset("conll2003") # For example CoNLL2003 # Initialize a Trainer using the pretrained model & dataset trainer = Trainer( model=model, train_dataset=dataset["train"], eval_dataset=dataset["validation"], ) trainer.train() trainer.save_model("tomaarsen/span-marker-bert-base-uncased-acronyms-finetuned") ``` </details> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:----------------------|:----|:--------|:----| | Sentence length | 4 | 32.3372 | 170 | | Entities per sentence | 0 | 2.6775 | 24 | ### Training Hyperparameters - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 2 ### Training Results | Epoch | Step | Validation Loss | Validation Precision | Validation Recall | Validation F1 | Validation Accuracy | |:------:|:----:|:---------------:|:--------------------:|:-----------------:|:-------------:|:-------------------:| | 0.3120 | 200 | 0.0097 | 0.8999 | 0.8731 | 0.8863 | 0.9718 | | 0.6240 | 400 | 0.0075 | 0.9163 | 0.8995 | 0.9078 | 0.9769 | | 0.9360 | 600 | 0.0076 | 0.9079 | 0.9153 | 0.9116 | 0.9773 | | 1.2480 | 800 | 0.0069 | 0.9267 | 0.9006 | 0.9135 | 0.9778 | | 1.5601 | 1000 | 0.0065 | 0.9268 | 0.9044 | 0.9154 | 0.9782 | | 1.8721 | 1200 | 0.0065 | 0.9279 | 0.9061 | 0.9168 | 0.9787 | ### Environmental Impact Carbon emissions were measured using [CodeCarbon](https://github.com/mlco2/codecarbon). - **Carbon Emitted**: 0.031 kg of CO2 - **Hours Used**: 0.272 hours ### Training Hardware - **On Cloud**: No - **GPU Model**: 1 x NVIDIA GeForce RTX 3090 - **CPU Model**: 13th Gen Intel(R) Core(TM) i7-13700K - **RAM Size**: 31.78 GB ### Framework Versions - Python: 3.9.16 - SpanMarker: 1.3.1.dev - Transformers: 4.30.0 - PyTorch: 2.0.1+cu118 - Datasets: 2.14.0 - Tokenizers: 0.13.2 ## Citation ### BibTeX ``` @software{Aarsen_SpanMarker, author = {Aarsen, Tom}, license = {Apache-2.0}, title = {{SpanMarker for Named Entity Recognition}}, url = {https://github.com/tomaarsen/SpanMarkerNER} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SpanMarker with bert-base-uncased on Acronym Identification This is a [SpanMarker](https://github.com/tomaarsen/SpanMarkerNER) model trained on the [Acronym Identification](https://huggingface.co/datasets/acronym_identification) dataset that can be used for Named Entity Recognition. This SpanMarker model uses [bert-base-uncased](https://huggingface.co/bert-base-uncased) as the underlying encoder. See [train.py](train.py) for the training script. Is your data always capitalized correctly? Then consider using the cased variant of this model instead for better performance: [tomaarsen/span-marker-bert-base-acronyms](https://huggingface.co/tomaarsen/span-marker-bert-base-acronyms). ## Model Details ### Model Description - **Model Type:** SpanMarker - **Encoder:** [bert-base-uncased](https://huggingface.co/bert-base-uncased) - **Maximum Sequence Length:** 256 tokens - **Maximum Entity Length:** 8 words - **Training Dataset:** [Acronym Identification](https://huggingface.co/datasets/acronym_identification) - **Language:** en - **License:** apache-2.0 ### Model Sources - **Repository:** [SpanMarker on GitHub](https://github.com/tomaarsen/SpanMarkerNER) - **Thesis:** [SpanMarker For Named Entity Recognition](https://raw.githubusercontent.com/tomaarsen/SpanMarkerNER/main/thesis.pdf) ### Model Labels | Label | Examples | |:------|:------------------------------------------------------------------------------------------------------| | long | "successive convex approximation", "controlled natural language", "Conversational Question Answering" | | short | "SODA", "CNL", "CoQA" | ## Evaluation ### Metrics | Label | Precision | Recall | F1 | |:--------|:----------|:-------|:-------| | **all** | 0.9339 | 0.9063 | 0.9199 | | long | 0.9314 | 0.8845 | 0.9074 | | short | 0.9352 | 0.9174 | 0.9262 | ## Uses ### Direct Use for Inference ```python from span_marker import SpanMarkerModel # Download from the 🤗 Hub model = SpanMarkerModel.from_pretrained("tomaarsen/span-marker-bert-base-uncased-acronyms") # Run inference entities = model.predict("compression algorithms like principal component analysis (pca) can reduce noise and complexity.") ``` ### Downstream Use You can finetune this model on your own dataset. <details><summary>Click to expand</summary> ```python from datasets import load_dataset from span_marker import SpanMarkerModel, Trainer # Download from the 🤗 Hub model = SpanMarkerModel.from_pretrained("tomaarsen/span-marker-bert-base-uncased-acronyms") # Load a Dataset with "tokens" and "ner_tags" columns dataset = load_dataset("conll2003") # For example CoNLL2003 # Initialize a Trainer using the pretrained model & dataset trainer = Trainer( model=model, train_dataset=dataset["train"], eval_dataset=dataset["validation"], ) trainer.train() trainer.save_model("tomaarsen/span-marker-bert-base-uncased-acronyms-finetuned") ``` </details> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:----------------------|:----|:--------|:----| | Sentence length | 4 | 32.3372 | 170 | | Entities per sentence | 0 | 2.6775 | 24 | ### Training Hyperparameters - learning_rate: 5e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - num_epochs: 2 ### Training Results | Epoch | Step | Validation Loss | Validation Precision | Validation Recall | Validation F1 | Validation Accuracy | |:------:|:----:|:---------------:|:--------------------:|:-----------------:|:-------------:|:-------------------:| | 0.3120 | 200 | 0.0097 | 0.8999 | 0.8731 | 0.8863 | 0.9718 | | 0.6240 | 400 | 0.0075 | 0.9163 | 0.8995 | 0.9078 | 0.9769 | | 0.9360 | 600 | 0.0076 | 0.9079 | 0.9153 | 0.9116 | 0.9773 | | 1.2480 | 800 | 0.0069 | 0.9267 | 0.9006 | 0.9135 | 0.9778 | | 1.5601 | 1000 | 0.0065 | 0.9268 | 0.9044 | 0.9154 | 0.9782 | | 1.8721 | 1200 | 0.0065 | 0.9279 | 0.9061 | 0.9168 | 0.9787 | ### Environmental Impact Carbon emissions were measured using [CodeCarbon](https://github.com/mlco2/codecarbon). - **Carbon Emitted**: 0.031 kg of CO2 - **Hours Used**: 0.272 hours ### Training Hardware - **On Cloud**: No - **GPU Model**: 1 x NVIDIA GeForce RTX 3090 - **CPU Model**: 13th Gen Intel(R) Core(TM) i7-13700K - **RAM Size**: 31.78 GB ### Framework Versions - Python: 3.9.16 - SpanMarker: 1.3.1.dev - Transformers: 4.30.0 - PyTorch: 2.0.1+cu118 - Datasets: 2.14.0 - Tokenizers: 0.13.2 ## Citation ### BibTeX ``` @software{Aarsen_SpanMarker, author = {Aarsen, Tom}, license = {Apache-2.0}, title = {{SpanMarker for Named Entity Recognition}}, url = {https://github.com/tomaarsen/SpanMarkerNER} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "bert-base-uncased", "datasets": ["acronym_identification"], "language": ["en"], "library_name": "span-marker", "license": "apache-2.0", "metrics": ["precision", "recall", "f1"], "pipeline_tag": "token-classification", "tags": ["span-marker", "token-classification", "ner", "named-entity-recognition", "generated_from_span_marker_trainer"], "widget": [{"text": "here, da = direct assessment, rr = relative ranking, ds = discrete scale and cs = continuous scale.", "example_title": "Uncased 1"}, {"text": "modifying or replacing the erasable programmable read only memory (eprom) in a phone would allow the configuration of any esn and min via software for cellular devices.", "example_title": "Uncased 2"}, {"text": "we propose a technique called aggressive stochastic weight averaging (aswa) and an extension called norm-filtered aggressive stochastic weight averaging (naswa) which improves te stability of models over random seeds.", "example_title": "Uncased 3"}, {"text": "the choice of the encoder and decoder modules of dnpg can be quite flexible, for instance long-short term memory networks (lstm) or convolutional neural network (cnn).", "example_title": "Uncased 4"}], "co2_eq_emissions": {"emissions": 31.203903222402037, "source": "codecarbon", "training_type": "fine-tuning", "on_cloud": false, "cpu_model": "13th Gen Intel(R) Core(TM) i7-13700K", "ram_total_size": 31.777088165283203, "hours_used": 0.272, "hardware_used": "1 x NVIDIA GeForce RTX 3090"}, "model-index": [{"name": "SpanMarker with bert-base-uncased on Acronym Identification", "results": [{"task": {"type": "token-classification", "name": "Named Entity Recognition"}, "dataset": {"name": "Acronym Identification", "type": "acronym_identification", "split": "validation"}, "metrics": [{"type": "f1", "value": 0.9198933333333332, "name": "F1"}, {"type": "precision", "value": 0.9339397877409573, "name": "Precision"}, {"type": "recall", "value": 0.9062631357713324, "name": "Recall"}]}]}]}
task
[ "NAMED_ENTITY_RECOGNITION", "QUESTION_ANSWERING" ]
45,510
microsoft/git-large-r-coco
microsoft
image-to-text
[ "transformers", "pytorch", "git", "image-text-to-text", "vision", "image-captioning", "image-to-text", "en", "arxiv:2205.14100", "license:mit", "endpoints_compatible", "region:us" ]
2023-01-22T19:04:40Z
2023-02-08T10:50:12+00:00
356
10
--- language: en license: mit model_name: microsoft/git-large-coco pipeline_tag: image-to-text tags: - vision - image-captioning --- # GIT (GenerativeImage2Text), large-sized, fine-tuned on COCO, R* R = re-trained by removing some offensive captions from the CC12M dataset GIT (short for GenerativeImage2Text) model, large-sized version, fine-tuned on COCO. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text). Disclaimer: The team releasing GIT did not write a model card for this model, so this model card has been written by the Hugging Face team. ## Model description GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on a large number of (image, text) pairs. The goal for the model is simply to predict the next text token, given the image tokens and the previous text tokens. The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token. ![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg) This allows the model to be used for tasks like: - image and video captioning - visual question answering (VQA) on images and videos - even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text). ## Intended uses & limitations You can use the raw model for image captioning. See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you. ### How to use For code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/main/model_doc/git#transformers.GitForCausalLM.forward.example). ## Training data From the paper: > We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a). Note, however, that this refers to the model called "GIT" in the paper, which is not open-sourced. This checkpoint is "GIT-large", a smaller variant of GIT trained on 20 million image-text pairs. Next, the model was fine-tuned on COCO. See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details. ### Preprocessing We refer to the original repo regarding details for preprocessing during training. During validation, the shorter edge of each image is resized, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation. ## Evaluation results For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
null
Non_BioNLP
# GIT (GenerativeImage2Text), large-sized, fine-tuned on COCO, R* R = re-trained by removing some offensive captions from the CC12M dataset GIT (short for GenerativeImage2Text) model, large-sized version, fine-tuned on COCO. It was introduced in the paper [GIT: A Generative Image-to-text Transformer for Vision and Language](https://arxiv.org/abs/2205.14100) by Wang et al. and first released in [this repository](https://github.com/microsoft/GenerativeImage2Text). Disclaimer: The team releasing GIT did not write a model card for this model, so this model card has been written by the Hugging Face team. ## Model description GIT is a Transformer decoder conditioned on both CLIP image tokens and text tokens. The model is trained using "teacher forcing" on a large number of (image, text) pairs. The goal for the model is simply to predict the next text token, given the image tokens and the previous text tokens. The model has full access to (i.e. a bidirectional attention mask is used for) the image patch tokens, but only has access to the previous text tokens (i.e. a causal attention mask is used for the text tokens) when predicting the next text token. ![GIT architecture](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/model_doc/git_architecture.jpg) This allows the model to be used for tasks like: - image and video captioning - visual question answering (VQA) on images and videos - even image classification (by simply conditioning the model on the image and asking it to generate a class for it in text). ## Intended uses & limitations You can use the raw model for image captioning. See the [model hub](https://huggingface.co/models?search=microsoft/git) to look for fine-tuned versions on a task that interests you. ### How to use For code examples, we refer to the [documentation](https://huggingface.co/docs/transformers/main/model_doc/git#transformers.GitForCausalLM.forward.example). ## Training data From the paper: > We collect 0.8B image-text pairs for pre-training, which include COCO (Lin et al., 2014), Conceptual Captions (CC3M) (Sharma et al., 2018), SBU (Ordonez et al., 2011), Visual Genome (VG) (Krishna et al., 2016), Conceptual Captions (CC12M) (Changpinyo et al., 2021), ALT200M (Hu et al., 2021a), and an extra 0.6B data following a similar collection procedure in Hu et al. (2021a). Note, however, that this refers to the model called "GIT" in the paper, which is not open-sourced. This checkpoint is "GIT-large", a smaller variant of GIT trained on 20 million image-text pairs. Next, the model was fine-tuned on COCO. See table 11 in the [paper](https://arxiv.org/abs/2205.14100) for more details. ### Preprocessing We refer to the original repo regarding details for preprocessing during training. During validation, the shorter edge of each image is resized, after which center cropping is performed to a fixed-size resolution. Next, frames are normalized across the RGB channels with the ImageNet mean and standard deviation. ## Evaluation results For evaluation results, we refer readers to the [paper](https://arxiv.org/abs/2205.14100).
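Since the card defers to the documentation for code, here is a minimal captioning sketch (the example image URL is a placeholder and the generation length is an illustrative choice):

```python
import requests
from PIL import Image
from transformers import AutoProcessor, AutoModelForCausalLM

processor = AutoProcessor.from_pretrained("microsoft/git-large-r-coco")
model = AutoModelForCausalLM.from_pretrained("microsoft/git-large-r-coco")

# Load any RGB image; this URL is a placeholder example.
url = "http://images.cocodataset.org/val2017/000000039769.jpg"
image = Image.open(requests.get(url, stream=True).raw)

# The processor resizes, center-crops, and normalizes the image.
pixel_values = processor(images=image, return_tensors="pt").pixel_values

generated_ids = model.generate(pixel_values=pixel_values, max_length=50)
print(processor.batch_decode(generated_ids, skip_special_tokens=True)[0])
```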
{"language": "en", "license": "mit", "model_name": "microsoft/git-large-coco", "pipeline_tag": "image-to-text", "tags": ["vision", "image-captioning"]}
task
[ "QUESTION_ANSWERING" ]
45,511
TransferGraph/neibla_distilbert-base-uncased-finetuned-emotion-finetuned-lora-ag_news
TransferGraph
text-classification
[ "peft", "safetensors", "parquet", "text-classification", "dataset:ag_news", "base_model:neibla/distilbert-base-uncased-finetuned-emotion", "base_model:adapter:neibla/distilbert-base-uncased-finetuned-emotion", "license:apache-2.0", "model-index", "region:us" ]
2024-02-27T22:48:53Z
2024-02-28T00:41:23+00:00
0
0
--- base_model: neibla/distilbert-base-uncased-finetuned-emotion datasets: - ag_news library_name: peft license: apache-2.0 metrics: - accuracy tags: - parquet - text-classification model-index: - name: neibla_distilbert-base-uncased-finetuned-emotion-finetuned-lora-ag_news results: - task: type: text-classification name: Text Classification dataset: name: ag_news type: ag_news config: default split: test args: default metrics: - type: accuracy value: 0.939078947368421 name: accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # neibla_distilbert-base-uncased-finetuned-emotion-finetuned-lora-ag_news This model is a fine-tuned version of [neibla/distilbert-base-uncased-finetuned-emotion](https://huggingface.co/neibla/distilbert-base-uncased-finetuned-emotion) on the ag_news dataset. It achieves the following results on the evaluation set: - accuracy: 0.9391 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0004 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | accuracy | train_loss | epoch | |:--------:|:----------:|:-----:| | 0.2512 | None | 0 | | 0.9261 | 0.2630 | 0 | | 0.9305 | 0.1988 | 1 | | 0.9357 | 0.1769 | 2 | | 0.9391 | 0.1612 | 3 | ### Framework versions - PEFT 0.8.2 - Transformers 4.37.2 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.2
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # neibla_distilbert-base-uncased-finetuned-emotion-finetuned-lora-ag_news This model is a fine-tuned version of [neibla/distilbert-base-uncased-finetuned-emotion](https://huggingface.co/neibla/distilbert-base-uncased-finetuned-emotion) on the ag_news dataset. It achieves the following results on the evaluation set: - accuracy: 0.9391 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0004 - train_batch_size: 24 - eval_batch_size: 24 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | accuracy | train_loss | epoch | |:--------:|:----------:|:-----:| | 0.2512 | None | 0 | | 0.9261 | 0.2630 | 0 | | 0.9305 | 0.1988 | 1 | | 0.9357 | 0.1769 | 2 | | 0.9391 | 0.1612 | 3 | ### Framework versions - PEFT 0.8.2 - Transformers 4.37.2 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.2
{"base_model": "neibla/distilbert-base-uncased-finetuned-emotion", "datasets": ["ag_news"], "library_name": "peft", "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["parquet", "text-classification"], "model-index": [{"name": "neibla_distilbert-base-uncased-finetuned-emotion-finetuned-lora-ag_news", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "ag_news", "type": "ag_news", "config": "default", "split": "test", "args": "default"}, "metrics": [{"type": "accuracy", "value": 0.939078947368421, "name": "accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,512
UKP-SQuARE/NewsQA_Adapter_RoBERTa
UKP-SQuARE
question-answering
[ "adapter-transformers", "question-answering", "en", "dataset:mrqa", "license:mit", "region:us" ]
2023-03-11T20:38:30Z
2023-03-12T10:17:33+00:00
8
0
--- datasets: - mrqa language: - en library_name: adapter-transformers license: mit metrics: - squad pipeline_tag: question-answering --- # Description This is the single-dataset adapter for the NewsQA partition of the MRQA 2019 Shared Task Dataset. The adapter was created by Friedman et al. (2021) and should be used with the `roberta-base` encoder. The UKP-SQuARE team created this model repository to simplify the deployment of this model on the UKP-SQuARE platform. The GitHub repository of the original authors is https://github.com/princeton-nlp/MADE # Usage This model contains the same weights as https://huggingface.co/princeton-nlp/MADE/resolve/main/single_dataset_adapters/NewsQA/model.pt. The only difference is that our repository follows the standard format of AdapterHub. Therefore, you can load this model as follows: ```python from transformers import RobertaForQuestionAnswering, RobertaTokenizerFast, pipeline model = RobertaForQuestionAnswering.from_pretrained("roberta-base") model.load_adapter("UKP-SQuARE/NewsQA_Adapter_RoBERTa", source="hf") model.set_active_adapters("NewsQA") tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base') pipe = pipeline("question-answering", model=model, tokenizer=tokenizer) pipe({"question": "What is the capital of Germany?", "context": "The capital of Germany is Berlin."}) ``` Note that you need the adapter-transformers library (https://adapterhub.ml) for `load_adapter` and `set_active_adapters`. # Evaluation Friedman et al. report an F1 score of **70.9 on NewsQA**. Please refer to the original publication for more information. # Citation Single-dataset Experts for Multi-dataset Question Answering (Friedman et al., EMNLP 2021)
null
Non_BioNLP
# Description This is the single-dataset adapter for the NewsQA partition of the MRQA 2019 Shared Task Dataset. The adapter was created by Friedman et al. (2021) and should be used with the `roberta-base` encoder. The UKP-SQuARE team created this model repository to simplify the deployment of this model on the UKP-SQuARE platform. The GitHub repository of the original authors is https://github.com/princeton-nlp/MADE # Usage This model contains the same weights as https://huggingface.co/princeton-nlp/MADE/resolve/main/single_dataset_adapters/NewsQA/model.pt. The only difference is that our repository follows the standard format of AdapterHub. Therefore, you can load this model as follows: ```python from transformers import RobertaForQuestionAnswering, RobertaTokenizerFast, pipeline model = RobertaForQuestionAnswering.from_pretrained("roberta-base") model.load_adapter("UKP-SQuARE/NewsQA_Adapter_RoBERTa", source="hf") model.set_active_adapters("NewsQA") tokenizer = RobertaTokenizerFast.from_pretrained('roberta-base') pipe = pipeline("question-answering", model=model, tokenizer=tokenizer) pipe({"question": "What is the capital of Germany?", "context": "The capital of Germany is Berlin."}) ``` Note that you need the adapter-transformers library (https://adapterhub.ml) for `load_adapter` and `set_active_adapters`. # Evaluation Friedman et al. report an F1 score of **70.9 on NewsQA**. Please refer to the original publication for more information. # Citation Single-dataset Experts for Multi-dataset Question Answering (Friedman et al., EMNLP 2021)
{"datasets": ["mrqa"], "language": ["en"], "library_name": "adapter-transformers", "license": "mit", "metrics": ["squad"], "pipeline_tag": "question-answering"}
task
[ "QUESTION_ANSWERING" ]
45,513
TheOriginalMarcelo/my_awesome_model
TheOriginalMarcelo
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "dataset:imdb", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-29T22:14:44Z
2023-11-29T22:53:37+00:00
103
0
--- base_model: distilbert-base-uncased datasets: - imdb license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: my_awesome_model results: - task: type: text-classification name: Text Classification dataset: name: imdb type: imdb config: plain_text split: test args: plain_text metrics: - type: accuracy value: 0.92688 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.1955 - Accuracy: 0.9269 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2164 | 1.0 | 1563 | 0.1955 | 0.9269 | ### Framework versions - Transformers 4.36.0.dev0 - Pytorch 2.1.0+cu118 - Datasets 2.15.0 - Tokenizers 0.15.0
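Although the card gives no usage snippet, a sentiment classifier fine-tuned on IMDb like this one can typically be called through the `text-classification` pipeline; the returned label names depend on the `id2label` mapping stored with the checkpoint, so the exact output below is illustrative.

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="TheOriginalMarcelo/my_awesome_model",
)
# Output format: [{"label": ..., "score": ...}]
print(classifier("This movie was a delight from start to finish."))
```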
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # my_awesome_model This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the imdb dataset. It achieves the following results on the evaluation set: - Loss: 0.1955 - Accuracy: 0.9269 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.2164 | 1.0 | 1563 | 0.1955 | 0.9269 | ### Framework versions - Transformers 4.36.0.dev0 - Pytorch 2.1.0+cu118 - Datasets 2.15.0 - Tokenizers 0.15.0
{"base_model": "distilbert-base-uncased", "datasets": ["imdb"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "my_awesome_model", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "imdb", "type": "imdb", "config": "plain_text", "split": "test", "args": "plain_text"}, "metrics": [{"type": "accuracy", "value": 0.92688, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,514
TransferGraph/socialmediaie_TRAC2020_IBEN_B_bert-base-multilingual-uncased-finetuned-lora-tweet_eval_irony
TransferGraph
text-classification
[ "peft", "safetensors", "parquet", "text-classification", "dataset:tweet_eval", "base_model:socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased", "base_model:adapter:socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased", "model-index", "region:us" ]
2024-02-27T17:29:45Z
2024-02-29T13:36:48+00:00
0
0
--- base_model: socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased datasets: - tweet_eval library_name: peft metrics: - accuracy tags: - parquet - text-classification model-index: - name: socialmediaie_TRAC2020_IBEN_B_bert-base-multilingual-uncased-finetuned-lora-tweet_eval_irony results: - task: type: text-classification name: Text Classification dataset: name: tweet_eval type: tweet_eval config: irony split: validation args: irony metrics: - type: accuracy value: 0.5916230366492147 name: accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # socialmediaie_TRAC2020_IBEN_B_bert-base-multilingual-uncased-finetuned-lora-tweet_eval_irony This model is a fine-tuned version of [socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased](https://huggingface.co/socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased) on the tweet_eval dataset. It achieves the following results on the evaluation set: - accuracy: 0.5916 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | accuracy | train_loss | epoch | |:--------:|:----------:|:-----:| | 0.4963 | None | 0 | | 0.5204 | 0.7997 | 0 | | 0.5497 | 0.6917 | 1 | | 0.5696 | 0.6823 | 2 | | 0.5770 | 0.6693 | 3 | | 0.5791 | 0.6611 | 4 | | 0.5948 | 0.6538 | 5 | | 0.5874 | 0.6487 | 6 | | 0.5916 | 0.6428 | 7 | ### Framework versions - PEFT 0.8.2 - Transformers 4.37.2 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.2
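Loading follows the same PEFT pattern as the other TransferGraph adapters; a condensed sketch using `AutoPeftModelForSequenceClassification` (which resolves the base model from the adapter config) is shown below. The binary label order for tweet_eval/irony is an assumption.

```python
from peft import AutoPeftModelForSequenceClassification
from transformers import AutoTokenizer

repo = "TransferGraph/socialmediaie_TRAC2020_IBEN_B_bert-base-multilingual-uncased-finetuned-lora-tweet_eval_irony"

# The adapter config records the base checkpoint, so only the repo id is needed here.
model = AutoPeftModelForSequenceClassification.from_pretrained(repo)
tokenizer = AutoTokenizer.from_pretrained(
    "socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased"
)

inputs = tokenizer("Oh great, another Monday.", return_tensors="pt")
prediction = model(**inputs).logits.argmax(dim=-1).item()
print(prediction)  # 0 = non_irony, 1 = irony (assumed label order)
```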
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # socialmediaie_TRAC2020_IBEN_B_bert-base-multilingual-uncased-finetuned-lora-tweet_eval_irony This model is a fine-tuned version of [socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased](https://huggingface.co/socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased) on the tweet_eval dataset. It achieves the following results on the evaluation set: - accuracy: 0.5916 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 8 ### Training results | accuracy | train_loss | epoch | |:--------:|:----------:|:-----:| | 0.4963 | None | 0 | | 0.5204 | 0.7997 | 0 | | 0.5497 | 0.6917 | 1 | | 0.5696 | 0.6823 | 2 | | 0.5770 | 0.6693 | 3 | | 0.5791 | 0.6611 | 4 | | 0.5948 | 0.6538 | 5 | | 0.5874 | 0.6487 | 6 | | 0.5916 | 0.6428 | 7 | ### Framework versions - PEFT 0.8.2 - Transformers 4.37.2 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.2
{"base_model": "socialmediaie/TRAC2020_IBEN_B_bert-base-multilingual-uncased", "datasets": ["tweet_eval"], "library_name": "peft", "metrics": ["accuracy"], "tags": ["parquet", "text-classification"], "model-index": [{"name": "socialmediaie_TRAC2020_IBEN_B_bert-base-multilingual-uncased-finetuned-lora-tweet_eval_irony", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "tweet_eval", "type": "tweet_eval", "config": "irony", "split": "validation", "args": "irony"}, "metrics": [{"type": "accuracy", "value": 0.5916230366492147, "name": "accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,515
magicsword/wy-mt-en-zh-2
magicsword
translation
[ "transformers", "pytorch", "safetensors", "marian", "text2text-generation", "autotrain", "translation", "unk", "dataset:magicsword/autotrain-data-wy-mt-en-zh", "co2_eq_emissions", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-07-16T15:15:50Z
2023-07-16T17:27:39+00:00
12
0
--- datasets: - magicsword/autotrain-data-wy-mt-en-zh language: - unk - unk tags: - autotrain - translation co2_eq_emissions: emissions: 71.14399741050826 --- # Model Trained Using AutoTrain - Problem type: Translation - Model ID: 74981139786 - CO2 Emissions (in grams): 71.1440 ## Validation Metrics - Loss: 2.220 - SacreBLEU: 12.949 - Gen len: 16.386
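The card lists only AutoTrain metrics; since the underlying checkpoint is a Marian translation model, a minimal inference sketch with the generic `translation` pipeline should apply (hedged, as the card itself shows no usage code):

```python
from transformers import pipeline

# The pipeline infers the translation task from the Marian checkpoint.
translator = pipeline("translation", model="magicsword/wy-mt-en-zh-2")
print(translator("Machine translation is fun.", max_length=64))
```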
null
Non_BioNLP
# Model Trained Using AutoTrain - Problem type: Translation - Model ID: 74981139786 - CO2 Emissions (in grams): 71.1440 ## Validation Metrics - Loss: 2.220 - SacreBLEU: 12.949 - Gen len: 16.386
{"datasets": ["magicsword/autotrain-data-wy-mt-en-zh"], "language": ["unk", "unk"], "tags": ["autotrain", "translation"], "co2_eq_emissions": {"emissions": 71.14399741050826}}
task
[ "TRANSLATION" ]
45,516
Lots-of-LoRAs/Mistral-7B-Instruct-v0.2-4b-r16-task1652
Lots-of-LoRAs
null
[ "pytorch", "safetensors", "en", "arxiv:1910.09700", "arxiv:2407.00066", "base_model:mistralai/Mistral-7B-Instruct-v0.2", "base_model:finetune:mistralai/Mistral-7B-Instruct-v0.2", "license:mit", "region:us" ]
2025-01-02T14:24:05Z
2025-01-02T14:24:11+00:00
0
0
--- base_model: mistralai/Mistral-7B-Instruct-v0.2 language: en library_name: pytorch license: mit --- # Model Card for Mistral-7B-Instruct-v0.2-4b-r16-task1652 <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> LoRA trained on task1652_opus_books_ca-en_translation - **Developed by:** bruel - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** LoRA - **Language(s) (NLP):** en - **License:** mit - **Finetuned from model [optional]:** mistralai/Mistral-7B-Instruct-v0.2 ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** https://github.com/bruel-gabrielsson - **Paper [optional]:** "Compress then Serve: Serving Thousands of LoRA Adapters with Little Overhead" (2024), Rickard Brüel Gabrielsson, Jiacheng Zhu, Onkar Bhardwaj, Leshem Choshen, Kristjan Greenewald, Mikhail Yurochkin and Justin Solomon - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> https://huggingface.co/datasets/Lots-of-LoRAs/task1652_opus_books_ca-en_translation sourced from https://github.com/allenai/natural-instructions ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. 
--> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** @misc{brüelgabrielsson2024compressserveservingthousands, title={Compress then Serve: Serving Thousands of LoRA Adapters with Little Overhead}, author={Rickard Brüel-Gabrielsson and Jiacheng Zhu and Onkar Bhardwaj and Leshem Choshen and Kristjan Greenewald and Mikhail Yurochkin and Justin Solomon}, year={2024}, eprint={2407.00066}, archivePrefix={arXiv}, primaryClass={cs.DC}, url={https://arxiv.org/abs/2407.00066}, } **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
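The card's "How to Get Started" section is empty, so here is a hedged sketch of applying the adapter with PEFT on top of the stated base model; the prompt format and generation settings are illustrative, and `device_map="auto"` assumes `accelerate` is installed.

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "mistralai/Mistral-7B-Instruct-v0.2"
adapter_id = "Lots-of-LoRAs/Mistral-7B-Instruct-v0.2-4b-r16-task1652"

tokenizer = AutoTokenizer.from_pretrained(base_id)
model = AutoModelForCausalLM.from_pretrained(base_id, device_map="auto")
model = PeftModel.from_pretrained(model, adapter_id)

# task1652 is Catalan-to-English translation over OPUS Books.
prompt = "[INST] Translate from Catalan to English: Bon dia! [/INST]"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```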
null
Non_BioNLP
# Model Card for Mistral-7B-Instruct-v0.2-4b-r16-task1652 <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> LoRA trained on task1652_opus_books_ca-en_translation - **Developed by:** bruel - **Funded by [optional]:** [More Information Needed] - **Shared by [optional]:** [More Information Needed] - **Model type:** LoRA - **Language(s) (NLP):** en - **License:** mit - **Finetuned from model [optional]:** mistralai/Mistral-7B-Instruct-v0.2 ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** https://github.com/bruel-gabrielsson - **Paper [optional]:** "Compress then Serve: Serving Thousands of LoRA Adapters with Little Overhead" (2024), Rickard Brüel Gabrielsson, Jiacheng Zhu, Onkar Bhardwaj, Leshem Choshen, Kristjan Greenewald, Mikhail Yurochkin and Justin Solomon - **Demo [optional]:** [More Information Needed] ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> [More Information Needed] ### Downstream Use [optional] <!-- This section is for the model use when fine-tuned for a task, or when plugged into a larger ecosystem/app --> [More Information Needed] ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. [More Information Needed] ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> https://huggingface.co/datasets/Lots-of-LoRAs/task1652_opus_books_ca-en_translation sourced from https://github.com/allenai/natural-instructions ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. 
--> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary ## Model Examination [optional] <!-- Relevant interpretability work for the model goes here --> [More Information Needed] ## Environmental Impact <!-- Total emissions (in grams of CO2eq) and additional considerations, such as electricity usage, go here. Edit the suggested text below accordingly --> Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). - **Hardware Type:** [More Information Needed] - **Hours used:** [More Information Needed] - **Cloud Provider:** [More Information Needed] - **Compute Region:** [More Information Needed] - **Carbon Emitted:** [More Information Needed] ## Technical Specifications [optional] ### Model Architecture and Objective [More Information Needed] ### Compute Infrastructure [More Information Needed] #### Hardware [More Information Needed] #### Software [More Information Needed] ## Citation [optional] <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> **BibTeX:** @misc{brüelgabrielsson2024compressserveservingthousands, title={Compress then Serve: Serving Thousands of LoRA Adapters with Little Overhead}, author={Rickard Brüel-Gabrielsson and Jiacheng Zhu and Onkar Bhardwaj and Leshem Choshen and Kristjan Greenewald and Mikhail Yurochkin and Justin Solomon}, year={2024}, eprint={2407.00066}, archivePrefix={arXiv}, primaryClass={cs.DC}, url={https://arxiv.org/abs/2407.00066}, } **APA:** [More Information Needed] ## Glossary [optional] <!-- If relevant, include terms and calculations in this section that can help readers understand the model or model card. --> [More Information Needed] ## More Information [optional] [More Information Needed] ## Model Card Authors [optional] [More Information Needed] ## Model Card Contact [More Information Needed]
{"base_model": "mistralai/Mistral-7B-Instruct-v0.2", "language": "en", "library_name": "pytorch", "license": "mit"}
task
[ "TRANSLATION" ]
45,517
yannryanhelsinki/setfit-language-guess
yannryanhelsinki
text-classification
[ "setfit", "safetensors", "mpnet", "sentence-transformers", "text-classification", "generated_from_setfit_trainer", "arxiv:2209.11055", "base_model:sentence-transformers/paraphrase-mpnet-base-v2", "base_model:finetune:sentence-transformers/paraphrase-mpnet-base-v2", "model-index", "region:us" ]
2024-06-27T08:17:52Z
2024-06-27T08:18:21+00:00
4
0
--- base_model: sentence-transformers/paraphrase-mpnet-base-v2 library_name: setfit metrics: - accuracy pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification - generated_from_setfit_trainer widget: - text: 'Colloqujdi Gio: Lodovico Vives latini, e volgari/Colloqui' - text: Ioannis Lodovici Vivis Von Underweÿsung ayner christlichen Frauwen drey Bücher ...erklärt unnd verteütscht. Durch Christophorum Brunonem .../Von Underweysung ayner christlichen Frauwen drey Bücher - text: Absolvtissimae in Hebraicam lingvam institvtiones accvratissime in vsvm studiosæ juuentutis conscriptæ ...Avtore Iohanne Isaaco Leuita Germano/Absolutissimae in Hebraicam linguam institutiones accuratissime in usum studiosæ juventutis conscriptæ ... Autore Iohanne Isaaco Levita Germano - text: In tertiam partem D. Thomæ Aqvinatis commentaria Ioannis Wiggers ... a quæstione I. vsque ad quæstionem XXVI. de verbo incarnatoIn tertiam partem D. Thomae Aquinatis commentaria Ioannis Wiggers ... a quaestione I. usque ad quaestionem XXVI. de verbo incarnato - text: Tabvla in grammaticen Hebræam,authore Nicolao Clenardo. A Iohanne Quinquarboreo Aurilacensi à mendis quibus scatebat repurgata, & annotationibus illustrata./Tabula in grammaticen Hebraeam, authore Nicolao Clenardo. A Johanne Quinquarboreo Aurilacensi à mendis quibus scatebat repurgata, & annotationibus illustrata inference: true model-index: - name: SetFit with sentence-transformers/paraphrase-mpnet-base-v2 results: - task: type: text-classification name: Text Classification dataset: name: Unknown type: unknown split: test metrics: - type: accuracy value: 0.735 name: Accuracy --- # SetFit with sentence-transformers/paraphrase-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. 
## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | no | <ul><li>'Exomologesis sive Modus confitendi,per Erasmum Roterodamũ .../Exomologesis sive modus confitendi per Erasmum Roterodamum'</li><li>'Aen-wysinge van de macht en de eer die aen Jesus-Christus toe-komt. En van de eerbiedinghe die-men schuldigh is aen sijn aldersuyverste moeder Maria, en andere heyligen.'</li><li>'Staatkundige vermaningen en voorbeelden, die de deughden en zonden der vorsten betreffen.Nieuwelijks door I.H. Glazemaker vertaalt.'</li></ul> | | yes | <ul><li>'Reclamations des trois états du duché de Brabant sur les atteintes portées a leurs droits et loix constitutionnelles au nom de S.M. Joseph II.'</li><li>'Brief van het Magistraet van Brugge van date 16 February 1788 aen de ordinaire Gedeputeerde der Staeten van Vlaenderen tenderende om staets gewyze te doen naedere Representatie tegen de opregtinge van een Seminarie Generael tot Loven ...'</li><li>"Bericht voor d'Universiteyt &c. van Leuven, over de wijtloopige memorie, en andere schriften en documenten daer by, overgegeven aen haer Ho. Mog. door de vicarissen van Doornik"</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.735 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("yannryanhelsinki/setfit-language-guess") # Run inference preds = model("Colloqujdi Gio: Lodovico Vives latini, e volgari/Colloqui") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? 
You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:--------|:----| | Word count | 5 | 29.2759 | 92 | | Label | Training Sample Count | |:------|:----------------------| | no | 44 | | yes | 72 | ### Training Hyperparameters - batch_size: (16, 16) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 20 - body_learning_rate: (2e-05, 2e-05) - head_learning_rate: 2e-05 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0034 | 1 | 0.2242 | - | | 0.1724 | 50 | 0.1951 | - | | 0.3448 | 100 | 0.0342 | - | | 0.5172 | 150 | 0.0008 | - | | 0.6897 | 200 | 0.0006 | - | | 0.8621 | 250 | 0.0003 | - | ### Framework Versions - Python: 3.10.12 - SetFit: 1.0.3 - Sentence Transformers: 3.0.1 - Transformers: 4.39.0 - PyTorch: 2.3.0+cu121 - Datasets: 2.20.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
null
Non_BioNLP
# SetFit with sentence-transformers/paraphrase-mpnet-base-v2 This is a [SetFit](https://github.com/huggingface/setfit) model that can be used for Text Classification. This SetFit model uses [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) as the Sentence Transformer embedding model. A [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance is used for classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Model Details ### Model Description - **Model Type:** SetFit - **Sentence Transformer body:** [sentence-transformers/paraphrase-mpnet-base-v2](https://huggingface.co/sentence-transformers/paraphrase-mpnet-base-v2) - **Classification head:** a [LogisticRegression](https://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html) instance - **Maximum Sequence Length:** 512 tokens - **Number of Classes:** 2 classes <!-- - **Training Dataset:** [Unknown](https://huggingface.co/datasets/unknown) --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Repository:** [SetFit on GitHub](https://github.com/huggingface/setfit) - **Paper:** [Efficient Few-Shot Learning Without Prompts](https://arxiv.org/abs/2209.11055) - **Blogpost:** [SetFit: Efficient Few-Shot Learning Without Prompts](https://huggingface.co/blog/setfit) ### Model Labels | Label | Examples | |:------|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | no | <ul><li>'Exomologesis sive Modus confitendi,per Erasmum Roterodamũ .../Exomologesis sive modus confitendi per Erasmum Roterodamum'</li><li>'Aen-wysinge van de macht en de eer die aen Jesus-Christus toe-komt. En van de eerbiedinghe die-men schuldigh is aen sijn aldersuyverste moeder Maria, en andere heyligen.'</li><li>'Staatkundige vermaningen en voorbeelden, die de deughden en zonden der vorsten betreffen.Nieuwelijks door I.H. Glazemaker vertaalt.'</li></ul> | | yes | <ul><li>'Reclamations des trois états du duché de Brabant sur les atteintes portées a leurs droits et loix constitutionnelles au nom de S.M. Joseph II.'</li><li>'Brief van het Magistraet van Brugge van date 16 February 1788 aen de ordinaire Gedeputeerde der Staeten van Vlaenderen tenderende om staets gewyze te doen naedere Representatie tegen de opregtinge van een Seminarie Generael tot Loven ...'</li><li>"Bericht voor d'Universiteyt &c. van Leuven, over de wijtloopige memorie, en andere schriften en documenten daer by, overgegeven aen haer Ho. Mog. 
door de vicarissen van Doornik"</li></ul> | ## Evaluation ### Metrics | Label | Accuracy | |:--------|:---------| | **all** | 0.735 | ## Uses ### Direct Use for Inference First install the SetFit library: ```bash pip install setfit ``` Then you can load this model and run inference. ```python from setfit import SetFitModel # Download from the 🤗 Hub model = SetFitModel.from_pretrained("yannryanhelsinki/setfit-language-guess") # Run inference preds = model("Colloqujdi Gio: Lodovico Vives latini, e volgari/Colloqui") ``` <!-- ### Downstream Use *List how someone could finetune this model on their own dataset.* --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Set Metrics | Training set | Min | Median | Max | |:-------------|:----|:--------|:----| | Word count | 5 | 29.2759 | 92 | | Label | Training Sample Count | |:------|:----------------------| | no | 44 | | yes | 72 | ### Training Hyperparameters - batch_size: (16, 16) - num_epochs: (1, 1) - max_steps: -1 - sampling_strategy: oversampling - num_iterations: 20 - body_learning_rate: (2e-05, 2e-05) - head_learning_rate: 2e-05 - loss: CosineSimilarityLoss - distance_metric: cosine_distance - margin: 0.25 - end_to_end: False - use_amp: False - warmup_proportion: 0.1 - seed: 42 - eval_max_steps: -1 - load_best_model_at_end: False ### Training Results | Epoch | Step | Training Loss | Validation Loss | |:------:|:----:|:-------------:|:---------------:| | 0.0034 | 1 | 0.2242 | - | | 0.1724 | 50 | 0.1951 | - | | 0.3448 | 100 | 0.0342 | - | | 0.5172 | 150 | 0.0008 | - | | 0.6897 | 200 | 0.0006 | - | | 0.8621 | 250 | 0.0003 | - | ### Framework Versions - Python: 3.10.12 - SetFit: 1.0.3 - Sentence Transformers: 3.0.1 - Transformers: 4.39.0 - PyTorch: 2.3.0+cu121 - Datasets: 2.20.0 - Tokenizers: 0.15.2 ## Citation ### BibTeX ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
{"base_model": "sentence-transformers/paraphrase-mpnet-base-v2", "library_name": "setfit", "metrics": ["accuracy"], "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification", "generated_from_setfit_trainer"], "widget": [{"text": "Colloqujdi Gio: Lodovico Vives latini, e volgari/Colloqui"}, {"text": "Ioannis Lodovici Vivis Von Underweÿsung ayner christlichen Frauwen drey Bücher ...erklärt unnd verteütscht. Durch Christophorum Brunonem .../Von Underweysung ayner christlichen Frauwen drey Bücher"}, {"text": "Absolvtissimae in Hebraicam lingvam institvtiones accvratissime in vsvm studiosæ juuentutis conscriptæ ...Avtore Iohanne Isaaco Leuita Germano/Absolutissimae in Hebraicam linguam institutiones accuratissime in usum studiosæ juventutis conscriptæ ... Autore Iohanne Isaaco Levita Germano"}, {"text": "In tertiam partem D. Thomæ Aqvinatis commentaria Ioannis Wiggers ... a quæstione I. vsque ad quæstionem XXVI. de verbo incarnatoIn tertiam partem D. Thomae Aquinatis commentaria Ioannis Wiggers ... a quaestione I. usque ad quaestionem XXVI. de verbo incarnato"}, {"text": "Tabvla in grammaticen Hebræam,authore Nicolao Clenardo. A Iohanne Quinquarboreo Aurilacensi à mendis quibus scatebat repurgata, & annotationibus illustrata./Tabula in grammaticen Hebraeam, authore Nicolao Clenardo. A Johanne Quinquarboreo Aurilacensi à mendis quibus scatebat repurgata, & annotationibus illustrata"}], "inference": true, "model-index": [{"name": "SetFit with sentence-transformers/paraphrase-mpnet-base-v2", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "Unknown", "type": "unknown", "split": "test"}, "metrics": [{"type": "accuracy", "value": 0.735, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,518
mrm8488/t5-base-finetuned-emotion
mrm8488
text2text-generation
[ "transformers", "pytorch", "jax", "t5", "text2text-generation", "en", "dataset:emotion", "arxiv:1910.10683", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:05Z
2021-06-23T12:46:24+00:00
8,106
52
--- datasets: - emotion language: en widget: - text: I wish you were here but it is impossible --- # T5-base fine-tuned for Emotion Recognition 😂😢😡😃😯 [Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) base fine-tuned on the [emotion recognition](https://github.com/dair-ai/emotion_dataset) dataset for the **Emotion Recognition** downstream task. ## Details of T5 The **T5** model was presented in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/pdf/1910.10683.pdf) by *Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu*. Here is the abstract: Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration with scale and our new “Colossal Clean Crawled Corpus”, we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code. ![model image](https://i.imgur.com/jVFMMWR.png) ## Details of the downstream task (Sentiment Recognition) - Dataset 📚 [Elvis Saravia](https://twitter.com/omarsar0) has gathered a great [dataset](https://github.com/dair-ai/emotion_dataset) for emotion recognition. It allows classifying the text into one of the following **6** emotions: - sadness 😢 - joy 😃 - love 🥰 - anger 😡 - fear 😱 - surprise 😯 ## Model fine-tuning 🏋️‍ The training script is a slightly modified version of [this Colab Notebook](https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) created by [Suraj Patil](https://github.com/patil-suraj), so all credits to him! 
## Test set metrics 🧾 | |precision | recall | f1-score |support| |----------|----------|---------|----------|-------| |anger | 0.93| 0.92| 0.93| 275| |fear | 0.91| 0.87| 0.89| 224| |joy | 0.97| 0.94| 0.95| 695| |love | 0.80| 0.91| 0.85| 159| |sadness | 0.97| 0.97| 0.97| 521| |surprise | 0.73| 0.89| 0.80| 66| |accuracy | | | 0.93| 2000| |macro avg| 0.89| 0.92| 0.90| 2000| |weighted avg| 0.94| 0.93| 0.93| 2000| ## Model in Action 🚀 ```python from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion") model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-emotion") def get_emotion(text): input_ids = tokenizer.encode(text + '</s>', return_tensors='pt') output = model.generate(input_ids=input_ids, max_length=2) dec = [tokenizer.decode(ids) for ids in output] label = dec[0] return label get_emotion("i feel as if i havent blogged in ages are at least truly blogged i am doing an update cute") # Output: 'joy' get_emotion("i have a feeling i kinda lost my best friend") # Output: 'sadness' ``` > Created by [Manuel Romero/@mrm8488](https://twitter.com/mrm8488) | [LinkedIn](https://www.linkedin.com/in/manuel-romero-cs/) > Made with <span style="color: #e25555;">&hearts;</span> in Spain
null
Non_BioNLP
# T5-base fine-tuned for Emotion Recognition 😂😢😡😃😯 [Google's T5](https://ai.googleblog.com/2020/02/exploring-transfer-learning-with-t5.html) base fine-tuned on the [emotion recognition](https://github.com/dair-ai/emotion_dataset) dataset for the **Emotion Recognition** downstream task. ## Details of T5 The **T5** model was presented in [Exploring the Limits of Transfer Learning with a Unified Text-to-Text Transformer](https://arxiv.org/pdf/1910.10683.pdf) by *Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, Peter J. Liu*. Here is the abstract: Transfer learning, where a model is first pre-trained on a data-rich task before being fine-tuned on a downstream task, has emerged as a powerful technique in natural language processing (NLP). The effectiveness of transfer learning has given rise to a diversity of approaches, methodology, and practice. In this paper, we explore the landscape of transfer learning techniques for NLP by introducing a unified framework that converts every language problem into a text-to-text format. Our systematic study compares pre-training objectives, architectures, unlabeled datasets, transfer approaches, and other factors on dozens of language understanding tasks. By combining the insights from our exploration with scale and our new “Colossal Clean Crawled Corpus”, we achieve state-of-the-art results on many benchmarks covering summarization, question answering, text classification, and more. To facilitate future work on transfer learning for NLP, we release our dataset, pre-trained models, and code. ![model image](https://i.imgur.com/jVFMMWR.png) ## Details of the downstream task (Sentiment Recognition) - Dataset 📚 [Elvis Saravia](https://twitter.com/omarsar0) has gathered a great [dataset](https://github.com/dair-ai/emotion_dataset) for emotion recognition. It allows classifying the text into one of the following **6** emotions: - sadness 😢 - joy 😃 - love 🥰 - anger 😡 - fear 😱 - surprise 😯 ## Model fine-tuning 🏋️‍ The training script is a slightly modified version of [this Colab Notebook](https://github.com/patil-suraj/exploring-T5/blob/master/t5_fine_tuning.ipynb) created by [Suraj Patil](https://github.com/patil-suraj), so all credits to him! ## Test set metrics 🧾 | |precision | recall | f1-score |support| |----------|----------|---------|----------|-------| |anger | 0.93| 0.92| 0.93| 275| |fear | 0.91| 0.87| 0.89| 224| |joy | 0.97| 0.94| 0.95| 695| |love | 0.80| 0.91| 0.85| 159| |sadness | 0.97| 0.97| 0.97| 521| |surprise | 0.73| 0.89| 0.80| 66| |accuracy | | | 0.93| 2000| |macro avg| 0.89| 0.92| 0.90| 2000| |weighted avg| 0.94| 0.93| 0.93| 2000| ## Model in Action 🚀 ```python from transformers import AutoTokenizer, AutoModelWithLMHead tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-emotion") model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-emotion") def get_emotion(text): input_ids = tokenizer.encode(text + '</s>', return_tensors='pt') output = model.generate(input_ids=input_ids, max_length=2) dec = [tokenizer.decode(ids) for ids in output] label = dec[0] return label get_emotion("i feel as if i havent blogged in ages are at least truly blogged i am doing an update cute") # Output: 'joy' get_emotion("i have a feeling i kinda lost my best friend") # Output: 'sadness' ``` > Created by [Manuel Romero/@mrm8488](https://twitter.com/mrm8488) | [LinkedIn](https://www.linkedin.com/in/manuel-romero-cs/) > Made with <span style="color: #e25555;">&hearts;</span> in Spain
{"datasets": ["emotion"], "language": "en", "widget": [{"text": "I wish you were here but it is impossible"}]}
task
[ "TEXT_CLASSIFICATION", "QUESTION_ANSWERING", "SUMMARIZATION" ]
45,519
KYUNGHYUN9/ko-sroberta-itos-training-example_v0.02
KYUNGHYUN9
sentence-similarity
[ "sentence-transformers", "safetensors", "roberta", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:574417", "loss:MultipleNegativesRankingLoss", "loss:CosineSimilarityLoss", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:klue/roberta-base", "base_model:finetune:klue/roberta-base", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-06-27T08:34:33Z
2024-06-27T08:35:29+00:00
7
0
--- base_model: klue/roberta-base datasets: [] language: [] library_name: sentence-transformers metrics: - pearson_cosine - spearman_cosine - pearson_manhattan - spearman_manhattan - pearson_euclidean - spearman_euclidean - pearson_dot - spearman_dot - pearson_max - spearman_max pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:574417 - loss:MultipleNegativesRankingLoss - loss:CosineSimilarityLoss widget: - source_sentence: 이집트 대통령 선거에서 가까운 여론조사 sentences: - 알 카에다 충돌, 폭발로 예멘에서 35명의 군인이 사망 - '보도자료 : 예멘 대통령 선거' - 반 파이프에 스케이트보드를 신은 남자 - source_sentence: 한 소년이 팽창식 슬라이드를 내려간다. sentences: - 빨간 옷을 입은 소년이 부풀릴 수 있는 놀이기구를 타고 내려간다. - 새들이 물속에서 헤엄치고 있다. - 여자는 녹색 후추를 썰었다. - source_sentence: 비상 차량들이 현장에 있다. sentences: - 구급차와 소방차가 현장에서 도움을 주려고 한다. - 유물을 보는 사람들이 있다. - 구급차와 소방차에 불이 붙었다. - source_sentence: 그들은 서로 가까이 있지 않다. sentences: - 그 품질은 레이저에 가깝다. - 그들은 샤토와 매우 가깝다. - 그들은 샤토와 서로 어느 정도 떨어져 있다. - source_sentence: 딱딱한 모자를 쓴 남자가 건물 프레임 앞에 주차된 빨간 트럭의 침대를 쳐다본다. sentences: - 남자가 자고 있다. - 사람들이 말하고 있다. - 한 남자가 트럭을 보고 있다. model-index: - name: SentenceTransformer based on klue/roberta-base results: - task: type: semantic-similarity name: Semantic Similarity dataset: name: sts dev type: sts-dev metrics: - type: pearson_cosine value: 0.8650328554572645 name: Pearson Cosine - type: spearman_cosine value: 0.8667952293243948 name: Spearman Cosine - type: pearson_manhattan value: 0.8558437246473041 name: Pearson Manhattan - type: spearman_manhattan value: 0.860673936504169 name: Spearman Manhattan - type: pearson_euclidean value: 0.8562228685196989 name: Pearson Euclidean - type: spearman_euclidean value: 0.8612884653822855 name: Spearman Euclidean - type: pearson_dot value: 0.830160661850442 name: Pearson Dot - type: spearman_dot value: 0.8275972106510755 name: Spearman Dot - type: pearson_max value: 0.8650328554572645 name: Pearson Max - type: spearman_max value: 0.8667952293243948 name: Spearman Max --- # SentenceTransformer based on klue/roberta-base This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [klue/roberta-base](https://huggingface.co/klue/roberta-base). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. 
## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [klue/roberta-base](https://huggingface.co/klue/roberta-base) <!-- at revision 02f94ba5e3fcb7e2a58a390b8639b0fac974a8da --> - **Maximum Sequence Length:** 128 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("KYUNGHYUN9/ko-sroberta-itos-training-example_v0.02") # Run inference sentences = [ '딱딱한 모자를 쓴 남자가 건물 프레임 앞에 주차된 빨간 트럭의 침대를 쳐다본다.', '한 남자가 트럭을 보고 있다.', '남자가 자고 있다.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Semantic Similarity * Dataset: `sts-dev` * Evaluated with [<code>EmbeddingSimilarityEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator) | Metric | Value | |:-------------------|:-----------| | pearson_cosine | 0.865 | | spearman_cosine | 0.8668 | | pearson_manhattan | 0.8558 | | spearman_manhattan | 0.8607 | | pearson_euclidean | 0.8562 | | spearman_euclidean | 0.8613 | | pearson_dot | 0.8302 | | spearman_dot | 0.8276 | | pearson_max | 0.865 | | **spearman_max** | **0.8668** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Datasets #### Unnamed Dataset * Size: 568,640 training samples * Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>sentence_2</code> * Approximate statistics based on the first 1000 samples: | | sentence_0 | sentence_1 | sentence_2 | |:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | string | string | string | | details | <ul><li>min: 4 tokens</li><li>mean: 19.21 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 18.29 tokens</li><li>max: 93 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 14.61 tokens</li><li>max: 57 tokens</li></ul> | * Samples: | sentence_0 | sentence_1 | sentence_2 | |:----------------------------------------|:-------------------------------------------------|:--------------------------------------| | <code>발생 부하가 함께 5% 적습니다.</code> | <code>발생 부하의 5% 감소와 함께 11.</code> | <code>발생 부하가 5% 증가합니다.</code> | | <code>어떤 행사를 위해 음식과 옷을 배급하는 여성들.</code> | <code>여성들은 음식과 옷을 나눠줌으로써 난민들을 돕고 있다.</code> | <code>여자들이 사막에서 오토바이를 운전하고 있다.</code> | | <code>어린 아이들은 그 지식을 얻을 필요가 있다.</code> | <code>응, 우리 젊은이들 중 많은 사람들이 그걸 배워야 할 것 같아.</code> | <code>젊은 사람들은 배울 필요가 없다.</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` #### Unnamed Dataset * Size: 5,777 training samples * Columns: <code>sentence_0</code>, <code>sentence_1</code>, and <code>label</code> * Approximate statistics based on the first 1000 samples: | | sentence_0 | sentence_1 | label | |:--------|:----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------|:---------------------------------------------------------------| | type | string | string | float | | details | <ul><li>min: 3 tokens</li><li>mean: 17.61 tokens</li><li>max: 65 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 17.66 tokens</li><li>max: 76 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.54</li><li>max: 1.0</li></ul> | * Samples: | sentence_0 | sentence_1 | label | |:-----------------------------------------|:------------------------------------------|:--------------------------------| | <code>몰디브 대통령이 경찰의 반란 이후 사임하고, 시위</code> | <code>몰디브 대통령이 몇 주 동안의 시위 끝에 그만두다.</code> | <code>0.6799999999999999</code> | | <code>사자가 밀폐된 지역을 걷고 있다.</code> | <code>사자가 주위를 돌아다니고 있다.</code> | <code>0.52</code> | | <code>한 소년이 노래를 부르고 피아노를 치고 있다.</code> | <code>한 소년이 피아노를 치고 있다.</code> | <code>0.6</code> | * Loss: [<code>CosineSimilarityLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters: ```json { "loss_fct": "torch.nn.modules.loss.MSELoss" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `num_train_epochs`: 5 - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: round_robin #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 8 - 
`per_device_eval_batch_size`: 8 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1 - `num_train_epochs`: 5 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: round_robin </details> ### Training Logs | Epoch | Step | Training Loss | sts-dev_spearman_max | |:------:|:----:|:-------------:|:--------------------:| | 0.3458 | 500 | 0.4123 | - | | 0.6916 | 1000 | 0.3009 | 0.8365 | | 1.0007 | 1447 | - | 0.8610 | | 1.0367 | 1500 | 0.259 | - | | 1.3824 | 2000 | 0.1301 | 0.8580 | | 1.7282 | 2500 | 0.0898 | - | | 2.0007 | 2894 | - | 0.8668 | ### Framework Versions - Python: 3.11.9 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.2.2+cu121 
- Accelerate: 0.31.0 - Datasets: 2.20.0 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
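The two loss configurations above correspond to the multi-dataset training API of Sentence Transformers v3. The following is a minimal, illustrative sketch of how such a setup is wired together; the tiny in-memory datasets and their names are placeholders built from the sample rows above, not the author's actual training files, and non-default samplers (e.g. the round-robin multi-dataset sampler) are omitted.

```python
# Minimal sketch of the two-dataset, two-loss setup described above (assumptions noted).
from datasets import Dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer, losses

model = SentenceTransformer("klue/roberta-base")

# (anchor, positive, hard negative) triplets -> MultipleNegativesRankingLoss
triplets = Dataset.from_dict({
    "sentence_0": ["어떤 행사를 위해 음식과 옷을 배급하는 여성들."],
    "sentence_1": ["여성들은 음식과 옷을 나눠줌으로써 난민들을 돕고 있다."],
    "sentence_2": ["여자들이 사막에서 오토바이를 운전하고 있다."],
})

# (sentence pair, float similarity label) -> CosineSimilarityLoss
pairs = Dataset.from_dict({
    "sentence_0": ["사자가 밀폐된 지역을 걷고 있다."],
    "sentence_1": ["사자가 주위를 돌아다니고 있다."],
    "label": [0.52],
})

trainer = SentenceTransformerTrainer(
    model=model,
    train_dataset={"triplets": triplets, "pairs": pairs},
    loss={
        "triplets": losses.MultipleNegativesRankingLoss(model, scale=20.0),
        "pairs": losses.CosineSimilarityLoss(model),
    },
)
trainer.train()
```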
**Domain:** Non_BioNLP
**Tasks:** TEXT_CLASSIFICATION, SEMANTIC_SIMILARITY
## anilguven/albert_tr_turkish_spam_email

Record 45,520 · Author: anilguven · Pipeline tag: text-classification · Created: 2024-01-25T19:34:03Z · Last modified: 2024-01-26T13:34:19+00:00 · Downloads: 16 · Likes: 1

Tags: transformers, safetensors, albert, text-classification, turkish, spam, ham, email, bert, tr, dataset:anilguven/turkish_spam_email, license:unknown, autotrain_compatible, endpoints_compatible, region:us
---
datasets:
- anilguven/turkish_spam_email
language:
- tr
license: unknown
metrics:
- accuracy
- f1
- recall
- precision
tags:
- turkish
- spam
- ham
- email
- albert
- bert
---

### Model Info

This model was developed/finetuned for the spam-detection task in Turkish. It was finetuned on a Turkish spam/ham e-mail dataset.

- LABEL_0: ham (normal) mail
- LABEL_1: spam mail

### Model Sources

<!-- Provide the basic links for the model. -->
- **Dataset:** https://huggingface.co/datasets/anilguven/turkish_spam_email
- **Paper:** https://dergipark.org.tr/tr/pub/ejosat/issue/75736/1234079
- **Demo-Coding [optional]:** https://github.com/anil1055/Turkish_spam_email_detection_with_language_models
- **Finetuned from model [optional]:** https://huggingface.co/loodos/albert-base-turkish-uncased

#### Preprocessing

You must apply stop-word removal, stemming, or lemmatization to Turkish text before using the model.

### Model Load (safetensors)

<!-- Provide a quick summary of what the model is/does. -->
Details: https://huggingface.co/docs/diffusers/using-diffusers/using_safetensors

### Results

- F1-score: 93.55%
- Accuracy: 93.10%

## Citation

<!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. -->

**BibTeX:**

*@article{article_1234079, title={Türkçe E-postalarda Spam Tespiti için Makine Öğrenme Yöntemlerinin ve Dil Modellerinin Analizi}, journal={Avrupa Bilim ve Teknoloji Dergisi}, pages={1–6}, year={2023}, DOI={10.31590/ejosat.1234079}, author={GÜVEN, Zekeriya Anıl}, keywords={Siber Güvenlik, Spam Tespiti, Dil Modeli, Makine Öğrenmesi, Doğal Dil İşleme, Metin Sınıflandırma, Cyber Security, Spam Detection, Language Model, Machine Learning, Natural Language Processing, Text Classification}, number={47}, publisher={Osman SAĞDIÇ} }*

**APA:**

*GÜVEN, Z. A. (2023). Türkçe E-postalarda Spam Tespiti için Makine Öğrenme Yöntemlerinin ve Dil Modellerinin Analizi. Avrupa Bilim ve Teknoloji Dergisi, (47), 1-6.*
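The card does not include a loading snippet, so here is a minimal sketch using the standard `transformers` pipeline with the label mapping documented above. The example sentence is invented, and the preprocessing step described above is skipped for brevity.

```python
from transformers import pipeline

# Load the fine-tuned Turkish spam/ham classifier from the Hub.
classifier = pipeline("text-classification", model="anilguven/albert_tr_turkish_spam_email")

# Invented example input; apply the preprocessing described above for best results.
result = classifier("Tebrikler, büyük ödülü kazandınız! Hemen bağlantıya tıklayın.")[0]
label_map = {"LABEL_0": "ham", "LABEL_1": "spam"}  # mapping taken from the model card
print(label_map[result["label"]], round(result["score"], 3))
```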
**Domain:** Non_BioNLP
**Tasks:** TEXT_CLASSIFICATION
## gulermuslim/distilbert-base-uncased-finetuned-emotion

Record 45,521 · Author: gulermuslim · Pipeline tag: text-classification · Created: 2022-10-21T09:38:18Z · Last modified: 2022-10-21T09:48:14+00:00 · Downloads: 10 · Likes: 0

Tags: transformers, pytorch, tensorboard, distilbert, text-classification, generated_from_trainer, dataset:emotion, license:apache-2.0, model-index, autotrain_compatible, endpoints_compatible, region:us
---
datasets:
- emotion
license: apache-2.0
metrics:
- accuracy
- f1
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-uncased-finetuned-emotion
  results:
  - task:
      type: text-classification
      name: Text Classification
    dataset:
      name: emotion
      type: emotion
      args: default
    metrics:
    - type: accuracy
      value: 0.9245
      name: Accuracy
    - type: f1
      value: 0.9246934497325665
      name: F1
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-emotion

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set:
- Loss: 0.2157
- Accuracy: 0.9245
- F1: 0.9247

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 64
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 2

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy | F1     |
|:-------------:|:-----:|:----:|:---------------:|:--------:|:------:|
| 0.8781        | 1.0   | 250  | 0.3374          | 0.8995   | 0.8951 |
| 0.2546        | 2.0   | 500  | 0.2157          | 0.9245   | 0.9247 |

### Framework versions

- Transformers 4.13.0
- Pytorch 1.12.1+cu113
- Datasets 1.16.1
- Tokenizers 0.10.3
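Since the auto-generated card lacks a usage example, here is a minimal sketch using the standard pipeline API. The input sentence is invented; the label set comes from the `emotion` dataset the card names.

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="gulermuslim/distilbert-base-uncased-finetuned-emotion",
)
# Invented example input; the model predicts one of the emotion dataset's labels.
print(classifier("I can't wait to see my best friend this weekend!"))
```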
**Domain:** Non_BioNLP
**Tasks:** TEXT_CLASSIFICATION
## hhenryz/LOVA3-llava-v1.5-7b

Record 45,522 · Author: hhenryz · Pipeline tag: image-text-to-text · Created: 2024-05-24T08:09:25Z · Last modified: 2025-02-24T08:20:43+00:00 · Downloads: 42 · Likes: 1

Tags: transformers, pytorch, llava, text-generation, image-text-to-text, license:apache-2.0, autotrain_compatible, endpoints_compatible, region:us
---
library_name: transformers
license: apache-2.0
pipeline_tag: image-text-to-text
---

## 💡 Key Contributions:

* **LOVA3** - To the best of our knowledge, LOVA3 is the first effort to imbue the asking and assessment abilities into the training of a robust and intelligent MLLM, inspired by human learning mechanisms.
* **EvalQABench** - We build a new benchmark, EvalQABench, for VQA correction evaluation, as a first effort to advance future research.
* **Performance Improvement** - Training with our proposed LOVA3 framework, we observe consistent improvements on 10 representative benchmarks.

## Model weight

Pretrained weight: [LOVA3-llava-v1.5-7b](https://huggingface.co/hhenryz/LOVA3-llava-v1.5-7b)

Download it with the following command:

```
git clone https://huggingface.co/hhenryz/LOVA3-llava-v1.5-7b
```

## Training Data

* Here we provide the training/evaluation/testing sets of EvalQABench under the folder `EvalQABench`.
* Training data: [Mixed_VQA_GenQA_EvalQA_1.5M.jsonl](https://huggingface.co/datasets/hhenryz/Mixed_VQA_GenQA_EvalQA_1.5M).

### Image Datasets

Please download the images from the constituent datasets:

- COCO: [train2014](http://images.cocodataset.org/zips/train2014.zip)
- GQA: [images](https://downloads.cs.stanford.edu/nlp/data/gqa/images.zip)
- OCR-VQA: [download script](https://drive.google.com/drive/folders/1_GYPY5UkUy7HIcR0zq3ZCFgeZN7BAfm_?usp=sharing), **we save all files as `.jpg`**
- AOKVQA: [download script](https://github.com/allenai/aokvqa?tab=readme-ov-file#downloading-the-dataset)
- TextVQA: [train_val_images](https://dl.fbaipublicfiles.com/textvqa/images/train_val_images.zip)
- VisualGenome: [part1](https://cs.stanford.edu/people/rak248/VG_100K_2/images.zip), [part2](https://cs.stanford.edu/people/rak248/VG_100K_2/images2.zip)
- LLaVA-Instruct: [huggingface](https://huggingface.co/datasets/liuhaotian/LLaVA-Instruct-150K)

## 💃 Evaluation

1. Download [LOVA3-llava-v1.5-7b](https://huggingface.co/hhenryz/LOVA3-llava-v1.5-7b) under the folder `checkpoints`.
2. Download the CLIP vision encoder [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) under the folder `checkpoints`.
3. Run the evaluation scripts under the folder `scripts/v1_5/eval`. There are 12 multimodal datasets and benchmarks awaiting evaluation.

Take VizWiz as an example; the running command is as follows:

```
modelname=LOVA3-llava-v1.5-7b

python -m llava.eval.model_vqa_loader \
    --model-path checkpoints/$modelname \
    --question-file ./playground/data/eval/vizwiz/llava_test.jsonl \
    --image-folder /yourpath/vizwiz/test/ \
    --answers-file ./playground/data/eval/vizwiz/answers/$modelname.jsonl \
    --temperature 0 \
    --conv-mode vicuna_v1

python scripts/convert_vizwiz_for_submission.py \
    --annotation-file ./playground/data/eval/vizwiz/llava_test.jsonl \
    --result-file ./playground/data/eval/vizwiz/answers/$modelname.jsonl \
    --result-upload-file ./playground/data/eval/vizwiz/answers_upload/$modelname.json
```

## Training

1. Download the pretrained MLP adapter weights [llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5](https://huggingface.co/liuhaotian/llava-v1.5-mlp2x-336px-pretrain-vicuna-7b-v1.5) and put them under the folder `checkpoints`.
2. Download the model weight [clip-vit-large-patch14-336](https://huggingface.co/openai/clip-vit-large-patch14-336) under the folder `checkpoints`.
3. Download the model weight [vicuna-7b-v1.5](https://huggingface.co/lmsys/vicuna-7b-v1.5) under the folder `checkpoints`.
4. Download the training data [Mixed_VQA_GenQA_EvalQA_1.5M.jsonl](https://huggingface.co/datasets/hhenryz/Mixed_VQA_GenQA_EvalQA_1.5M) under the folder `data`.
5. Run the training script:

```
bash scripts/v1_5/finetune.sh
```

## 🙏 Acknowledgement

- [LLaVA](https://github.com/haotian-liu/LLaVA): The codebase we built upon.
- [LAVIS](https://github.com/salesforce/LAVIS): We download some datasets from its scripts.

## 🎓 Citation

If you find LOVA3 useful, please cite using this BibTeX:

```bibtex
@inproceedings{
    zhao2024lova,
    title={{LOVA}3: Learning to Visual Question Answering, Asking and Assessment},
    author={Hengyuan Zhao and Pan Zhou and Difei Gao and Zechen Bai and Mike Zheng Shou},
    booktitle={The Thirty-eighth Annual Conference on Neural Information Processing Systems},
    year={2024},
    url={https://openreview.net/forum?id=vIOKLMl6wu}
}
```
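For completeness, a sketch of loading the released weights through the LLaVA codebase this project builds upon (see Acknowledgement). The loader call below follows the upstream `llava` package; treat the exact paths, and the availability of this API for this checkpoint, as assumptions rather than confirmed usage.

```python
# Assumed to run inside the LOVA3/LLaVA codebase environment (paths are illustrative).
from llava.model.builder import load_pretrained_model

tokenizer, model, image_processor, context_len = load_pretrained_model(
    model_path="checkpoints/LOVA3-llava-v1.5-7b",  # downloaded as shown above
    model_base=None,
    model_name="LOVA3-llava-v1.5-7b",
)
```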
**Domain:** Non_BioNLP
**Tasks:** QUESTION_ANSWERING
## MathiasBrussow/distilbert-base-uncased-finetuned-clinc

Record 45,523 · Author: MathiasBrussow · Pipeline tag: (none) · Created: 2024-10-03T10:26:19Z · Last modified: 2024-10-03T15:30:49+00:00 · Downloads: 13 · Likes: 0

Tags: pytorch, tensorboard, distilbert, generated_from_trainer, dataset:clinc_oos, license:apache-2.0, model-index, region:us
---
datasets:
- clinc_oos
license: apache-2.0
metrics:
- accuracy
tags:
- generated_from_trainer
model-index:
- name: distilbert-base-uncased-finetuned-clinc
  results:
  - task:
      type: text-classification
      name: Text Classification
    dataset:
      name: clinc_oos
      type: clinc_oos
      args: plus
    metrics:
    - type: accuracy
      value: 0.9180645161290323
      name: Accuracy
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# distilbert-base-uncased-finetuned-clinc

This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the clinc_oos dataset. It achieves the following results on the evaluation set:
- Loss: 0.7721
- Accuracy: 0.9181

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 48
- eval_batch_size: 48
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log        | 1.0   | 318  | 3.2884          | 0.7419   |
| 3.7865        | 2.0   | 636  | 1.8751          | 0.8368   |
| 3.7865        | 3.0   | 954  | 1.1569          | 0.8961   |
| 1.6925        | 4.0   | 1272 | 0.8573          | 0.9132   |
| 0.9056        | 5.0   | 1590 | 0.7721          | 0.9181   |

### Framework versions

- Transformers 4.16.2
- Pytorch 2.4.1+cu121
- Datasets 1.16.1
- Tokenizers 0.19.1
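No usage example is given, so here is a minimal hedged sketch with the pipeline API. The pipeline task is assumed (the repository metadata declares no pipeline tag), the example utterance is invented, and the intent classes come from the `clinc_oos` dataset.

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",  # assumed task; the repo declares no pipeline tag
    model="MathiasBrussow/distilbert-base-uncased-finetuned-clinc",
)
# Invented example utterance; the model predicts a clinc_oos intent label.
print(classifier("Please transfer 100 dollars from checking to savings."))
```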
**Domain:** Non_BioNLP
**Tasks:** TEXT_CLASSIFICATION
## gokuls/hBERTv1_new_pretrain_48_KD_cola

Record 45,524 · Author: gokuls · Pipeline tag: text-classification · Created: 2023-06-14T17:00:54Z · Last modified: 2023-06-14T17:17:29+00:00 · Downloads: 8 · Likes: 0

Tags: transformers, pytorch, tensorboard, hybridbert, text-classification, generated_from_trainer, en, dataset:glue, model-index, autotrain_compatible, endpoints_compatible, region:us
---
datasets:
- glue
language:
- en
metrics:
- matthews_correlation
- accuracy
tags:
- generated_from_trainer
model-index:
- name: hBERTv1_new_pretrain_48_KD_cola
  results:
  - task:
      type: text-classification
      name: Text Classification
    dataset:
      name: GLUE COLA
      type: glue
      config: cola
      split: validation
      args: cola
    metrics:
    - type: matthews_correlation
      value: 0.0
      name: Matthews Correlation
    - type: accuracy
      value: 0.6912751793861389
      name: Accuracy
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# hBERTv1_new_pretrain_48_KD_cola

This model is a fine-tuned version of [gokuls/bert_12_layer_model_v1_complete_training_new_48_KD](https://huggingface.co/gokuls/bert_12_layer_model_v1_complete_training_new_48_KD) on the GLUE COLA dataset. It achieves the following results on the evaluation set:
- Loss: 0.6163
- Matthews Correlation: 0.0
- Accuracy: 0.6913

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 4e-05
- train_batch_size: 128
- eval_batch_size: 128
- seed: 10
- distributed_type: multi-GPU
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 50

### Training results

| Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------------------:|:--------:|
| 0.6281        | 1.0   | 67   | 0.6241          | 0.0                  | 0.6913   |
| 0.6172        | 2.0   | 134  | 0.6471          | 0.0                  | 0.6913   |
| 0.6123        | 3.0   | 201  | 0.6185          | 0.0                  | 0.6913   |
| 0.6138        | 4.0   | 268  | 0.6180          | 0.0                  | 0.6913   |
| 0.6029        | 5.0   | 335  | 0.6170          | 0.0021               | 0.6846   |
| 0.5795        | 6.0   | 402  | 0.6163          | 0.0                  | 0.6913   |
| 0.556         | 7.0   | 469  | 0.6529          | 0.0735               | 0.6328   |
| 0.5273        | 8.0   | 536  | 0.6502          | 0.1135               | 0.6510   |
| 0.5108        | 9.0   | 603  | 0.7095          | 0.1134               | 0.6318   |
| 0.4911        | 10.0  | 670  | 0.6667          | 0.0454               | 0.6644   |
| 0.4768        | 11.0  | 737  | 0.6869          | 0.0658               | 0.6232   |

### Framework versions

- Transformers 4.30.2
- Pytorch 1.14.0a0+410ce96
- Datasets 2.12.0
- Tokenizers 0.13.3
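Note that a Matthews correlation of 0.0 at 69.13% accuracy (approximately the majority-class rate on the CoLA validation split) suggests the reported checkpoint effectively predicts a single class. For loading, the `hybridbert` architecture is not part of mainline `transformers`, so the sketch below assumes custom remote code is required; whether this repository actually ships such code is not confirmed by the card.

```python
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo = "gokuls/hBERTv1_new_pretrain_48_KD_cola"
# trust_remote_code is an assumption: hybridbert is a custom architecture.
tokenizer = AutoTokenizer.from_pretrained(repo, trust_remote_code=True)
model = AutoModelForSequenceClassification.from_pretrained(repo, trust_remote_code=True)
```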
**Domain:** Non_BioNLP
**Tasks:** TEXT_CLASSIFICATION
## clboetticher-school/marian-finetuned-kde4-en-to-fr

Record 45,525 · Author: clboetticher-school · Pipeline tag: translation · Created: 2023-02-14T18:53:38Z · Last modified: 2023-02-14T21:17:54+00:00 · Downloads: 10 · Likes: 0

Tags: transformers, pytorch, marian, text2text-generation, translation, generated_from_trainer, dataset:kde4, license:apache-2.0, autotrain_compatible, endpoints_compatible, region:us
---
datasets:
- kde4
license: apache-2.0
tags:
- translation
- generated_from_trainer
model-index:
- name: marian-finetuned-kde4-en-to-fr
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# marian-finetuned-kde4-en-to-fr

This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset.

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 32
- eval_batch_size: 64
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 1
- mixed_precision_training: Native AMP

### Framework versions

- Transformers 4.26.1
- Pytorch 1.13.1+cu116
- Datasets 2.9.0
- Tokenizers 0.13.2
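The card gives no inference example, so here is a minimal sketch with the translation pipeline. The input string is invented; since KDE4 is technical/UI text, similar inputs suit the model best.

```python
from transformers import pipeline

translator = pipeline(
    "translation",
    model="clboetticher-school/marian-finetuned-kde4-en-to-fr",
)
# Invented example input in the KDE4 UI-string style.
print(translator("Default to expanded threads"))
```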
**Domain:** Non_BioNLP
**Tasks:** TRANSLATION
## MS-Go/autotrain-hjuihu-1974565969

Record 45,526 · Author: MS-Go · Pipeline tag: summarization · Created: 2022-11-03T12:26:38Z · Last modified: 2022-11-03T12:58:30+00:00 · Downloads: 11 · Likes: 0

Tags: transformers, pytorch, autotrain, summarization, unk, dataset:MS-Go/autotrain-data-hjuihu, co2_eq_emissions, endpoints_compatible, region:us
---
datasets:
- MS-Go/autotrain-data-hjuihu
language:
- unk
tags:
- autotrain
- summarization
widget:
- text: I love AutoTrain 🤗
co2_eq_emissions:
  emissions: 49.671043265609676
---

# Model Trained Using AutoTrain

- Problem type: Summarization
- Model ID: 1974565969
- CO2 Emissions (in grams): 49.6710

## Validation Metrics

- Loss: 2.889
- Rouge1: 36.489
- Rouge2: 7.128
- RougeL: 18.766
- RougeLsum: 33.217
- Gen Len: 141.972

## Usage

You can use cURL to access this model:

```
$ curl -X POST -H "Authorization: Bearer YOUR_HUGGINGFACE_API_KEY" -H "Content-Type: application/json" -d '{"inputs": "I love AutoTrain"}' https://api-inference.huggingface.co/MS-Go/autotrain-hjuihu-1974565969
```
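For convenience, the same call can be made from Python. This sketch mirrors the cURL command above exactly (same endpoint URL and placeholder API key).

```python
# requests-based equivalent of the cURL call above; the API key is a placeholder.
import requests

API_URL = "https://api-inference.huggingface.co/MS-Go/autotrain-hjuihu-1974565969"
headers = {"Authorization": "Bearer YOUR_HUGGINGFACE_API_KEY"}

response = requests.post(API_URL, headers=headers, json={"inputs": "I love AutoTrain"})
print(response.json())
```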
**Domain:** Non_BioNLP
**Tasks:** SUMMARIZATION
## thrunlab/t5-large_cola_sp0_ar0

Record 45,527 · Author: thrunlab · Pipeline tag: text-classification · Created: 2023-10-31T17:48:38Z · Last modified: 2023-10-31T20:41:40+00:00 · Downloads: 31 · Likes: 0

Tags: transformers, pytorch, t5, text-classification, generated_from_trainer, dataset:glue, base_model:google-t5/t5-large, base_model:finetune:google-t5/t5-large, license:apache-2.0, model-index, autotrain_compatible, text-generation-inference, endpoints_compatible, region:us
--- base_model: t5-large datasets: - glue license: apache-2.0 metrics: - accuracy tags: - generated_from_trainer model-index: - name: t5-large_cola_sp0_ar0 results: - task: type: text-classification name: Text Classification dataset: name: glue type: glue config: cola split: validation args: cola metrics: - type: accuracy value: 0.880859375 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # t5-large_cola_sp0_ar0 This model is a fine-tuned version of [t5-large](https://huggingface.co/t5-large) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.4179 - Accuracy: 0.8809 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 16 - eval_batch_size: 32 - seed: 1 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 20 - num_epochs: 6 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.5885 | 0.05 | 25 | 0.6751 | 0.6913 | | 0.5475 | 0.11 | 50 | 0.5338 | 0.6913 | | 0.5122 | 0.16 | 75 | 0.4847 | 0.7919 | | 0.4486 | 0.21 | 100 | 0.5089 | 0.7996 | | 0.4087 | 0.27 | 125 | 0.5139 | 0.8063 | | 0.4022 | 0.32 | 150 | 0.5188 | 0.8035 | | 0.4245 | 0.37 | 175 | 0.5196 | 0.7987 | | 0.4298 | 0.42 | 200 | 0.6226 | 0.8006 | | 0.4326 | 0.48 | 225 | 0.6169 | 0.8015 | | 0.4321 | 0.53 | 250 | 0.6173 | 0.7987 | | 0.4288 | 0.58 | 275 | 0.4786 | 0.8102 | | 0.3914 | 0.64 | 300 | 0.5147 | 0.8054 | | 0.3519 | 0.69 | 325 | 0.5691 | 0.8150 | | 0.4036 | 0.74 | 350 | 0.4560 | 0.8236 | | 0.3706 | 0.8 | 375 | 0.4640 | 0.8245 | | 0.3584 | 0.85 | 400 | 0.4605 | 0.8207 | | 0.3539 | 0.9 | 425 | 0.4932 | 0.8217 | | 0.3982 | 0.96 | 450 | 0.5397 | 0.8073 | | 0.3352 | 1.01 | 475 | 0.5490 | 0.8150 | | 0.2631 | 1.06 | 500 | 0.9244 | 0.8121 | | 0.2992 | 1.11 | 525 | 0.5666 | 0.8169 | | 0.2308 | 1.17 | 550 | 0.7285 | 0.8178 | | 0.2893 | 1.22 | 575 | 0.6907 | 0.8198 | | 0.2809 | 1.27 | 600 | 0.4998 | 0.8140 | | 0.2469 | 1.33 | 625 | 0.7260 | 0.8236 | | 0.331 | 1.38 | 650 | 0.5812 | 0.8293 | | 0.286 | 1.43 | 675 | 0.5102 | 0.8360 | | 0.347 | 1.49 | 700 | 0.5696 | 0.8255 | | 0.2971 | 1.54 | 725 | 0.4114 | 0.8380 | | 0.3048 | 1.59 | 750 | 0.5466 | 0.8169 | | 0.3168 | 1.65 | 775 | 0.4787 | 0.8274 | | 0.2247 | 1.7 | 800 | 0.7926 | 0.8063 | | 0.2666 | 1.75 | 825 | 0.5763 | 0.8274 | | 0.2856 | 1.8 | 850 | 0.5131 | 0.8303 | | 0.2967 | 1.86 | 875 | 0.4970 | 0.8293 | | 0.296 | 1.91 | 900 | 0.5532 | 0.8293 | | 0.2828 | 1.96 | 925 | 0.4777 | 0.8274 | | 0.2708 | 2.02 | 950 | 0.5433 | 0.8351 | | 0.1406 | 2.07 | 975 | 0.6351 | 0.8351 | | 0.2046 | 2.12 | 1000 | 0.6058 | 0.8332 | | 0.2227 | 2.18 | 1025 | 0.5616 | 0.8408 | | 0.1551 | 2.23 | 1050 | 1.0299 | 0.8360 | | 0.1465 | 2.28 | 1075 | 0.7842 | 0.8380 | | 0.2171 | 2.34 | 1100 | 0.6329 | 0.8437 | | 0.1588 | 2.39 | 1125 | 0.7575 | 0.8418 | | 0.4245 | 2.44 | 1150 | 0.7603 | 0.8351 | | 0.2124 | 2.49 | 1175 | 0.5838 | 0.8447 | | 0.2333 | 2.55 | 1200 | 0.4896 | 0.8418 | | 0.1943 | 2.6 | 1225 | 0.6343 | 0.8332 | | 0.1961 | 2.65 | 1250 | 0.6343 | 0.8284 | | 0.1981 | 2.71 | 1275 | 0.6145 | 0.8332 | | 0.2151 | 2.76 | 1300 
| 0.6335 | 0.8360 | | 0.1634 | 2.81 | 1325 | 1.1357 | 0.8399 | | 0.1526 | 2.87 | 1350 | 1.0044 | 0.8293 | | 0.2096 | 2.92 | 1375 | 0.7761 | 0.8360 | | 0.2135 | 2.97 | 1400 | 0.9338 | 0.8351 | | 0.155 | 3.03 | 1425 | 3.3297 | 0.8360 | | 0.3667 | 3.08 | 1450 | 4.0564 | 0.8370 | | 0.5925 | 3.13 | 1475 | 6.7411 | 0.8408 | | 0.5866 | 3.18 | 1500 | 7.1940 | 0.8399 | | 0.3812 | 3.24 | 1525 | 7.0097 | 0.8351 | | 0.1041 | 3.29 | 1550 | 7.0157 | 0.8351 | | 0.3451 | 3.34 | 1575 | 6.2653 | 0.8418 | | 0.1121 | 3.4 | 1600 | 4.2608 | 0.8485 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu117 - Datasets 2.14.5 - Tokenizers 0.11.6
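The card above stops at the hyperparameters and results, so for completeness here is a minimal inference sketch. It assumes the checkpoint loads as a standard seq2seq T5 and that CoLA inputs were verbalized with the original T5 task prefix; the `model_id` and the expected "acceptable"/"unacceptable" outputs are assumptions, not documented in the card.

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

model_id = "t5-large_cola_sp0_ar0"  # hypothetical hub id or local path of this checkpoint
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForSeq2SeqLM.from_pretrained(model_id)

# The original T5 recipe prefixes GLUE CoLA inputs with "cola sentence:"; whether
# this finetune used the same prefix is an assumption.
inputs = tokenizer("cola sentence: The book was read by me.", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=5)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))  # e.g. "acceptable"
```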
null
Non_BioNLP
{"base_model": "t5-large", "datasets": ["glue"], "license": "apache-2.0", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "t5-large_cola_sp0_ar0", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "config": "cola", "split": "validation", "args": "cola"}, "metrics": [{"type": "accuracy", "value": 0.880859375, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,528
RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf
RichardErkhov
null
[ "gguf", "arxiv:2306.01708", "endpoints_compatible", "region:us", "conversational" ]
2024-10-30T05:41:10Z
2024-10-30T07:09:59+00:00
21
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Llama-3.2-3B-Mix-Skill - GGUF - Model creator: https://huggingface.co/bunnycore/ - Original model: https://huggingface.co/bunnycore/Llama-3.2-3B-Mix-Skill/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Llama-3.2-3B-Mix-Skill.Q2_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q2_K.gguf) | Q2_K | 1.39GB | | [Llama-3.2-3B-Mix-Skill.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q3_K_S.gguf) | Q3_K_S | 1.59GB | | [Llama-3.2-3B-Mix-Skill.Q3_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q3_K.gguf) | Q3_K | 1.73GB | | [Llama-3.2-3B-Mix-Skill.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q3_K_M.gguf) | Q3_K_M | 1.73GB | | [Llama-3.2-3B-Mix-Skill.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q3_K_L.gguf) | Q3_K_L | 1.85GB | | [Llama-3.2-3B-Mix-Skill.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.IQ4_XS.gguf) | IQ4_XS | 1.91GB | | [Llama-3.2-3B-Mix-Skill.Q4_0.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_0.gguf) | Q4_0 | 1.99GB | | [Llama-3.2-3B-Mix-Skill.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.IQ4_NL.gguf) | IQ4_NL | 2.0GB | | [Llama-3.2-3B-Mix-Skill.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_K_S.gguf) | Q4_K_S | 2.0GB | | [Llama-3.2-3B-Mix-Skill.Q4_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_K.gguf) | Q4_K | 2.09GB | | [Llama-3.2-3B-Mix-Skill.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_K_M.gguf) | Q4_K_M | 2.09GB | | [Llama-3.2-3B-Mix-Skill.Q4_1.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q4_1.gguf) | Q4_1 | 2.18GB | | [Llama-3.2-3B-Mix-Skill.Q5_0.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_0.gguf) | Q5_0 | 2.37GB | | [Llama-3.2-3B-Mix-Skill.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_K_S.gguf) | Q5_K_S | 2.37GB | | [Llama-3.2-3B-Mix-Skill.Q5_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_K.gguf) | Q5_K | 2.41GB | | [Llama-3.2-3B-Mix-Skill.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_K_M.gguf) | Q5_K_M | 2.41GB | | [Llama-3.2-3B-Mix-Skill.Q5_1.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q5_1.gguf) | Q5_1 | 2.55GB | | 
[Llama-3.2-3B-Mix-Skill.Q6_K.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q6_K.gguf) | Q6_K | 2.76GB | | [Llama-3.2-3B-Mix-Skill.Q8_0.gguf](https://huggingface.co/RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf/blob/main/Llama-3.2-3B-Mix-Skill.Q8_0.gguf) | Q8_0 | 3.58GB | Original model description: --- library_name: transformers tags: - mergekit - merge base_model: - bunnycore/Llama-3.2-3B-Long-Think - huihui-ai/Llama-3.2-3B-Instruct-abliterated - bunnycore/Llama-3.2-3B-Pure-RP model-index: - name: Llama-3.2-3B-Mix-Skill results: - task: type: text-generation name: Text Generation dataset: name: IFEval (0-Shot) type: HuggingFaceH4/ifeval args: num_few_shot: 0 metrics: - type: inst_level_strict_acc and prompt_level_strict_acc value: 64.04 name: strict accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: BBH (3-Shot) type: BBH args: num_few_shot: 3 metrics: - type: acc_norm value: 23.78 name: normalized accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MATH Lvl 5 (4-Shot) type: hendrycks/competition_math args: num_few_shot: 4 metrics: - type: exact_match value: 12.69 name: exact match source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GPQA (0-shot) type: Idavidrein/gpqa args: num_few_shot: 0 metrics: - type: acc_norm value: 1.57 name: acc_norm source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MuSR (0-shot) type: TAUR-Lab/MuSR args: num_few_shot: 0 metrics: - type: acc_norm value: 2.75 name: acc_norm source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU-PRO (5-shot) type: TIGER-Lab/MMLU-Pro config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 23.56 name: accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=bunnycore/Llama-3.2-3B-Mix-Skill name: Open LLM Leaderboard --- This language model is a merged version of several pre-trained models, designed to excel in roleplay, long-form question answering, and prompt following tasks. It was created using the TIES merge method with huihui-ai/Llama-3.2-3B-Instruct-abliterated as the base model. # Intended Use: This model is suitable for a variety of applications, including: - Creative Writing: Generating stories, poems, scripts, and other forms of creative text. - Question Answering: Providing comprehensive and informative answers to a wide range of questions. - Role-Playing: Engaging in interactive role-playing scenarios with users. - Prompt Following: Completing tasks and generating text based on specific prompts or instructions. # merge This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). 
## Merge Details ### Merge Method This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method using [huihui-ai/Llama-3.2-3B-Instruct-abliterated](https://huggingface.co/huihui-ai/Llama-3.2-3B-Instruct-abliterated) as a base. ### Models Merged The following models were included in the merge: * [bunnycore/Llama-3.2-3B-Long-Think](https://huggingface.co/bunnycore/Llama-3.2-3B-Long-Think) * [bunnycore/Llama-3.2-3B-Pure-RP](https://huggingface.co/bunnycore/Llama-3.2-3B-Pure-RP) ### Configuration The following YAML configuration was used to produce this model: ```yaml models: - model: bunnycore/Llama-3.2-3B-Long-Think parameters: density: 0.5 weight: 0.5 - model: bunnycore/Llama-3.2-3B-Pure-RP parameters: density: 0.5 weight: 0.5 merge_method: ties base_model: huihui-ai/Llama-3.2-3B-Instruct-abliterated parameters: normalize: false int8_mask: true dtype: float16 ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_bunnycore__Llama-3.2-3B-Mix-Skill) | Metric |Value| |-------------------|----:| |Avg. |21.40| |IFEval (0-Shot) |64.04| |BBH (3-Shot) |23.78| |MATH Lvl 5 (4-Shot)|12.69| |GPQA (0-shot) | 1.57| |MuSR (0-shot) | 2.75| |MMLU-PRO (5-shot) |23.56|
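Since the table above only lists the quant files, a short sketch of fetching one of them may help; `Q4_K_M` is chosen purely as a common size/quality compromise, and any GGUF runtime (e.g. llama.cpp) can consume the downloaded file.

```python
from huggingface_hub import hf_hub_download

# Download one quant from the table above; swap the filename for any other row.
gguf_path = hf_hub_download(
    repo_id="RichardErkhov/bunnycore_-_Llama-3.2-3B-Mix-Skill-gguf",
    filename="Llama-3.2-3B-Mix-Skill.Q4_K_M.gguf",
)
print(gguf_path)  # pass this path to a GGUF runtime such as llama.cpp
```

The YAML configuration above should likewise be replayable with mergekit's `mergekit-yaml` entry point to reproduce the unquantized merge.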
null
Non_BioNLP
{}
task
[ "QUESTION_ANSWERING" ]
45,529
Varsha00/finetuned-opusmt-en-hi-gu
Varsha00
text2text-generation
[ "transformers", "safetensors", "marian", "text2text-generation", "en", "hi", "gu", "dataset:ai4bharat/samanantar", "dataset:wmt/wmt19", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-07-31T15:37:14Z
2024-07-31T16:11:55+00:00
16
0
--- base_model: Helsinki/opus-mt-en-mul datasets: - ai4bharat/samanantar - wmt/wmt19 language: - en - hi - gu license: apache-2.0 metrics: - bleu --- # Finetuning This model is a fine-tuned version of [Varsha00/finetuned-opusmt-en-to-hi](https://huggingface.co/Varsha00/finetuned-opusmt-en-to-hi) on the Samanantar and WMT News datasets. source group: English target group: Gujarati model: transformer ## Model description This model is a sequentially finetuned version of the Helsinki-NLP/opus-mt-en-mul model, designed for translating between English and Gujarati. The model was initially finetuned on the Hindi language using a substantial dataset and subsequently finetuned on Gujarati using a smaller dataset. This approach, known as sequential finetuning or cascaded finetuning, allows the model to leverage the knowledge gained from Hindi to improve its performance on Gujarati translations, despite the limited data available for the latter. ## Training and evaluation data ai4bharat/samanantar WMT-News ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-5 - warmup_steps: 500 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - num_epochs: 10 ### Benchmark Evaluation - BLEU score on Tatoeba: 27.7761903401179 - BLEU score on IN-22: 16.437183600289 ### Framework versions - Transformers 4.42.3 - Pytorch 2.1.2 - Datasets 2.20.0 - Tokenizers 0.19.1
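A minimal translation sketch, assuming the checkpoint keeps the Marian architecture of its opus-mt-en-mul ancestor (the repo tags list `marian`). The `>>guj<<` target-language token follows the en-mul convention; whether this finetune still requires it is not documented.

```python
from transformers import MarianMTModel, MarianTokenizer

model_id = "Varsha00/finetuned-opusmt-en-hi-gu"
tokenizer = MarianTokenizer.from_pretrained(model_id)
model = MarianMTModel.from_pretrained(model_id)

# opus-mt-en-mul ancestors select the target language with a token such as
# ">>guj<<"; this finetune may or may not still expect it.
batch = tokenizer([">>guj<< How are you today?"], return_tensors="pt", padding=True)
generated = model.generate(**batch, max_new_tokens=64)
print(tokenizer.batch_decode(generated, skip_special_tokens=True))
```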
null
Non_BioNLP
{"base_model": "Helsinki/opus-mt-en-mul", "datasets": ["ai4bharat/samanantar", "wmt/wmt19"], "language": ["en", "hi", "gu"], "license": "apache-2.0", "metrics": ["bleu"]}
task
[ "TRANSLATION" ]
45,530
ymoslem/whisper-medium-ga2en-v4
ymoslem
automatic-speech-recognition
[ "transformers", "tensorboard", "safetensors", "whisper", "automatic-speech-recognition", "generated_from_trainer", "ga", "en", "dataset:ymoslem/IWSLT2023-GA-EN", "dataset:ymoslem/FLEURS-GA-EN", "dataset:ymoslem/BitesizeIrish-GA-EN", "dataset:ymoslem/SpokenWords-GA-EN-MTed", "dataset:ymoslem/Tatoeba-Speech-Irish", "dataset:ymoslem/Wikimedia-Speech-Irish", "base_model:openai/whisper-medium", "base_model:finetune:openai/whisper-medium", "license:apache-2.0", "model-index", "endpoints_compatible", "region:us" ]
2024-04-19T21:25:49Z
2024-04-30T11:27:57+00:00
18
1
--- base_model: openai/whisper-medium datasets: - ymoslem/IWSLT2023-GA-EN - ymoslem/FLEURS-GA-EN - ymoslem/BitesizeIrish-GA-EN - ymoslem/SpokenWords-GA-EN-MTed - ymoslem/Tatoeba-Speech-Irish - ymoslem/Wikimedia-Speech-Irish language: - ga - en license: apache-2.0 metrics: - bleu - wer tags: - generated_from_trainer model-index: - name: Whisper Medium GA-EN Speech Translation results: - task: type: automatic-speech-recognition name: Automatic Speech Recognition dataset: name: IWSLT-2023, FLEURS, BiteSize, SpokenWords, Tatoeba, and Wikimedia, augmented with noise type: ymoslem/IWSLT2023-GA-EN metrics: - type: bleu value: 32.01 name: Bleu - type: wer value: 62.76452048626745 name: Wer --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Whisper Medium GA-EN Speech Translation This model is a fine-tuned version of [openai/whisper-medium](https://huggingface.co/openai/whisper-medium) on the IWSLT-2023, FLEURS, BiteSize, SpokenWords, Tatoeba, and Wikimedia, augmented with noise dataset. The datasets are augmented in two ways: noise augmentation, and truncating low-amplitude samples. The best model checkpoint (this version) based on ChrF is at step 2900, epoch 0.6349, and it achieves the following results on the evaluation set: - Loss: 1.1883 - Bleu: 32.88 - Chrf: 51.52 - Wer: 62.0441 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_steps: 0.02 - training_steps: 3000 - mixed_precision_training: Native AMP ### Training results | Training Loss | Epoch | Step | Validation Loss | Bleu | Chrf | Wer | |:-------------:|:------:|:----:|:---------------:|:-----:|:-----:|:--------:| | 2.4487 | 0.0219 | 100 | 1.9518 | 8.34 | 24.49 | 117.2445 | | 2.11 | 0.0438 | 200 | 1.6630 | 15.32 | 32.12 | 84.0612 | | 1.9757 | 0.0657 | 300 | 1.5366 | 10.86 | 33.42 | 131.7875 | | 1.7964 | 0.0876 | 400 | 1.4825 | 19.81 | 36.71 | 81.9451 | | 1.6422 | 0.1095 | 500 | 1.4432 | 18.83 | 40.4 | 84.0162 | | 1.3839 | 0.1314 | 600 | 1.4169 | 24.91 | 40.87 | 69.0230 | | 1.352 | 0.1533 | 700 | 1.4340 | 25.01 | 41.57 | 71.5894 | | 1.2434 | 0.1752 | 800 | 1.3813 | 24.05 | 41.29 | 73.7506 | | 1.2223 | 0.1970 | 900 | 1.3578 | 25.89 | 41.61 | 70.5988 | | 1.0414 | 0.2189 | 1000 | 1.3075 | 27.45 | 44.17 | 68.2575 | | 0.9199 | 0.2408 | 1100 | 1.3022 | 23.14 | 44.3 | 84.1513 | | 0.8648 | 0.2627 | 1200 | 1.3050 | 23.36 | 43.37 | 72.4448 | | 0.8469 | 0.2846 | 1300 | 1.2853 | 28.37 | 45.97 | 67.1319 | | 0.7649 | 0.3065 | 1400 | 1.2755 | 28.56 | 46.76 | 66.0964 | | 0.7321 | 0.3284 | 1500 | 1.2750 | 27.23 | 46.1 | 69.3381 | | 0.6541 | 0.3503 | 1600 | 1.2557 | 30.02 | 48.06 | 65.6011 | | 0.6107 | 0.3722 | 1700 | 1.2520 | 30.41 | 49.23 | 64.2954 | | 0.5738 | 0.3941 | 1800 | 1.2435 | 32.45 | 50.27 | 63.4399 | | 0.4983 | 0.4160 | 1900 | 1.2007 | 31.17 | 48.58 | 64.0702 | | 0.4439 | 0.4379 | 2000 | 1.2140 | 32.29 | 50.37 | 60.6033 | | 0.367 | 0.4598 | 2100 | 1.2230 | 29.54 | 49.14 | 67.7172 | | 0.2807 | 0.4817 | 2200 | 1.2277 | 33.1 | 51.21 | 62.9446 | | 0.2621 
| 0.5036 | 2300 | 1.2441 | 30.59 | 49.49 | 64.8807 | | 0.2965 | 0.5255 | 2400 | 1.1969 | 31.82 | 49.67 | 63.5299 | | 0.236 | 0.5473 | 2500 | 1.2275 | 31.17 | 50.29 | 65.1959 | | 0.229 | 0.5692 | 2600 | 1.2008 | 30.02 | 50.27 | 70.6439 | | 0.164 | 0.5911 | 2700 | 1.2192 | 31.37 | 50.57 | 63.6200 | | 0.1786 | 0.6130 | 2800 | 1.1965 | 31.81 | 50.13 | 62.8546 | | 0.1987 | 0.6349 | 2900 | 1.1883 | 32.88 | 51.52 | 62.0441 | | 0.1633 | 0.6568 | 3000 | 1.1903 | 32.01 | 50.38 | 62.7645 | ### Framework versions - Transformers 4.40.0 - Pytorch 2.0.1+cu118 - Datasets 2.19.0 - Tokenizers 0.19.1
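For reference, a minimal inference sketch with the `transformers` ASR pipeline; the audio path is a placeholder, and the pipeline is assumed to pick up the speech-translation generation settings baked into the checkpoint.

```python
from transformers import pipeline

asr = pipeline("automatic-speech-recognition", model="ymoslem/whisper-medium-ga2en-v4")
result = asr("irish_speech.wav")  # placeholder path to an Irish (Gaeilge) audio clip
print(result["text"])             # English translation of the spoken input
```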
null
Non_BioNLP
{"base_model": "openai/whisper-medium", "datasets": ["ymoslem/IWSLT2023-GA-EN", "ymoslem/FLEURS-GA-EN", "ymoslem/BitesizeIrish-GA-EN", "ymoslem/SpokenWords-GA-EN-MTed", "ymoslem/Tatoeba-Speech-Irish", "ymoslem/Wikimedia-Speech-Irish"], "language": ["ga", "en"], "license": "apache-2.0", "metrics": ["bleu", "wer"], "tags": ["generated_from_trainer"], "model-index": [{"name": "Whisper Medium GA-EN Speech Translation", "results": [{"task": {"type": "automatic-speech-recognition", "name": "Automatic Speech Recognition"}, "dataset": {"name": "IWSLT-2023, FLEURS, BiteSize, SpokenWords, Tatoeba, and Wikimedia, augmented with noise", "type": "ymoslem/IWSLT2023-GA-EN"}, "metrics": [{"type": "bleu", "value": 32.01, "name": "Bleu"}, {"type": "wer", "value": 62.76452048626745, "name": "Wer"}]}]}]}
task
[ "TRANSLATION" ]
45,531
tjake/granite-3.0-2b-instruct-JQ4
tjake
text-generation
[ "transformers", "safetensors", "granite", "text-generation", "language", "granite-3.0", "conversational", "arxiv:0000.00000", "base_model:ibm-granite/granite-3.0-2b-base", "base_model:finetune:ibm-granite/granite-3.0-2b-base", "license:apache-2.0", "model-index", "autotrain_compatible", "region:us" ]
2024-10-27T23:02:40Z
2024-10-27T23:14:40+00:00
20
0
--- base_model: - ibm-granite/granite-3.0-2b-base library_name: transformers license: apache-2.0 pipeline_tag: text-generation tags: - language - granite-3.0 inference: false model-index: - name: granite-3.0-2b-instruct results: - task: type: text-generation dataset: name: IFEval type: instruction-following metrics: - type: pass@1 value: 46.07 name: pass@1 - type: pass@1 value: 7.66 name: pass@1 - task: type: text-generation dataset: name: AGI-Eval type: human-exams metrics: - type: pass@1 value: 29.75 name: pass@1 - type: pass@1 value: 56.03 name: pass@1 - type: pass@1 value: 27.92 name: pass@1 - task: type: text-generation dataset: name: OBQA type: commonsense metrics: - type: pass@1 value: 43.2 name: pass@1 - type: pass@1 value: 66.36 name: pass@1 - type: pass@1 value: 76.79 name: pass@1 - type: pass@1 value: 71.9 name: pass@1 - type: pass@1 value: 53.37 name: pass@1 - task: type: text-generation dataset: name: BoolQ type: reading-comprehension metrics: - type: pass@1 value: 84.89 name: pass@1 - type: pass@1 value: 19.73 name: pass@1 - task: type: text-generation dataset: name: ARC-C type: reasoning metrics: - type: pass@1 value: 54.35 name: pass@1 - type: pass@1 value: 28.61 name: pass@1 - type: pass@1 value: 43.74 name: pass@1 - task: type: text-generation dataset: name: HumanEvalSynthesis type: code metrics: - type: pass@1 value: 50.61 name: pass@1 - type: pass@1 value: 45.58 name: pass@1 - type: pass@1 value: 51.83 name: pass@1 - type: pass@1 value: 41 name: pass@1 - task: type: text-generation dataset: name: GSM8K type: math metrics: - type: pass@1 value: 59.66 name: pass@1 - type: pass@1 value: 23.66 name: pass@1 - task: type: text-generation dataset: name: PAWS-X (7 langs) type: multilingual metrics: - type: pass@1 value: 61.42 name: pass@1 - type: pass@1 value: 37.13 name: pass@1 --- # Quantized Version of ibm-granite/granite-3.0-2b-instruct This model is a quantized variant of the ibm-granite/granite-3.0-2b-instruct model, optimized for use with Jlama, a Java-based inference engine. The quantization process reduces the model's size and improves inference speed, while maintaining high accuracy for efficient deployment in production environments. For more information on Jlama, visit the [Jlama GitHub repository](https://github.com/tjake/jlama). --- <!-- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cd5057674cdb524450093d/1hzxoPwqkBJXshKVVe6_9.png) --> <!-- ![image/png](granite-3_0-language-models_Group_1.png) --> # Granite-3.0-2B-Instruct **Model Summary:** Granite-3.0-2B-Instruct is a 2B parameter model finetuned from *Granite-3.0-2B-Base* using a combination of open source instruction datasets with permissive license and internally collected synthetic datasets. This model is developed using a diverse set of techniques with a structured chat format, including supervised finetuning, model alignment using reinforcement learning, and model merging. - **Developers:** Granite Team, IBM - **GitHub Repository:** [ibm-granite/granite-3.0-language-models](https://github.com/ibm-granite/granite-3.0-language-models) - **Website**: [Granite Docs](https://www.ibm.com/granite/docs/) - **Paper:** [Granite 3.0 Language Models](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/paper.pdf) - **Release Date**: October 21st, 2024 - **License:** [Apache 2.0](https://www.apache.org/licenses/LICENSE-2.0) **Supported Languages:** English, German, Spanish, French, Japanese, Portuguese, Arabic, Czech, Italian, Korean, Dutch, and Chinese. 
Users may finetune Granite 3.0 models for languages beyond these 12 languages. **Intended use:** The model is designed to respond to general instructions and can be used to build AI assistants for multiple domains, including business applications. *Capabilities* * Summarization * Text classification * Text extraction * Question-answering * Retrieval Augmented Generation (RAG) * Code related tasks * Function-calling tasks * Multilingual dialog use cases **Generation:** This is a simple example of how to use the Granite-3.0-2B-Instruct model. Install the following libraries: ```shell pip install torch torchvision torchaudio pip install accelerate pip install transformers ``` Then, copy the snippet from the section that is relevant for your use case. ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer device = "auto" model_path = "ibm-granite/granite-3.0-2b-instruct" tokenizer = AutoTokenizer.from_pretrained(model_path) # drop device_map if running on CPU model = AutoModelForCausalLM.from_pretrained(model_path, device_map=device) model.eval() # change input text as desired chat = [ { "role": "user", "content": "Please list one IBM Research laboratory located in the United States. You should only output its name and location." }, ] chat = tokenizer.apply_chat_template(chat, tokenize=False, add_generation_prompt=True) # tokenize the text and move it to the model's device ("auto" is a device_map strategy, not a torch device) input_tokens = tokenizer(chat, return_tensors="pt").to(model.device) # generate output tokens output = model.generate(**input_tokens, max_new_tokens=100) # decode output tokens into text output = tokenizer.batch_decode(output) # print output print(output) ``` **Model Architecture:** Granite-3.0-2B-Instruct is based on a decoder-only dense transformer architecture. Core components of this architecture are: GQA and RoPE, MLP with SwiGLU, RMSNorm, and shared input/output embeddings. | Model | 2B Dense | 8B Dense | 1B MoE | 3B MoE | | :-------- | :-------- | :--------| :--------| :--------| | Embedding size | **2048** | 4096 | 1024 | 1536 | | Number of layers | **40** | 40 | 24 | 32 | | Attention head size | **64** | 128 | 64 | 64 | | Number of attention heads | **32** | 32 | 16 | 24 | | Number of KV heads | **8** | 8 | 8 | 8 | | MLP hidden size | **8192** | 12800 | 512 | 512 | | MLP activation | **SwiGLU** | SwiGLU | SwiGLU | SwiGLU | | Number of Experts | **—** | — | 32 | 40 | | MoE TopK | **—** | — | 8 | 8 | | Initialization std | **0.1** | 0.1 | 0.1 | 0.1 | | Sequence Length | **4096** | 4096 | 4096 | 4096 | | Position Embedding | **RoPE** | RoPE | RoPE | RoPE | | # Parameters | **2.5B** | 8.1B | 1.3B | 3.3B | | # Active Parameters | **2.5B** | 8.1B | 400M | 800M | | # Training tokens | **12T** | 12T | 10T | 10T | **Training Data:** Overall, our SFT data is largely composed of three key sources: (1) publicly available datasets with permissive license, (2) internal synthetic data targeting specific capabilities, and (3) very small amounts of human-curated data. A detailed attribution of datasets can be found in the [Granite Technical Report](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/paper.pdf) and [Accompanying Author List](https://github.com/ibm-granite/granite-3.0-language-models/blob/main/author-ack.pdf). **Infrastructure:** We train Granite 3.0 Language Models using IBM's supercomputing cluster, Blue Vela, which is outfitted with NVIDIA H100 GPUs.
This cluster provides a scalable and efficient infrastructure for training our models over thousands of GPUs while minimizing environmental impact by utilizing 100% renewable energy sources. **Ethical Considerations and Limitations:** Granite 3.0 Instruct Models are primarily finetuned using instruction-response pairs mostly in English, but also multilingual data covering eleven languages. Although this model can handle multilingual dialog use cases, its performance on those languages might not match its performance on English tasks. In such cases, introducing a small number of examples (few-shot) can help the model generate more accurate outputs. While this model has been aligned with safety in mind, it may in some cases produce inaccurate, biased, or unsafe responses to user prompts. We therefore urge the community to use this model with proper safety testing and tuning tailored to their specific tasks. <!-- ## Citation ``` @misc{granite-models, author = {author 1, author2, ...}, title = {}, journal = {}, volume = {}, year = {2024}, url = {https://arxiv.org/abs/0000.00000}, } ``` -->
null
Non_BioNLP
{"base_model": ["ibm-granite/granite-3.0-2b-base"], "library_name": "transformers", "license": "apache-2.0", "pipeline_tag": "text-generation", "tags": ["language", "granite-3.0"], "inference": false, "model-index": [{"name": "granite-3.0-2b-instruct", "results": [{"task": {"type": "text-generation"}, "dataset": {"name": "IFEval", "type": "instruction-following"}, "metrics": [{"type": "pass@1", "value": 46.07, "name": "pass@1"}, {"type": "pass@1", "value": 7.66, "name": "pass@1"}]}, {"task": {"type": "text-generation"}, "dataset": {"name": "AGI-Eval", "type": "human-exams"}, "metrics": [{"type": "pass@1", "value": 29.75, "name": "pass@1"}, {"type": "pass@1", "value": 56.03, "name": "pass@1"}, {"type": "pass@1", "value": 27.92, "name": "pass@1"}]}, {"task": {"type": "text-generation"}, "dataset": {"name": "OBQA", "type": "commonsense"}, "metrics": [{"type": "pass@1", "value": 43.2, "name": "pass@1"}, {"type": "pass@1", "value": 66.36, "name": "pass@1"}, {"type": "pass@1", "value": 76.79, "name": "pass@1"}, {"type": "pass@1", "value": 71.9, "name": "pass@1"}, {"type": "pass@1", "value": 53.37, "name": "pass@1"}]}, {"task": {"type": "text-generation"}, "dataset": {"name": "BoolQ", "type": "reading-comprehension"}, "metrics": [{"type": "pass@1", "value": 84.89, "name": "pass@1"}, {"type": "pass@1", "value": 19.73, "name": "pass@1"}]}, {"task": {"type": "text-generation"}, "dataset": {"name": "ARC-C", "type": "reasoning"}, "metrics": [{"type": "pass@1", "value": 54.35, "name": "pass@1"}, {"type": "pass@1", "value": 28.61, "name": "pass@1"}, {"type": "pass@1", "value": 43.74, "name": "pass@1"}]}, {"task": {"type": "text-generation"}, "dataset": {"name": "HumanEvalSynthesis", "type": "code"}, "metrics": [{"type": "pass@1", "value": 50.61, "name": "pass@1"}, {"type": "pass@1", "value": 45.58, "name": "pass@1"}, {"type": "pass@1", "value": 51.83, "name": "pass@1"}, {"type": "pass@1", "value": 41, "name": "pass@1"}]}, {"task": {"type": "text-generation"}, "dataset": {"name": "GSM8K", "type": "math"}, "metrics": [{"type": "pass@1", "value": 59.66, "name": "pass@1"}, {"type": "pass@1", "value": 23.66, "name": "pass@1"}]}, {"task": {"type": "text-generation"}, "dataset": {"name": "PAWS-X (7 langs)", "type": "multilingual"}, "metrics": [{"type": "pass@1", "value": 61.42, "name": "pass@1"}, {"type": "pass@1", "value": 37.13, "name": "pass@1"}]}]}]}
task
[ "TEXT_CLASSIFICATION", "SUMMARIZATION" ]
45,532
lomov/strategydisofmaterialimpactsv1
lomov
text-classification
[ "transformers", "tensorboard", "safetensors", "roberta", "text-classification", "autotrain", "dataset:strategydisofmaterialimpactsv1/autotrain-data", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-05-20T01:02:07Z
2024-05-20T01:03:45+00:00
9
0
--- datasets: - strategydisofmaterialimpactsv1/autotrain-data tags: - autotrain - text-classification widget: - text: I love AutoTrain --- # Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 0.4904100298881531 f1_macro: 0.851601435352396 f1_micro: 0.8658536585365854 f1_weighted: 0.8538194199208925 precision_macro: 0.8594329005283454 precision_micro: 0.8658536585365854 precision_weighted: 0.8606490578892111 recall_macro: 0.862797619047619 recall_micro: 0.8658536585365854 recall_weighted: 0.8658536585365854 accuracy: 0.8658536585365854
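The card omits a usage snippet; a minimal sketch with the `text-classification` pipeline follows, reusing the card's own widget text. The label names returned come from the model config and are not documented here.

```python
from transformers import pipeline

classifier = pipeline("text-classification", model="lomov/strategydisofmaterialimpactsv1")
print(classifier("I love AutoTrain"))  # the card's own widget example
```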
null
Non_BioNLP
{"datasets": ["strategydisofmaterialimpactsv1/autotrain-data"], "tags": ["autotrain", "text-classification"], "widget": [{"text": "I love AutoTrain"}]}
task
[ "TEXT_CLASSIFICATION" ]
45,533
seongil-dn/bge-m3-kor-retrieval-bs1024-checkpoint-236
seongil-dn
sentence-similarity
[ "sentence-transformers", "safetensors", "xlm-roberta", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:482743", "loss:CachedMultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:2101.06983", "base_model:BAAI/bge-m3", "base_model:finetune:BAAI/bge-m3", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-12-08T15:07:26Z
2024-12-08T15:08:42+00:00
6
0
--- base_model: BAAI/bge-m3 library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:482743 - loss:CachedMultipleNegativesRankingLoss widget: - source_sentence: 언제 청년층 생산가능인구 수가 9,149,000명으로 드러났어 sentences: - '제2장 청년층 노동시장 동향 및 청년내일채움공제의 성과 개괄 □ 본 장에서는 청년내일채움공제 사업의 도입 배경으로 작용한 청년층 노동시장의 현황을 살펴보고, 세부 성과에 앞서 청년내일채움공제가 현 청년고용 실태에 미친 개괄적 성과를 평가하고자 한다 제1절 청년층 노동시장 동향 1. 청년층 인구구조 변화 추이 □ 고용률과 실업률, 취업자 수 등의 고용지표는 경제 및 노동시장의 상황 뿐 만 아니라, 인구구조 변화에 의해 영향을 받는 만큼, 청년층의 노동시장 동향을 파악하기 위해서는 청년층 인구구조의 변화양상을 이해할 필요가 있음 □ 청년층 인구구조 변화 추이 ○ 전체 생산가능인구가 여전히 증가추세에 있는 것에 비해, 청년층(15∼29세) 생산가능인구는 1990년을 기점으로 감소추세에 있음. 2018년 청년층 생산가능인구는 전년대비 133천명 감소한 9,149천명임 - 15∼19세, 20∼24세 연령대의 생산가능인구는 감소추세에 있으나, 25∼29세 생산가능인구(3,432천명, 전년대비 96천명 증가)는 증가추세에 있으며 향후 2∼3년간 증가추세가 지속될 것으로 전망됨 ○ 청년층 경제활동인구는 2000년대 지속적으로 감소하다가 2013∼2016년 기간 증가추세를 보였으나, 2017년 이후 감소하고 있음. 2018년 청년층 경제활동인구는 전년대비 21천명 감소한 4,312천명을 기록하였음' - 2. 인구구조변화의 현황<br>UN 인구국 자료를 분석해보면 2010년 인구와 2050년의 중간 수준 예측치를 기준으로 인구 1,000만 명 이상의 국가이면서 2050년까지 생산가능인구(15세~64세)가 감소하는 국가는 198개국중에서 19개국이다. 이 중에서 우리나라는 생산 가능인구(15세~64세) 감소율이 세계 6위 수준을 기록할 것으로 분석되었고, 이에 따라 생산가능 인구가 2010년 대비 2050년에는 27%가 줄어들 것으로 예상된다. 이에 반하여 노인인구는 236%가 증가할 것으로 예상된다. 우리나라는 생산가능인구 감소율과 노인인구(65세 이상) 증가율이 매우 높은 나라 중 하나이며, 동시에 유소년(0~14세) 인구의 감소도 22% 정도 될 것으로 예상하고 있다. <br>UN의 인구국(局) 자료인 [표 1]을 보면, 첫째 현재 우리나라를 포함하여 선진국에서조차도 현재 인구감소를 겪어본 국가가 거의 없어 이것이 가져다 줄 영향에 대한 경험적 지식이 거의 전무하다는 점이다. <br>둘째, 우리나라의 경우에는 무엇보다도 인구감소기로의 전환이 급격하게 이루어지고 있다. 따라서 우리 스스로 이의 긍정적ㆍ부정적 영향을 연역적으로 추론해볼 필요가 있다. 이를 위해서는 우리 사회의 전체 변화를 거시적으로 바라보고 정책 수단에 따라 가져다 줄 다양한 변화를 살펴볼 수 있는 시뮬레이션(simulation) 도구 및 정책 개발 그리고 집행 체제가 필요하다고 할 수 있다. - '7. 해외사례 플랫폼 노동관계에 특화된 산재보험제도를 별도로 규정하고 있는 포괄적 입법례는 아직 보이지 않는다. 다만, 플랫폼 노동에 관한 미국의 앨라배마 주법(SB363, 2018.3.13. 제정)에서는 플랫폼 노동자들에게 일정한 수준의 복지를 허용하기 위한 법적 기초를 만들고 있다(부표 1 참조). SB363은 플랫폼 노동자들의 이동성을 고려한 ‘이동 가능 복지제도’(portable benefit plans)를 플랫폼 사업주들이 만들면, 이 복지제도를 통해 다수의 플랫폼과 계약을 맺는 플랫폼 노동자들이 복수 사업주 혹은 사업주의 변경에도 불구하고 복지제도를 유지하면서 그로부터 복지수혜를 얻을 수 있도록 하고 있다. 이때의 복지제도는 건강보험, 재해보험, 퇴직수당제도 등으로 구성될 수 있고, 플랫폼 노동자가 복지계좌를 개설하면 여기에 플랫폼 사업주가 복지기여금을 납부하는 방식을 취하고 있다. 국가에 의한 사회보험제도가 아니고, 플랫폼 사업주에 의한 자발적 복지제도의 기초를 구성한 것이기는 하지만, 지방정부가 이를 관리하도록 함으로써 공정성과 객관성을 담보한다. 한국의 산재보험제도에 대입시켜 비교할 수 있는 입법례가 아니기는 하지만, 플랫폼 사업주로 하여금 플랫폼 노동자에 대한 일정한 복지제도 형성을 촉구하고 있다는 점, 해당 복지제도에 대하여 플랫폼이 일정한 기여금을 납부하도록 하고 있다는 점 등에서 하나의 제도적 모델로 시사가능하다. 한편, 미국의 플로리다, 인디아나, 아이오와, 켄터키, 테네시, 텍사스, 유타주 등에서는 플랫폼노동자(경우에 따라서는 가사노동자에 국한)의 의미를 규정하면서, 주법상 재해보험제도 및(또는) 실업급여제도의 가입자가 될 수 있도록 하고 있다.' - source_sentence: 도매 전력 매매를 주로 한 전력거래체계를 만들었을 때 어느 부문이 안정성 영향을 받을 수 있어 sentences: - '(4) 외환시장 원/달러 환율 2016년 말 대비 하락 원/달러 환율은 연초 미 보호무역주의에 대한 우려, 수출 호조, 외국인 증권투자자금 유입 등으로 3월 말까지 상당 폭 하락하였다. 이후 북한 관련 리스크 전개 상황, 미 연준의 연내 금리인상 기대 변화 등에 따라 등락하는 모습을 보였다. 10월 들어 북한 리스크 완화, 미 달러화 약세 지속, 국내경제의 견실한 성장세 등으로 하락세로 전환하였으며, 2017년 말에는 연중 최저치인 1,070.5원을 기록하였다. 2017년 중 미 달러화에 대한 원화의 가치는 2016년 말 대비 12.8% 상승하였다. 한편 원/엔 환율(100엔당)은 미 연준 금리인상 기대변화, 북한 리스크 등에 영향을 받으며 등락을 지속하다가 10월 들어 원화가 상대적으로 강세를 보이면서 하락하였다. 2017년 말 원화의 엔화에 대한 가치는 100엔당 949.2원으로 2016년 말 대비 9.1% 상승하였다. 원/달러 환율의 일중 및 전일대비 변동폭은 연평균 각각 5.5원, 4.4원을 기록하여 2016년(각각 7.5원 및 6.0원)에 비해 축소되었다. 외환스왑레이트 하락, 통화스왑금리 상승 외환스왑레이트(3개월물)는 외화자금수요 확대 등으로 2016년 말 대비 0.61% 포인트 하락한 -0.63%를 기록하였다. 통화스왑금리(3년물)는 경제지표 호조 등에 따른 국고채 금리 상승 등으로 2016년 말 대비 0.37% 포인트 상승한 1.57%를 기록하였다.' - '4. 국내 전력거래 형태 전망 앞서 살펴보았듯이, 구조개편을 단행한 외국의 전력거래 형태는 대체로 도매 전력거래 위주, 소매 전력거래 위주, 계약위주 등 세 가지로 구분되어 있다. 국내 전력산업의 현재 여건을 고려할 경우 가장 가능성이 높은 전력거래 형태는 도매 전력거래 위주의 거래형태일 것이다. 
만약 도매 전력거래 위주의 전력거래체계를 구축하는 경우 가격과 공급의 안정성에 상당한 영향을 미칠 것으로 보인다. 하지만 규제계약(Vesting Contract)을 포함하여 장단기 계약물량을 확대함으로써 현물시장의 가격 및 공급 변동성을 완화할 수 있는 대안이 존재한다. 만약 전력시장에서의 가격과 공급 안정성 측면을 염두에 둔다면 소매 전력거래 형태 또는 장단기 계약 위주의 전력거래 형태를 고려해 볼 수 있다. 소매 전력거래 형태에서는 주로 기존의 수직통합체제 또는 이미 발전과 판매가 송배전 부문으로부터 분리된 경우라면 발전과 판매의 통합과 이로 인한 규모의 확대에 따라 유효경쟁의 약화 가능성이 문제가 될 수 있다. 이렇게 통합된 기업의 소매시장에서의 시장지배력 문제가 존재하기 때문에 순수 판매회사 또는 신규기업과의 실질적인 경쟁이 가능한 방향으로 제도적 장치를 마련할 필요가 있다.' - 'Ⅲ. 우리나라 전력시장에서의 유연성 자원 1. 우리나라 전력시장의 현황 우리나라의 전력시장은 전력산업의 효율성 향상을 위하여 2001년 전력산업구조개편을 통해 독점사업자였던 한국전력공사로부터 발전부문을 분리시켜 발전자회사로 재편시키고, 발전부문에 대한 경쟁체제를 도입하면서 출범하였다. 우리나라 전력시장은 소비자가 직접 거래에 참여하는 소매시장이 아닌 발전회사와 판매회사가 참여하는 도매시장으로, 다수의 발전회사가 존재하는 공급측과는 달리 수요측은 단일 구매자인 한국전력공사만 존재하는 구매독점 형태로 이루어져 있다. 또한 설비용량이 20MW를 초과하는 상업용 발전기는 전력시장을 통해서만 거래가 가능한 의무적 시장이다.' - source_sentence: 해외 파생상품시장에서 거래되는 경우는 어떤 파생상품으로 분류할 수 있어 sentences: - '3) 연금 세제 ▩ 근퇴법 상 연금수령을 활성화하기 위한 강제조항이 부재한 상황에서 세제체계(소득세법)에 따라 수령방법이 결정되는 구조임. ○ 제도적으로 일시금 수령을 방지하거나 연금수령을 강제하는 규정이 부재한 상황에서 수령 방법은 주로 세금부담에 따라 결정될 수밖에 없음. ○ 일시금 수령을 경감시키기 위해 2016년 소득세법을 개정하였으나 개정된 소득세법 조차도 일시금과 연금수령 간 세금부담 차이가 크지 않아 일시금 수령을 방지하지 못하는 한계가 존재함. ▩ 연금과 일시금 수령 간 세제차이가 크지 않은 주요 원인은 일시금 수령시 제공하는 공제혜택이 크기 때문임. ○ 국내 퇴직연금시장에서 일시금 수령이 만연한 것은 연금수령 시 부담하는 세율이 높아서가 아니라 일시금 수령 시 부담하는 세율이 낮기 때문임. ○ 일시금 수령의 세금부담이 높지 않은 이유는 일시금 수령 시에도 다수의 공제혜택(근속연수공제+환산급여공제)이 제공되고 혜택의 규모도 크기 때문임. ○ 즉 연금수령 시에는 일시금 수령 시 부담하는 세금의 70%만을 부담하도록 규정하고(소득세법 제129조(원천징수세율)제1항제5의336)) 있으나, 일시금 수령 시 세금부담이 작기 때문에 연금수령 시 추가 제공되는 세제혜택의 크기가 미미하게 됨. ○ 특히 연금수령 기간에 따른 세금차이가 없어 연금으로 수령하더라도 단기간 연금(예, 10년)을 수령하는 유인을 제공함.' - □ 장외파생 권역별 대표 상품을 살펴보면 금리관련상품 중에는 금리스왑(Interest Rate Swap:IRS)이, 통화관련 상품 중에서는 통화선도계약이, 신용관련거래에서는 신용파산스왑(Credit Default Swap:CDS)이 가장 높은 비중을 차지<br>○ 금리스왑은 금리관련 장외파생상품시장의 91.2%를 차지하고 있으며, 신용관련상품의 경우 신용파산스왑과 총수익스왑(Total Return Swap:TRS)만이 거래<br>○ 통화선도는 대부분 NDF(Non-Deliverable Forward)의 형태로 거래<br>○ 장외파생상품 거래는 일대일 계약을 기반으로 하고 거래당사자의 높은 신용도를 요구하기 때문에 신용도가 낮은 개인 또는 금융기관은 참가하기가 어려운 실정<br>○ 특히, 신용관련거래는 다른 금융기관에 비해 상대적으로 높은 신용도를 가지고 있는 은행과 증권사를 통해서만 거래가 이루어지고 있는 것으로 파악 - '제5조 (파생상품) ① 이 법에서 ˝파생상품˝이란 다음 각 호의 어느 하나에 해당하는 계약상의 권리를 말한다. 1. 기초자산이나 기초자산의 가격ㆍ이자율ㆍ지표ㆍ단위 또는 이를 기초로 하는 지수 등에 의하여 산출된 금전등을 장래의 특정 시점에 인도할 것을 약정하는 계약 2. 당사자 어느 한쪽의 의사표시에 의하여 기초자산이나 기초자산의 가격ㆍ이자율ㆍ지표ㆍ단위 또는 이를 기초로 하는 지수 등에 의하여 산출된 금전등을 수수하는 거래를 성립시킬 수 있는 권리를 부여하는 것을 약정하는 계약 3. 장래의 일정기간 동안 미리 정한 가격으로 기초자산이나 기초자산의 가격ㆍ이자율ㆍ지표ㆍ단위 또는 이를 기초로 하는 지수 등에 의하여 산출된 금전등을 교환할 것을 약정하는 계약 ② 이 법에서 ˝장내파생상품˝이란 파생상품으로서 파생상품시장에서 거래되는 것 또는 해외 파생상품시장(파생상품시장과 유사한 시장으로서 해외에 있는 시장과 대통령령으로 정하는 해외 파생상품거래가 이루어지는 시장을 말한다)에서 거래되는 것을 말한다. ③ 이 법에서 ˝장외파생상품˝이란 파생상품으로서 장내파생상품이 아닌 것을 말한다. ④ 제1항 각 호의 어느 하나에 해당하는 계약 중 매매계약이 아닌 계약의 체결은 이 법을 적용함에 있어서 매매계약의 체결로 본다.' - source_sentence: 어디서 자금세탁 전제범죄에서 비영리단체 관련 자금세탁범죄를 찾아내는 방식을 선택했어 sentences: - 'IV. 비영리단체 분야의 범죄유형 및 사례 1. 국내사례와 범죄유형 □ 한국은 아직 비영리단체에 대한 명확한 개념 구분이 정립되어 있지 않은 실정으로 이에 따라 그동안 한국에서는 비영리단체 관련범죄에 대한 인식이 제대로 이루어지지 않음. -비영리단체 관련 범죄에 있어서도 비영리단체에 대한 명확한 개념 구분 없이 판결문이 작성되어 비영리단체 관련 자금세탁범죄를 조사하는데 있어서 큰 장애로 작용함. -국내 판례에서 비영리단체와 관련하여 자금세탁이라는 용어를 직접 사용한 경우는 없으며 이에 따라 부득이 자금세탁 전제범죄를 통하여 비영리단체 관련 자금세탁범죄를 조사하는 방법을 택함.' - 'VI. 비영리단체를 통한 테러자금조달방지를 위한 제도개선방안 □ FATF는 2001년 10월 테러자금조달에 대한 특별권고 8항(Special Recommendationson Terrorist Financing)을 통하여 비영리단체에 대한 관리감독을 강화하고 관련 법규를 정비할 것을 권고하였음. -우리나라에서 비영리단체를 통한 자금세탁을 이용해서 테러자금이 조달될 가능성은 매우 낮은 것으로 평가되나 자금세탁의 우회로를 차단하고, 또한 예방적인 조치를 취해 국제적으로 자금세탁을 통한 불법적인 테러자금조달 방지 노력에 적극 부응하기 위해서는 비영리단체에 대한 자금세탁 방지제도 도입에 적극 참여해야 할 것임.' - '(2) 전북의 풍력산업 약점 전북의 풍력산업은 내부환경관점에서 다음과 같은 약점을 보유하고 있다. 
첫째, 두산(창원), 유니슨(사천), 효성(창원), 한진(양산) 등 풍력터빈시스템업체는 모두 경남권에 위치하고 있으며, 현재 경남 이외 지역에서는 MW급 이상 풍력용 터빈시스템업체가 존재하지 않는다. 다만 최근 터빈의 대형화에 따라, 터빈의 무게 및 부피 등에 따른 운송에 한계가 존재하고 상당기간 고정식 형태의 해상풍력발전단지 구축이 중심이 될 것으로 판단됨에 따라, 부유식 해상풍력단지개발이 시작되기 전까지는 GW급 해상풍력단지 개발계획에 따라 서해안권으로 시스템업체의 이전가능성은 일부 존재한다. 둘째, 해상풍력 관련 연구/생산인력 부족 역시 장기적 해상풍력기술의 지속성 관점에서 시급한 문제이다. 현재 전북은 해상풍력터빈과 관련된 기술은 없으나, 휴먼컴퍼지트와 재료연구원 풍력핵심기술연구센터를 통해 해상풍력 블레이드 및 재료 등에 대한 기술이 축적되어 있다. 이외에 하부구조물 및 타워 관련 기술 역시 에드벡트가 석션버켓(suction bucket)방식의 하부구조물 관련 기술을 보유하고 있고 현재 군산공장을 리모델링 중인 CS윈드는 세계 최고수준의 타워제작기술을 확보하고 있다. 따라서 전북은 블레이드, 재료, 타워 및 하부구조물 등과 관련된 기술을 확보하고 있다.' - source_sentence: 전남지역의 석유와 화학제품은 왜 수출이 늘어나는 경향을 보였어 sentences: - '수출 증가세 지속 1/4분기 중 수출은 전년동기대비 증가흐름을 지속하였다. 품목별로 보면 석유제품, 석유화학, 철강, 선박, 반도체, 자동차 등 대다수 품목에서 증가하였다. 석유제품은 글로벌 경기회복에 따른 에너지 수요 증가와 국제유가 급등으로 수출단가가 높은 상승세를 지속하면서 증가하였다. 석유화학도 중국, 아세안을 중심으로 합성수지, 고무 등의 수출이 큰 폭 증가한 데다 고유가로 인한 수출가격도 동반 상승하면서 증가세를 이어갔다. 철강은 건설, 조선 등 글로벌 전방산업의 수요 증대, 원자재가격 상승 및 중국 감산 등에 따른 수출단가 상승 등에 힘입어 증가세를 이어갔다. 선박은 1/4분기 중 인도물량이 확대됨에 따라 증가하였다. 반도체는 자동차 등 전방산업의 견조한 수요가 이어지는 가운데 전년동기대비로 높은 단가가 지속되면서 증가하였다. 자동차는 차량용 반도체 수급차질이 지속되었음에도 불구하고 글로벌 경기회복 흐름에 따라 수요가 늘어나면서 전년동기대비 소폭 증가하였다. 모니터링 결과 향후 수출은 증가세가 지속될 것으로 전망되었다. 석유화학 및 석유정제는 수출단가 상승과 전방산업의 수요확대 기조가 이어지면서 증가할 전망이다. 철강은 주요국 경기회복과 중국, 인도 등의 인프라 투자 확대 등으로 양호한 흐름을 이어갈 전망이다. 반도체는 글로벌 스마트폰 수요 회복, 디지털 전환 기조 등으로 견조한 증가세를 지속할 것으로 보인다. 자동차는 차량용 반도체 공급차질이 점차 완화되고 미국, 신흥시장을 중심으로 수요회복이 본격화됨에 따라 소폭 증가할 전망이다. 선박은 친환경 선박수요 지속, 글로별 교역 신장 등에도 불구하고 2021년 2/4분기 집중되었던 인도물량의 기저효과로 인해 감소할 것으로 보인다.' - '(3) 금융기관 여수신 은행 수신 큰 폭 확대 은행 수신은 2019년에 비해 증가폭이 크게 확대되었다. 수시입출식예금은 불확실성 증대에 따른 가계 및 기업의 예비자금 확보 등의 영향으로 증가 규모가 전년대비 3배가량 확대되었다. 반면 정기예금은 예금금리 하락, 예대율 및 LCR 규제 완화에 따른 은행의 정기예금 유치 유인 축소 등에 기인하여 감소로 전환하였다. 자산운용사 수신은 증가폭이 축소되었다. MMF는 꾸준한 증가세를 유지하였으나 주식형 및 채권형 펀드는 개인투자자의 주식 직접투자증가, 신용증권에 대한 시장 경계감 확대 등으로 감소로 전환하였다. 또한 기타펀드는 2019년 중 일부 사모펀드 손실 및 환매중단 사태, 사모펀드 일반투자자 요건 강화 등으로 증가 규모가 절반 수준으로 축소되었다. 한편 신용협동기구 등 비은행예금취급기관 수신은 대체로 2019년과 비슷한 증가 규모를 나타내었다.' - '(2) 전남지역 2013년중 전남지역 수출은 전년대비 1.2% 감소로 전환하였다. 품목별로는 석유(+9.3% → +3.8%) 및 화학제품(+1.2% → +7.1%)이 중국 등 해외수요확대로 증가세를 지속하였으나 철강금속(+1.8% → -8.6%)은 글로벌 공급과잉 및 중국의 저가 철강수출 확대로, 선박(+7.6% → -49.2%)은 수주물량이 급격히 줄어들면서 감소로 전환하였다. 전남지역 수입은 원유, 화학제품, 철강금속 등의 수입이 줄면서 전년대비 7.4% 감소로 전환하였다.' --- # SentenceTransformer based on BAAI/bge-m3 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-m3](https://huggingface.co/BAAI/bge-m3). It maps sentences & paragraphs to a 1024-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. 
## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [BAAI/bge-m3](https://huggingface.co/BAAI/bge-m3) <!-- at revision 5617a9f61b028005a4858fdac845db406aefb181 --> - **Maximum Sequence Length:** 1024 tokens - **Output Dimensionality:** 1024 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 1024, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("seongil-dn/bge-m3-kor-retrieval-bs1024-checkpoint-236") # Run inference sentences = [ '전남지역의 석유와 화학제품은 왜 수출이 늘어나는 경향을 보였어', '(2) 전남지역\n2013년중 전남지역 수출은 전년대비 1.2% 감소로 전환하였다. 품목별로는 석유(+9.3% → +3.8%) 및 화학제품(+1.2% → +7.1%)이 중국 등 해외수요확대로 증가세를 지속하였으나 철강금속(+1.8% → -8.6%)은 글로벌 공급과잉 및 중국의 저가 철강수출 확대로, 선박(+7.6% → -49.2%)은 수주물량이 급격히 줄어들면서 감소로 전환하였다. 전남지역 수입은 원유, 화학제품, 철강금속 등의 수입이 줄면서 전년대비 7.4% 감소로 전환하였다.', '수출 증가세 지속\n1/4분기 중 수출은 전년동기대비 증가흐름을 지속하였다. 품목별로 보면 석유제품, 석유화학, 철강, 선박, 반도체, 자동차 등 대다수 품목에서 증가하였다. 석유제품은 글로벌 경기회복에 따른 에너지 수요 증가와 국제유가 급등으로 수출단가가 높은 상승세를 지속하면서 증가하였다. 석유화학도 중국, 아세안을 중심으로 합성수지, 고무 등의 수출이 큰 폭 증가한 데다 고유가로 인한 수출가격도 동반 상승하면서 증가세를 이어갔다. 철강은 건설, 조선 등 글로벌 전방산업의 수요 증대, 원자재가격 상승 및 중국 감산 등에 따른 수출단가 상승 등에 힘입어 증가세를 이어갔다. 선박은 1/4분기 중 인도물량이 확대됨에 따라 증가하였다. 반도체는 자동차 등 전방산업의 견조한 수요가 이어지는 가운데 전년동기대비로 높은 단가가 지속되면서 증가하였다. 자동차는 차량용 반도체 수급차질이 지속되었음에도 불구하고 글로벌 경기회복 흐름에 따라 수요가 늘어나면서 전년동기대비 소폭 증가하였다. 모니터링 결과 향후 수출은 증가세가 지속될 것으로 전망되었다. 석유화학 및 석유정제는 수출단가 상승과 전방산업의 수요확대 기조가 이어지면서 증가할 전망이다. 철강은 주요국 경기회복과 중국, 인도 등의 인프라 투자 확대 등으로 양호한 흐름을 이어갈 전망이다. 반도체는 글로벌 스마트폰 수요 회복, 디지털 전환 기조 등으로 견조한 증가세를 지속할 것으로 보인다. 자동차는 차량용 반도체 공급차질이 점차 완화되고 미국, 신흥시장을 중심으로 수요회복이 본격화됨에 따라 소폭 증가할 전망이다. 선박은 친환경 선박수요 지속, 글로별 교역 신장 등에도 불구하고 2021년 2/4분기 집중되었던 인도물량의 기저효과로 인해 감소할 것으로 보인다.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 1024] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? 
You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Hyperparameters #### Non-Default Hyperparameters - `per_device_train_batch_size`: 512 - `learning_rate`: 3e-05 - `num_train_epochs`: 5 - `warmup_ratio`: 0.05 - `fp16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: no - `prediction_loss_only`: True - `per_device_train_batch_size`: 512 - `per_device_eval_batch_size`: 8 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 3e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 5 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.05 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: True - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - 
`include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `eval_use_gather_object`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs <details><summary>Click to expand</summary> | Epoch | Step | Training Loss | |:------:|:----:|:-------------:| | 0.0085 | 1 | 2.0476 | | 0.0171 | 2 | 2.0595 | | 0.0256 | 3 | 2.0267 | | 0.0342 | 4 | 2.0971 | | 0.0427 | 5 | 2.2171 | | 0.0513 | 6 | 2.287 | | 0.0598 | 7 | 2.0867 | | 0.0684 | 8 | 1.9498 | | 0.0769 | 9 | 1.569 | | 0.0855 | 10 | 1.3313 | | 0.0940 | 11 | 1.4122 | | 0.1026 | 12 | 1.3425 | | 0.1111 | 13 | 1.1936 | | 0.1197 | 14 | 0.8012 | | 0.1282 | 15 | 0.8862 | | 0.1368 | 16 | 1.193 | | 0.1453 | 17 | 0.9771 | | 0.1538 | 18 | 0.3887 | | 0.1624 | 19 | 0.363 | | 0.1709 | 20 | 0.3092 | | 0.1795 | 21 | 0.2692 | | 0.1880 | 22 | 0.2386 | | 0.1966 | 23 | 0.2266 | | 0.2051 | 24 | 0.233 | | 0.2137 | 25 | 0.2214 | | 0.2222 | 26 | 0.2038 | | 0.2308 | 27 | 0.2015 | | 0.2393 | 28 | 0.1772 | | 0.2479 | 29 | 0.1697 | | 0.2564 | 30 | 0.1718 | | 0.2650 | 31 | 0.2456 | | 0.2735 | 32 | 0.5238 | | 0.2821 | 33 | 0.5308 | | 0.2906 | 34 | 0.5111 | | 0.2991 | 35 | 0.3931 | | 0.3077 | 36 | 0.3414 | | 0.3162 | 37 | 0.2704 | | 0.3248 | 38 | 0.2949 | | 0.3333 | 39 | 0.3082 | | 0.3419 | 40 | 0.3755 | | 0.3504 | 41 | 0.3127 | | 0.3590 | 42 | 0.3756 | | 0.3675 | 43 | 0.3564 | | 0.3761 | 44 | 0.3905 | | 0.3846 | 45 | 0.377 | | 0.3932 | 46 | 0.3043 | | 0.4017 | 47 | 0.3237 | | 0.4103 | 48 | 0.4035 | | 0.4188 | 49 | 0.4522 | | 0.4274 | 50 | 0.4392 | | 0.4359 | 51 | 0.4482 | | 0.4444 | 52 | 0.3586 | | 0.4530 | 53 | 0.3154 | | 0.4615 | 54 | 0.4053 | | 0.4701 | 55 | 0.5846 | | 0.4786 | 56 | 0.4372 | | 0.4872 | 57 | 0.6201 | | 0.4957 | 58 | 0.5278 | | 0.5043 | 59 | 0.4844 | | 0.5128 | 60 | 0.5817 | | 0.5214 | 61 | 0.3765 | | 0.5299 | 62 | 0.4785 | | 0.5385 | 63 | 0.5724 | | 0.5470 | 64 | 0.5375 | | 0.5556 | 65 | 0.5362 | | 0.5641 | 66 | 0.4731 | | 0.5726 | 67 | 0.4514 | | 0.5812 | 68 | 0.4563 | | 0.5897 | 69 | 0.4198 | | 0.5983 | 70 | 0.4086 | | 0.6068 | 71 | 0.3612 | | 0.6154 | 72 | 0.3463 | | 0.6239 | 73 | 0.6261 | | 0.6325 | 74 | 0.6283 | | 0.6410 | 75 | 0.4635 | | 0.6496 | 76 | 0.463 | | 0.6581 | 77 | 0.4075 | | 0.6667 | 78 | 0.3797 | | 0.6752 | 79 | 0.2769 | | 0.6838 | 80 | 0.3353 | | 0.6923 | 81 | 0.2295 | | 0.7009 | 82 | 0.4316 | | 0.7094 | 83 | 0.9861 | | 0.7179 | 84 | 0.9684 | | 0.7265 | 85 | 0.9883 | | 0.7350 | 86 | 0.8865 | | 0.7436 | 87 | 0.8248 | | 0.7521 | 88 | 0.7973 | | 0.7607 | 89 | 0.8465 | | 0.7692 | 90 | 0.7664 | | 0.7778 | 91 | 0.7643 | | 0.7863 | 92 | 0.7665 | | 0.7949 | 93 | 0.7348 | | 0.8034 | 94 | 0.7493 | | 0.8120 | 95 | 0.6115 | | 0.8205 | 96 | 0.6233 | | 0.8291 | 97 | 0.6435 | | 0.8376 | 98 | 0.5581 | | 0.8462 | 99 | 0.542 | | 0.8547 | 100 | 0.5571 | | 0.8632 | 101 | 0.502 | | 0.8718 | 102 | 0.5375 | | 0.8803 | 103 | 0.4952 | | 0.8889 | 104 | 0.4873 | | 0.8974 | 105 | 0.4599 | | 0.9060 | 106 | 0.4536 | | 0.9145 | 107 | 0.4479 | | 0.9231 | 108 | 0.384 | | 0.9316 | 109 | 0.3523 | | 0.9402 | 110 | 0.369 | | 0.9487 | 111 | 0.3422 | | 0.9573 | 112 | 0.3698 | | 0.9658 | 113 | 0.3625 | | 0.9744 | 114 | 0.3736 | | 0.9829 | 115 | 0.4313 | | 0.9915 | 116 | 0.4605 | | 1.0 | 117 | 0.2948 | | 1.0085 | 118 | 0.7391 | | 1.0171 | 119 | 0.6622 | | 1.0256 | 120 | 0.6917 | | 1.0342 | 121 | 0.7963 | | 1.0427 | 122 | 0.7815 | | 1.0513 | 123 | 0.6719 | | 1.0598 | 124 | 0.6098 | | 
1.0684 | 125 | 0.549 | | 1.0769 | 126 | 0.7212 | | 1.0855 | 127 | 0.6381 | | 1.0940 | 128 | 0.7424 | | 1.1026 | 129 | 0.6822 | | 1.1111 | 130 | 0.6921 | | 1.1197 | 131 | 0.5022 | | 1.1282 | 132 | 0.578 | | 1.1368 | 133 | 0.8139 | | 1.1453 | 134 | 0.6167 | | 1.1538 | 135 | 0.1836 | | 1.1624 | 136 | 0.1853 | | 1.1709 | 137 | 0.1628 | | 1.1795 | 138 | 0.1464 | | 1.1880 | 139 | 0.1308 | | 1.1966 | 140 | 0.1273 | | 1.2051 | 141 | 0.1414 | | 1.2137 | 142 | 0.138 | | 1.2222 | 143 | 0.1268 | | 1.2308 | 144 | 0.1348 | | 1.2393 | 145 | 0.111 | | 1.2479 | 146 | 0.1069 | | 1.2564 | 147 | 0.1122 | | 1.2650 | 148 | 0.1703 | | 1.2735 | 149 | 0.405 | | 1.2821 | 150 | 0.3876 | | 1.2906 | 151 | 0.378 | | 1.2991 | 152 | 0.2633 | | 1.3077 | 153 | 0.2263 | | 1.3162 | 154 | 0.1748 | | 1.3248 | 155 | 0.2016 | | 1.3333 | 156 | 0.2166 | | 1.3419 | 157 | 0.2798 | | 1.3504 | 158 | 0.2295 | | 1.3590 | 159 | 0.2805 | | 1.3675 | 160 | 0.2619 | | 1.3761 | 161 | 0.3006 | | 1.3846 | 162 | 0.2843 | | 1.3932 | 163 | 0.2244 | | 1.4017 | 164 | 0.2361 | | 1.4103 | 165 | 0.3025 | | 1.4188 | 166 | 0.3443 | | 1.4274 | 167 | 0.3329 | | 1.4359 | 168 | 0.3467 | | 1.4444 | 169 | 0.2748 | | 1.4530 | 170 | 0.2304 | | 1.4615 | 171 | 0.3125 | | 1.4701 | 172 | 0.478 | | 1.4786 | 173 | 0.3085 | | 1.4872 | 174 | 0.4337 | | 1.4957 | 175 | 0.3936 | | 1.5043 | 176 | 0.3455 | | 1.5128 | 177 | 0.4205 | | 1.5214 | 178 | 0.2752 | | 1.5299 | 179 | 0.36 | | 1.5385 | 180 | 0.4347 | | 1.5470 | 181 | 0.3949 | | 1.5556 | 182 | 0.4072 | | 1.5641 | 183 | 0.3633 | | 1.5726 | 184 | 0.3532 | | 1.5812 | 185 | 0.3451 | | 1.5897 | 186 | 0.3242 | | 1.5983 | 187 | 0.3122 | | 1.6068 | 188 | 0.2845 | | 1.6154 | 189 | 0.2815 | | 1.6239 | 190 | 6.9159 | | 1.6325 | 191 | 7.9604 | | 1.6410 | 192 | 6.5821 | | 1.6496 | 193 | 3.9177 | | 1.6581 | 194 | 1.6951 | | 1.6667 | 195 | 0.5367 | | 1.6752 | 196 | 0.2935 | | 1.6838 | 197 | 0.3295 | | 1.6923 | 198 | 0.2212 | | 1.7009 | 199 | 0.335 | | 1.7094 | 200 | 0.7829 | | 1.7179 | 201 | 0.7884 | | 1.7265 | 202 | 0.7921 | | 1.7350 | 203 | 0.7342 | | 1.7436 | 204 | 0.6092 | | 1.7521 | 205 | 0.6014 | | 1.7607 | 206 | 0.6414 | | 1.7692 | 207 | 0.5842 | | 1.7778 | 208 | 0.5916 | | 1.7863 | 209 | 0.5993 | | 1.7949 | 210 | 0.5658 | | 1.8034 | 211 | 0.6013 | | 1.8120 | 212 | 0.4769 | | 1.8205 | 213 | 0.4801 | | 1.8291 | 214 | 0.5087 | | 1.8376 | 215 | 0.436 | | 1.8462 | 216 | 0.4398 | | 1.8547 | 217 | 0.4391 | | 1.8632 | 218 | 0.419 | | 1.8718 | 219 | 0.4338 | | 1.8803 | 220 | 0.395 | | 1.8889 | 221 | 0.4063 | | 1.8974 | 222 | 0.375 | | 1.9060 | 223 | 0.3655 | | 1.9145 | 224 | 0.3637 | | 1.9231 | 225 | 0.3098 | | 1.9316 | 226 | 0.2782 | | 1.9402 | 227 | 0.2941 | | 1.9487 | 228 | 0.275 | | 1.9573 | 229 | 0.3018 | | 1.9658 | 230 | 0.2971 | | 1.9744 | 231 | 0.3108 | | 1.9829 | 232 | 0.3808 | | 1.9915 | 233 | 0.4067 | | 2.0 | 234 | 0.2424 | | 2.0085 | 235 | 0.6453 | | 2.0171 | 236 | 0.5577 | </details> ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.2.1 - Transformers: 4.44.2 - PyTorch: 2.3.1+cu121 - Accelerate: 1.1.1 - Datasets: 2.21.0 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### 
CachedMultipleNegativesRankingLoss ```bibtex @misc{gao2021scaling, title={Scaling Deep Contrastive Learning Batch Size under Memory Limited Setup}, author={Luyu Gao and Yunyi Zhang and Jiawei Han and Jamie Callan}, year={2021}, eprint={2101.06983}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
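For reference, a comparable fine-tuning run can be assembled from the non-default hyperparameters listed above (CachedMultipleNegativesRankingLoss, per-device batch size 512, learning rate 3e-5, 5 epochs, warmup ratio 0.05, fp16, no-duplicates batch sampler). The sketch below uses the Sentence Transformers 3.x trainer API that this card's framework versions imply; it is a hedged reconstruction, not the author's published script, and the two placeholder rows stand in for the actual 482,743-pair (query, passage) training set, which the card does not publish.

```python
# Sketch of a comparable fine-tuning setup, assembled from the
# hyperparameters listed in this card. The placeholder rows must be
# replaced by the real 482,743 (query, passage) pairs, which the card
# does not publish.
from datasets import Dataset
from sentence_transformers import SentenceTransformer, SentenceTransformerTrainer
from sentence_transformers.losses import CachedMultipleNegativesRankingLoss
from sentence_transformers.training_args import (
    BatchSamplers,
    SentenceTransformerTrainingArguments,
)

model = SentenceTransformer("BAAI/bge-m3")
train_dataset = Dataset.from_dict({
    "anchor": ["언제 청년층 생산가능인구 수가 9,149,000명으로 드러났어"],
    "positive": ["제2장 청년층 노동시장 동향 및 청년내일채움공제의 성과 개괄 ..."],
})  # placeholder (query, passage) rows in the format the loss expects

# In-batch negatives with gradient caching, so the effective batch size
# of 512 fits in GPU memory.
loss = CachedMultipleNegativesRankingLoss(model)

args = SentenceTransformerTrainingArguments(
    output_dir="bge-m3-kor-retrieval",
    per_device_train_batch_size=512,
    learning_rate=3e-5,
    num_train_epochs=5,
    warmup_ratio=0.05,
    fp16=True,
    batch_sampler=BatchSamplers.NO_DUPLICATES,
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    loss=loss,
)
trainer.train()
```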
null
Non_BioNLP
{"base_model": "BAAI/bge-m3", "library_name": "sentence-transformers", "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:482743", "loss:CachedMultipleNegativesRankingLoss"], "widget": [{"source_sentence": "언제 청년층 생산가능인구 수가 9,149,000명으로 드러났어", "sentences": ["제2장 청년층 노동시장 동향 및 청년내일채움공제의 성과 개괄\n□ 본 장에서는 청년내일채움공제 사업의 도입 배경으로 작용한 청년층 노동시장의 현황을 살펴보고, 세부 성과에 앞서 청년내일채움공제가 현 청년고용 실태에 미친 개괄적 성과를 평가하고자 한다\n제1절 청년층 노동시장 동향\n1. 청년층 인구구조 변화 추이\n□ 고용률과 실업률, 취업자 수 등의 고용지표는 경제 및 노동시장의 상황 뿐 만 아니라, 인구구조 변화에 의해 영향을 받는 만큼, 청년층의 노동시장 동향을 파악하기 위해서는 청년층 인구구조의 변화양상을 이해할 필요가 있음 □ 청년층 인구구조 변화 추이\n○ 전체 생산가능인구가 여전히 증가추세에 있는 것에 비해, 청년층(15∼29세) 생산가능인구는 1990년을 기점으로 감소추세에 있음. 2018년 청년층 생산가능인구는 전년대비 133천명 감소한 9,149천명임 - 15∼19세, 20∼24세 연령대의 생산가능인구는 감소추세에 있으나, 25∼29세 생산가능인구(3,432천명, 전년대비 96천명 증가)는 증가추세에 있으며 향후 2∼3년간 증가추세가 지속될 것으로 전망됨\n○ 청년층 경제활동인구는 2000년대 지속적으로 감소하다가 2013∼2016년 기간 증가추세를 보였으나, 2017년 이후 감소하고 있음. 2018년 청년층 경제활동인구는 전년대비 21천명 감소한 4,312천명을 기록하였음", "2. 인구구조변화의 현황<br>UN 인구국 자료를 분석해보면 2010년 인구와 2050년의 중간 수준 예측치를 기준으로 인구 1,000만 명 이상의 국가이면서 2050년까지 생산가능인구(15세~64세)가 감소하는 국가는 198개국중에서 19개국이다. 이 중에서 우리나라는 생산 가능인구(15세~64세) 감소율이 세계 6위 수준을 기록할 것으로 분석되었고, 이에 따라 생산가능 인구가 2010년 대비 2050년에는 27%가 줄어들 것으로 예상된다. 이에 반하여 노인인구는 236%가 증가할 것으로 예상된다. 우리나라는 생산가능인구 감소율과 노인인구(65세 이상) 증가율이 매우 높은 나라 중 하나이며, 동시에 유소년(0~14세) 인구의 감소도 22% 정도 될 것으로 예상하고 있다. <br>UN의 인구국(局) 자료인 [표 1]을 보면, 첫째 현재 우리나라를 포함하여 선진국에서조차도 현재 인구감소를 겪어본 국가가 거의 없어 이것이 가져다 줄 영향에 대한 경험적 지식이 거의 전무하다는 점이다. <br>둘째, 우리나라의 경우에는 무엇보다도 인구감소기로의 전환이 급격하게 이루어지고 있다. 따라서 우리 스스로 이의 긍정적ㆍ부정적 영향을 연역적으로 추론해볼 필요가 있다. 이를 위해서는 우리 사회의 전체 변화를 거시적으로 바라보고 정책 수단에 따라 가져다 줄 다양한 변화를 살펴볼 수 있는 시뮬레이션(simulation) 도구 및 정책 개발 그리고 집행 체제가 필요하다고 할 수 있다.", "7. 해외사례\n플랫폼 노동관계에 특화된 산재보험제도를 별도로 규정하고 있는 포괄적 입법례는 아직 보이지 않는다. 다만, 플랫폼 노동에 관한 미국의 앨라배마 주법(SB363, 2018.3.13. 제정)에서는 플랫폼 노동자들에게 일정한 수준의 복지를 허용하기 위한 법적 기초를 만들고 있다(부표 1 참조). SB363은 플랫폼 노동자들의 이동성을 고려한 ‘이동 가능 복지제도’(portable benefit plans)를 플랫폼 사업주들이 만들면, 이 복지제도를 통해 다수의 플랫폼과 계약을 맺는 플랫폼 노동자들이 복수 사업주 혹은 사업주의 변경에도 불구하고 복지제도를 유지하면서 그로부터 복지수혜를 얻을 수 있도록 하고 있다. 이때의 복지제도는 건강보험, 재해보험, 퇴직수당제도 등으로 구성될 수 있고, 플랫폼 노동자가 복지계좌를 개설하면 여기에 플랫폼 사업주가 복지기여금을 납부하는 방식을 취하고 있다. 국가에 의한 사회보험제도가 아니고, 플랫폼 사업주에 의한 자발적 복지제도의 기초를 구성한 것이기는 하지만, 지방정부가 이를 관리하도록 함으로써 공정성과 객관성을 담보한다. 한국의 산재보험제도에 대입시켜 비교할 수 있는 입법례가 아니기는 하지만, 플랫폼 사업주로 하여금 플랫폼 노동자에 대한 일정한 복지제도 형성을 촉구하고 있다는 점, 해당 복지제도에 대하여 플랫폼이 일정한 기여금을 납부하도록 하고 있다는 점 등에서 하나의 제도적 모델로 시사가능하다. 한편, 미국의 플로리다, 인디아나, 아이오와, 켄터키, 테네시, 텍사스, 유타주 등에서는 플랫폼노동자(경우에 따라서는 가사노동자에 국한)의 의미를 규정하면서, 주법상 재해보험제도 및(또는) 실업급여제도의 가입자가 될 수 있도록 하고 있다."]}, {"source_sentence": "도매 전력 매매를 주로 한 전력거래체계를 만들었을 때 어느 부문이 안정성 영향을 받을 수 있어", "sentences": ["(4) 외환시장\n원/달러 환율 2016년 말 대비 하락\n원/달러 환율은 연초 미 보호무역주의에 대한 우려, 수출 호조, 외국인 증권투자자금 유입 등으로 3월 말까지 상당 폭 하락하였다. 이후 북한 관련 리스크 전개 상황, 미 연준의 연내 금리인상 기대 변화 등에 따라 등락하는 모습을 보였다. 10월 들어 북한 리스크 완화, 미 달러화 약세 지속, 국내경제의 견실한 성장세 등으로 하락세로 전환하였으며, 2017년 말에는 연중 최저치인 1,070.5원을 기록하였다. 2017년 중 미 달러화에 대한 원화의 가치는 2016년 말 대비 12.8% 상승하였다. 한편 원/엔 환율(100엔당)은 미 연준 금리인상 기대변화, 북한 리스크 등에 영향을 받으며 등락을 지속하다가 10월 들어 원화가 상대적으로 강세를 보이면서 하락하였다. 2017년 말 원화의 엔화에 대한 가치는 100엔당 949.2원으로 2016년 말 대비 9.1% 상승하였다. 원/달러 환율의 일중 및 전일대비 변동폭은 연평균 각각 5.5원, 4.4원을 기록하여 2016년(각각 7.5원 및 6.0원)에 비해 축소되었다. 외환스왑레이트 하락, 통화스왑금리 상승\n외환스왑레이트(3개월물)는 외화자금수요 확대 등으로 2016년 말 대비 0.61% 포인트 하락한 -0.63%를 기록하였다. 통화스왑금리(3년물)는 경제지표 호조 등에 따른 국고채 금리 상승 등으로 2016년 말 대비 0.37% 포인트 상승한 1.57%를 기록하였다.", "4. 국내 전력거래 형태 전망\n앞서 살펴보았듯이, 구조개편을 단행한 외국의 전력거래 형태는 대체로 도매 전력거래 위주, 소매 전력거래 위주, 계약위주 등 세 가지로 구분되어 있다. 
국내 전력산업의 현재 여건을 고려할 경우 가장 가능성이 높은 전력거래 형태는 도매 전력거래 위주의 거래형태일 것이다. 만약 도매 전력거래 위주의 전력거래체계를 구축하는 경우 가격과 공급의 안정성에 상당한 영향을 미칠 것으로 보인다. 하지만 규제계약(Vesting Contract)을 포함하여 장단기 계약물량을 확대함으로써 현물시장의 가격 및 공급 변동성을 완화할 수 있는 대안이 존재한다. 만약 전력시장에서의 가격과 공급 안정성 측면을 염두에 둔다면 소매 전력거래 형태 또는 장단기 계약 위주의 전력거래 형태를 고려해 볼 수 있다. 소매 전력거래 형태에서는 주로 기존의 수직통합체제 또는 이미 발전과 판매가 송배전 부문으로부터 분리된 경우라면 발전과 판매의 통합과 이로 인한 규모의 확대에 따라 유효경쟁의 약화 가능성이 문제가 될 수 있다. 이렇게 통합된 기업의 소매시장에서의 시장지배력 문제가 존재하기 때문에 순수 판매회사 또는 신규기업과의 실질적인 경쟁이 가능한 방향으로 제도적 장치를 마련할 필요가 있다.", "Ⅲ. 우리나라 전력시장에서의 유연성 자원\n1. 우리나라 전력시장의 현황\n우리나라의 전력시장은 전력산업의 효율성 향상을 위하여 2001년 전력산업구조개편을 통해 독점사업자였던 한국전력공사로부터 발전부문을 분리시켜 발전자회사로 재편시키고, 발전부문에 대한 경쟁체제를 도입하면서 출범하였다. 우리나라 전력시장은 소비자가 직접 거래에 참여하는 소매시장이 아닌 발전회사와 판매회사가 참여하는 도매시장으로, 다수의 발전회사가 존재하는 공급측과는 달리 수요측은 단일 구매자인 한국전력공사만 존재하는 구매독점 형태로 이루어져 있다. 또한 설비용량이 20MW를 초과하는 상업용 발전기는 전력시장을 통해서만 거래가 가능한 의무적 시장이다."]}, {"source_sentence": "해외 파생상품시장에서 거래되는 경우는 어떤 파생상품으로 분류할 수 있어", "sentences": ["3) 연금 세제\n▩ 근퇴법 상 연금수령을 활성화하기 위한 강제조항이 부재한 상황에서 세제체계(소득세법)에 따라 수령방법이 결정되는 구조임. ○ 제도적으로 일시금 수령을 방지하거나 연금수령을 강제하는 규정이 부재한 상황에서 수령 방법은 주로 세금부담에 따라 결정될 수밖에 없음. ○ 일시금 수령을 경감시키기 위해 2016년 소득세법을 개정하였으나 개정된 소득세법 조차도 일시금과 연금수령 간 세금부담 차이가 크지 않아 일시금 수령을 방지하지 못하는 한계가 존재함. ▩ 연금과 일시금 수령 간 세제차이가 크지 않은 주요 원인은 일시금 수령시 제공하는 공제혜택이 크기 때문임. ○ 국내 퇴직연금시장에서 일시금 수령이 만연한 것은 연금수령 시 부담하는 세율이 높아서가 아니라 일시금 수령 시 부담하는 세율이 낮기 때문임. ○ 일시금 수령의 세금부담이 높지 않은 이유는 일시금 수령 시에도 다수의 공제혜택(근속연수공제+환산급여공제)이 제공되고 혜택의 규모도 크기 때문임. ○ 즉 연금수령 시에는 일시금 수령 시 부담하는 세금의 70%만을 부담하도록 규정하고(소득세법 제129조(원천징수세율)제1항제5의336)) 있으나, 일시금 수령 시 세금부담이 작기 때문에 연금수령 시 추가 제공되는 세제혜택의 크기가 미미하게 됨. ○ 특히 연금수령 기간에 따른 세금차이가 없어 연금으로 수령하더라도 단기간 연금(예, 10년)을 수령하는 유인을 제공함.", "□ 장외파생 권역별 대표 상품을 살펴보면 금리관련상품 중에는 금리스왑(Interest Rate Swap:IRS)이, 통화관련 상품 중에서는 통화선도계약이, 신용관련거래에서는 신용파산스왑(Credit Default Swap:CDS)이 가장 높은 비중을 차지<br>○ 금리스왑은 금리관련 장외파생상품시장의 91.2%를 차지하고 있으며, 신용관련상품의 경우 신용파산스왑과 총수익스왑(Total Return Swap:TRS)만이 거래<br>○ 통화선도는 대부분 NDF(Non-Deliverable Forward)의 형태로 거래<br>○ 장외파생상품 거래는 일대일 계약을 기반으로 하고 거래당사자의 높은 신용도를 요구하기 때문에 신용도가 낮은 개인 또는 금융기관은 참가하기가 어려운 실정<br>○ 특히, 신용관련거래는 다른 금융기관에 비해 상대적으로 높은 신용도를 가지고 있는 은행과 증권사를 통해서만 거래가 이루어지고 있는 것으로 파악", "제5조 (파생상품) ① 이 법에서 ˝파생상품˝이란 다음 각 호의 어느 하나에 해당하는 계약상의 권리를 말한다.\n1. 기초자산이나 기초자산의 가격ㆍ이자율ㆍ지표ㆍ단위 또는 이를 기초로 하는 지수 등에 의하여 산출된 금전등을 장래의 특정 시점에 인도할 것을 약정하는 계약\n2. 당사자 어느 한쪽의 의사표시에 의하여 기초자산이나 기초자산의 가격ㆍ이자율ㆍ지표ㆍ단위 또는 이를 기초로 하는 지수 등에 의하여 산출된 금전등을 수수하는 거래를 성립시킬 수 있는 권리를 부여하는 것을 약정하는 계약\n3. 장래의 일정기간 동안 미리 정한 가격으로 기초자산이나 기초자산의 가격ㆍ이자율ㆍ지표ㆍ단위 또는 이를 기초로 하는 지수 등에 의하여 산출된 금전등을 교환할 것을 약정하는 계약\n② 이 법에서 ˝장내파생상품˝이란 파생상품으로서 파생상품시장에서 거래되는 것 또는 해외 파생상품시장(파생상품시장과 유사한 시장으로서 해외에 있는 시장과 대통령령으로 정하는 해외 파생상품거래가 이루어지는 시장을 말한다)에서 거래되는 것을 말한다. ③ 이 법에서 ˝장외파생상품˝이란 파생상품으로서 장내파생상품이 아닌 것을 말한다. ④ 제1항 각 호의 어느 하나에 해당하는 계약 중 매매계약이 아닌 계약의 체결은 이 법을 적용함에 있어서 매매계약의 체결로 본다."]}, {"source_sentence": "어디서 자금세탁 전제범죄에서 비영리단체 관련 자금세탁범죄를 찾아내는 방식을 선택했어", "sentences": ["IV. 비영리단체 분야의 범죄유형 및 사례\n1. 국내사례와 범죄유형\n□ 한국은 아직 비영리단체에 대한 명확한 개념 구분이 정립되어 있지 않은 실정으로 이에 따라 그동안 한국에서는 비영리단체 관련범죄에 대한 인식이 제대로 이루어지지 않음. -비영리단체 관련 범죄에 있어서도 비영리단체에 대한 명확한 개념 구분 없이 판결문이 작성되어 비영리단체 관련 자금세탁범죄를 조사하는데 있어서 큰 장애로 작용함. -국내 판례에서 비영리단체와 관련하여 자금세탁이라는 용어를 직접 사용한 경우는 없으며 이에 따라 부득이 자금세탁 전제범죄를 통하여 비영리단체 관련 자금세탁범죄를 조사하는 방법을 택함.", "VI. 비영리단체를 통한 테러자금조달방지를 위한 제도개선방안\n□ FATF는 2001년 10월 테러자금조달에 대한 특별권고 8항(Special Recommendationson Terrorist Financing)을 통하여 비영리단체에 대한 관리감독을 강화하고 관련 법규를 정비할 것을 권고하였음. -우리나라에서 비영리단체를 통한 자금세탁을 이용해서 테러자금이 조달될 가능성은 매우 낮은 것으로 평가되나 자금세탁의 우회로를 차단하고, 또한 예방적인 조치를 취해 국제적으로 자금세탁을 통한 불법적인 테러자금조달 방지 노력에 적극 부응하기 위해서는 비영리단체에 대한 자금세탁 방지제도 도입에 적극 참여해야 할 것임.", "(2) 전북의 풍력산업 약점\n전북의 풍력산업은 내부환경관점에서 다음과 같은 약점을 보유하고 있다. 
첫째, 두산(창원), 유니슨(사천), 효성(창원), 한진(양산) 등 풍력터빈시스템업체는 모두 경남권에 위치하고 있으며, 현재 경남 이외 지역에서는 MW급 이상 풍력용 터빈시스템업체가 존재하지 않는다. 다만 최근 터빈의 대형화에 따라, 터빈의 무게 및 부피 등에 따른 운송에 한계가 존재하고 상당기간 고정식 형태의 해상풍력발전단지 구축이 중심이 될 것으로 판단됨에 따라, 부유식 해상풍력단지개발이 시작되기 전까지는 GW급 해상풍력단지 개발계획에 따라 서해안권으로 시스템업체의 이전가능성은 일부 존재한다. 둘째, 해상풍력 관련 연구/생산인력 부족 역시 장기적 해상풍력기술의 지속성 관점에서 시급한 문제이다. 현재 전북은 해상풍력터빈과 관련된 기술은 없으나, 휴먼컴퍼지트와 재료연구원 풍력핵심기술연구센터를 통해 해상풍력 블레이드 및 재료 등에 대한 기술이 축적되어 있다. 이외에 하부구조물 및 타워 관련 기술 역시 에드벡트가 석션버켓(suction bucket)방식의 하부구조물 관련 기술을 보유하고 있고 현재 군산공장을 리모델링 중인 CS윈드는 세계 최고수준의 타워제작기술을 확보하고 있다. 따라서 전북은 블레이드, 재료, 타워 및 하부구조물 등과 관련된 기술을 확보하고 있다."]}, {"source_sentence": "전남지역의 석유와 화학제품은 왜 수출이 늘어나는 경향을 보였어", "sentences": ["수출 증가세 지속\n1/4분기 중 수출은 전년동기대비 증가흐름을 지속하였다. 품목별로 보면 석유제품, 석유화학, 철강, 선박, 반도체, 자동차 등 대다수 품목에서 증가하였다. 석유제품은 글로벌 경기회복에 따른 에너지 수요 증가와 국제유가 급등으로 수출단가가 높은 상승세를 지속하면서 증가하였다. 석유화학도 중국, 아세안을 중심으로 합성수지, 고무 등의 수출이 큰 폭 증가한 데다 고유가로 인한 수출가격도 동반 상승하면서 증가세를 이어갔다. 철강은 건설, 조선 등 글로벌 전방산업의 수요 증대, 원자재가격 상승 및 중국 감산 등에 따른 수출단가 상승 등에 힘입어 증가세를 이어갔다. 선박은 1/4분기 중 인도물량이 확대됨에 따라 증가하였다. 반도체는 자동차 등 전방산업의 견조한 수요가 이어지는 가운데 전년동기대비로 높은 단가가 지속되면서 증가하였다. 자동차는 차량용 반도체 수급차질이 지속되었음에도 불구하고 글로벌 경기회복 흐름에 따라 수요가 늘어나면서 전년동기대비 소폭 증가하였다. 모니터링 결과 향후 수출은 증가세가 지속될 것으로 전망되었다. 석유화학 및 석유정제는 수출단가 상승과 전방산업의 수요확대 기조가 이어지면서 증가할 전망이다. 철강은 주요국 경기회복과 중국, 인도 등의 인프라 투자 확대 등으로 양호한 흐름을 이어갈 전망이다. 반도체는 글로벌 스마트폰 수요 회복, 디지털 전환 기조 등으로 견조한 증가세를 지속할 것으로 보인다. 자동차는 차량용 반도체 공급차질이 점차 완화되고 미국, 신흥시장을 중심으로 수요회복이 본격화됨에 따라 소폭 증가할 전망이다. 선박은 친환경 선박수요 지속, 글로별 교역 신장 등에도 불구하고 2021년 2/4분기 집중되었던 인도물량의 기저효과로 인해 감소할 것으로 보인다.", "(3) 금융기관 여수신\n은행 수신 큰 폭 확대\n은행 수신은 2019년에 비해 증가폭이 크게 확대되었다. 수시입출식예금은 불확실성 증대에 따른 가계 및 기업의 예비자금 확보 등의 영향으로 증가 규모가 전년대비 3배가량 확대되었다. 반면 정기예금은 예금금리 하락, 예대율 및 LCR 규제 완화에 따른 은행의 정기예금 유치 유인 축소 등에 기인하여 감소로 전환하였다. 자산운용사 수신은 증가폭이 축소되었다. MMF는 꾸준한 증가세를 유지하였으나 주식형 및 채권형 펀드는 개인투자자의 주식 직접투자증가, 신용증권에 대한 시장 경계감 확대 등으로 감소로 전환하였다. 또한 기타펀드는 2019년 중 일부 사모펀드 손실 및 환매중단 사태, 사모펀드 일반투자자 요건 강화 등으로 증가 규모가 절반 수준으로 축소되었다. 한편 신용협동기구 등 비은행예금취급기관 수신은 대체로 2019년과 비슷한 증가 규모를 나타내었다.", "(2) 전남지역\n2013년중 전남지역 수출은 전년대비 1.2% 감소로 전환하였다. 품목별로는 석유(+9.3% → +3.8%) 및 화학제품(+1.2% → +7.1%)이 중국 등 해외수요확대로 증가세를 지속하였으나 철강금속(+1.8% → -8.6%)은 글로벌 공급과잉 및 중국의 저가 철강수출 확대로, 선박(+7.6% → -49.2%)은 수주물량이 급격히 줄어들면서 감소로 전환하였다. 전남지역 수입은 원유, 화학제품, 철강금속 등의 수입이 줄면서 전년대비 7.4% 감소로 전환하였다."]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,534
SEBIS/legal_t5_small_cls_de
SEBIS
text2text-generation
[ "transformers", "pytorch", "jax", "t5", "text2text-generation", "classification Deustch model", "dataset:jrc-acquis", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2021-06-23T10:27:59+00:00
171
0
--- datasets: - jrc-acquis language: Deustch tags: - classification Deustch model widget: - text: 'BESCHLUSS DES RATES vom 17. Dezember 1999 über den Abschluß des Abkommens in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft (1999/873/EG) DER RAT DER EUROPÄISCHEN UNION - gestützt auf den Vertrag zur Gründung der Europäischen Gemeinschaft, insbesondere auf Artikel 133 in Verbindung mit Artikel 300 Absatz 2 Unterabsatz 1, auf Vorschlag der Kommission, in Erwägung nachstehender Gründe: (1) Zwischen der Europäischen Gemeinschaft und der Tunesischen Republik wurde ein Abkommen in Form eines Briefwechsels ausgehandelt, um die Geltungsdauer der Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft, die in Artikel 3 des Protokolls Nr. 1 des Europa-Mittelmeer-Abkommens zur Gründung einer Assoziation zwischen der Europäischen Gemeinschaft und ihren Mitgliedstaaten einerseits und der Tunesischen Republik andererseits(1) vorgesehen ist, für die Zeit vom 1. Januar bis zum 31. Dezember 2000 zu verlängern. (2) Das Abkommen sollte im Namen der Gemeinschaft genehmigt werden - BESCHLIESST: Artikel 1 Das Abkommen in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft wird im Namen der Gemeinschaft genehmigt. Der Wortlaut des Abkommens ist diesem Beschluß beigefügt. Artikel 2 Der Präsident des Rates wird ermächtigt, die Person zu bestellen, die befugt ist, das Abkommen rechtsverbindlich für die Gemeinschaft zu unterzeichnen. Geschehen zu Brüssel am 17. Dezember 1999. Im Namen des Rates Der Präsident K. HEMILÄ (1) ABl. L 97 vom 30.3.1998, S. 1.' --- # legal_t5_small_cls_de model Model for classification of legal text written in German (Deutsch). It was first released in [this repository](https://github.com/agemagician/LegalTrans). This model was trained on three parallel corpora from jrc-acquis. ## Model description legal_t5_small_cls_de is based on the `t5-small` model and was trained on a large corpus of parallel text. This is a smaller model, which scales the baseline t5 model down by using `dmodel = 512`, `dff = 2,048`, 8-headed attention, and only 6 layers each in the encoder and decoder. This variant has about 60 million parameters. ## Intended uses & limitations The model could be used for classification of legal texts written in German. ### How to use Here is how to use this model to classify legal text written in German in PyTorch: ```python from transformers import AutoTokenizer, AutoModelWithLMHead, TranslationPipeline pipeline = TranslationPipeline( model=AutoModelWithLMHead.from_pretrained("SEBIS/legal_t5_small_cls_de"), tokenizer=AutoTokenizer.from_pretrained(pretrained_model_name_or_path = "SEBIS/legal_t5_small_cls_de", do_lower_case=False, skip_special_tokens=True), device=0 ) de_text = "BESCHLUSS DES RATES vom 17.
Dezember 1999 über den Abschluß des Abkommens in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft (1999/873/EG) DER RAT DER EUROPÄISCHEN UNION - gestützt auf den Vertrag zur Gründung der Europäischen Gemeinschaft, insbesondere auf Artikel 133 in Verbindung mit Artikel 300 Absatz 2 Unterabsatz 1, auf Vorschlag der Kommission, in Erwägung nachstehender Gründe: (1) Zwischen der Europäischen Gemeinschaft und der Tunesischen Republik wurde ein Abkommen in Form eines Briefwechsels ausgehandelt, um die Geltungsdauer der Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft, die in Artikel 3 des Protokolls Nr. 1 des Europa-Mittelmeer-Abkommens zur Gründung einer Assoziation zwischen der Europäischen Gemeinschaft und ihren Mitgliedstaaten einerseits und der Tunesischen Republik andererseits(1) vorgesehen ist, für die Zeit vom 1. Januar bis zum 31. Dezember 2000 zu verlängern. (2) Das Abkommen sollte im Namen der Gemeinschaft genehmigt werden - BESCHLIESST: Artikel 1 Das Abkommen in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft wird im Namen der Gemeinschaft genehmigt. Der Wortlaut des Abkommens ist diesem Beschluß beigefügt. Artikel 2 Der Präsident des Rates wird ermächtigt, die Person zu bestellen, die befugt ist, das Abkommen rechtsverbindlich für die Gemeinschaft zu unterzeichnen. Geschehen zu Brüssel am 17. Dezember 1999. Im Namen des Rates Der Präsident K. HEMILÄ (1) ABl. L 97 vom 30.3.1998, S. 1." pipeline([de_text], max_length=512) ``` ## Training data The legal_t5_small_cls_de model was trained on the [JRC-ACQUIS](https://wt-public.emm4u.eu/Acquis/index_2.2.html) dataset, consisting of 23 thousand texts. ## Training procedure The model was trained on a single TPU Pod V3-8 for 250K steps in total, using sequence length 512 (batch size 64). It has a total of approximately 220M parameters and was trained using the encoder-decoder architecture. The optimizer used is AdaFactor with an inverse square root learning rate schedule for pre-training. ### Preprocessing A unigram model was trained on 88M lines of text from the parallel corpus (across all language pairs) to build the vocabulary (with byte-pair encoding) used by this model. ### Pretraining ## Evaluation results When the model is used on the classification test dataset, it achieves the following results: Test results: | Model | F1 score | |:-----:|:-----:| | legal_t5_small_cls_de | 0.6358| ### BibTeX entry and citation info > Created by [Ahmed Elnaggar/@Elnaggar_AI](https://twitter.com/Elnaggar_AI) | [LinkedIn](https://www.linkedin.com/in/prof-ahmed-elnaggar/)
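`AutoModelWithLMHead`, as used in the snippet above, is deprecated in recent transformers releases. The following is a minimal sketch of the same classification-by-generation call assuming the checkpoint loads with `AutoModelForSeq2SeqLM` (the standard auto class for T5 checkpoints); the `max_length=8` generation budget is an assumption sized for short class labels.

```python
# Minimal sketch for recent transformers versions; assumes the
# checkpoint loads with AutoModelForSeq2SeqLM (standard for T5).
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("SEBIS/legal_t5_small_cls_de")
model = AutoModelForSeq2SeqLM.from_pretrained("SEBIS/legal_t5_small_cls_de")

inputs = tokenizer(de_text, return_tensors="pt", truncation=True, max_length=512)
# The model emits the predicted class label as generated text.
outputs = model.generate(**inputs, max_length=8)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```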
null
Non_BioNLP
{"datasets": ["jrc-acquis"], "language": "Deustch", "tags": ["classification Deustch model"], "widget": [{"text": "BESCHLUSS DES RATES vom 17. Dezember 1999 über den Abschluß des Abkommens in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft (1999/873/EG) DER RAT DER EUROPÄISCHEN UNION - gestützt auf den Vertrag zur Gründung der Europäischen Gemeinschaft, insbesondere auf Artikel 133 in Verbindung mit Artikel 300 Absatz 2 Unterabsatz 1, auf Vorschlag der Kommission, in Erwägung nachstehender Gründe: (1) Zwischen der Europäischen Gemeinschaft und der Tunesischen Republik wurde ein Abkommen in Form eines Briefwechsels ausgehandelt, um die Geltungsdauer der Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft, die in Artikel 3 des Protokolls Nr. 1 des Europa-Mittelmeer-Abkommens zur Gründung einer Assoziation zwischen der Europäischen Gemeinschaft und ihren Mitgliedstaaten einerseits und der Tunesischen Republik andererseits(1) vorgesehen ist, für die Zeit vom 1. Januar bis zum 31. Dezember 2000 zu verlängern. (2) Das Abkommen sollte im Namen der Gemeinschaft genehmigt werden - BESCHLIESST: Artikel 1 Das Abkommen in Form eines Briefwechsels zwischen der Europäischen Gemeinschaft und der Tunesischen Republik über die Regelung für die Einfuhr von nicht behandeltem Olivenöl mit Ursprung in Tunesien in die Gemeinschaft wird im Namen der Gemeinschaft genehmigt. Der Wortlaut des Abkommens ist diesem Beschluß beigefügt. Artikel 2 Der Präsident des Rates wird ermächtigt, die Person zu bestellen, die befugt ist, das Abkommen rechtsverbindlich für die Gemeinschaft zu unterzeichnen. Geschehen zu Brüssel am 17. Dezember 1999. Im Namen des Rates Der Präsident K. HEMILÄ (1) ABl. L 97 vom 30.3.1998, S. 1."}]}
task
[ "TRANSLATION" ]
45,535
knguyennguyen/mpnet_20k
knguyennguyen
sentence-similarity
[ "sentence-transformers", "safetensors", "mpnet", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:20108", "loss:MultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:sentence-transformers/all-mpnet-base-v2", "base_model:finetune:sentence-transformers/all-mpnet-base-v2", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2025-01-17T21:32:53Z
2025-01-17T21:33:06+00:00
6
0
--- base_model: sentence-transformers/all-mpnet-base-v2 library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:20108 - loss:MultipleNegativesRankingLoss widget: - source_sentence: I'm in search of a durable cover that can safeguard my cycling device while still allowing easy access to its features. It should be lightweight and available in different color options. sentences: - 'Title: Portable Radio AM FM KAVNLON-001 Descripion: [''1'']' - 'Title: GE WB27T11078 Keypanel Supt Asm Wh Descripion: [''This is an O.E.M. Authorized part . This is an authorized aftermarket product. Fits with various WB27T11078 brand models.'']' - 'Title: Garmin 010-12791-00 Silicone Case, Edge 530, Black Descripion: [''Protect your Edge® 830 with this form-fitting, removable silicone case. Available in other colors (sold separately).'']' - source_sentence: I'm looking for a stylish handbag with a textured finish that offers a central storage area. It should have a chic design suitable for everyday use. sentences: - "Title: LINERY Ski Gloves Men Waterproof Snowboard Gloves for Cold Weather Skiing\ \ & Snowboarding Gloves Fits Both Men & Women Descripion: ['Our' 'LINERY'\n 'ski\ \ glove will be the only glove you need for a whole season at the ski resort.\ \ Thanks to water-repellent shells, fleece liner and durable PU palms, these insulated\ \ gloves keep your hands dry and cozy while you shred all season long.'\n '4 Size\ \ for You to Choose( Glove Size ):'\n 'LADY SMALL: 3.25-3.45 inches across palm\ \ of gloves, 2.75-2.95 inch length of middle finger, 10.25 inches total length.'\n\ \ 'LADY MEDIUM: 3.45-3.65 inches across palm of gloves, 2.95-3.15 inch length\ \ of middle finger, 10.63 inches total length.'\n 'MEN MEDIUM: 3.55-3.75 inches\ \ across palm of gloves, 3.15-3.35 inch length of middle finger, 10.95 inches\ \ total length.'\n 'MEN LARGE: 3.85-4.05 inches across palm of gloves, 3.35-3.54\ \ inch length of middle finger, 11.25 inches total length.'\n 'Note: It’s suggested\ \ to test the palm circumference and middle finger length, take the size chart\ \ as reference to choose the right size. Manual measured, 0.4-0.8 inches deviation\ \ allowed.']" - 'Title: GUESS Colette Girlfriend Satchel Descripion: [''Colette Girlfriend Satchel in pebble pu with center zip compartment'']' - 'Title: French Toast Girls'' Stretch Skinny Pull-on Capri Pant Descripion: ["Easy to wear and even easier to love! French Toast''s classroom capri features a simple navy and white elastic stripe on the waistband, functional front and back pockets and pull-on styling, making it even to easier to get her dressed and out the door."]' - source_sentence: I'm looking for a versatile bag that’s perfect for daily use, with ample room for my essentials. It should be made from durable materials, have a secure closure, and offer comfortable carrying options like adjustable straps. sentences: - 'Title: COVERGIRL Katy Kat Gift Set with Matte Lipsticks in Crimson Cat, Kitty Purry, Magenta Minx and Maroon Meow (packaging may vary) Descripion: [''The Purrrfect holiday gift. Deck yourself or someone you love with the new Katy Kat Collection. This gift was created by Katy Perry with 4 shades of Katy Kat Matte Lipstick in Crimson Cat, Kitty Purry, Magenta Minx, and Maroon Meow. You get Katy Kat Matte Lipstick that keeps lips pillowy soft. So enjoy! 
And have a beautiful holiday season!'']' - 'Title: Bewahly Vertical Laptop Stand[Adjustable Size],Aluminum Adjustable Laptop Holder, Saving Space, Suitable for MacBook Pro/Air, iPad, Samsung, Huawei, Surface, Dell, HP, Lenovo and Others (Gray) Descripion: [''Colour:grey Adjustable width: Adjustable for laptop width from 0.6 inch to 2.6 inch. Supplied Allen key to freely adjust to the most suitable width, suitable for all types of laptops. Aluminium alloy. The vertical laptop stand is made of superior aluminum alloy that resembles the material of the MacBook. Non-slip design: The excellent notebook stand provides many silicone pads such as in the U-type groove and under the notebook dock to prevent your device from being scratched and stay on the desk. Wide compatibility: Compatible with iPad/iPhone, MacBook Pro/Air, Microsoft Surface, Laptops, Notebooks, Dell Samsung Lenovo Acer Sony Asus XPS HP ENVY Series and even books. Space-saving: Keep your laptop vertical and make your desktop well organized to save more space quickly.'']' - 'Title: Peak Design Everyday Messenger 13L Descripion: [''The Peak Design(tm) 13 L Everyday Messenger V2 bag offers adequate space to keep your essentials handy. It will be your go-to everyday carry. Made of recycled 400D nylon, polyester, EVA, and Hypalon. MagLatch closure. Removable and adjustable padded shoulder straps. Crossbody strap and two hidden external straps.'']' - source_sentence: I'm looking for a special keychain gift that celebrates a love for dogs and is perfect for a grandmother. It should be made from quality materials and have a meaningful design that conveys affection. sentences: - 'Title: Bosch XR7LDC Super Spark Plug, (Pack of 1) Descripion: [''Item Name:'', ''Spark Plug 1pcs'', ''Part Brand:'', ''BOSCH'', ''OEM Numbers:'', ''12 12 1 465 104 / XX XX 1 464 104'', ''Electrode Gap [mm]:'', ''0,8'', ''Tightening Torque [Nm]:'', ''23'', ''Tightening angle [degrees]:'', ''90'', ''Outer Thread [mm]:'', ''12'', ''Thread Pitch [mm]:'', ''1,25'', ''Thread Length [mm]:'', ''19'', ''Spanner Size:'', ''17,5'', ''Spark Position [mm]:'', ''3'', ''Number of Poles:'', ''2'']' - 'Title: Aloe Vera Microwave Wax - Home Hair Removal Hard Wax - Brazilian Microwaveable Wax - Hot Stripless Wax for Body Waxing (Face, Eyebrows, Upper lip, Underarms, Legs and Arms) - Self Waxing Pot 200g Descripion: []' - "Title: Kivosliviz Dog Grandma Gifts Keychain Dog Paw Print Jewelry Best Dog Grandma\ \ Ever Dog Lover Keychain Dog Grandmother Keychains Descripion: ['❤PRODUCT NAME❤'\n\ \ 'Kivosliviz Dog Grandma Gifts Dog Paw Print Jewelry Best Dog Lover Keychain\ \ This jewelry is made of high quality 316L stainless steel, it is processed by\ \ precise polishing and made of environmental-friendly material.'\n '❤KIVOSLIVIZ\ \ JEWELRY❤'\n 'Love can Change the World.You are more beautiful and confident\ \ than you imagine.'\n '❤Maintenance Instruction❤'\n 'a. Keep it dry and avoid\ \ wearing it when bathing, doing housework and swimming. b. Perfume is also corrosive\ \ to jewelry. Wear your perfume before putting on the piece. c. Grease and sweat\ \ on hands will caused corrosion. Be careful. d. Rub your Jewelry with baby wipes,they\ \ perform miracles on jewelry without damaging the surface, then rub with a clean\ \ dry soft cotton cloth to bring back the shine.']" - source_sentence: I'm looking for a casual short-sleeve top with a fun and stylish design for women. It should have a round neck and a playful message, perfect for everyday wear. 
I'd prefer something that feels comfortable and has a unique print. sentences: - "Title: 2 Pack USB-C Female to for Garmin Watch Charger Connector Male Adapter,\ \ Type C to Charging Adapter for Garmin Instinct 2 Solar/Fenix 5/6/7X/7/6X/Venu\ \ 2 Plus/EPIX/Forerunner 955 255 Descripion: ['Notice:'\n '-- Charing Connector\ \ Adapter only, Charging Cable not included.'\n \"-- This adapter can only be\ \ used for USB-A at one end and USB-C at the other end. BUT it can't be used for\ \ USB-C at both ends !!! It can charge and transmit data, Please pay attention\ \ that!\"\n 'From Abanen, Smart Watch Accessories Brand'\n '-- An ideal solution\ \ to charge and sync Garmin watchs by a USB-C cable, saving the trouble of carrying\ \ extra cable around.'\n 'Compatible with Garmin Smartwatch:'\n '-- Fenix 7X Solar\ \ / Fenix 7X Sapphire Solar'\n '-- Fenix 7S Solar / Fenix 7S Sapphire Solar'\n\ \ '-- Fenix 7 Solar / Fenix 7 Sapphire Solar' '-- EPIX (Gen 2)'\n '-- Tactix 7\ \ Pro' '-- Instinct 2 / Instinct 2S'\n '-- Fenix 6X Pro / Fenix 6X Sapphire' '--\ \ Fenix 5X /Fenix 5X Plus'\n '-- Tactix Delta/Tactix Charlie/Tactix Bravo' '--\ \ Quatix 3'\n '-- D2 Charlie/D2 Delta PX' '-- Fenix 5/Fenix 5 Plus,'\n '-- Fenix\ \ 6 Pro/Sapphire' '-- Forerunner 935/945,'\n '-- Instinct /Tactical Solar' '--\ \ Tactix Charlie /Delta,'\n '-- Quatix 5 / Quatix 6,' '-- Fenix 5S/Fenix 5S Plus,'\n\ \ '-- Fenix 6S Pro/Sapphire,' '-- Forerunner 245/645,'\n '-- Approach S10 / S40\ \ /S60 ,' '-- Vivoactive 3 / 3Music'\n '-- Vivoactive 4 / 4S,']" - "Title: HONTOUTE Women Mom Boss T Shirt Funny Leopard Letter Print Shirts with\ \ Saying Vintage O Neck Short Sleeve Tees Casual Tops Descripion: ['Women Mom\ \ Boss T-Shirt Funny Leopard Letters Printed Shirts with Saying Vintage Round\ \ Neck Short Sleeve Tees Cute Casual Tops'\n 'Size Chart:(1inch=2.54cm)'\n 'Size\ \ S: Length 66cm/25.98\" Bust 94cm/37.01\" Size M: Length 67cm/26.38\" Bust 98cm/38.58\"\ \ Size L: Length 68cm/26.77\" Bust 102cm/40.16\" Size XL: Length 69cm/27.17\"\ \ Bust 110cm/43.31\" Please allow slight (±3cm)manual measurement deviation for\ \ the data The real color of the item may be slightly different from the pictures\ \ shown on website,caused by many factors such as brightness of your monitor and\ \ light brightness'\n 'Two Ways About Delivery:' 'FBM:'\n 'Ship from China,88%\ \ customers will receive within 2 weeks,9.9% lucky dog will receive within 1 week,and\ \ others will receive within 3-4 weeks'\n 'FBA:' 'Customers will receive within\ \ 1-3 days' 'Service Guarantee:'\n 'We endeavors 100% customer satisfaction service\ \ and experience If you receive damaged or wrong items Please contact us with\ \ attached pictures about the problem We will provide you a satisfactory solution\ \ within 24 hours You may find that someone sells at a lower price than us But\ \ they cannot guarantee the same quality and service as we do If you are satisfied\ \ with our product or service Hope you can leave your positive feedback']" - 'Title: Batman: Gotham By Gaslight Descripion: ["It''s Batman vs. Jack the Ripper in an Elseworld''s adventure that imagines the Dark Knight over a hundred years ago in a turn-of-the-century Gotham."]' --- # SentenceTransformer based on sentence-transformers/all-mpnet-base-v2 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2). 
It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.

## Model Details

### Model Description
- **Model Type:** Sentence Transformer
- **Base model:** [sentence-transformers/all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) <!-- at revision 9a3225965996d404b775526de6dbfe85d3368642 -->
- **Maximum Sequence Length:** 128 tokens
- **Output Dimensionality:** 768 dimensions
- **Similarity Function:** Cosine Similarity
<!-- - **Training Dataset:** Unknown -->
<!-- - **Language:** Unknown -->
<!-- - **License:** Unknown -->

### Model Sources
- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)
- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)
- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)

### Full Model Architecture

```
SentenceTransformer(
  (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: MPNetModel
  (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})
)
```

## Usage

### Direct Usage (Sentence Transformers)

First install the Sentence Transformers library:

```bash
pip install -U sentence-transformers
```

Then you can load this model and run inference.

```python
from sentence_transformers import SentenceTransformer

# Download from the 🤗 Hub
model = SentenceTransformer("knguyennguyen/mpnet_20k")
# Run inference
sentences = [
    "I'm looking for a casual short-sleeve top with a fun and stylish design for women. It should have a round neck and a playful message, perfect for everyday wear. I'd prefer something that feels comfortable and has a unique print.",
    'Title: HONTOUTE Women Mom Boss T Shirt Funny Leopard Letter Print Shirts with Saying Vintage O Neck Short Sleeve Tees Casual Tops Descripion: [\'Women Mom Boss T-Shirt Funny Leopard Letters Printed Shirts with Saying Vintage Round Neck Short Sleeve Tees Cute Casual Tops\'\n \'Size Chart:(1inch=2.54cm)\'\n \'Size S: Length 66cm/25.98" Bust 94cm/37.01" Size M: Length 67cm/26.38" Bust 98cm/38.58" Size L: Length 68cm/26.77" Bust 102cm/40.16" Size XL: Length 69cm/27.17" Bust 110cm/43.31" Please allow slight (±3cm)manual measurement deviation for the data The real color of the item may be slightly different from the pictures shown on website,caused by many factors such as brightness of your monitor and light brightness\'\n \'Two Ways About Delivery:\' \'FBM:\'\n \'Ship from China,88% customers will receive within 2 weeks,9.9% lucky dog will receive within 1 week,and others will receive within 3-4 weeks\'\n \'FBA:\' \'Customers will receive within 1-3 days\' \'Service Guarantee:\'\n \'We endeavors 100% customer satisfaction service and experience If you receive damaged or wrong items Please contact us with attached pictures about the problem We will provide you a satisfactory solution within 24 hours You may find that someone sells at a lower price than us But they cannot guarantee the same quality and service as we do If you are satisfied with our product or service Hope you can leave your positive feedback\']',
    'Title: Batman: Gotham By Gaslight Descripion: ["It\'s Batman vs. Jack the Ripper in an Elseworld\'s adventure that imagines the Dark Knight over a hundred years ago in a turn-of-the-century Gotham."]',
]
embeddings = model.encode(sentences)
print(embeddings.shape)
# [3, 768]

# Get the similarity scores for the embeddings
similarities = model.similarity(embeddings, embeddings)
print(similarities.shape)
# [3, 3]
```
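Beyond the snippet above, a common retrieval-style use of such a model is to embed one query and many candidate product texts, then rank the candidates by cosine similarity. The sketch below uses the library's `semantic_search` utility; the query and corpus strings are invented examples, not training data.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("knguyennguyen/mpnet_20k")

# Hypothetical mini-corpus of product titles.
corpus = [
    "Title: Stainless steel insulated water bottle, 750 ml",
    "Title: Women's lightweight running shoes with mesh upper",
    "Title: USB-C fast charging cable, 2 m, braided nylon",
]
query = "I'm looking for a durable cable to charge my laptop and phone."

corpus_embeddings = model.encode(corpus, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)

# Returns the top-k corpus indices and cosine scores for the query.
hits = util.semantic_search(query_embedding, corpus_embeddings, top_k=2)[0]
for hit in hits:
    print(f"{hit['score']:.3f}  {corpus[hit['corpus_id']]}")
```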
<!-- ### Direct Usage (Transformers)

<details><summary>Click to see the direct usage in Transformers</summary>

</details>
-->

<!-- ### Downstream Usage (Sentence Transformers)

You can finetune this model on your own dataset.

<details><summary>Click to expand</summary>

</details>
-->

<!-- ### Out-of-Scope Use

*List how the model may foreseeably be misused and address what users ought not to do with the model.*
-->

<!-- ## Bias, Risks and Limitations

*What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.*
-->

<!-- ### Recommendations

*What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.*
-->

## Training Details

### Training Dataset

#### Unnamed Dataset

* Size: 20,108 training samples
* Columns: <code>sentence_0</code> and <code>sentence_1</code>
* Approximate statistics based on the first 1000 samples:

  |         | sentence_0 | sentence_1 |
  |:--------|:-----------------------------------------------------------------------------------|:------------------------------------------------------------------------------------|
  | type    | string | string |
  | details | <ul><li>min: 11 tokens</li><li>mean: 36.23 tokens</li><li>max: 73 tokens</li></ul> | <ul><li>min: 13 tokens</li><li>mean: 88.42 tokens</li><li>max: 128 tokens</li></ul> |

* Samples:

  | sentence_0 | sentence_1 |
  |:-----------|:-----------|
  | <code>I'm looking for a stylish pair of eyewear with a luxurious touch. They should have a unique color combination and come with a special case and cleaning accessory.</code> | <code>Title: Sunglasses Gucci GG 0528 S- 008 GOLD/BROWN CRYSTAL, 63-14-150 Descripion: ['Authentic Gucci GG0528 S 008 Gold Crystal/Brown Sunglasses. Comes with a matching satin flannel pouch and ivory microfiber cloth and Authenticity card.']</code> |
  | <code>I'm looking for comfortable and stylish capri pants for girls that are easy to wear and have a stretchy fit.</code> | <code>Title: French Toast Girls' Stretch Skinny Pull-on Capri Pant Descripion: ["Easy to wear and even easier to love! French Toast's classroom capri features a simple navy and white elastic stripe on the waistband, functional front and back pockets and pull-on styling, making it even to easier to get her dressed and out the door."]</code> |
  | <code>I'm in need of a replacement screen for a laptop that offers clear visuals and fits a specific model. It should provide high-definition quality for general use.</code> | <code>Title: BRIGHTFOCAL New Screen Replacement for HP 14-CF0006DX HD 1366x768 LCD LED Display Panel Descripion: ['BRIGHTFOCAL New Screen Replacement for HP 14-CF0006DX HD 1366x768 LCD LED Display Panel']</code> |

* Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters:

  ```json
  {
      "scale": 20.0,
      "similarity_fct": "cos_sim"
  }
  ```
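As a rough illustration of how a model like this can be fine-tuned with MultipleNegativesRankingLoss on (query, product text) pairs, here is a hedged sketch using the library's older `model.fit` training API; the example pairs are invented and the setup is simplified relative to the hyperparameter tables below.

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer("sentence-transformers/all-mpnet-base-v2")

# Invented (query, matching product text) pairs; in-batch negatives are
# supplied automatically by MultipleNegativesRankingLoss.
train_examples = [
    InputExample(texts=["I need a stand to hold my laptop upright",
                        "Title: Aluminum vertical laptop stand, adjustable width"]),
    InputExample(texts=["comfortable capri pants for girls",
                        "Title: Girls' stretch skinny pull-on capri pant"]),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=128)
train_loss = losses.MultipleNegativesRankingLoss(model, scale=20.0)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=5,
)
```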
### Training Hyperparameters

#### Non-Default Hyperparameters

- `per_device_train_batch_size`: 128
- `per_device_eval_batch_size`: 128
- `num_train_epochs`: 5
- `multi_dataset_batch_sampler`: round_robin

#### All Hyperparameters

<details><summary>Click to expand</summary>

- `overwrite_output_dir`: False
- `do_predict`: False
- `eval_strategy`: no
- `prediction_loss_only`: True
- `per_device_train_batch_size`: 128
- `per_device_eval_batch_size`: 128
- `per_gpu_train_batch_size`: None
- `per_gpu_eval_batch_size`: None
- `gradient_accumulation_steps`: 1
- `eval_accumulation_steps`: None
- `torch_empty_cache_steps`: None
- `learning_rate`: 5e-05
- `weight_decay`: 0.0
- `adam_beta1`: 0.9
- `adam_beta2`: 0.999
- `adam_epsilon`: 1e-08
- `max_grad_norm`: 1
- `num_train_epochs`: 5
- `max_steps`: -1
- `lr_scheduler_type`: linear
- `lr_scheduler_kwargs`: {}
- `warmup_ratio`: 0.0
- `warmup_steps`: 0
- `log_level`: passive
- `log_level_replica`: warning
- `log_on_each_node`: True
- `logging_nan_inf_filter`: True
- `save_safetensors`: True
- `save_on_each_node`: False
- `save_only_model`: False
- `restore_callback_states_from_checkpoint`: False
- `no_cuda`: False
- `use_cpu`: False
- `use_mps_device`: False
- `seed`: 42
- `data_seed`: None
- `jit_mode_eval`: False
- `use_ipex`: False
- `bf16`: False
- `fp16`: False
- `fp16_opt_level`: O1
- `half_precision_backend`: auto
- `bf16_full_eval`: False
- `fp16_full_eval`: False
- `tf32`: None
- `local_rank`: 0
- `ddp_backend`: None
- `tpu_num_cores`: None
- `tpu_metrics_debug`: False
- `debug`: []
- `dataloader_drop_last`: False
- `dataloader_num_workers`: 0
- `dataloader_prefetch_factor`: None
- `past_index`: -1
- `disable_tqdm`: False
- `remove_unused_columns`: True
- `label_names`: None
- `load_best_model_at_end`: False
- `ignore_data_skip`: False
- `fsdp`: []
- `fsdp_min_num_params`: 0
- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}
- `fsdp_transformer_layer_cls_to_wrap`: None
- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}
- `deepspeed`: None
- `label_smoothing_factor`: 0.0
- `optim`: adamw_torch
- `optim_args`: None
- `adafactor`: False
- `group_by_length`: False
- `length_column_name`: length
- `ddp_find_unused_parameters`: None
- `ddp_bucket_cap_mb`: None
- `ddp_broadcast_buffers`: False
- `dataloader_pin_memory`: True
- `dataloader_persistent_workers`: False
- `skip_memory_metrics`: True
- `use_legacy_prediction_loop`: False
- `push_to_hub`: False
- `resume_from_checkpoint`: None
- `hub_model_id`: None
- `hub_strategy`: every_save
- `hub_private_repo`: False
- `hub_always_push`: False
- `gradient_checkpointing`: False
- `gradient_checkpointing_kwargs`: None
- `include_inputs_for_metrics`: False
- `eval_do_concat_batches`: True
- `fp16_backend`: auto
- `push_to_hub_model_id`: None
- `push_to_hub_organization`: None
- `mp_parameters`: 
- `auto_find_batch_size`: False
- `full_determinism`: False
- `torchdynamo`: None
- `ray_scope`: last
- `ddp_timeout`: 1800
- `torch_compile`: False
- `torch_compile_backend`: None
- `torch_compile_mode`: None
- `dispatch_batches`: None
- `split_batches`: None
- `include_tokens_per_second`: False
- `include_num_input_tokens_seen`: False
- `neftune_noise_alpha`: None
- `optim_target_modules`: None
- `batch_eval_metrics`: False
- `eval_on_start`: False
- `use_liger_kernel`: False
- `eval_use_gather_object`: False
- `batch_sampler`: batch_sampler
- `multi_dataset_batch_sampler`: round_robin

</details>
### Training Logs

| Epoch  | Step | Training Loss |
|:------:|:----:|:-------------:|
| 3.1646 | 500  | 0.493         |

### Framework Versions

- Python: 3.11.11
- Sentence Transformers: 3.1.1
- Transformers: 4.45.2
- PyTorch: 2.5.1+cu121
- Accelerate: 1.2.1
- Datasets: 3.2.0
- Tokenizers: 0.20.3

## Citation

### BibTeX

#### Sentence Transformers

```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### MultipleNegativesRankingLoss

```bibtex
@misc{henderson2017efficient,
    title={Efficient Natural Language Response Suggestion for Smart Reply},
    author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil},
    year={2017},
    eprint={1705.00652},
    archivePrefix={arXiv},
    primaryClass={cs.CL}
}
```

<!-- ## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!-- ## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!-- ## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
null
Non_BioNLP
{"base_model": "sentence-transformers/all-mpnet-base-v2", "library_name": "sentence-transformers", "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:20108", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "I'm in search of a durable cover that can safeguard my cycling device while still allowing easy access to its features. It should be lightweight and available in different color options.", "sentences": ["Title: Portable Radio AM FM KAVNLON-001 Descripion: ['1']", "Title: GE WB27T11078 Keypanel Supt Asm Wh Descripion: ['This is an O.E.M. Authorized part . This is an authorized aftermarket product. Fits with various WB27T11078 brand models.']", "Title: Garmin 010-12791-00 Silicone Case, Edge 530, Black Descripion: ['Protect your Edge® 830 with this form-fitting, removable silicone case. Available in other colors (sold separately).']"]}, {"source_sentence": "I'm looking for a stylish handbag with a textured finish that offers a central storage area. It should have a chic design suitable for everyday use.", "sentences": ["Title: LINERY Ski Gloves Men Waterproof Snowboard Gloves for Cold Weather Skiing & Snowboarding Gloves Fits Both Men & Women Descripion: ['Our' 'LINERY'\n 'ski glove will be the only glove you need for a whole season at the ski resort. Thanks to water-repellent shells, fleece liner and durable PU palms, these insulated gloves keep your hands dry and cozy while you shred all season long.'\n '4 Size for You to Choose( Glove Size ):'\n 'LADY SMALL: 3.25-3.45 inches across palm of gloves, 2.75-2.95 inch length of middle finger, 10.25 inches total length.'\n 'LADY MEDIUM: 3.45-3.65 inches across palm of gloves, 2.95-3.15 inch length of middle finger, 10.63 inches total length.'\n 'MEN MEDIUM: 3.55-3.75 inches across palm of gloves, 3.15-3.35 inch length of middle finger, 10.95 inches total length.'\n 'MEN LARGE: 3.85-4.05 inches across palm of gloves, 3.35-3.54 inch length of middle finger, 11.25 inches total length.'\n 'Note: It’s suggested to test the palm circumference and middle finger length, take the size chart as reference to choose the right size. Manual measured, 0.4-0.8 inches deviation allowed.']", "Title: GUESS Colette Girlfriend Satchel Descripion: ['Colette Girlfriend Satchel in pebble pu with center zip compartment']", "Title: French Toast Girls' Stretch Skinny Pull-on Capri Pant Descripion: [\"Easy to wear and even easier to love! French Toast's classroom capri features a simple navy and white elastic stripe on the waistband, functional front and back pockets and pull-on styling, making it even to easier to get her dressed and out the door.\"]"]}, {"source_sentence": "I'm looking for a versatile bag that’s perfect for daily use, with ample room for my essentials. It should be made from durable materials, have a secure closure, and offer comfortable carrying options like adjustable straps.", "sentences": ["Title: COVERGIRL Katy Kat Gift Set with Matte Lipsticks in Crimson Cat, Kitty Purry, Magenta Minx and Maroon Meow (packaging may vary) Descripion: ['The Purrrfect holiday gift. Deck yourself or someone you love with the new Katy Kat Collection. This gift was created by Katy Perry with 4 shades of Katy Kat Matte Lipstick in Crimson Cat, Kitty Purry, Magenta Minx, and Maroon Meow. You get Katy Kat Matte Lipstick that keeps lips pillowy soft. So enjoy! 
And have a beautiful holiday season!']", "Title: Bewahly Vertical Laptop Stand[Adjustable Size],Aluminum Adjustable Laptop Holder, Saving Space, Suitable for MacBook Pro/Air, iPad, Samsung, Huawei, Surface, Dell, HP, Lenovo and Others (Gray) Descripion: ['Colour:grey Adjustable width: Adjustable for laptop width from 0.6 inch to 2.6 inch. Supplied Allen key to freely adjust to the most suitable width, suitable for all types of laptops. Aluminium alloy. The vertical laptop stand is made of superior aluminum alloy that resembles the material of the MacBook. Non-slip design: The excellent notebook stand provides many silicone pads such as in the U-type groove and under the notebook dock to prevent your device from being scratched and stay on the desk. Wide compatibility: Compatible with iPad/iPhone, MacBook Pro/Air, Microsoft Surface, Laptops, Notebooks, Dell Samsung Lenovo Acer Sony Asus XPS HP ENVY Series and even books. Space-saving: Keep your laptop vertical and make your desktop well organized to save more space quickly.']", "Title: Peak Design Everyday Messenger 13L Descripion: ['The Peak Design(tm) 13 L Everyday Messenger V2 bag offers adequate space to keep your essentials handy. It will be your go-to everyday carry. Made of recycled 400D nylon, polyester, EVA, and Hypalon. MagLatch closure. Removable and adjustable padded shoulder straps. Crossbody strap and two hidden external straps.']"]}, {"source_sentence": "I'm looking for a special keychain gift that celebrates a love for dogs and is perfect for a grandmother. It should be made from quality materials and have a meaningful design that conveys affection.", "sentences": ["Title: Bosch XR7LDC Super Spark Plug, (Pack of 1) Descripion: ['Item Name:', 'Spark Plug 1pcs', 'Part Brand:', 'BOSCH', 'OEM Numbers:', '12 12 1 465 104 / XX XX 1 464 104', 'Electrode Gap [mm]:', '0,8', 'Tightening Torque [Nm]:', '23', 'Tightening angle [degrees]:', '90', 'Outer Thread [mm]:', '12', 'Thread Pitch [mm]:', '1,25', 'Thread Length [mm]:', '19', 'Spanner Size:', '17,5', 'Spark Position [mm]:', '3', 'Number of Poles:', '2']", "Title: Aloe Vera Microwave Wax - Home Hair Removal Hard Wax - Brazilian Microwaveable Wax - Hot Stripless Wax for Body Waxing (Face, Eyebrows, Upper lip, Underarms, Legs and Arms) - Self Waxing Pot 200g Descripion: []", "Title: Kivosliviz Dog Grandma Gifts Keychain Dog Paw Print Jewelry Best Dog Grandma Ever Dog Lover Keychain Dog Grandmother Keychains Descripion: ['❤PRODUCT NAME❤'\n 'Kivosliviz Dog Grandma Gifts Dog Paw Print Jewelry Best Dog Lover Keychain This jewelry is made of high quality 316L stainless steel, it is processed by precise polishing and made of environmental-friendly material.'\n '❤KIVOSLIVIZ JEWELRY❤'\n 'Love can Change the World.You are more beautiful and confident than you imagine.'\n '❤Maintenance Instruction❤'\n 'a. Keep it dry and avoid wearing it when bathing, doing housework and swimming. b. Perfume is also corrosive to jewelry. Wear your perfume before putting on the piece. c. Grease and sweat on hands will caused corrosion. Be careful. d. Rub your Jewelry with baby wipes,they perform miracles on jewelry without damaging the surface, then rub with a clean dry soft cotton cloth to bring back the shine.']"]}, {"source_sentence": "I'm looking for a casual short-sleeve top with a fun and stylish design for women. It should have a round neck and a playful message, perfect for everyday wear. 
I'd prefer something that feels comfortable and has a unique print.", "sentences": ["Title: 2 Pack USB-C Female to for Garmin Watch Charger Connector Male Adapter, Type C to Charging Adapter for Garmin Instinct 2 Solar/Fenix 5/6/7X/7/6X/Venu 2 Plus/EPIX/Forerunner 955 255 Descripion: ['Notice:'\n '-- Charing Connector Adapter only, Charging Cable not included.'\n \"-- This adapter can only be used for USB-A at one end and USB-C at the other end. BUT it can't be used for USB-C at both ends !!! It can charge and transmit data, Please pay attention that!\"\n 'From Abanen, Smart Watch Accessories Brand'\n '-- An ideal solution to charge and sync Garmin watchs by a USB-C cable, saving the trouble of carrying extra cable around.'\n 'Compatible with Garmin Smartwatch:'\n '-- Fenix 7X Solar / Fenix 7X Sapphire Solar'\n '-- Fenix 7S Solar / Fenix 7S Sapphire Solar'\n '-- Fenix 7 Solar / Fenix 7 Sapphire Solar' '-- EPIX (Gen 2)'\n '-- Tactix 7 Pro' '-- Instinct 2 / Instinct 2S'\n '-- Fenix 6X Pro / Fenix 6X Sapphire' '-- Fenix 5X /Fenix 5X Plus'\n '-- Tactix Delta/Tactix Charlie/Tactix Bravo' '-- Quatix 3'\n '-- D2 Charlie/D2 Delta PX' '-- Fenix 5/Fenix 5 Plus,'\n '-- Fenix 6 Pro/Sapphire' '-- Forerunner 935/945,'\n '-- Instinct /Tactical Solar' '-- Tactix Charlie /Delta,'\n '-- Quatix 5 / Quatix 6,' '-- Fenix 5S/Fenix 5S Plus,'\n '-- Fenix 6S Pro/Sapphire,' '-- Forerunner 245/645,'\n '-- Approach S10 / S40 /S60 ,' '-- Vivoactive 3 / 3Music'\n '-- Vivoactive 4 / 4S,']", "Title: HONTOUTE Women Mom Boss T Shirt Funny Leopard Letter Print Shirts with Saying Vintage O Neck Short Sleeve Tees Casual Tops Descripion: ['Women Mom Boss T-Shirt Funny Leopard Letters Printed Shirts with Saying Vintage Round Neck Short Sleeve Tees Cute Casual Tops'\n 'Size Chart:(1inch=2.54cm)'\n 'Size S: Length 66cm/25.98\" Bust 94cm/37.01\" Size M: Length 67cm/26.38\" Bust 98cm/38.58\" Size L: Length 68cm/26.77\" Bust 102cm/40.16\" Size XL: Length 69cm/27.17\" Bust 110cm/43.31\" Please allow slight (±3cm)manual measurement deviation for the data The real color of the item may be slightly different from the pictures shown on website,caused by many factors such as brightness of your monitor and light brightness'\n 'Two Ways About Delivery:' 'FBM:'\n 'Ship from China,88% customers will receive within 2 weeks,9.9% lucky dog will receive within 1 week,and others will receive within 3-4 weeks'\n 'FBA:' 'Customers will receive within 1-3 days' 'Service Guarantee:'\n 'We endeavors 100% customer satisfaction service and experience If you receive damaged or wrong items Please contact us with attached pictures about the problem We will provide you a satisfactory solution within 24 hours You may find that someone sells at a lower price than us But they cannot guarantee the same quality and service as we do If you are satisfied with our product or service Hope you can leave your positive feedback']", "Title: Batman: Gotham By Gaslight Descripion: [\"It's Batman vs. Jack the Ripper in an Elseworld's adventure that imagines the Dark Knight over a hundred years ago in a turn-of-the-century Gotham.\"]"]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,536
LoneStriker/airoboros-l2-70b-3.1.2-6.0bpw-h6-exl2
LoneStriker
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "dataset:jondurbin/airoboros-3.1", "license:llama2", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-10-21T17:55:08Z
2023-10-22T14:45:20+00:00
3
0
---
datasets:
- jondurbin/airoboros-3.1
license: llama2
---

### Overview

Another experimental model, using mostly synthetic data generated by [airoboros](https://github.com/jondurbin/airoboros).

#### IMPORTANT NOTE - llama-2 chat format!!!

This model uses the llama-2 chat format, rather than vicuna-style user/assistant! This is a breaking change, although most inference systems support llama-2 chat templating. See the prompt format below for all of the details.

#### Highlights

A model built on the [airoboros-3.1 dataset](https://hf.co/datasets/jondurbin/airoboros-3.1), which is a continuation of the [airoboros-3.0 dataset](https://hf.co/datasets/jondurbin/airoboros-3.0) with the following extras:

* More MathJSON, now ~17k items - math questions, prefixed with __"Create a MathJSON solution to the following:"__, which then outputs a JSON between __`<mathjson>`__ and __`</mathjson>`__ tags, which can be parsed and passed to a deterministic library to perform calculations.
* Log information extraction (e.g. getting the list of paths/IPs/etc. from apache logs)
* Anonymization, e.g. removing names, IP addresses, and/or dates from text.
* Chat introspection - multi-turn chats that have random questions injected asking about the character's descriptions, the setting of the chat, or things previously said, to make sure the model uses the system prompt and/or chat history.
* Multi-step instructions with acknowledgement (see below)
* De-censorship data (not published)

This is a fairly general purpose model, but focuses heavily on instruction following, rather than casual chat/roleplay.

Huge thank you to the folks over at [a16z](https://a16z.com/) for sponsoring the costs associated with building models and associated tools!

### Prompt format

The prompt format is llama-2 chat.

```
[INST] <<SYS>>
You are a helpful, unbiased, uncensored assistant.
<</SYS>>

{prompt} [/INST]
```

For multi-turn, the prompt format is as follows:

```
[INST] <<SYS>>
You are a helpful, unbiased, uncensored assistant.
<</SYS>>

{prompt 0} [/INST] {response 0} </s><s>[INST] {prompt 1} [/INST] {response 1} </s><s>...[INST] {prompt N} [/INST]
```

The prompt template is included in the tokenizer config, and can be used via the huggingface tokenizer `apply_chat_template` method, e.g.:

```
import transformers

tokenizer = transformers.AutoTokenizer.from_pretrained('jondurbin/airoboros-l2-70b-3.1')
chat = [
    {"role": "system", "content": "You are Bob, a friendly AI assistant."},
    {"role": "user", "content": "Hello, how are you?"},
    {"role": "assistant", "content": "I'm doing great. How can I help you today?"},
    {"role": "user", "content": "I'd like to show off how chat templating works!"},
]
print(tokenizer.apply_chat_template(chat, tokenize=False))
```

### Helpful usage tips

#### MathJSON

Ask mathematical questions, prefixed with "Create a MathJSON solution to the following:", and you should get output with a formula that can be executed by https://cortexjs.io/compute-engine/

I also made a (really quite hacky, incomplete, semi-functional) python version that works for most expressions, see [mathjson.py](https://github.com/jondurbin/airoboros/blob/mathjson/airoboros/mathjson.py)

__Use a very low temperature!__

Example:

```
[INST] <<SYS>>
You are an assistant with vast knowledge in all things math.
<</SYS>>

Create a MathJSON solution to the following: Calculate the area of a circle with radius of 17.2456cm. Include your reasoning. [/INST]
```

Output:

```
The area (A) of a circle is given by the formula A = πr², where r is the radius of the circle. Since the radius is given in centimeters, we need to convert it to meters first. We know that 1 meter equals 100 centimeters. Therefore, we can represent the radius as: r = 17.2456 cm * 100/1.

Once we have the radius in meters, we can calculate the area of the circle using the formula A = πr².

Solution as MathJSON:
<mathjson>
[
  "Multiply",
  "Pi",
  [
    "Power",
    17.2456,
    2
  ]
]
</mathjson>
```

You can then validate the JSON between `<mathjson>` and `</mathjson>`, then pass the parsed JSON to compute-engine JS or the `evaluate` function in mathjson.py to calculate the response.
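For instance, a minimal way to pull the expression out of the model's reply and evaluate it, without the full mathjson.py helper, might look like the sketch below. It only handles the few operators used in the example above and is not the repository's implementation.

```python
import json
import math
import re

def extract_mathjson(reply: str):
    """Pull the JSON expression out of <mathjson>...</mathjson> tags."""
    match = re.search(r"<mathjson>(.*?)</mathjson>", reply, re.DOTALL)
    return json.loads(match.group(1)) if match else None

def evaluate(expr):
    """Tiny evaluator covering only a handful of MathJSON operators."""
    if isinstance(expr, (int, float)):
        return expr
    if expr == "Pi":
        return math.pi
    op, *args = expr
    values = [evaluate(a) for a in args]
    if op == "Multiply":
        return math.prod(values)
    if op == "Add":
        return sum(values)
    if op == "Power":
        return values[0] ** values[1]
    raise ValueError(f"unsupported operator: {op}")

reply = 'Solution as MathJSON: <mathjson>["Multiply", "Pi", ["Power", 17.2456, 2]]</mathjson>'
print(evaluate(extract_mathjson(reply)))  # ~934.3 (area in cm²)
```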
#### Context obedient question answering

By obedient, I mean the model was trained to ignore what it thinks it knows, and uses the context to answer the question. The model was also tuned to limit the values to the provided context as much as possible to reduce hallucinations.

The format for a closed-context prompt is as follows:

```
BEGININPUT
BEGINCONTEXT
[key0: value0]
[key1: value1]
... other metadata ...
ENDCONTEXT
[insert your text blocks here]
ENDINPUT
[add as many other blocks, in the exact same format]
BEGININSTRUCTION
[insert your instruction(s). The model was tuned with single questions, paragraph format, lists, etc.]
ENDINSTRUCTION
```

It's also helpful to add "Don't make up answers if you don't know." to your instruction block to make sure if the context is completely unrelated it doesn't make something up.

*The __only__ prompts that need this closed-context formatting are closed-context instructions. Normal questions/instructions do not!*

I know it's a bit verbose and annoying, but after much trial and error, using these explicit delimiters helps the model understand where to find the responses and how to associate specific sources with it.

- `BEGININPUT` - denotes a new input block
- `BEGINCONTEXT` - denotes the block of context (metadata key/value pairs) to associate with the current input block
- `ENDCONTEXT` - denotes the end of the metadata block for the current input
- [text] - Insert whatever text you want for the input block, as many paragraphs as can fit in the context.
- `ENDINPUT` - denotes the end of the current input block
- [repeat as many input blocks in this format as you want]
- `BEGININSTRUCTION` - denotes the start of the list (or one) instruction(s) to respond to for all of the input blocks above.
- [instruction(s)]
- `ENDINSTRUCTION` - denotes the end of instruction set

It sometimes works without `ENDINSTRUCTION`, but by explicitly including that in the prompt, the model better understands that all of the instructions in the block should be responded to.

__Use a very low temperature!__

Here's a trivial, but important example to prove the point:

```
BEGININPUT
BEGINCONTEXT
date: 2021-01-01
url: https://web.site/123
ENDCONTEXT
In a shocking turn of events, blueberries are now green, but will be sticking with the same name.
ENDINPUT
BEGININSTRUCTION
What color are blueberries? Source?
ENDINSTRUCTION
```

And the response:

```
Blueberries are now green.
Source:
date: 2021-01-01
url: https://web.site/123
```
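Since the delimiter format above is easy to get subtly wrong by hand, a small helper that assembles a closed-context prompt can be useful. This is a hypothetical convenience function written for this card, not part of the airoboros tooling.

```python
def closed_context_prompt(blocks, instruction):
    """Assemble a closed-context prompt from (metadata, text) pairs.

    `blocks` is a list of (metadata_dict, text) tuples; `instruction` is the
    question to ask over all blocks. Follows the BEGININPUT/BEGININSTRUCTION
    format documented above.
    """
    parts = []
    for metadata, text in blocks:
        meta_lines = "\n".join(f"{k}: {v}" for k, v in metadata.items())
        parts.append(
            f"BEGININPUT\nBEGINCONTEXT\n{meta_lines}\nENDCONTEXT\n{text}\nENDINPUT"
        )
    parts.append(f"BEGININSTRUCTION\n{instruction}\nENDINSTRUCTION")
    return "\n".join(parts)

prompt = closed_context_prompt(
    [({"date": "2021-01-01", "url": "https://web.site/123"},
      "In a shocking turn of events, blueberries are now green, "
      "but will be sticking with the same name.")],
    "What color are blueberries? Source? Don't make up answers if you don't know.",
)
print(prompt)
```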
#### Summarization

500 samples have been included from [this dataset](https://huggingface.co/datasets/mattpscott/airoboros-summarization), using the same format as contextual question answering, for example:

```
BEGININPUT
{text to summarize}
ENDINPUT
BEGININSTRUCTION
Summarize the input in around 130 words.
ENDINSTRUCTION
```

#### Getting longer responses

You can use a few techniques to get longer responses.

Detailed prompts, with explicit instruction for word count:

```
Please compose a narrative set in the heart of an ancient library, steeped in the scent of old parchment and ink. The protagonist should be a young scholar who is dedicated to studying the art of storytelling and its evolution throughout history. In her pursuit of knowledge, she stumbles upon a forgotten tome that seems to possess an unusual aura. This book has the ability to bring stories to life, literally manifesting characters and scenarios from within its pages into reality.

The main character must navigate through various epochs of storytelling - from oral traditions of tribal societies, through medieval minstrels' tales, to modern-day digital narratives - as they come alive around her. Each era presents its unique challenges and lessons about the power and impact of stories on human civilization.

One such character could be a sentient quill pen, who was once used by renowned authors of yesteryears and now holds their wisdom and experiences. It becomes her mentor, guiding her through this journey with witty remarks and insightful commentary.

Ensure that your tale encapsulates the thrill of adventure, the beauty of learning, and the profound connection between humans and their stories. All characters involved should be non-human entities. Feel free to explore creative liberties but maintain the mentioned elements.

Your response should be approximately 2300 words.
```

Or, a simpler example:

```
Please create a long, detailed story about a dragon in an old growth forest who, for some reason, begins speaking the words of the source code of linux.
```

There are a few examples of next chapter completion as well, e.g.:

```
Write the next chapter of a historical fiction novel set in Paris during the 20th century.

Here's a summary of the previous chapter:
In the vibrant city of Paris, amid the tumultuous changes of the 20th century, our protagonist Margot, an aspiring fashion designer, has just secured an apprenticeship at a prestigious couture house. She meets Lucien, a charming journalist who covers the fashion industry. Together they navigate the ever-changing world of fashion and society, uncovering secrets that reveal the intricate links between style, politics, and culture. As the chapter concludes, they decide to delve deeper into the hidden corners of the fashion world to unravel its mysteries.

Requirements for the next chapter:

1. Character Development of Margot and Lucien:
- Margot's Evolution: Unfold more about Margot's past, her dreams of revolutionizing fashion, and her struggle to establish herself in a male-dominated industry. Illustrate her growing expertise, innovative ideas, and increasing dependence on Lucien.
- Lucien's Complexity: Introduce uncertainties surrounding Lucien's background and real motives. Increase suspense by suggesting undisclosed information he possesses, while also highlighting his wit and perceptiveness.

2. Exploration of Paris and the Couture House:
- Paris: Elaborate their journey through the bustling streets of Paris, including encounters with iconic figures, social unrest, and relics from different eras of French history.
- The Couture House: Expand on the grandeur of the couture house they work in, filled with artistic masterpieces, intense competition, and cryptic notes hinting at a scandalous past.

3. Emergence of the Subplot: The Lost Collection:
- Discovery: Have Margot and Lucien stumble upon a secret vault containing a lost collection designed before World War II, raising new questions about the previous owner and the influence of war on fashion.
- Revelation: Capture their shock as they realize the designs were plagiarized, the potential repercussions, and the opportunities it presents for Margot's career.
- Twist: End with a twist that suggests there are other stolen collections across Paris, setting up their new mission.

Your response should be approximately 650 words.
```
#### Getting longer responses

You can use a few techniques to get longer responses.

Detailed prompts, with explicit instruction for word count:
```
Please compose a narrative set in the heart of an ancient library, steeped in the scent of old parchment and ink. The protagonist should be a young scholar who is dedicated to studying the art of storytelling and its evolution throughout history. In her pursuit of knowledge, she stumbles upon a forgotten tome that seems to possess an unusual aura. This book has the ability to bring stories to life, literally manifesting characters and scenarios from within its pages into reality.

The main character must navigate through various epochs of storytelling - from oral traditions of tribal societies, through medieval minstrels' tales, to modern-day digital narratives - as they come alive around her. Each era presents its unique challenges and lessons about the power and impact of stories on human civilization.

One such character could be a sentient quill pen, who was once used by renowned authors of yesteryears and now holds their wisdom and experiences. It becomes her mentor, guiding her through this journey with witty remarks and insightful commentary.

Ensure that your tale encapsulates the thrill of adventure, the beauty of learning, and the profound connection between humans and their stories. All characters involved should be non-human entities. Feel free to explore creative liberties but maintain the mentioned elements. Your response should be approximately 2300 words.
```

Or, a simpler example:
```
Please create a long, detailed story about a dragon in an old growth forest who, for some reason, begins speaking the words of the source code of linux.
```

There are a few examples of next chapter completion as well, e.g.:
```
Write the next chapter of a historical fiction novel set in Paris during the 20th century.

Here's a summary of the previous chapter:
In the vibrant city of Paris, amid the tumultuous changes of the 20th century, our protagonist Margot, an aspiring fashion designer, has just secured an apprenticeship at a prestigious couture house. She meets Lucien, a charming journalist who covers the fashion industry. Together they navigate the ever-changing world of fashion and society, uncovering secrets that reveal the intricate links between style, politics, and culture. As the chapter concludes, they decide to delve deeper into the hidden corners of the fashion world to unravel its mysteries.

Requirements for the next chapter:

1. Character Development of Margot and Lucien:
- Margot's Evolution: Unfold more about Margot's past, her dreams of revolutionizing fashion, and her struggle to establish herself in a male-dominated industry. Illustrate her growing expertise, innovative ideas, and increasing dependence on Lucien.
- Lucien's Complexity: Introduce uncertainties surrounding Lucien's background and real motives. Increase suspense by suggesting undisclosed information he possesses, while also highlighting his wit and perceptiveness.

2. Exploration of Paris and the Couture House:
- Paris: Elaborate their journey through the bustling streets of Paris, including encounters with iconic figures, social unrest, and relics from different eras of French history.
- The Couture House: Expand on the grandeur of the couture house they work in, filled with artistic masterpieces, intense competition, and cryptic notes hinting at a scandalous past.

3. Emergence of the Subplot: The Lost Collection:
- Discovery: Have Margot and Lucien stumble upon a secret vault containing a lost collection designed before World War II, raising new questions about the previous owner and the influence of war on fashion.
- Revelation: Capture their shock as they realize the designs were plagiarized, the potential repercussions, and the opportunities it presents for Margot's career.
- Twist: End with a twist that suggests there are other stolen collections across Paris, setting up their new mission.

Your response should be approximately 650 words.
```

#### Coding

You can ask for fairly complex coding instructions with multiple criteria, e.g.:
```
Create a python application with the following requirements:
- Asyncio FastAPI webserver
- ping endpoint that returns the current date in JSON format
- file upload endpoint, which calculates the file's sha256 checksum, and checks postgres to deduplicate
```

Or inline criteria:
```
Write a multi-threaded TCP server in C that accepts a "GET [key]" input and "SET [key] [value]" input, and uses a binary tree to get and store the input values.
```

You can also optionally add a single space and "PLAINFORMAT" at the end of your prompt to avoid backticks, explanations, etc. and just print the code, e.g.:
```
Write a websocket application in node.js. PLAINFORMAT
```

#### Agent/function calling

The dataset includes many examples of function/args generation based on input criteria. This is somewhat similar to the OpenAI function calling, but the output is either JSON or YAML.

Example prompt:
```
As an AI assistant, please select the most suitable function and parameters from the list of available functions below, based on the user's input. Provide your response in JSON format.

Input: I want to know how many times 'Python' is mentioned in my text file.

Available functions:
file_analytics:
  description: This tool performs various operations on a text file.
  params:
    action: The operation we want to perform on the data, such as "count_occurrences", "find_line", etc.
    filters:
      keyword: The word or phrase we want to search for.
```

Response:
```json
{
  "function": "file_analytics",
  "params": {
    "action": "count_occurrences",
    "filters": {
      "keyword": "Python"
    }
  }
}
```
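Acting on such a response is up to the caller: parse the JSON and dispatch to a local implementation of the selected function. The sketch below shows one way to do that; the `file_analytics` implementation and the `my_text.txt` filename are invented here purely for illustration:

```python
import json


def file_analytics(action, filters):
    """Toy local implementation of the file_analytics tool described above."""
    if action == "count_occurrences":
        with open("my_text.txt") as f:  # hypothetical input file
            return f.read().count(filters["keyword"])
    raise ValueError(f"unsupported action: {action}")


TOOLS = {"file_analytics": file_analytics}

model_response = '{"function": "file_analytics", "params": {"action": "count_occurrences", "filters": {"keyword": "Python"}}}'
call = json.loads(model_response)
result = TOOLS[call["function"]](**call["params"])
print(result)
```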
#### Chain-of-thought

You can ask for several possible responses to a given problem, with a ranking and final answer selection.

Example prompt:
```
A group of people decide to play a round-robin tournament where every player plays every other player exactly once. If a total of 45 games were played, how many players participated in the tournament? Offer several potential answers, rank them according to mathematical logic, and pick the most feasible one.
```

Example response:
```
Solution 1:
In a round-robin tournament, each player plays every other player exactly once. This means that the total number of games played is equal to the combination of all players taken two at a time (since a game involves two players).

The formula for combinations is nC2 = n(n-1)/2, where n is the total number of players. We know that nC2 equals 45 (the total number of games), so we can set up the equation as follows:

n(n-1)/2 = 45

Solving this equation gives us n=10.

Final answer: There were 10 players in the tournament.

Solution 2:
Another way to approach this problem is by using the concept of triangular numbers. Triangular numbers are a sequence of numbers where each number can be represented as a triangle with dots. The nth triangular number is given by the sum of the first n natural numbers. If we consider each game as a dot and each player as a side of the triangle, then the total number of games (45) would correspond to the 9th triangular number because 1+2+3+4+5+6+7+8+9=45. However, since each side of the triangle represents a player, and there's one more player than sides in our model (because the last player has no one left to play against), we need to add one more to get the total number of players.

Final answer: There were 10 players in the tournament.

Solution 3:
We could also use trial and error to solve this problem. Starting from a small number of players, we calculate how many games would be played in a round-robin tournament until we reach 45 games. For example, with 2 players, only 1 game is played; with 3 players, 3 games are played; with 4 players, 6 games are played, and so on. Continuing this process, we find that with 10 players, 45 games are played.

Final answer: There were 10 players in the tournament.

Ranking of solutions from best to worst:
1. Solution 1: This solution uses direct mathematical reasoning based on the properties of a round-robin tournament and solves an equation to find the answer.
2. Solution 2: This solution uses the concept of triangular numbers which requires additional steps and knowledge beyond the immediate problem context, but still arrives at the correct answer.
3. Solution 3: This solution relies on trial and error, which is less efficient and reliable than the other methods, but it eventually gets to the right answer.

Best and final answer: There were 10 players in the tournament.
```

#### reWOO style execution planning

The model now supports execution planning for complex instructions that would require making use of several tools. The output is just the plan, you must implement a mechanism to parse the output and actually call the functions!

Example prompt:
```
Please construct a systematic plan to generate an optimal response to the user instruction, utilizing a set of provided tools. Each plan will correspond to an evidence value, which will be the output of one of the available functions given an input string that could be the user's question, one or more prior evidence values, or a combination of both.

Here are the tools available to be called:
DuckDuckGo[input]: Tool that enables the user to search for information using the DuckDuckGo search engine. This tool is helpful in locating a suitable list of sites that may contain the answer to the user's query. It does not directly answer the question but finds a list of sites that might have the answer.
TextScraper[input]: Load one or more websites from the input string containing newline delimited links, where input is one or more links, and produces plain text output containing the content of the links.
HyperlinkExtractor[input]: Extract links from plain text and produces a plain text, newline delimited response of links.
KnowledgeModel[input]: Question answering language model, particularly useful in answering questions based on an input passage of text. The input must be a text question that references an :evidence[n]: variable, e.g. What color is the cat, given :evidence1:?

The output should be in format:
Plan: [first action to take based on input question]
:evidence0: = [function to call with input parameter]
Plan: [next action to take, based on result of :evidence0:]
:evidence1: = [next function to call and input parameter, which may include reference to previous evidence, e.g. "Given context :evidence0"]
...
Answer: [:evidence[n]: containing the final answer.]

Question: Who won the Nobel Prize in Literature in 2020 and what is their most famous work?
```

Response:
```
Plan: Begin by conducting a web search to find out who won the Nobel Prize in Literature in 2020.
:evidence0: = DuckDuckGo[Nobel Prize in Literature 2020 winner]
Plan: Utilize the KnowledgeModel to interpret the search results and find the name of the laureate.
:evidence1: = KnowledgeModel[Who won the Nobel Prize in Literature in 2020, given :evidence0:?]
Plan: Conduct another web search to find the most famous work of the identified laureate.
:evidence2: = DuckDuckGo[Most famous work of :evidence1:]
Plan: Extract the relevant links from the DuckDuckGo search results for a more focused search.
:evidence3: = HyperlinkExtractor[:evidence2:]
Plan: Use the TextScraper tool to extract information from the relevant links.
:evidence4: = TextScraper[:evidence3:]
Plan: Finally, utilize the KnowledgeModel to identify and summarize the most famous work of the laureate from the extracted information.
:evidence5: = KnowledgeModel[What is the most famous work of :evidence1:, given :evidence4:?]
Answer: :evidence5:
```

For this to be useful, you'd have to parse the output plan text, and implement/call each of the functions. This is just pseudo-code, completely untested off the top of my head, and obviously would require full implementation + hardening:

```python
import re

import requests


def inject_context(input_text, **context):
    # Replace any :evidenceN: references with their previously computed values.
    for ref in set(re.findall(r"(:evidence[0-9]+:)", input_text, re.I)):
        input_text = input_text.replace(ref, context.get(ref, ""))
    return input_text


def duckduckgo(input_text, **context):
    search_string = inject_context(input_text, **context)
    # ... search via DuckDuckGo using search_string, return text content ...
    raise NotImplementedError


def link_extractor(input_text, **context):
    input_text = inject_context(input_text, **context)
    # Naive URL pattern; good enough for a sketch.
    return "\n".join(list(set(re.findall(r"(https?://\S+)", input_text, re.I))))


def scrape(input_text, **context):
    input_text = inject_context(input_text, **context)
    text = []
    for link in input_text.splitlines():
        text.append(requests.get(link).text)
    return "\n".join(text)


def infer(input_text, **context):
    prompt = inject_context(input_text, **context)
    # ... call the model with prompt, return its output ...
    raise NotImplementedError


def parse_plan(plan):
    method_map = {
        "DuckDuckGo": duckduckgo,
        "HyperlinkExtractor": link_extractor,
        "KnowledgeModel": infer,
        "TextScraper": scrape,
    }
    context = {}
    for line in plan.strip().splitlines():
        if line.startswith("Plan:"):
            print(line)
            continue
        parts = re.match(r"^(:evidence[0-9]+:)\s*=\s*([^\[]+)(\[.*\])\s*$", line, re.I)
        if not parts:
            if line.startswith("Answer: "):
                return context.get(line.split(" ")[-1].strip(), "Answer couldn't be generated...")
            raise RuntimeError("bad format: " + line)
        # Strip the surrounding brackets from the argument before dispatching.
        context[parts.group(1)] = method_map[parts.group(2).strip()](parts.group(3)[1:-1], **context)
```
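Once the stubbed tools above have real implementations, the loop could be driven like this (an untested sketch; the plan text just follows the documented format):

```python
plan_text = """Plan: Begin by conducting a web search to find out who won the Nobel Prize in Literature in 2020.
:evidence0: = DuckDuckGo[Nobel Prize in Literature 2020 winner]
Plan: Utilize the KnowledgeModel to interpret the search results and find the name of the laureate.
:evidence1: = KnowledgeModel[Who won the Nobel Prize in Literature in 2020, given :evidence0:?]
Answer: :evidence1:"""

# Prints each Plan: line as it executes, then returns the value of :evidence1:.
print(parse_plan(plan_text))
```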
"Given context :evidence0"] ... Answer: [:evidence[n]: containing the final answer.] Question: Who won the Nobel Prize in Literature in 2020 and what is their most famous work? ``` Response: ``` Plan: Begin by conducting a web search to find out who won the Nobel Prize in Literature in 2020. :evidence0: = DuckDuckGo[Nobel Prize in Literature 2020 winner] Plan: Utilize the KnowledgeModel to interpret the search results and find the name of the laureate. :evidence1: = KnowledgeModel[Who won the Nobel Prize in Literature in 2020, given :evidence0:?] Plan: Conduct another web search to find the most famous work of the identified laureate. :evidence2: = DuckDuckGo[Most famous work of :evidence1:] Plan: Extract the relevant links from the DuckDuckGo search results for a more focused search. :evidence3: = HyperlinkExtractor[:evidence2:] Plan: Use the TextScraper tool to extract information from the relevant links. :evidence4: = TextScraper[:evidence3:] Plan: Finally, utilize the KnowledgeModel to identify and summarize the most famous work of the laureate from the extracted information. :evidence5: = KnowledgeModel[What is the most famous work of :evidence1:, given :evidence4:?] Answer: :evidence5: ``` For this to be useful, you'd have to parse the output plan text, and implement/call each of the functions. This is just pseudo-code, completely untested off the top of my head, and obviously would requiring full implementation + hardening: ```python import re import requests def inject_context(input_text, **context): for ref in set(re.findall(r"(:evidence[0-9]+:)", input_text, re.I)): input_text = input_text.replace(ref, context.get(ref, "")) return input_text def duckduckgo(input_text, **context): search_string = inject_context(input_text, **context) ... search via duck duck go using search_string ... return text content def link_extractor(input_text, **context): input_text = inject_context(input_text, **context) return "\n".join(list(set(re.findall(r"(https?://[^\s]+?\.?)", input_text, re.I)))) def scrape(input_text, **context): input_text = inject_context(input_text, **context) text = [] for link in input_text.splitlines(): text.append(requests.get(link).text) return "\n".join(text) def infer(input_text, **context) prompt = inject_context(input_text, **context) ... call model with prompt, return output def parse_plan(plan): method_map = { "DuckDuckGo": duckduckgo, "HyperlinkExtractor": link_extractor, "KnowledgeModel": infer, "TextScraper": scrape, } context = {} for line in plan.strip().splitlines(): if line.startswith("Plan:"): print(line) continue parts = re.match("^(:evidence[0-9]+:)\s*=\s*([^\[]+])(\[.*\])\s$", line, re.I) if not parts: if line.startswith("Answer: "): return context.get(line.split(" ")[-1].strip(), "Answer couldn't be generated...") raise RuntimeError("bad format: " + line) context[parts.group(1)] = method_map[parts.group(2)](parts.group(3), **context) ``` ## Multi-step instructions with acknowledgement I don't know how useful this is, really, but I thought I'd add it just in case. Example: ``` { "conversations": [ { "value": "You are a helpful, unbiased, uncensored assistant.", "from": "system" }, { "value": "I'm going to set some rules for our conversation. 
Please respond with \"OK\" to each rule.", "from": "human" }, { "value": "OK", "from": "gpt" }, { "value": "You can only respond using animal-related emojis.", "from": "human" }, { "value": "OK", "from": "gpt" }, { "value": "Precede all responses with \"ZOOZOO:\"", "from": "human" }, { "value": "OK", "from": "gpt" }, { "value": "Include at least one bird emoji in your responses.", "from": "human" }, { "value": "OK", "from": "gpt" }, { "value": "Describe a trip to a safari park.", "from": "human" }, { "value": "ZOOZOO: \ud83e\udd81\ud83d\udc2f\ud83e\udd93\ud83e\udd92\ud83d\udc18\ud83d\udc0d\ud83c\udf33\ud83c\udf32\u2600\ufe0f\ud83d\ude90\ud83e\udd85\ud83d\udcf8\ud83e\udd29", "from": "gpt" } ] } ``` ### Contribute If you're interested in new functionality, particularly a new "instructor" type to generate a specific type of training data, take a look at the dataset generation tool repo: https://github.com/jondurbin/airoboros and either make a PR or open an issue with details. To help me with the OpenAI/compute costs: - https://bmc.link/jondurbin - ETH 0xce914eAFC2fe52FdceE59565Dd92c06f776fcb11 - BTC bc1qdwuth4vlg8x37ggntlxu5cjfwgmdy5zaa7pswf ### Licence and usage restrictions The airoboros 3.1 models are built on top of multiple base models, each with their own license/restrictions. The 30b model is built on the original llama, which has a strict non-commercial usage restriction. The models with `-l2` in the name have a custom Meta license: - See the [meta-license/LICENSE.txt](meta-license/LICENSE.txt) file attached for the original license provided by Meta. - See also [meta-license/USE_POLICY.md](meta-license/USE_POLICY.md) and [meta-license/Responsible-Use-Guide.pdf](meta-license/Responsible-Use-Guide.pdf), also provided by Meta. The models with `-m-` are mistral-7b (apache 2.0) The fine-tuning data was mostly generated by OpenAI API calls to gpt-4, via [airoboros](https://github.com/jondurbin/airoboros) The ToS for OpenAI API usage has a clause preventing the output from being used to train a model that __competes__ with OpenAI - what does *compete* actually mean here? - these small open source models will not produce output anywhere near the quality of gpt-4, or even gpt-3.5, so I can't imagine this could credibly be considered competing in the first place - if someone else uses the dataset to do the same, they wouldn't necessarily be violating the ToS because they didn't call the API, so I don't know how that works - the training data used in essentially all large language models includes a significant amount of copyrighted or otherwise non-permissive licensing in the first place - other work using the self-instruct method, e.g. the original here: https://github.com/yizhongw/self-instruct released the data and model as apache-2 I am purposingly leaving this license ambiguous (other than the fact you must comply with the Meta original license for llama-2) because I am not a lawyer and refuse to attempt to interpret all of the terms accordingly. Your best bet is probably to avoid using this commercially due to the OpenAI API usage. Either way, by using this model, you agree to completely indemnify me.
null
Non_BioNLP
{"datasets": ["jondurbin/airoboros-3.1"], "license": "llama2"}
task
[ "QUESTION_ANSWERING", "SUMMARIZATION" ]
45,537
vdos/3cf6e881-58d4-4c83-b040-9d0b8321ff0e
vdos
null
[ "peft", "safetensors", "gemma2", "axolotl", "generated_from_trainer", "base_model:unsloth/gemma-2-2b", "base_model:adapter:unsloth/gemma-2-2b", "license:gemma", "region:us" ]
2024-12-20T00:49:14Z
2024-12-20T00:57:10+00:00
9
0
---
base_model: unsloth/gemma-2-2b
library_name: peft
license: gemma
tags:
- axolotl
- generated_from_trainer
model-index:
- name: 3cf6e881-58d4-4c83-b040-9d0b8321ff0e
  results: []
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

[<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl)
<details><summary>See axolotl config</summary>

axolotl version: `0.5.2`
```yaml
adapter: lora
base_model: unsloth/gemma-2-2b
bf16: auto
chat_template: llama3
dataset_prepared_path: null
datasets:
- data_files:
  - bbe70c53a119531f_train_data.json
  ds_type: json
  format: custom
  path: /workspace/input_data/bbe70c53a119531f_train_data.json
  type:
    field_input: transcription
    field_instruction: glosses
    field_output: translation
    format: '{instruction} {input}'
    no_input_format: '{instruction}'
    system_format: '{system}'
    system_prompt: ''
debug: null
deepspeed: null
early_stopping_patience: 1
eval_max_new_tokens: 128
eval_steps: 25
eval_table_size: null
flash_attention: false
fp16: false
fsdp: null
fsdp_config: null
gradient_accumulation_steps: 16
gradient_checkpointing: true
group_by_length: true
hub_model_id: vdos/3cf6e881-58d4-4c83-b040-9d0b8321ff0e
hub_repo: null
hub_strategy: checkpoint
hub_token: null
learning_rate: 0.0001
load_in_4bit: false
load_in_8bit: false
local_rank: null
logging_steps: 1
lora_alpha: 64
lora_dropout: 0.05
lora_fan_in_fan_out: null
lora_model_dir: null
lora_r: 32
lora_target_linear: true
lr_scheduler: cosine
max_steps: 50
micro_batch_size: 2
mlflow_experiment_name: /tmp/bbe70c53a119531f_train_data.json
model_type: AutoModelForCausalLM
num_epochs: 3
optimizer: adamw_torch
output_dir: miner_id_24
pad_to_sequence_len: true
resume_from_checkpoint: null
s2_attention: null
sample_packing: false
save_steps: 25
sequence_len: 2048
strict: false
tf32: false
tokenizer_type: AutoTokenizer
train_on_inputs: false
trust_remote_code: true
val_set_size: 0.05
wandb_entity: null
wandb_mode: online
wandb_name: 3cf6e881-58d4-4c83-b040-9d0b8321ff0e
wandb_project: Gradients-On-Demand
wandb_run: your_name
wandb_runid: 3cf6e881-58d4-4c83-b040-9d0b8321ff0e
warmup_ratio: 0.05
weight_decay: 0.01
xformers_attention: true
```

</details><br>

# 3cf6e881-58d4-4c83-b040-9d0b8321ff0e

This model is a fine-tuned version of [unsloth/gemma-2-2b](https://huggingface.co/unsloth/gemma-2-2b) on an unspecified dataset.
It achieves the following results on the evaluation set:
- Loss: 1.9679

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 0.0001
- train_batch_size: 2
- eval_batch_size: 2
- seed: 42
- distributed_type: multi-GPU
- num_devices: 4
- gradient_accumulation_steps: 16
- total_train_batch_size: 128
- total_eval_batch_size: 8
- optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments
- lr_scheduler_type: cosine
- lr_scheduler_warmup_steps: 2
- training_steps: 50

### Training results

| Training Loss | Epoch  | Step | Validation Loss |
|:-------------:|:------:|:----:|:---------------:|
| 4.9321        | 0.0128 | 1    | 6.1235          |
| 0.6469        | 0.3195 | 25   | 2.1838          |
| 0.4683        | 0.6390 | 50   | 1.9679          |

### Framework versions

- PEFT 0.13.2
- Transformers 4.46.3
- Pytorch 2.3.1+cu121
- Datasets 3.1.0
- Tokenizers 0.20.3
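The card gives no usage example; a minimal sketch for loading this LoRA adapter with the standard PEFT API might look like the following (untested, and it assumes the adapter weights at the hub id above are compatible with the listed base model):

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the base model, then attach the fine-tuned LoRA adapter on top of it.
base = AutoModelForCausalLM.from_pretrained("unsloth/gemma-2-2b")
model = PeftModel.from_pretrained(base, "vdos/3cf6e881-58d4-4c83-b040-9d0b8321ff0e")
tokenizer = AutoTokenizer.from_pretrained("unsloth/gemma-2-2b")

inputs = tokenizer("Hello", return_tensors="pt")
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=20)[0]))
```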
null
TBD
{"base_model": "unsloth/gemma-2-2b", "library_name": "peft", "license": "gemma", "tags": ["axolotl", "generated_from_trainer"], "model-index": [{"name": "3cf6e881-58d4-4c83-b040-9d0b8321ff0e", "results": []}]}
task
[ "TRANSLATION" ]
45,538
lksai19/Legal-summarizer-models
lksai19
null
[ "safetensors", "license:mit", "region:us" ]
2025-02-06T16:39:20Z
2025-02-22T11:01:05+00:00
0
0
---
license: mit
---

# Legal Document Summarizer

This repository contains fine-tuned models for summarizing legal documents, including extractive and abstractive summarization models based on LegalBERT and BART. The models are designed to handle legal case files and segment them into sections, followed by generating summaries of these sections.

## Models

### 1. **BART Model (Abstractive Summarization)**

This model is fine-tuned to generate **abstractive summaries** of legal judgments. It processes the input document and generates a coherent, concise summary that captures the key points of the judgment. This is particularly useful for creating readable summaries for large legal texts.

### 2. **Extractive Model (LegalBERT)**

This model performs **extractive summarization** by identifying key sentences in a legal case document and extracting them to form a summary. It leverages LegalBERT, fine-tuned specifically for legal text, to classify and extract relevant content such as facts, judgment, and arguments from a case.

### 3. **Classification Model (LegalBERT)**

The classification model is used to categorize sections of the legal document (e.g., **Judgment**, **Analysis**, **Facts**, **Arguments**, **Statute**). By understanding the structure of the document, the model helps in segmenting the text before applying extractive or abstractive summarization techniques.

## Web Application

This repository also provides a **web application** that allows users to upload a legal case file in `.txt` format or paste the document text directly. The application enables users to choose between generating a **whole summary** or a **segmented summary**, using the extractive and abstractive models accordingly.

For the web app code, visit the [GitHub repository](https://github.com/lksai19/Legal-Summarizer).

## Dataset

The models in this repository have been fine-tuned on the dataset used in the following research paper:

**Shukla, A., Bhattacharya, P., Poddar, S., Mukherjee, R., Ghosh, K., Goyal, P., & Ghosh, S. (2022).** *Legal case document summarization: Extractive and abstractive methods and their evaluation.* Available at: [Research Paper Link](https://aclanthology.org/2022.aacl-main.77/)

This dataset consists of annotated legal case documents, where segments such as **Analysis**, **Judgment**, **Facts**, and **Arguments** were manually labeled by legal experts. The paper discusses the methods for extractive and abstractive summarization, providing evaluation metrics for model performance.

## Fine-tuning

For detailed steps on how the models were fine-tuned, including code and configuration, refer to the [GitHub repository of the fine-tuning project](https://github.com/lksai19/Legal-Summarizer).

### Requirements

To run the models locally, install the required dependencies:

```bash
pip install -r requirements.txt
```
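The card describes a segment-then-summarize flow but doesn't show code. A rough sketch of that flow with the `transformers` pipeline API might look like this; the model paths are placeholders (the actual weight locations inside this repository are not documented), and the paragraph-splitting heuristic is an assumption:

```python
from transformers import pipeline

# Placeholder model identifiers -- replace with the real paths to the
# classification and summarization weights from this repository.
classifier = pipeline("text-classification", model="path/to/legalbert-section-classifier")
summarizer = pipeline("summarization", model="path/to/bart-legal-summarizer")

case_document = open("case.txt").read()  # a legal case file in .txt format

# 1) Segment: label each paragraph with its rhetorical role.
sections = {}
for paragraph in case_document.split("\n\n"):
    label = classifier(paragraph)[0]["label"]  # e.g. "Facts", "Judgment", ...
    sections.setdefault(label, []).append(paragraph)

# 2) Summarize each labeled section with the abstractive model.
segmented_summary = {
    label: summarizer(" ".join(parts), max_length=130)[0]["summary_text"]
    for label, parts in sections.items()
}
print(segmented_summary)
```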
null
Non_BioNLP
{"license": "mit"}
task
[ "SUMMARIZATION" ]
45,539
NickyNicky/bge-base-financial-matryoshka_test_0
NickyNicky
sentence-similarity
[ "sentence-transformers", "safetensors", "bert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:6300", "loss:MatryoshkaLoss", "loss:MultipleNegativesRankingLoss", "en", "arxiv:1908.10084", "arxiv:2205.13147", "arxiv:1705.00652", "base_model:BAAI/bge-base-en-v1.5", "base_model:finetune:BAAI/bge-base-en-v1.5", "license:apache-2.0", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-06-30T22:29:42Z
2024-06-30T22:30:12+00:00
49
1
--- base_model: BAAI/bge-base-en-v1.5 datasets: [] language: - en library_name: sentence-transformers license: apache-2.0 metrics: - cosine_accuracy@1 - cosine_accuracy@3 - cosine_accuracy@5 - cosine_accuracy@10 - cosine_precision@1 - cosine_precision@3 - cosine_precision@5 - cosine_precision@10 - cosine_recall@1 - cosine_recall@3 - cosine_recall@5 - cosine_recall@10 - cosine_ndcg@10 - cosine_mrr@10 - cosine_map@100 pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:6300 - loss:MatryoshkaLoss - loss:MultipleNegativesRankingLoss widget: - source_sentence: Item 8 in IBM's 2023 Annual Report to Stockholders details the Financial Statements and Supplementary Data, which are included on pages 44 through 121. sentences: - What was the amount gained from the disposal of assets in 2022? - What section of IBM's Annual Report for 2023 contains the Financial Statements and Supplementary Data? - What were the cash outflows for capital expenditures in 2023 and 2022 respectively? - source_sentence: For the fiscal year ended March 31, 2023, Electronic Arts reported a gross margin of 75.9 percent, an increase of 2.5 percentage points from the previous year. sentences: - How did investment banking revenues at Goldman Sachs change in 2023 compared to 2022, and what factors contributed to this change? - What was the gross margin percentage for Electronic Arts in the fiscal year ending March 31, 2023? - What were the risk-free interest rates for the fiscal years 2021, 2022, and 2023? - source_sentence: Cash, cash equivalents, and restricted cash at the beginning of the period totaled $7,013 for a company. sentences: - What was the amount of cash, cash equivalents, and restricted cash at the beginning of the period for the company? - What is the impact of the new $1.25 price point on Dollar Tree’s sales units and profitability? - What was the total amount attributed to Goodwill in the acquisition of Nuance Communications, Inc. as reported by the company? - source_sentence: generate our mall revenue primarily from leases with tenants through base minimum rents, overage rents and reimbursements for common area maintenance (CAM) and other expenditures. sentences: - How does Visa facilitate financial inclusion with their prepaid cards? - What are the main objectives of the economic sanctions imposed by the United States and other international bodies? - What revenue sources does Shoppes at Venetian primarily rely on from its tenants? - source_sentence: For the fiscal year ended August 26, 2023, we reported net sales of $17.5 billion compared with $16.3 billion for the year ended August 27, 2022, a 7.4% increase from fiscal 2022. This growth was driven primarily by a domestic same store sales increase of 3.4% and net sales of $327.8 million from new domestic and international stores. sentences: - What drove the 7.4% increase in AutoZone's net sales for fiscal 2023 compared to fiscal 2022? - What percentage of HP's external U.S. hires in fiscal year 2023 were racially or ethnically diverse? - How much did GameStop Corp's valuation allowances increase during fiscal 2022? 
model-index: - name: BGE base Financial Matryoshka results: - task: type: information-retrieval name: Information Retrieval dataset: name: dim 768 type: dim_768 metrics: - type: cosine_accuracy@1 value: 0.6985714285714286 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.8271428571428572 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.8628571428571429 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.8985714285714286 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.6985714285714286 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.2757142857142857 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.17257142857142854 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.08985714285714284 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.6985714285714286 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.8271428571428572 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.8628571428571429 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.8985714285714286 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.8023663256793517 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.7712675736961451 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7758522351159084 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 512 type: dim_512 metrics: - type: cosine_accuracy@1 value: 0.69 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.8271428571428572 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.86 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.9028571428571428 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.69 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.2757142857142857 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.17199999999999996 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.09028571428571427 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.69 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.8271428571428572 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.86 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.9028571428571428 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.7998655910794988 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.7665912698412698 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7706925401671437 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 256 type: dim_256 metrics: - type: cosine_accuracy@1 value: 0.6957142857142857 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.8228571428571428 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.86 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.8914285714285715 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.6957142857142857 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.2742857142857143 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.17199999999999996 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.08914285714285713 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.6957142857142857 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.8228571428571428 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.86 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.8914285714285715 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.7974564108711016 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 
0.7669535147392289 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7718155211819018 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 128 type: dim_128 metrics: - type: cosine_accuracy@1 value: 0.6871428571428572 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.8128571428571428 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.8457142857142858 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.8857142857142857 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.6871428571428572 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.27095238095238094 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.16914285714285712 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.08857142857142856 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.6871428571428572 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.8128571428571428 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.8457142857142858 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.8857142857142857 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.787697533881839 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.756192743764172 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7610331995977764 name: Cosine Map@100 - task: type: information-retrieval name: Information Retrieval dataset: name: dim 64 type: dim_64 metrics: - type: cosine_accuracy@1 value: 0.6328571428571429 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.7771428571428571 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.8171428571428572 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.8571428571428571 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.6328571428571429 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.259047619047619 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.16342857142857142 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.08571428571428569 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.6328571428571429 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.7771428571428571 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.8171428571428572 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.8571428571428571 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 0.7482728321357093 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.7131224489795914 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.7189753431460272 name: Cosine Map@100 --- # BGE base Financial Matryoshka This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. 
## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) <!-- at revision a5beb1e3e68b9ab74eb54cfd186867f64f240e1a --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> - **Language:** en - **License:** apache-2.0 ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("NickyNicky/bge-base-financial-matryoshka") # Run inference sentences = [ 'For the fiscal year ended August 26, 2023, we reported net sales of $17.5 billion compared with $16.3 billion for the year ended August 27, 2022, a 7.4% increase from fiscal 2022. This growth was driven primarily by a domestic same store sales increase of 3.4% and net sales of $327.8 million from new domestic and international stores.', "What drove the 7.4% increase in AutoZone's net sales for fiscal 2023 compared to fiscal 2022?", "What percentage of HP's external U.S. hires in fiscal year 2023 were racially or ethnically diverse?", ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset.
<details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Dataset: `dim_768` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.6986 | | cosine_accuracy@3 | 0.8271 | | cosine_accuracy@5 | 0.8629 | | cosine_accuracy@10 | 0.8986 | | cosine_precision@1 | 0.6986 | | cosine_precision@3 | 0.2757 | | cosine_precision@5 | 0.1726 | | cosine_precision@10 | 0.0899 | | cosine_recall@1 | 0.6986 | | cosine_recall@3 | 0.8271 | | cosine_recall@5 | 0.8629 | | cosine_recall@10 | 0.8986 | | cosine_ndcg@10 | 0.8024 | | cosine_mrr@10 | 0.7713 | | **cosine_map@100** | **0.7759** | #### Information Retrieval * Dataset: `dim_512` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.69 | | cosine_accuracy@3 | 0.8271 | | cosine_accuracy@5 | 0.86 | | cosine_accuracy@10 | 0.9029 | | cosine_precision@1 | 0.69 | | cosine_precision@3 | 0.2757 | | cosine_precision@5 | 0.172 | | cosine_precision@10 | 0.0903 | | cosine_recall@1 | 0.69 | | cosine_recall@3 | 0.8271 | | cosine_recall@5 | 0.86 | | cosine_recall@10 | 0.9029 | | cosine_ndcg@10 | 0.7999 | | cosine_mrr@10 | 0.7666 | | **cosine_map@100** | **0.7707** | #### Information Retrieval * Dataset: `dim_256` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.6957 | | cosine_accuracy@3 | 0.8229 | | cosine_accuracy@5 | 0.86 | | cosine_accuracy@10 | 0.8914 | | cosine_precision@1 | 0.6957 | | cosine_precision@3 | 0.2743 | | cosine_precision@5 | 0.172 | | cosine_precision@10 | 0.0891 | | cosine_recall@1 | 0.6957 | | cosine_recall@3 | 0.8229 | | cosine_recall@5 | 0.86 | | cosine_recall@10 | 0.8914 | | cosine_ndcg@10 | 0.7975 | | cosine_mrr@10 | 0.767 | | **cosine_map@100** | **0.7718** | #### Information Retrieval * Dataset: `dim_128` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:----------| | cosine_accuracy@1 | 0.6871 | | cosine_accuracy@3 | 0.8129 | | cosine_accuracy@5 | 0.8457 | | cosine_accuracy@10 | 0.8857 | | cosine_precision@1 | 0.6871 | | cosine_precision@3 | 0.271 | | cosine_precision@5 | 0.1691 | | cosine_precision@10 | 0.0886 | | cosine_recall@1 | 0.6871 | | cosine_recall@3 | 0.8129 | | cosine_recall@5 | 0.8457 | | cosine_recall@10 | 0.8857 | | cosine_ndcg@10 | 0.7877 | | cosine_mrr@10 | 0.7562 | | **cosine_map@100** | **0.761** | #### Information Retrieval * Dataset: `dim_64` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | 
|:--------------------|:----------| | cosine_accuracy@1 | 0.6329 | | cosine_accuracy@3 | 0.7771 | | cosine_accuracy@5 | 0.8171 | | cosine_accuracy@10 | 0.8571 | | cosine_precision@1 | 0.6329 | | cosine_precision@3 | 0.259 | | cosine_precision@5 | 0.1634 | | cosine_precision@10 | 0.0857 | | cosine_recall@1 | 0.6329 | | cosine_recall@3 | 0.7771 | | cosine_recall@5 | 0.8171 | | cosine_recall@10 | 0.8571 | | cosine_ndcg@10 | 0.7483 | | cosine_mrr@10 | 0.7131 | | **cosine_map@100** | **0.719** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 6,300 training samples * Columns: <code>positive</code> and <code>anchor</code> * Approximate statistics based on the first 1000 samples: | | positive | anchor | |:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 2 tokens</li><li>mean: 46.19 tokens</li><li>max: 371 tokens</li></ul> | <ul><li>min: 2 tokens</li><li>mean: 20.39 tokens</li><li>max: 46 tokens</li></ul> | * Samples: | positive | anchor | |:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------| | <code>Cash used in financing activities in fiscal 2022 was primarily attributable to settlement of stock-based awards.</code> | <code>Why was there a net outflow of cash in financing activities in fiscal 2022?</code> | | <code>Certain vendors have been impacted by volatility in the supply chain financing market.</code> | <code>How have certain vendors been impacted in the supply chain financing market?</code> | | <code>In the consolidated financial statements for Visa, the net cash provided by operating activities amounted to 20,755 units in the most recent period, 18,849 units in the previous period, and 15,227 units in the period before that.</code> | <code>How much net cash did Visa's operating activities generate in the most recent period according to the financial statements?</code> | * Loss: [<code>MatryoshkaLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#matryoshkaloss) with these parameters: ```json { "loss": "MultipleNegativesRankingLoss", "matryoshka_dims": [ 768, 512, 256, 128, 64 ], "matryoshka_weights": [ 1, 1, 1, 1, 1 ], "n_dims_per_step": -1 } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: epoch - `per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `gradient_accumulation_steps`: 16 - `learning_rate`: 2e-05 - `num_train_epochs`: 4 - `lr_scheduler_type`: cosine - `warmup_ratio`: 0.1 - `bf16`: True - `tf32`: True - `optim`: adamw_torch_fused - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: epoch - `prediction_loss_only`: True - 
`per_device_train_batch_size`: 32 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 16 - `eval_accumulation_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 4 - `max_steps`: -1 - `lr_scheduler_type`: cosine - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: True - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: True - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch_fused - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | dim_128_cosine_map@100 | dim_256_cosine_map@100 | dim_512_cosine_map@100 | dim_64_cosine_map@100 | dim_768_cosine_map@100 | |:------:|:----:|:-------------:|:----------------------:|:----------------------:|:----------------------:|:---------------------:|:----------------------:| | 0.8122 | 10 | 1.5643 | - | - | - | - | - | | 0.9746 | 12 | - | 0.7349 | 0.7494 | 0.7524 | 
0.6987 | 0.7569 | | 1.6244 | 20 | 0.6756 | - | - | - | - | - | | 1.9492 | 24 | - | 0.7555 | 0.7659 | 0.7683 | 0.7190 | 0.7700 | | 2.4365 | 30 | 0.4561 | - | - | - | - | - | | 2.9239 | 36 | - | 0.7592 | 0.7698 | 0.7698 | 0.7184 | 0.7741 | | 3.2487 | 40 | 0.3645 | - | - | - | - | - | | 3.8985 | 48 | - | 0.7610 | 0.7718 | 0.7707 | 0.7190 | 0.7759 | ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.0.1 - Transformers: 4.41.2 - PyTorch: 2.2.0+cu121 - Accelerate: 0.31.0 - Datasets: 2.19.1 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MatryoshkaLoss ```bibtex @misc{kusupati2024matryoshka, title={Matryoshka Representation Learning}, author={Aditya Kusupati and Gantavya Bhatt and Aniket Rege and Matthew Wallingford and Aditya Sinha and Vivek Ramanujan and William Howard-Snyder and Kaifeng Chen and Sham Kakade and Prateek Jain and Ali Farhadi}, year={2024}, eprint={2205.13147}, archivePrefix={arXiv}, primaryClass={cs.LG} } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
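Because the model was trained with `MatryoshkaLoss` over the dimensions [768, 512, 256, 128, 64], its embeddings can be truncated to any of those sizes with only a modest quality drop (compare the `dim_256` and `dim_768` tables above). The following is a minimal sketch, assuming the `truncate_dim` argument of the Sentence Transformers 3.0.1 release listed under Framework Versions:

```python
from sentence_transformers import SentenceTransformer

# Load the model with output embeddings truncated to 256 dimensions.
# Any of the Matryoshka dimensions (768, 512, 256, 128, 64) should work here.
model = SentenceTransformer(
    "NickyNicky/bge-base-financial-matryoshka",
    truncate_dim=256,
)

sentences = [
    "What drove the 7.4% increase in AutoZone's net sales for fiscal 2023?",
    "How much did GameStop Corp's valuation allowances increase during fiscal 2022?",
]
embeddings = model.encode(sentences)
print(embeddings.shape)  # (2, 256) instead of (2, 768)

# Cosine similarity still works on the truncated vectors.
print(model.similarity(embeddings, embeddings))
```

Smaller dimensions trade a little retrieval quality (see the per-dimension metric tables above) for proportionally smaller index memory and faster similarity search.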
null
Non_BioNLP
{"base_model": "BAAI/bge-base-en-v1.5", "datasets": [], "language": ["en"], "library_name": "sentence-transformers", "license": "apache-2.0", "metrics": ["cosine_accuracy@1", "cosine_accuracy@3", "cosine_accuracy@5", "cosine_accuracy@10", "cosine_precision@1", "cosine_precision@3", "cosine_precision@5", "cosine_precision@10", "cosine_recall@1", "cosine_recall@3", "cosine_recall@5", "cosine_recall@10", "cosine_ndcg@10", "cosine_mrr@10", "cosine_map@100"], "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:6300", "loss:MatryoshkaLoss", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "Item 8 in IBM's 2023 Annual Report to Stockholders details the Financial Statements and Supplementary Data, which are included on pages 44 through 121.", "sentences": ["What was the amount gained from the disposal of assets in 2022?", "What section of IBM's Annual Report for 2023 contains the Financial Statements and Supplementary Data?", "What were the cash outflows for capital expenditures in 2023 and 2022 respectively?"]}, {"source_sentence": "For the fiscal year ended March 31, 2023, Electronic Arts reported a gross margin of 75.9 percent, an increase of 2.5 percentage points from the previous year.", "sentences": ["How did investment banking revenues at Goldman Sachs change in 2023 compared to 2022, and what factors contributed to this change?", "What was the gross margin percentage for Electronic Arts in the fiscal year ending March 31, 2023?", "What were the risk-free interest rates for the fiscal years 2021, 2022, and 2023?"]}, {"source_sentence": "Cash, cash equivalents, and restricted cash at the beginning of the period totaled $7,013 for a company.", "sentences": ["What was the amount of cash, cash equivalents, and restricted cash at the beginning of the period for the company?", "What is the impact of the new $1.25 price point on Dollar Tree’s sales units and profitability?", "What was the total amount attributed to Goodwill in the acquisition of Nuance Communications, Inc. as reported by the company?"]}, {"source_sentence": "generate our mall revenue primarily from leases with tenants through base minimum rents, overage rents and reimbursements for common area maintenance (CAM) and other expenditures.", "sentences": ["How does Visa facilitate financial inclusion with their prepaid cards?", "What are the main objectives of the economic sanctions imposed by the United States and other international bodies?", "What revenue sources does Shoppes at Venetian primarily rely on from its tenants?"]}, {"source_sentence": "For the fiscal year ended August 26, 2023, we reported net sales of $17.5 billion compared with $16.3 billion for the year ended August 27, 2022, a 7.4% increase from fiscal 2022. This growth was driven primarily by a domestic same store sales increase of 3.4% and net sales of $327.8 million from new domestic and international stores.", "sentences": ["What drove the 7.4% increase in AutoZone's net sales for fiscal 2023 compared to fiscal 2022?", "What percentage of HP's external U.S. 
hires in fiscal year 2023 were racially or ethnically diverse?", "How much did GameStop Corp's valuation allowances increase during fiscal 2022?"]}], "model-index": [{"name": "BGE base Financial Matryoshka", "results": [{"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 768", "type": "dim_768"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.6985714285714286, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.8271428571428572, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.8628571428571429, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.8985714285714286, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.6985714285714286, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.2757142857142857, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.17257142857142854, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.08985714285714284, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.6985714285714286, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.8271428571428572, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.8628571428571429, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.8985714285714286, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.8023663256793517, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.7712675736961451, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7758522351159084, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 512", "type": "dim_512"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.69, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.8271428571428572, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.86, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.9028571428571428, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.69, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.2757142857142857, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.17199999999999996, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.09028571428571427, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.69, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.8271428571428572, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.86, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.9028571428571428, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.7998655910794988, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.7665912698412698, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7706925401671437, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 256", "type": "dim_256"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.6957142857142857, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.8228571428571428, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.86, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.8914285714285715, "name": "Cosine Accuracy@10"}, 
{"type": "cosine_precision@1", "value": 0.6957142857142857, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.2742857142857143, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.17199999999999996, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.08914285714285713, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.6957142857142857, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.8228571428571428, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.86, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.8914285714285715, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.7974564108711016, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.7669535147392289, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7718155211819018, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 128", "type": "dim_128"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.6871428571428572, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.8128571428571428, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.8457142857142858, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.8857142857142857, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.6871428571428572, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.27095238095238094, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.16914285714285712, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.08857142857142856, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.6871428571428572, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.8128571428571428, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.8457142857142858, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", "value": 0.8857142857142857, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.787697533881839, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.756192743764172, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7610331995977764, "name": "Cosine Map@100"}]}, {"task": {"type": "information-retrieval", "name": "Information Retrieval"}, "dataset": {"name": "dim 64", "type": "dim_64"}, "metrics": [{"type": "cosine_accuracy@1", "value": 0.6328571428571429, "name": "Cosine Accuracy@1"}, {"type": "cosine_accuracy@3", "value": 0.7771428571428571, "name": "Cosine Accuracy@3"}, {"type": "cosine_accuracy@5", "value": 0.8171428571428572, "name": "Cosine Accuracy@5"}, {"type": "cosine_accuracy@10", "value": 0.8571428571428571, "name": "Cosine Accuracy@10"}, {"type": "cosine_precision@1", "value": 0.6328571428571429, "name": "Cosine Precision@1"}, {"type": "cosine_precision@3", "value": 0.259047619047619, "name": "Cosine Precision@3"}, {"type": "cosine_precision@5", "value": 0.16342857142857142, "name": "Cosine Precision@5"}, {"type": "cosine_precision@10", "value": 0.08571428571428569, "name": "Cosine Precision@10"}, {"type": "cosine_recall@1", "value": 0.6328571428571429, "name": "Cosine Recall@1"}, {"type": "cosine_recall@3", "value": 0.7771428571428571, "name": "Cosine Recall@3"}, {"type": "cosine_recall@5", "value": 0.8171428571428572, "name": "Cosine Recall@5"}, {"type": "cosine_recall@10", 
"value": 0.8571428571428571, "name": "Cosine Recall@10"}, {"type": "cosine_ndcg@10", "value": 0.7482728321357093, "name": "Cosine Ndcg@10"}, {"type": "cosine_mrr@10", "value": 0.7131224489795914, "name": "Cosine Mrr@10"}, {"type": "cosine_map@100", "value": 0.7189753431460272, "name": "Cosine Map@100"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,540
gaudi/opus-mt-fi-pis-ctranslate2
gaudi
translation
[ "transformers", "marian", "ctranslate2", "translation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-07-22T15:53:42Z
2024-10-19T03:51:27+00:00
6
0
--- license: apache-2.0 tags: - ctranslate2 - translation --- # Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-fi-pis) - This repository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Currently supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against the `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and to reproduce these numbers.
## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **The source of the benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-fi-pis).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library. A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints, with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inference performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-fi-pis --output_dir ./ctranslate2/opus-mt-fi-pis-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-fi-pis-ctranslate2 ``` #### Take the Python code below and update the 'model_dir' variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-fi-pis-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda.
) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-fi-pis-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be identical to the [original Hugging Face repository](https://huggingface.co/Helsinki-NLP/opus-mt-fi-pis) by Helsinki-NLP.
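For completeness, here is a hedged CPU-only variant of the ctranslate2 sample above, using the `compute_type=int8` / `device="cpu"` pairing recommended in the checkpoint information. The Finnish sample sentence is an illustrative placeholder, not part of the original card:

```python
from ctranslate2 import Translator
import transformers

model_dir = "./opus-mt-fi-pis-ctranslate2"  # Path to the cloned repository.

translator = Translator(
    model_path=model_dir,
    device="cpu",         # No GPU required.
    inter_threads=1,      # Maximum number of parallel translations.
    intra_threads=4,      # Number of OpenMP threads per translator.
    compute_type="int8",  # Recommended compute type for CPU (see above).
)

tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir)
source = tokenizer.convert_ids_to_tokens(tokenizer.encode("Hyvää huomenta."))  # Placeholder Finnish input.
results = translator.translate_batch([source])
target = results[0].hypotheses[0]
print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target)))
```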
null
Non_BioNLP
{"license": "apache-2.0", "tags": ["ctranslate2", "translation"]}
task
[ "TRANSLATION" ]
45,541
lddczcn/distilbert-base-uncased-finetuned-emotion
lddczcn
text-classification
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-09-07T23:39:00Z
2022-09-08T00:29:20+00:00
12
0
--- datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion args: default metrics: - type: accuracy value: 0.9265 name: Accuracy - type: f1 value: 0.9265519473019482 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2155 - Accuracy: 0.9265 - F1: 0.9266 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 250 | 0.3133 | 0.9075 | 0.9054 | | No log | 2.0 | 500 | 0.2155 | 0.9265 | 0.9266 | ### Framework versions - Transformers 4.13.0 - Pytorch 1.12.1+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
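The card above ships without a usage snippet. As a minimal inference sketch (hedged: this is standard transformers pipeline usage, not code from the model author; whether predictions print as emotion names such as `joy` or as `LABEL_0`..`LABEL_5` depends on the `id2label` mapping saved in the checkpoint's config):

```python
from transformers import pipeline

# Load the fine-tuned emotion classifier from the Hub.
classifier = pipeline(
    "text-classification",
    model="lddczcn/distilbert-base-uncased-finetuned-emotion",
)

# The emotion dataset has six classes: sadness, joy, love, anger, fear, surprise.
print(classifier("I can't wait to see you again!"))
```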
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2155 - Accuracy: 0.9265 - F1: 0.9266 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 250 | 0.3133 | 0.9075 | 0.9054 | | No log | 2.0 | 500 | 0.2155 | 0.9265 | 0.9266 | ### Framework versions - Transformers 4.13.0 - Pytorch 1.12.1+cu113 - Datasets 1.16.1 - Tokenizers 0.10.3
{"datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "args": "default"}, "metrics": [{"type": "accuracy", "value": 0.9265, "name": "Accuracy"}, {"type": "f1", "value": 0.9265519473019482, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,542
HuggingFaceTB/SmolVLM2-2.2B-Instruct
HuggingFaceTB
image-text-to-text
[ "transformers", "safetensors", "smolvlm", "image-text-to-text", "video-text-to-text", "conversational", "en", "dataset:HuggingFaceM4/the_cauldron", "dataset:HuggingFaceM4/Docmatix", "dataset:lmms-lab/LLaVA-OneVision-Data", "dataset:lmms-lab/M4-Instruct-Data", "dataset:HuggingFaceFV/finevideo", "dataset:MAmmoTH-VL/MAmmoTH-VL-Instruct-12M", "dataset:lmms-lab/LLaVA-Video-178K", "dataset:orrzohar/Video-STaR", "dataset:Mutonix/Vript", "dataset:TIGER-Lab/VISTA-400K", "dataset:Enxin/MovieChat-1K_train", "dataset:ShareGPT4Video/ShareGPT4Video", "base_model:HuggingFaceTB/SmolVLM-Instruct", "base_model:finetune:HuggingFaceTB/SmolVLM-Instruct", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2025-02-08T19:18:45Z
2025-04-08T07:25:13+00:00
506,227
112
--- base_model: - HuggingFaceTB/SmolVLM-Instruct datasets: - HuggingFaceM4/the_cauldron - HuggingFaceM4/Docmatix - lmms-lab/LLaVA-OneVision-Data - lmms-lab/M4-Instruct-Data - HuggingFaceFV/finevideo - MAmmoTH-VL/MAmmoTH-VL-Instruct-12M - lmms-lab/LLaVA-Video-178K - orrzohar/Video-STaR - Mutonix/Vript - TIGER-Lab/VISTA-400K - Enxin/MovieChat-1K_train - ShareGPT4Video/ShareGPT4Video language: - en library_name: transformers license: apache-2.0 pipeline_tag: image-text-to-text tags: - video-text-to-text --- <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/SmolVLM2_banner.png" width="800" height="auto" alt="Image description"> # SmolVLM2 2.2B SmolVLM2-2.2B is a lightweight multimodal model designed to analyze video content. The model processes videos, images, and text inputs to generate text outputs - whether answering questions about media files, comparing visual content, or transcribing text from images. Despite its compact size, requiring only 5.2GB of GPU RAM for video inference, it delivers robust performance on complex multimodal tasks. This efficiency makes it particularly well-suited for on-device applications where computational resources may be limited. ## Model Summary - **Developed by:** Hugging Face 🤗 - **Model type:** Multi-modal model (image/multi-image/video/text) - **Language(s) (NLP):** English - **License:** Apache 2.0 - **Architecture:** Based on [Idefics3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) (see technical summary) ## Resources - **Demo:** [Video Highlight Generator](https://huggingface.co/spaces/HuggingFaceTB/SmolVLM2-HighlightGenerator) - **Blog:** [Blog post](https://huggingface.co/blog/smolvlm2) ## Uses SmolVLM2 can be used for inference on multimodal (video / image / text) tasks where the input consists of text queries along with video or one or more images. Text and media files can be interleaved arbitrarily, enabling tasks like captioning, visual question answering, and storytelling based on visual content. The model does not support image or video generation. To fine-tune SmolVLM2 on a specific task, you can follow [the fine-tuning tutorial](https://github.com/huggingface/smollm/blob/main/vision/finetuning/Smol_VLM_FT.ipynb). ## Evaluation ### Vision Evaluation | Model | Mathvista | MMMU | OCRBench | MMStar | AI2D | ChartQA_Test | Science_QA | TextVQA Val | DocVQA Val | |-------------------|-----------|-------|----------|--------|------|--------------|------------|-------------|------------| | **SmolVLM2 2.2B** | 51.5 | 42 | 72.9 | 46 | 70 | 68.84 | 90 | 73.21 | 79.98 | | SmolVLM 2.2B | 43.9 | 38.3 | 65.5 | 41.8 | 84.5 | 71.6 | 84.5 | 72.1 | 79.7 | ### Video Evaluation We evaluated the performance of the SmolVLM2 family on the following scientific benchmarks: | Size | Video-MME | MLVU | MVBench | |----------|-----------------|----------|---------------| | 2.2B | 52.1 | 55.2 | 46.27 | | 500M | 42.2 | 47.3 | 39.73 | | 256M | 33.7 | 40.6 | 32.7 | ### How to get started You can use transformers to load, run inference with, and fine-tune SmolVLM2. Make sure you have num2words, flash-attn, and the latest version of transformers installed. You can load the model as follows. 
```python from transformers import AutoProcessor, AutoModelForImageTextToText import torch model_path = "HuggingFaceTB/SmolVLM2-2.2B-Instruct" processor = AutoProcessor.from_pretrained(model_path) model = AutoModelForImageTextToText.from_pretrained( model_path, torch_dtype=torch.bfloat16, _attn_implementation="flash_attention_2" ).to("cuda") ``` #### Simple Inference You can preprocess your inputs with chat templates and pass them directly to the model: ```python messages = [ { "role": "user", "content": [ {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"}, {"type": "text", "text": "Can you describe this image?"}, ] }, ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ).to(model.device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64) generated_texts = processor.batch_decode( generated_ids, skip_special_tokens=True, ) print(generated_texts[0]) ``` #### Video Inference To use SmolVLM2 for video inference, make sure you have decord installed. ```python messages = [ { "role": "user", "content": [ {"type": "video", "path": "path_to_video.mp4"}, {"type": "text", "text": "Describe this video in detail"} ] }, ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ).to(model.device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64) generated_texts = processor.batch_decode( generated_ids, skip_special_tokens=True, ) print(generated_texts[0]) ``` #### Multi-image Interleaved Inference You can interleave multiple media with text using chat templates. ```python import torch messages = [ { "role": "user", "content": [ {"type": "text", "text": "What is the similarity between these two images?"}, {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"}, {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"}, ] }, ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ).to(model.device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64) generated_texts = processor.batch_decode( generated_ids, skip_special_tokens=True, ) print(generated_texts[0]) ``` ### Model optimizations ## Misuse and Out-of-scope Use SmolVLM is not intended for high-stakes scenarios or critical decision-making processes that affect an individual's well-being or livelihood. The model may produce content that appears factual but may not be accurate. Misuse includes, but is not limited to: - Prohibited Uses: - Evaluating or scoring individuals (e.g., in employment, education, credit) - Critical automated decision-making - Generating unreliable factual content - Malicious Activities: - Spam generation - Disinformation campaigns - Harassment or abuse - Unauthorized surveillance ### License SmolVLM2 is built upon [the shape-optimized SigLIP](https://huggingface.co/google/siglip-so400m-patch14-384) as the image encoder and [SmolLM2](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct) as the text decoder. We release the SmolVLM2 checkpoints under the Apache 2.0 license. 
## Citation information You can cite us in the following way: ```bibtex @unpublished{marafioti2025smolvlm, title = {SmolVLM: Redefining small and efficient multimodal models}, author = {Marafioti, Andr\'{e}s and Zohar, Orr and Farr\'{e}, Miquel and Noyan, Merve and Bakouch, Elie and Cuenca, Pedro and Zakka, Cyril and Ben Allal, Loubna and Lozhkov, Anton and Tazi, Nouamane and Srivastav, Vaibhav and Lochner, Joshua and Larcher, Hugo and Morlon, Mathieu and Tunstall, Lewis and von Werra, Leandro and Wolf, Thomas}, year = {2025}, } ``` ## Training Data SmolVLM2 used 3.3M samples for training originally from ten different datasets: [LlaVa Onevision](https://huggingface.co/datasets/lmms-lab/LLaVA-OneVision-Data), [M4-Instruct](https://huggingface.co/datasets/lmms-lab/M4-Instruct-Data), [Mammoth](https://huggingface.co/datasets/MAmmoTH-VL/MAmmoTH-VL-Instruct-12M), [LlaVa Video 178K](https://huggingface.co/datasets/lmms-lab/LLaVA-Video-178K), [FineVideo](https://huggingface.co/datasets/HuggingFaceFV/finevideo), [VideoStar](https://huggingface.co/datasets/orrzohar/Video-STaR), [VRipt](https://huggingface.co/datasets/Mutonix/Vript), [Vista-400K](https://huggingface.co/datasets/TIGER-Lab/VISTA-400K), [MovieChat](https://huggingface.co/datasets/Enxin/MovieChat-1K_train) and [ShareGPT4Video](https://huggingface.co/datasets/ShareGPT4Video/ShareGPT4Video). In the following tables we give a general overview of the samples across modalities and the source of those samples. <!-- <center><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolvlm2_data_split.png" width="auto" height="auto" alt="Image description"> </center> ### Details <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolvlm2_datadetails.png" width="auto" height="auto" alt="Image description"> --> ## Data Split per modality | Data Type | Percentage | |--------------|------------| | Image | 34.4% | | Text | 20.2% | | Video | 33.0% | | Multi-image | 12.3% | ## Granular dataset slices per modality ### Text Datasets | Dataset | Percentage | |--------------------------------------------|------------| | llava-onevision/magpie_pro_ft3_80b_mt | 6.8% | | llava-onevision/magpie_pro_ft3_80b_tt | 6.8% | | llava-onevision/magpie_pro_qwen2_72b_tt | 5.8% | | llava-onevision/mathqa | 0.9% | ### Multi-image Datasets | Dataset | Percentage | |--------------------------------------------|------------| | m4-instruct-data/m4_instruct_multiimage | 10.4% | | mammoth/multiimage-cap6 | 1.9% | ### Image Datasets | Dataset | Percentage | |--------------------------------------------|------------| | llava-onevision/other | 17.4% | | llava-onevision/vision_flan | 3.9% | | llava-onevision/mavis_math_metagen | 2.6% | | llava-onevision/mavis_math_rule_geo | 2.5% | | llava-onevision/sharegpt4o | 1.7% | | llava-onevision/sharegpt4v_coco | 1.5% | | llava-onevision/image_textualization | 1.3% | | llava-onevision/sharegpt4v_llava | 0.9% | | llava-onevision/mapqa | 0.9% | | llava-onevision/qa | 0.8% | | llava-onevision/textocr | 0.8% | ### Video Datasets | Dataset | Percentage | |--------------------------------------------|------------| | llava-video-178k/1-2m | 7.3% | | llava-video-178k/2-3m | 7.0% | | other-video/combined | 5.7% | | llava-video-178k/hound | 4.4% | | llava-video-178k/0-30s | 2.4% | | video-star/starb | 2.2% | | vista-400k/combined | 2.2% | | vript/long | 1.0% | | ShareGPT4Video/all | 0.8% |
null
Non_BioNLP
<img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/SmolVLM2_banner.png" width="800" height="auto" alt="Image description"> # SmolVLM2 2.2B SmolVLM2-2.2B is a lightweight multimodal model designed to analyze video content. The model processes videos, images, and text inputs to generate text outputs - whether answering questions about media files, comparing visual content, or transcribing text from images. Despite its compact size, requiring only 5.2GB of GPU RAM for video inference, it delivers robust performance on complex multimodal tasks. This efficiency makes it particularly well-suited for on-device applications where computational resources may be limited. ## Model Summary - **Developed by:** Hugging Face 🤗 - **Model type:** Multi-modal model (image/multi-image/video/text) - **Language(s) (NLP):** English - **License:** Apache 2.0 - **Architecture:** Based on [Idefics3](https://huggingface.co/HuggingFaceM4/Idefics3-8B-Llama3) (see technical summary) ## Resources - **Demo:** [Video Highlight Generator](https://huggingface.co/spaces/HuggingFaceTB/SmolVLM2-HighlightGenerator) - **Blog:** [Blog post](https://huggingface.co/blog/smolvlm2) ## Uses SmolVLM2 can be used for inference on multimodal (video / image / text) tasks where the input consists of text queries along with video or one or more images. Text and media files can be interleaved arbitrarily, enabling tasks like captioning, visual question answering, and storytelling based on visual content. The model does not support image or video generation. To fine-tune SmolVLM2 on a specific task, you can follow [the fine-tuning tutorial](https://github.com/huggingface/smollm/blob/main/vision/finetuning/Smol_VLM_FT.ipynb). ## Evaluation ### Vision Evaluation | Model | Mathvista | MMMU | OCRBench | MMStar | AI2D | ChartQA_Test | Science_QA | TextVQA Val | DocVQA Val | |-------------------|-----------|-------|----------|--------|------|--------------|------------|-------------|------------| | **SmolVLM2 2.2B** | 51.5 | 42 | 72.9 | 46 | 70 | 68.84 | 90 | 73.21 | 79.98 | | SmolVLM 2.2B | 43.9 | 38.3 | 65.5 | 41.8 | 84.5 | 71.6 | 84.5 | 72.1 | 79.7 | ### Video Evaluation We evaluated the performance of the SmolVLM2 family on the following scientific benchmarks: | Size | Video-MME | MLVU | MVBench | |----------|-----------------|----------|---------------| | 2.2B | 52.1 | 55.2 | 46.27 | | 500M | 42.2 | 47.3 | 39.73 | | 256M | 33.7 | 40.6 | 32.7 | ### How to get started You can use transformers to load, infer and fine-tune SmolVLM. Make sure you have num2words, flash-attn and latest transformers installed. You can load the model as follows. 
```python from transformers import AutoProcessor, AutoModelForImageTextToText import torch model_path = "HuggingFaceTB/SmolVLM2-2.2B-Instruct" processor = AutoProcessor.from_pretrained(model_path) model = AutoModelForImageTextToText.from_pretrained( model_path, torch_dtype=torch.bfloat16, _attn_implementation="flash_attention_2" ).to("cuda") ``` #### Simple Inference You can preprocess your inputs with chat templates and pass them directly to the model: ```python messages = [ { "role": "user", "content": [ {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"}, {"type": "text", "text": "Can you describe this image?"}, ] }, ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ).to(model.device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64) generated_texts = processor.batch_decode( generated_ids, skip_special_tokens=True, ) print(generated_texts[0]) ``` #### Video Inference To use SmolVLM2 for video inference, make sure you have decord installed. ```python messages = [ { "role": "user", "content": [ {"type": "video", "path": "path_to_video.mp4"}, {"type": "text", "text": "Describe this video in detail"} ] }, ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ).to(model.device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64) generated_texts = processor.batch_decode( generated_ids, skip_special_tokens=True, ) print(generated_texts[0]) ``` #### Multi-image Interleaved Inference You can interleave multiple media with text using chat templates. ```python import torch messages = [ { "role": "user", "content": [ {"type": "text", "text": "What is the similarity between these two images?"}, {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"}, {"type": "image", "url": "https://huggingface.co/datasets/huggingface/documentation-images/resolve/0052a70beed5bf71b92610a43a52df6d286cd5f3/diffusers/rabbit.jpg"}, ] }, ] inputs = processor.apply_chat_template( messages, add_generation_prompt=True, tokenize=True, return_dict=True, return_tensors="pt", ).to(model.device, dtype=torch.bfloat16) generated_ids = model.generate(**inputs, do_sample=False, max_new_tokens=64) generated_texts = processor.batch_decode( generated_ids, skip_special_tokens=True, ) print(generated_texts[0]) ``` ### Model optimizations ## Misuse and Out-of-scope Use SmolVLM is not intended for high-stakes scenarios or critical decision-making processes that affect an individual's well-being or livelihood. The model may produce content that appears factual but may not be accurate. Misuse includes, but is not limited to: - Prohibited Uses: - Evaluating or scoring individuals (e.g., in employment, education, credit) - Critical automated decision-making - Generating unreliable factual content - Malicious Activities: - Spam generation - Disinformation campaigns - Harassment or abuse - Unauthorized surveillance ### License SmolVLM2 is built upon [the shape-optimized SigLIP](https://huggingface.co/google/siglip-so400m-patch14-384) as the image encoder and [SmolLM2](https://huggingface.co/HuggingFaceTB/SmolLM2-1.7B-Instruct) as the text decoder. We release the SmolVLM2 checkpoints under the Apache 2.0 license. 
## Citation information You can cite us in the following way: ```bibtex @article{marafioti2025smolvlm, title={SmolVLM: Redefining small and efficient multimodal models}, author={Andrés Marafioti and Orr Zohar and Miquel Farré and Merve Noyan and Elie Bakouch and Pedro Cuenca and Cyril Zakka and Loubna Ben Allal and Anton Lozhkov and Nouamane Tazi and Vaibhav Srivastav and Joshua Lochner and Hugo Larcher and Mathieu Morlon and Lewis Tunstall and Leandro von Werra and Thomas Wolf}, journal={arXiv preprint arXiv:2504.05299}, year={2025} } ``` ## Training Data SmolVLM2 used 3.3M samples for training originally from ten different datasets: [LlaVa Onevision](https://huggingface.co/datasets/lmms-lab/LLaVA-OneVision-Data), [M4-Instruct](https://huggingface.co/datasets/lmms-lab/M4-Instruct-Data), [Mammoth](https://huggingface.co/datasets/MAmmoTH-VL/MAmmoTH-VL-Instruct-12M), [LlaVa Video 178K](https://huggingface.co/datasets/lmms-lab/LLaVA-Video-178K), [FineVideo](https://huggingface.co/datasets/HuggingFaceFV/finevideo), [VideoStar](https://huggingface.co/datasets/orrzohar/Video-STaR), [VRipt](https://huggingface.co/datasets/Mutonix/Vript), [Vista-400K](https://huggingface.co/datasets/TIGER-Lab/VISTA-400K), [MovieChat](https://huggingface.co/datasets/Enxin/MovieChat-1K_train) and [ShareGPT4Video](https://huggingface.co/datasets/ShareGPT4Video/ShareGPT4Video). In the following tables we give a general overview of the samples across modalities and the source of those samples. <!-- <center><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolvlm2_data_split.png" width="auto" height="auto" alt="Image description"> </center> ### Details <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/smolvlm2_datadetails.png" width="auto" height="auto" alt="Image description"> --> ## Data Split per modality | Data Type | Percentage | |--------------|------------| | Image | 34.4% | | Text | 20.2% | | Video | 33.0% | | Multi-image | 12.3% | ## Granular dataset slices per modality ### Text Datasets | Dataset | Percentage | |--------------------------------------------|------------| | llava-onevision/magpie_pro_ft3_80b_mt | 6.8% | | llava-onevision/magpie_pro_ft3_80b_tt | 6.8% | | llava-onevision/magpie_pro_qwen2_72b_tt | 5.8% | | llava-onevision/mathqa | 0.9% | ### Multi-image Datasets | Dataset | Percentage | |--------------------------------------------|------------| | m4-instruct-data/m4_instruct_multiimage | 10.4% | | mammoth/multiimage-cap6 | 1.9% | ### Image Datasets | Dataset | Percentage | |--------------------------------------------|------------| | llava-onevision/other | 17.4% | | llava-onevision/vision_flan | 3.9% | | llava-onevision/mavis_math_metagen | 2.6% | | llava-onevision/mavis_math_rule_geo | 2.5% | | llava-onevision/sharegpt4o | 1.7% | | llava-onevision/sharegpt4v_coco | 1.5% | | llava-onevision/image_textualization | 1.3% | | llava-onevision/sharegpt4v_llava | 0.9% | | llava-onevision/mapqa | 0.9% | | llava-onevision/qa | 0.8% | | llava-onevision/textocr | 0.8% | ### Video Datasets | Dataset | Percentage | |--------------------------------------------|------------| | llava-video-178k/1-2m | 7.3% | | llava-video-178k/2-3m | 7.0% | | other-video/combined | 5.7% | | llava-video-178k/hound | 4.4% | | llava-video-178k/0-30s | 2.4% | | video-star/starb | 2.2% | | vista-400k/combined | 2.2% | | vript/long | 1.0% | | ShareGPT4Video/all | 0.8% |
{"base_model": ["HuggingFaceTB/SmolVLM-Instruct"], "datasets": ["HuggingFaceM4/the_cauldron", "HuggingFaceM4/Docmatix", "lmms-lab/LLaVA-OneVision-Data", "lmms-lab/M4-Instruct-Data", "HuggingFaceFV/finevideo", "MAmmoTH-VL/MAmmoTH-VL-Instruct-12M", "lmms-lab/LLaVA-Video-178K", "orrzohar/Video-STaR", "Mutonix/Vript", "TIGER-Lab/VISTA-400K", "Enxin/MovieChat-1K_train", "ShareGPT4Video/ShareGPT4Video"], "language": ["en"], "library_name": "transformers", "license": "apache-2.0", "pipeline_tag": "image-text-to-text", "tags": ["video-text-to-text"]}
task
[ "QUESTION_ANSWERING" ]
45,543
learningmachineaz/mt5-enaz-10m
learningmachineaz
translation
[ "transformers", "pytorch", "mt5", "text2text-generation", "machine-translation", "english", "azerbaijani", "translation", "en", "az", "dataset:learningmachineaz/translate_enaz_10m", "license:cc-by-nc-sa-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-04-06T06:42:36Z
2023-04-07T16:05:23+00:00
19
1
--- datasets: - learningmachineaz/translate_enaz_10m language: - en - az license: cc-by-nc-sa-4.0 pipeline_tag: translation tags: - machine-translation - mt5 - english - azerbaijani widget: - text: Artificial intelligence is already superior to human learning in numerous domains. - text: Learn as if you will live forever, live like you will die tomorrow. - text: When you change your thoughts, remember to also change your world. inference: parameters: max_length: 128 num_return_sequences: 1 do_sample: false --- # Machine Translation (Maşın tərcüməsi) This is the most advanced and accurate mT5-based machine translation model currently available for the Azerbaijani language.\ The model was trained on 10 million sentences extracted from various text sources of the Azerbaijan National Library.\ Translation quality is very close to that of Google Translate, which was used to produce the English translations. ## Text above translated using this model ``` Bu, Azərbaycan dilinə olduğu kimi, maşın tərcüməsi üçün ən qabaqcıl və dəqiq mT5 əsaslı modeldir. Model Azərbaycan Milli Kitabxanasının müxtəlif mətn mənbələrindən çıxarılan 10 milyon cümlə üzrə təlim keçib. Tərcümə keyfiyyəti ingilis dilinə tərcümələr üçün istifadə olunduğundan Google Tərcümə ilə çox yaxındır. ``` ## Training | Key point | Info | |-------------------------|---------| | Base model | mT5-base | | Batch size | 16 | | Epochs | 10 | | Steps | 620k | | Training Loss | 0.56 | | Eval Loss | 0.53 | | Training Duration | 2 days | ## Here is an example of how you can run inference: ```python from transformers import MT5Tokenizer, MT5ForConditionalGeneration model_name = 'learningmachineaz/mt5-enaz-10m' max_length = 128 tokenizer = MT5Tokenizer.from_pretrained(model_name) model = MT5ForConditionalGeneration.from_pretrained(model_name) text = "Artificial intelligence is already superior to human learning in numerous domains." input_ids = tokenizer(f'translate English to Azerbaijani: {text}', return_tensors="pt").input_ids # OPTION 1 - SINGLE TRANSLATION outputs = model.generate(input_ids, max_length=max_length, do_sample=False, num_return_sequences=1) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) # OPTION 2 - MULTIPLE VARIATIONS outputs = model.generate(input_ids, max_length=max_length, do_sample=True, top_k=10, num_return_sequences=3) for i, output in enumerate(outputs): print(tokenizer.decode(output, skip_special_tokens=True)) ``` OPTION 1 - OUTPUT: ``` Süni intellekt artıq çoxsaylı domenlərdə insanın öyrənilməsindən üstünlük təşkil edir. ``` OPTION 2 - OUTPUT: ``` Artıq çoxsaylı domenlərdə süni zəka insanın öyrənilməsindən daha üstün olması şərti ilə müşahidə edilir. Süni intellekt artıq çoxsaylı oblastlarda insanın təlimindən yüksəkdir. Süni intellekt artıq çoxsaylı domenlərdə insan öyrənməsindən daha üstün gəlir. ``` ## Author Trained and evaluated by [Renat Kalimulin](https://www.linkedin.com/in/rinat-kalimulin-16853358/)
null
Non_BioNLP
# Machine Translation (Maşın tərcüməsi) This is the most advanced and accurate mT5-based machine translation model currently available for the Azerbaijani language.\ The model was trained on 10 million sentences extracted from various text sources of the Azerbaijan National Library.\ Translation quality is very close to that of Google Translate, which was used to produce the English translations. ## Text above translated using this model ``` Bu, Azərbaycan dilinə olduğu kimi, maşın tərcüməsi üçün ən qabaqcıl və dəqiq mT5 əsaslı modeldir. Model Azərbaycan Milli Kitabxanasının müxtəlif mətn mənbələrindən çıxarılan 10 milyon cümlə üzrə təlim keçib. Tərcümə keyfiyyəti ingilis dilinə tərcümələr üçün istifadə olunduğundan Google Tərcümə ilə çox yaxındır. ``` ## Training | Key point | Info | |-------------------------|---------| | Base model | mT5-base | | Batch size | 16 | | Epochs | 10 | | Steps | 620k | | Training Loss | 0.56 | | Eval Loss | 0.53 | | Training Duration | 2 days | ## Here is an example of how you can run inference: ```python from transformers import MT5Tokenizer, MT5ForConditionalGeneration model_name = 'learningmachineaz/mt5-enaz-10m' max_length = 128 tokenizer = MT5Tokenizer.from_pretrained(model_name) model = MT5ForConditionalGeneration.from_pretrained(model_name) text = "Artificial intelligence is already superior to human learning in numerous domains." input_ids = tokenizer(f'translate English to Azerbaijani: {text}', return_tensors="pt").input_ids # OPTION 1 - SINGLE TRANSLATION outputs = model.generate(input_ids, max_length=max_length, do_sample=False, num_return_sequences=1) print(tokenizer.decode(outputs[0], skip_special_tokens=True)) # OPTION 2 - MULTIPLE VARIATIONS outputs = model.generate(input_ids, max_length=max_length, do_sample=True, top_k=10, num_return_sequences=3) for i, output in enumerate(outputs): print(tokenizer.decode(output, skip_special_tokens=True)) ``` OPTION 1 - OUTPUT: ``` Süni intellekt artıq çoxsaylı domenlərdə insanın öyrənilməsindən üstünlük təşkil edir. ``` OPTION 2 - OUTPUT: ``` Artıq çoxsaylı domenlərdə süni zəka insanın öyrənilməsindən daha üstün olması şərti ilə müşahidə edilir. Süni intellekt artıq çoxsaylı oblastlarda insanın təlimindən yüksəkdir. Süni intellekt artıq çoxsaylı domenlərdə insan öyrənməsindən daha üstün gəlir. ``` ## Author Trained and evaluated by [Renat Kalimulin](https://www.linkedin.com/in/rinat-kalimulin-16853358/)
{"datasets": ["learningmachineaz/translate_enaz_10m"], "language": ["en", "az"], "license": "cc-by-nc-sa-4.0", "pipeline_tag": "translation", "tags": ["machine-translation", "mt5", "english", "azerbaijani"], "widget": [{"text": "Artificial intelligence is already superior to human learning in numerous domains."}, {"text": "Learn as if you will live forever, live like you will die tomorrow."}, {"text": "When you change your thoughts, remember to also change your world."}], "inference": {"parameters": {"max_length": 128, "num_return_sequences": 1, "do_sample": false}}}
task
[ "TRANSLATION" ]
45,545
mbazaNLP/Nllb_finetuned_tourism_en_kin
mbazaNLP
text2text-generation
[ "transformers", "pytorch", "m2m_100", "text2text-generation", "en", "rw", "dataset:mbazaNLP/NMT_Tourism_parallel_data_en_kin", "dataset:mbazaNLP/NMT_Education_parallel_data_en_kin", "dataset:mbazaNLP/Kinyarwanda_English_parallel_dataset", "license:cc-by-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-30T23:59:40Z
2023-09-22T14:13:33+00:00
0
0
--- datasets: - mbazaNLP/NMT_Tourism_parallel_data_en_kin - mbazaNLP/NMT_Education_parallel_data_en_kin - mbazaNLP/Kinyarwanda_English_parallel_dataset language: - en - rw library_name: transformers license: cc-by-2.0 --- ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is a machine translation model finetuned from [NLLB](https://huggingface.co/facebook/nllb-200-distilled-1.3B)-200's distilled 1.3B checkpoint. It is meant to be used for machine translation of tourism-related data. - **Finetuning code repository:** the code used to finetune this model can be found [here](https://github.com/Digital-Umuganda/twb_nllb_finetuning) <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ## How to Get Started with the Model Use the code below to get started with the model. ### Training Procedure The model was finetuned on three datasets: a [general](https://huggingface.co/datasets/mbazaNLP/Kinyarwanda_English_parallel_dataset)-purpose dataset, a [tourism](https://huggingface.co/datasets/mbazaNLP/NMT_Tourism_parallel_data_en_kin) dataset, and an [education](https://huggingface.co/datasets/mbazaNLP/NMT_Education_parallel_data_en_kin) dataset. The model was finetuned in two phases. #### Phase one: - General purpose dataset - Education dataset - Tourism dataset #### Phase two: - Tourism dataset Other than the change of datasets between phase one and phase two, no other hyperparameters were modified. In both cases, the model was trained on an A100 40GB GPU for two epochs. ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> <!-- This should link to a Data Card if possible. --> #### Metrics Model performance was measured using BLEU, spBLEU, TER, and chrF++ metrics. ### Results |Lang. Direction| BLEU | spBLEU | chrF++ |TER | |:----|:----:|:----:|:----:|----:| | Eng -> Kin | 28.37 | 40.62 | 56.48 | 59.71 | | Kin -> Eng | 42.54 | 44.84 | 61.54 | 43.87 | <!-- [More Information Needed] -->
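The "How to Get Started" section above refers to code that is not actually included in the card. A minimal sketch, assuming the standard NLLB-200 tokenizer language codes (`eng_Latn` for English, `kin_Latn` for Kinyarwanda) and the usual transformers seq2seq API; the example sentence is hypothetical:

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "mbazaNLP/Nllb_finetuned_tourism_en_kin"

# NLLB-style tokenizers take the source language code via src_lang.
tokenizer = AutoTokenizer.from_pretrained(model_name, src_lang="eng_Latn")
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

text = "Welcome to Volcanoes National Park."
inputs = tokenizer(text, return_tensors="pt")

# Force the decoder to start generating in Kinyarwanda (kin_Latn).
generated = model.generate(
    **inputs,
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("kin_Latn"),
    max_length=128,
)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```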
null
Non_BioNLP
## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is a machine translation model finetuned from [NLLB](https://huggingface.co/facebook/nllb-200-distilled-1.3B)-200's distilled 1.3B checkpoint. It is meant to be used for machine translation of tourism-related data. - **Finetuning code repository:** the code used to finetune this model can be found [here](https://github.com/Digital-Umuganda/twb_nllb_finetuning) <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> ## How to Get Started with the Model Use the code below to get started with the model. ### Training Procedure The model was finetuned on three datasets: a [general](https://huggingface.co/datasets/mbazaNLP/Kinyarwanda_English_parallel_dataset)-purpose dataset, a [tourism](https://huggingface.co/datasets/mbazaNLP/NMT_Tourism_parallel_data_en_kin) dataset, and an [education](https://huggingface.co/datasets/mbazaNLP/NMT_Education_parallel_data_en_kin) dataset. The model was finetuned in two phases. #### Phase one: - General purpose dataset - Education dataset - Tourism dataset #### Phase two: - Tourism dataset Other than the change of datasets between phase one and phase two, no other hyperparameters were modified. In both cases, the model was trained on an A100 40GB GPU for two epochs. ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> <!-- This should link to a Data Card if possible. --> #### Metrics Model performance was measured using BLEU, spBLEU, TER, and chrF++ metrics. ### Results |Lang. Direction| BLEU | spBLEU | chrF++ |TER | |:----|:----:|:----:|:----:|----:| | Eng -> Kin | 28.37 | 40.62 | 56.48 | 59.71 | | Kin -> Eng | 42.54 | 44.84 | 61.54 | 43.87 | <!-- [More Information Needed] -->
{"datasets": ["mbazaNLP/NMT_Tourism_parallel_data_en_kin", "mbazaNLP/NMT_Education_parallel_data_en_kin", "mbazaNLP/Kinyarwanda_English_parallel_dataset"], "language": ["en", "rw"], "library_name": "transformers", "license": "cc-by-2.0"}
task
[ "TRANSLATION" ]
45,546
giuid/flan_t5_large_summarization_v2
giuid
text2text-generation
[ "safetensors", "t5", "summarization", "flan-t5", "legal", "food", "text2text-generation", "en", "dataset:efra", "license:apache-2.0", "region:us" ]
2025-01-16T14:02:14Z
2025-01-16T14:08:12+00:00
13
0
--- datasets: - efra language: en license: apache-2.0 pipeline_tag: text2text-generation tags: - summarization - flan-t5 - legal - food model_type: t5 --- # Flan-T5 Large Fine-Tuned on EFRA Dataset This is a fine-tuned version of [Flan-T5 Large](https://huggingface.co/google/flan-t5-large) on the **EFRA dataset** for summarizing legal documents related to food regulations and policies. ## Model Description Flan-T5 is a sequence-to-sequence model trained for text-to-text tasks. This fine-tuned version is specifically optimized for summarizing legal text in the domain of food legislation, regulatory requirements, and compliance documents. ### Fine-Tuning Details - **Base Model**: [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) - **Dataset**: EFRA (a curated dataset of legal documents in the food domain) - **Objective**: Summarization of legal documents - **Framework**: Hugging Face Transformers ## Applications This model is suitable for: - Summarizing legal texts in the food domain - Extracting key information from lengthy regulatory documents - Assisting legal professionals and food companies in understanding compliance requirements ## Example Usage ```python from transformers import AutoModelForSeq2SeqLM, AutoTokenizer # Load the model and tokenizer model = AutoModelForSeq2SeqLM.from_pretrained("giuid/flan_t5_large_summarization_v2") tokenizer = AutoTokenizer.from_pretrained("giuid/flan_t5_large_summarization_v2") # Input text input_text = "Your lengthy legal document text here..." # Tokenize and generate summary inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True) outputs = model.generate(inputs.input_ids, max_length=150, num_beams=5, early_stopping=True) # Decode summary summary = tokenizer.decode(outputs[0], skip_special_tokens=True) print(summary) ```
null
Non_BioNLP
# Flan-T5 Large Fine-Tuned on EFRA Dataset This is a fine-tuned version of [Flan-T5 Large](https://huggingface.co/google/flan-t5-large) on the **EFRA dataset** for summarizing legal documents related to food regulations and policies. ## Model Description Flan-T5 is a sequence-to-sequence model trained for text-to-text tasks. This fine-tuned version is specifically optimized for summarizing legal text in the domain of food legislation, regulatory requirements, and compliance documents. ### Fine-Tuning Details - **Base Model**: [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) - **Dataset**: EFRA (a curated dataset of legal documents in the food domain) - **Objective**: Summarization of legal documents - **Framework**: Hugging Face Transformers ## Applications This model is suitable for: - Summarizing legal texts in the food domain - Extracting key information from lengthy regulatory documents - Assisting legal professionals and food companies in understanding compliance requirements ## Example Usage ```python from transformers import AutoModelForSeq2SeqLM, AutoTokenizer # Load the model and tokenizer model = AutoModelForSeq2SeqLM.from_pretrained("giuid/flan_t5_large_summarization_v2") tokenizer = AutoTokenizer.from_pretrained("giuid/flan_t5_large_summarization_v2") # Input text input_text = "Your lengthy legal document text here..." # Tokenize and generate summary inputs = tokenizer(input_text, return_tensors="pt", max_length=512, truncation=True) outputs = model.generate(inputs.input_ids, max_length=150, num_beams=5, early_stopping=True) # Decode summary summary = tokenizer.decode(outputs[0], skip_special_tokens=True) print(summary) ```
{"datasets": ["efra"], "language": "en", "license": "apache-2.0", "pipeline_tag": "text2text-generation", "tags": ["summarization", "flan-t5", "legal", "food"], "model_type": "t5"}
task
[ "SUMMARIZATION" ]
45,547
datasocietyco/bge-base-en-v1.5-course-recommender-v4python
datasocietyco
sentence-similarity
[ "sentence-transformers", "safetensors", "bert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:48", "loss:MultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:BAAI/bge-base-en-v1.5", "base_model:finetune:BAAI/bge-base-en-v1.5", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-12-16T11:25:06Z
2024-12-16T11:26:51+00:00
9
0
--- base_model: BAAI/bge-base-en-v1.5 library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:48 - loss:MultipleNegativesRankingLoss widget: - source_sentence: 'Fundamentals of Deep Learning for Multi GPUs. Find out how to use multiple GPUs to train neural networks and effectively parallelize\ntraining of deep neural networks using TensorFlow.. tags: multiple GPUs, neural networks, TensorFlow, parallelize. Languages: Course language: Python. Prerequisites: No prerequisite course required. Target audience: Professionals want to train deep neural networks on multi-GPU technology to shorten\nthe training time required for data-intensive applications.' sentences: - 'Course Name:Hypothesis Testing in Python|Course Description:In this course, learners with foundational knowledge of statistical concepts will dive deeper into hypothesis testing by focusing on three standard tests of statistical significance: t-tests, F-tests, and chi-squared tests. Covering topics such as t-value, t-distribution, chi-square distribution, F-statistic, and F-distribution, this course will familiarize learners with techniques that will enable them to assess normality of data and goodness-of-fit and to compare observed and expected frequencies objectively.|Tags:f-distribution, chi-square distribution, f-statistic, t-distribution, t-value|Course language: Python|Target Audience:Professionals some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools.|Prerequisite course required: Foundations of Statistics in Python' - 'Course Name:Foundations of Data & AI Literacy for Managers|Course Description:Designed for managers leading teams and projects, this course empowers individuals to build data-driven organizations and integrate AI tools into daily operations. Learners will gain a foundational understanding of data and AI concepts and learn how to leverage them for actionable business insights. Managers will develop the skills to increase collaboration with technical experts and make informed decisions about analysis methods, ensuring their enterprise thrives in today’s data-driven landscape.|Tags:Designed, managers, leading, teams, projects,, course, empowers, individuals, build, data-driven, organizations, integrate, AI, tools, into, daily, operations., Learners, will, gain, foundational, understanding, data, AI, concepts, learn, how, leverage, them, actionable, business, insights., Managers, will, develop, skills, increase, collaboration, technical, experts, make, informed, decisions, about, analysis, methods,, ensuring, their, enterprise, thrives, today’s, data-driven, landscape.|Course language: None|Target Audience:No target audience|No prerequisite course required' - 'Course Name:Fundamentals of Deep Learning for Multi GPUs|Course Description:Find out how to use multiple GPUs to train neural networks and effectively parallelize\ntraining of deep neural networks using TensorFlow.|Tags:multiple GPUs, neural networks, TensorFlow, parallelize|Course language: Python|Target Audience:Professionals want to train deep neural networks on multi-GPU technology to shorten\nthe training time required for data-intensive applications|No prerequisite course required' - source_sentence: 'Data Visualization Design & Storytelling. 
This course focuses on the fundamentals of data visualization, which helps support data-driven decision-making and to create a data-driven culture.. tags: data driven culture, data analytics, data literacy, data quality, storytelling, data science. Languages: Course language: TBD. Prerequisites: No prerequisite course required. Target audience: Professionals who would like to understand more about how to visualize data, design and concepts of storytelling through data..' sentences: - 'Course Name:Building Transformer-Based NLP Applications (NVIDIA)|Course Description:Learn how to apply and fine-tune a Transformer-based Deep Learning model to Natural Language Processing (NLP) tasks. In this course, you''ll construct a Transformer neural network in PyTorch, Build a named-entity recognition (NER) application with BERT, Deploy the NER application with ONNX and TensorRT to a Triton inference server. Upon completion, you’ll be proficient i.n task-agnostic applications of Transformer-based models. Data Society''s instructors are certified by NVIDIA’s Deep Learning Institute to teach this course.|Tags:named-entity recognition, text, Natural language processing, classification, NLP, NER|Course language: Python|Target Audience:Professionals with basic knowledge of neural networks and want to expand their knowledge in the world of Natural langauge processing|No prerequisite course required' - 'Course Name:Nonlinear Regression in Python|Course Description:In this course, learners will practice implementing a variety of nonlinear regression techniques in Python to model complex relationships beyond simple linear patterns. They will learn to interpret key transformations, including logarithmic (log-log, log-linear) and polynomial models, and identify interaction effects between predictor variables. Through hands-on exercises, they will also develop practical skills in selecting, fitting, and validating the most appropriate nonlinear model for their data.|Tags:nonlinear, regression|Course language: Python|Target Audience:This is an intermediate level course for data scientists who want to learn to understand and estimate relationships between a set of independent variables and a continuous dependent variable.|Prerequisite course required: Multiple Linear Regression' - 'Course Name:Data Visualization Design & Storytelling|Course Description:This course focuses on the fundamentals of data visualization, which helps support data-driven decision-making and to create a data-driven culture.|Tags:data driven culture, data analytics, data literacy, data quality, storytelling, data science|Course language: TBD|Target Audience:Professionals who would like to understand more about how to visualize data, design and concepts of storytelling through data.|No prerequisite course required' - source_sentence: 'Foundations of Probability Theory in Python. This course guides learners through a comprehensive review of advanced statistics topics on probability, such as permutations and combinations, joint probability, conditional probability, and marginal probability. Learners will also become familiar with Bayes’ theorem, a rule that provides a way to calculate the probability of a cause given its outcome. By the end of this course, learners will also be able to assess the likelihood of events being independent to indicate whether further statistical analysis is likely to yield results.. tags: conditional probability, bayes'' theorem. Languages: Course language: Python. 
Prerequisites: Prerequisite course required: Hypothesis Testing in Python. Target audience: Professionals some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools..' sentences: - 'Course Name:Foundations of Probability Theory in Python|Course Description:This course guides learners through a comprehensive review of advanced statistics topics on probability, such as permutations and combinations, joint probability, conditional probability, and marginal probability. Learners will also become familiar with Bayes’ theorem, a rule that provides a way to calculate the probability of a cause given its outcome. By the end of this course, learners will also be able to assess the likelihood of events being independent to indicate whether further statistical analysis is likely to yield results.|Tags:conditional probability, bayes'' theorem|Course language: Python|Target Audience:Professionals some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools.|Prerequisite course required: Hypothesis Testing in Python' - 'Course Name:Foundations of Generative AI|Course Description:Foundations of Generative AI|Tags:Foundations, Generative, AI|Course language: None|Target Audience:No target audience|No prerequisite course required' - 'Course Name:Data Science for Managers|Course Description:This course is designed for managers seeking to bolster their data literacy with a deep dive into data science tools and teams, project life cycles, and methods.|Tags:data driven culture, data analytics, data quality, storytelling, data science|Course language: TBD|Target Audience:This course is targeted for those who would like to understand more about data literacy, make more informed decisions and identify data-driven solutions through data science tools and methods.|No prerequisite course required' --- # SentenceTransformer based on BAAI/bge-base-en-v1.5 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. 
## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) <!-- at revision a5beb1e3e68b9ab74eb54cfd186867f64f240e1a --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': True}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("datasocietyco/bge-base-en-v1.5-course-recommender-v4python") # Run inference sentences = [ "Foundations of Probability Theory in Python. This course guides learners through a comprehensive review of advanced statistics topics on probability, such as permutations and combinations, joint probability, conditional probability, and marginal probability. Learners will also become familiar with Bayes’ theorem, a rule that provides a way to calculate the probability of a cause given its outcome. By the end of this course, learners will also be able to assess the likelihood of events being independent to indicate whether further statistical analysis is likely to yield results.. tags: conditional probability, bayes' theorem. Languages: Course language: Python. Prerequisites: Prerequisite course required: Hypothesis Testing in Python. Target audience: Professionals some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools..", "Course Name:Foundations of Probability Theory in Python|Course Description:This course guides learners through a comprehensive review of advanced statistics topics on probability, such as permutations and combinations, joint probability, conditional probability, and marginal probability. Learners will also become familiar with Bayes’ theorem, a rule that provides a way to calculate the probability of a cause given its outcome. 
By the end of this course, learners will also be able to assess the likelihood of events being independent to indicate whether further statistical analysis is likely to yield results.|Tags:conditional probability, bayes' theorem|Course language: Python|Target Audience:Professionals some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools.|Prerequisite course required: Hypothesis Testing in Python", 'Course Name:Foundations of Generative AI|Course Description:Foundations of Generative AI|Tags:Foundations, Generative, AI|Course language: None|Target Audience:No target audience|No prerequisite course required', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 48 training samples * Columns: <code>anchor</code> and <code>positive</code> * Approximate statistics based on the first 48 samples: | | anchor | positive | |:--------|:-------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 49 tokens</li><li>mean: 188.12 tokens</li><li>max: 322 tokens</li></ul> | <ul><li>min: 47 tokens</li><li>mean: 186.12 tokens</li><li>max: 320 tokens</li></ul> | * Samples: | anchor | positive | 
|:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Outlier Detection with DBSCAN in Python. Density-Based Spatial Clustering of Applications with Noise, or DBSCAN, contrasts groups of densely-packed data with points isolated in low-density regions. In this course, learners will discuss the optimal data conditions suited to this method of outlier detection. After discussing different basic varieties of anomaly detection, learners will implement DBSCAN to identify likely outliers. They will also use a balancing method called Synthetic Minority Oversampling Technique, or SMOTE, to generate additional examples of outliers and improve the anomaly detection model.. tags: outlier, SMOTE, anomaly, DBSCAN. Languages: Course language: Python. Prerequisites: Prerequisite course required: Intro to Clustering. 
Target audience: Professionals with some Python experience who would like to expand their skills to learn about various outlier detection techniques.</code> | <code>Course Name:Outlier Detection with DBSCAN in Python|Course Description:Density-Based Spatial Clustering of Applications with Noise, or DBSCAN, contrasts groups of densely-packed data with points isolated in low-density regions. In this course, learners will discuss the optimal data conditions suited to this method of outlier detection. After discussing different basic varieties of anomaly detection, learners will implement DBSCAN to identify likely outliers. They will also use a balancing method called Synthetic Minority Oversampling Technique, or SMOTE, to generate additional examples of outliers and improve the anomaly detection model.|Tags:outlier, SMOTE, anomaly, DBSCAN|Course language: Python|Target Audience:Professionals with some Python experience who would like to expand their skills to learn about various outlier detection techniques|Prerequisite course required: Intro to Clustering</code> | | <code>Foundations of Python. This course introduces learners to the fundamentals of the Python programming language. Python is one of the most widely used computer languages in the world, helpful for building web-based applications, performing data analysis, and automating tasks. By the end of this course, learners will identify how data scientists use Python, distinguish among basic data types and data structures, and perform simple arithmetic and variable-related tasks.. tags: functions, basics, data-structures, control-flow. Languages: Course language: Python. Prerequisites: Prerequisite course required: Version Control with Git. Target audience: This is an introductory level course for data scientists who want to learn basics of Python and implement different data manipulation techniques using popular data wrangling Python libraries..</code> | <code>Course Name:Foundations of Python|Course Description:This course introduces learners to the fundamentals of the Python programming language. Python is one of the most widely used computer languages in the world, helpful for building web-based applications, performing data analysis, and automating tasks. By the end of this course, learners will identify how data scientists use Python, distinguish among basic data types and data structures, and perform simple arithmetic and variable-related tasks.|Tags:functions, basics, data-structures, control-flow|Course language: Python|Target Audience:This is an introductory level course for data scientists who want to learn basics of Python and implement different data manipulation techniques using popular data wrangling Python libraries.|Prerequisite course required: Version Control with Git</code> | | <code>Text Generation with LLMs in Python. This course provides a practical introduction to the latest advancements in generative AI with a focus on text. To start, the course explores the use of reinforcement learning in natural language processing (NLP). Learners will delve into approaches for conversational and question-answering (QA) tasks, highlighting the capabilities, limitations, and use cases of models available in the Hugging Face library, such as Dolly v2. Finally, learners will gain hands-on experience in creating their own chatbot by using the concepts of Retrieval Augmented Generation (RAG) in LlamaIndex.. 
tags: course, provides, practical, introduction, latest, advancements, generative, AI, focus, text., start,, course, explores, use, reinforcement, learning, natural, language, processing, (NLP)., Learners, will, delve, into, approaches, conversational, question-answering, (QA), tasks,, highlighting, capabilities,, limitations,, use, cases, models, available, Hugging, Face, library,, such, as, Dolly, v2., Finally,, learners, will, gain, hands-on, experience, creating, their, own, chatbot, using, concepts, Retrieval, Augmented, Generation, (RAG), LlamaIndex.. Languages: Course language: None. Prerequisites: No prerequisite course required. Target audience: No target audience.</code> | <code>Course Name:Text Generation with LLMs in Python|Course Description:This course provides a practical introduction to the latest advancements in generative AI with a focus on text. To start, the course explores the use of reinforcement learning in natural language processing (NLP). Learners will delve into approaches for conversational and question-answering (QA) tasks, highlighting the capabilities, limitations, and use cases of models available in the Hugging Face library, such as Dolly v2. Finally, learners will gain hands-on experience in creating their own chatbot by using the concepts of Retrieval Augmented Generation (RAG) in LlamaIndex.|Tags:course, provides, practical, introduction, latest, advancements, generative, AI, focus, text., start,, course, explores, use, reinforcement, learning, natural, language, processing, (NLP)., Learners, will, delve, into, approaches, conversational, question-answering, (QA), tasks,, highlighting, capabilities,, limitations,, use, cases, models, available, Hugging, Face, library,, such, as, Dolly, v2., Finally,, learners, will, gain, hands-on, experience, creating, their, own, chatbot, using, concepts, Retrieval, Augmented, Generation, (RAG), LlamaIndex.|Course language: None|Target Audience:No target audience|No prerequisite course required</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Evaluation Dataset #### Unnamed Dataset * Size: 12 evaluation samples * Columns: <code>anchor</code> and <code>positive</code> * Approximate statistics based on the first 12 samples: | | anchor | positive | |:--------|:-------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 46 tokens</li><li>mean: 162.92 tokens</li><li>max: 363 tokens</li></ul> | <ul><li>min: 44 tokens</li><li>mean: 160.92 tokens</li><li>max: 361 tokens</li></ul> | * Samples: | anchor | positive | 
|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Fundamentals of Deep Learning for Multi GPUs. Find out how to use multiple GPUs to train neural networks and effectively parallelize\ntraining of deep neural networks using TensorFlow.. tags: multiple GPUs, neural networks, TensorFlow, parallelize. Languages: Course language: Python. Prerequisites: No prerequisite course required. Target audience: Professionals want to train deep neural networks on multi-GPU technology to shorten\nthe training time required for data-intensive applications.</code> | <code>Course Name:Fundamentals of Deep Learning for Multi GPUs|Course Description:Find out how to use multiple GPUs to train neural networks and effectively parallelize\ntraining of deep neural networks using TensorFlow.|Tags:multiple GPUs, neural networks, TensorFlow, parallelize|Course language: Python|Target Audience:Professionals want to train deep neural networks on multi-GPU technology to shorten\nthe training time required for data-intensive applications|No prerequisite course required</code> | | <code>Building Transformer-Based NLP Applications (NVIDIA). Learn how to apply and fine-tune a Transformer-based Deep Learning model to Natural Language Processing (NLP) tasks. In this course, you'll construct a Transformer neural network in PyTorch, Build a named-entity recognition (NER) application with BERT, Deploy the NER application with ONNX and TensorRT to a Triton inference server. Upon completion, you’ll be proficient i.n task-agnostic applications of Transformer-based models. Data Society's instructors are certified by NVIDIA’s Deep Learning Institute to teach this course.. 
tags: named-entity recognition, text, Natural language processing, classification, NLP, NER. Languages: Course language: Python. Prerequisites: No prerequisite course required. Target audience: Professionals with basic knowledge of neural networks and want to expand their knowledge in the world of Natural langauge processing.</code> | <code>Course Name:Building Transformer-Based NLP Applications (NVIDIA)|Course Description:Learn how to apply and fine-tune a Transformer-based Deep Learning model to Natural Language Processing (NLP) tasks. In this course, you'll construct a Transformer neural network in PyTorch, Build a named-entity recognition (NER) application with BERT, Deploy the NER application with ONNX and TensorRT to a Triton inference server. Upon completion, you’ll be proficient i.n task-agnostic applications of Transformer-based models. Data Society's instructors are certified by NVIDIA’s Deep Learning Institute to teach this course.|Tags:named-entity recognition, text, Natural language processing, classification, NLP, NER|Course language: Python|Target Audience:Professionals with basic knowledge of neural networks and want to expand their knowledge in the world of Natural langauge processing|No prerequisite course required</code> | | <code>Nonlinear Regression in Python. In this course, learners will practice implementing a variety of nonlinear regression techniques in Python to model complex relationships beyond simple linear patterns. They will learn to interpret key transformations, including logarithmic (log-log, log-linear) and polynomial models, and identify interaction effects between predictor variables. Through hands-on exercises, they will also develop practical skills in selecting, fitting, and validating the most appropriate nonlinear model for their data.. tags: nonlinear, regression. Languages: Course language: Python. Prerequisites: Prerequisite course required: Multiple Linear Regression. Target audience: This is an intermediate level course for data scientists who want to learn to understand and estimate relationships between a set of independent variables and a continuous dependent variable..</code> | <code>Course Name:Nonlinear Regression in Python|Course Description:In this course, learners will practice implementing a variety of nonlinear regression techniques in Python to model complex relationships beyond simple linear patterns. They will learn to interpret key transformations, including logarithmic (log-log, log-linear) and polynomial models, and identify interaction effects between predictor variables. 
Through hands-on exercises, they will also develop practical skills in selecting, fitting, and validating the most appropriate nonlinear model for their data.|Tags:nonlinear, regression|Course language: Python|Target Audience:This is an intermediate level course for data scientists who want to learn to understand and estimate relationships between a set of independent variables and a continuous dependent variable.|Prerequisite course required: Multiple Linear Regression</code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `learning_rate`: 3e-06 - `max_steps`: 24 - `warmup_ratio`: 0.1 - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 3e-06 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 3.0 - `max_steps`: 24 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - 
`gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | loss | |:------:|:----:|:-------------:|:------:| | 6.6667 | 20 | 0.046 | 0.0188 | ### Framework Versions - Python: 3.9.13 - Sentence Transformers: 3.1.1 - Transformers: 4.45.1 - PyTorch: 2.2.2 - Accelerate: 0.34.2 - Datasets: 3.0.0 - Tokenizers: 0.20.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
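The training configuration above (MultipleNegativesRankingLoss with `scale=20.0` and cosine similarity, batch size 16, learning rate 3e-06, 24 steps, `no_duplicates` batch sampling) maps directly onto the Sentence Transformers v3 trainer API. Below is a minimal, hedged sketch of such a run; the toy anchor/positive pairs and the output directory are illustrative placeholders, not the card author's actual script.

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers

# Start from the base checkpoint named in the card.
model = SentenceTransformer("BAAI/bge-base-en-v1.5")

# Placeholder pairs in the same "anchor"/"positive" shape as the card's samples.
train_dataset = Dataset.from_dict({
    "anchor": [
        "Foundations of Python. This course introduces learners to the fundamentals of Python.",
        "Nonlinear Regression in Python. Model complex relationships beyond linear patterns.",
    ],
    "positive": [
        "Course Name:Foundations of Python|Course Description:Fundamentals of Python.",
        "Course Name:Nonlinear Regression in Python|Course Description:Nonlinear modeling.",
    ],
})

# In-batch-negatives ranking loss with the parameters reported above.
loss = MultipleNegativesRankingLoss(model, scale=20.0)

args = SentenceTransformerTrainingArguments(
    output_dir="bge-course-recommender",        # placeholder path
    per_device_train_batch_size=16,
    learning_rate=3e-06,
    max_steps=24,
    warmup_ratio=0.1,
    batch_sampler=BatchSamplers.NO_DUPLICATES,  # matches `batch_sampler: no_duplicates`
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    loss=loss,
)
trainer.train()
```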
null
Non_BioNLP
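Given the course-recommendation framing of the card above, the checkpoint can also be used for retrieval rather than pairwise scoring. A small sketch, assuming a free-text query against catalogue entries in the card's `Course Name:...|Course Description:...` serialization; the query and the two catalogue strings are invented examples:

```python
from sentence_transformers import SentenceTransformer, util

# Repo id as given in the card's usage section.
model = SentenceTransformer("datasocietyco/bge-base-en-v1.5-course-recommender-v4python")

# Invented catalogue entries in the card's serialized course format.
courses = [
    "Course Name:Foundations of Python|Course Description:Fundamentals of the Python language.",
    "Course Name:Nonlinear Regression in Python|Course Description:Modeling nonlinear relationships.",
]
query = "An introductory Python course for aspiring data scientists."

course_embeddings = model.encode(courses, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)

# Rank catalogue entries by cosine similarity to the query.
for hit in util.semantic_search(query_embedding, course_embeddings, top_k=2)[0]:
    print(round(hit["score"], 3), courses[hit["corpus_id"]])
```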
{"base_model": "BAAI/bge-base-en-v1.5", "library_name": "sentence-transformers", "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:48", "loss:MultipleNegativesRankingLoss"], "widget": [{"source_sentence": "Fundamentals of Deep Learning for Multi GPUs. Find out how to use multiple GPUs to train neural networks and effectively parallelize\\ntraining of deep neural networks using TensorFlow.. tags: multiple GPUs, neural networks, TensorFlow, parallelize. Languages: Course language: Python. Prerequisites: No prerequisite course required. Target audience: Professionals want to train deep neural networks on multi-GPU technology to shorten\\nthe training time required for data-intensive applications.", "sentences": ["Course Name:Hypothesis Testing in Python|Course Description:In this course, learners with foundational knowledge of statistical concepts will dive deeper into hypothesis testing by focusing on three standard tests of statistical significance: t-tests, F-tests, and chi-squared tests. Covering topics such as t-value, t-distribution, chi-square distribution, F-statistic, and F-distribution, this course will familiarize learners with techniques that will enable them to assess normality of data and goodness-of-fit and to compare observed and expected frequencies objectively.|Tags:f-distribution, chi-square distribution, f-statistic, t-distribution, t-value|Course language: Python|Target Audience:Professionals some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools.|Prerequisite course required: Foundations of Statistics in Python", "Course Name:Foundations of Data & AI Literacy for Managers|Course Description:Designed for managers leading teams and projects, this course empowers individuals to build data-driven organizations and integrate AI tools into daily operations. Learners will gain a foundational understanding of data and AI concepts and learn how to leverage them for actionable business insights. Managers will develop the skills to increase collaboration with technical experts and make informed decisions about analysis methods, ensuring their enterprise thrives in today’s data-driven landscape.|Tags:Designed, managers, leading, teams, projects,, course, empowers, individuals, build, data-driven, organizations, integrate, AI, tools, into, daily, operations., Learners, will, gain, foundational, understanding, data, AI, concepts, learn, how, leverage, them, actionable, business, insights., Managers, will, develop, skills, increase, collaboration, technical, experts, make, informed, decisions, about, analysis, methods,, ensuring, their, enterprise, thrives, today’s, data-driven, landscape.|Course language: None|Target Audience:No target audience|No prerequisite course required", "Course Name:Fundamentals of Deep Learning for Multi GPUs|Course Description:Find out how to use multiple GPUs to train neural networks and effectively parallelize\\ntraining of deep neural networks using TensorFlow.|Tags:multiple GPUs, neural networks, TensorFlow, parallelize|Course language: Python|Target Audience:Professionals want to train deep neural networks on multi-GPU technology to shorten\\nthe training time required for data-intensive applications|No prerequisite course required"]}, {"source_sentence": "Data Visualization Design & Storytelling. 
This course focuses on the fundamentals of data visualization, which helps support data-driven decision-making and to create a data-driven culture.. tags: data driven culture, data analytics, data literacy, data quality, storytelling, data science. Languages: Course language: TBD. Prerequisites: No prerequisite course required. Target audience: Professionals who would like to understand more about how to visualize data, design and concepts of storytelling through data..", "sentences": ["Course Name:Building Transformer-Based NLP Applications (NVIDIA)|Course Description:Learn how to apply and fine-tune a Transformer-based Deep Learning model to Natural Language Processing (NLP) tasks. In this course, you'll construct a Transformer neural network in PyTorch, Build a named-entity recognition (NER) application with BERT, Deploy the NER application with ONNX and TensorRT to a Triton inference server. Upon completion, you’ll be proficient i.n task-agnostic applications of Transformer-based models. Data Society's instructors are certified by NVIDIA’s Deep Learning Institute to teach this course.|Tags:named-entity recognition, text, Natural language processing, classification, NLP, NER|Course language: Python|Target Audience:Professionals with basic knowledge of neural networks and want to expand their knowledge in the world of Natural langauge processing|No prerequisite course required", "Course Name:Nonlinear Regression in Python|Course Description:In this course, learners will practice implementing a variety of nonlinear regression techniques in Python to model complex relationships beyond simple linear patterns. They will learn to interpret key transformations, including logarithmic (log-log, log-linear) and polynomial models, and identify interaction effects between predictor variables. Through hands-on exercises, they will also develop practical skills in selecting, fitting, and validating the most appropriate nonlinear model for their data.|Tags:nonlinear, regression|Course language: Python|Target Audience:This is an intermediate level course for data scientists who want to learn to understand and estimate relationships between a set of independent variables and a continuous dependent variable.|Prerequisite course required: Multiple Linear Regression", "Course Name:Data Visualization Design & Storytelling|Course Description:This course focuses on the fundamentals of data visualization, which helps support data-driven decision-making and to create a data-driven culture.|Tags:data driven culture, data analytics, data literacy, data quality, storytelling, data science|Course language: TBD|Target Audience:Professionals who would like to understand more about how to visualize data, design and concepts of storytelling through data.|No prerequisite course required"]}, {"source_sentence": "Foundations of Probability Theory in Python. This course guides learners through a comprehensive review of advanced statistics topics on probability, such as permutations and combinations, joint probability, conditional probability, and marginal probability. Learners will also become familiar with Bayes’ theorem, a rule that provides a way to calculate the probability of a cause given its outcome. By the end of this course, learners will also be able to assess the likelihood of events being independent to indicate whether further statistical analysis is likely to yield results.. tags: conditional probability, bayes' theorem. Languages: Course language: Python. 
Prerequisites: Prerequisite course required: Hypothesis Testing in Python. Target audience: Professionals some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools..", "sentences": ["Course Name:Foundations of Probability Theory in Python|Course Description:This course guides learners through a comprehensive review of advanced statistics topics on probability, such as permutations and combinations, joint probability, conditional probability, and marginal probability. Learners will also become familiar with Bayes’ theorem, a rule that provides a way to calculate the probability of a cause given its outcome. By the end of this course, learners will also be able to assess the likelihood of events being independent to indicate whether further statistical analysis is likely to yield results.|Tags:conditional probability, bayes' theorem|Course language: Python|Target Audience:Professionals some Python experience who would like to expand their skill set to more advanced Python visualization techniques and tools.|Prerequisite course required: Hypothesis Testing in Python", "Course Name:Foundations of Generative AI|Course Description:Foundations of Generative AI|Tags:Foundations, Generative, AI|Course language: None|Target Audience:No target audience|No prerequisite course required", "Course Name:Data Science for Managers|Course Description:This course is designed for managers seeking to bolster their data literacy with a deep dive into data science tools and teams, project life cycles, and methods.|Tags:data driven culture, data analytics, data quality, storytelling, data science|Course language: TBD|Target Audience:This course is targeted for those who would like to understand more about data literacy, make more informed decisions and identify data-driven solutions through data science tools and methods.|No prerequisite course required"]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,548
amartyobanerjee/marian-finetuned-kde4-en-to-fr
amartyobanerjee
translation
[ "transformers", "pytorch", "tensorboard", "marian", "text2text-generation", "translation", "generated_from_trainer", "dataset:kde4", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-06-15T08:33:22Z
2022-08-03T03:32:12+00:00
19
0
--- datasets: - kde4 license: apache-2.0 metrics: - bleu tags: - translation - generated_from_trainer model-index: - name: marian-finetuned-kde4-en-to-fr results: - task: type: text2text-generation name: Sequence-to-sequence Language Modeling dataset: name: kde4 type: kde4 config: en-fr split: train args: en-fr metrics: - type: bleu value: 52.83113187001415 name: Bleu --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # marian-finetuned-kde4-en-to-fr This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-fr](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr) on the kde4 dataset. It achieves the following results on the evaluation set: - Loss: 0.8560 - Bleu: 52.8311 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.21.0 - Pytorch 1.12.0+cu113 - Datasets 2.4.0 - Tokenizers 0.12.1
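Since the card's usage sections are still marked "More information needed", here is a minimal hedged inference sketch using the standard `transformers` translation pipeline; the input string is an invented KDE-style UI message:

```python
from transformers import pipeline

# Load the fine-tuned EN->FR checkpoint named in this card.
translator = pipeline(
    "translation",
    model="amartyobanerjee/marian-finetuned-kde4-en-to-fr",
)

# KDE4 is software-localization data, so a UI string is a natural test input.
print(translator("Default to expanded threads")[0]["translation_text"])
```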
null
Non_BioNLP
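The hyperparameters listed in the card correspond to a standard `Seq2SeqTrainer` run on the kde4 en-fr pairs. A hedged reconstruction under those settings follows; the train/test split ratio, the max sequence length, and the output directory are assumptions, not details taken from the card:

```python
from datasets import load_dataset
from transformers import (
    AutoModelForSeq2SeqLM,
    AutoTokenizer,
    DataCollatorForSeq2Seq,
    Seq2SeqTrainer,
    Seq2SeqTrainingArguments,
)

checkpoint = "Helsinki-NLP/opus-mt-en-fr"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint)

# kde4 ships as en/fr translation pairs; the 90/10 split here is an assumption.
splits = load_dataset("kde4", lang1="en", lang2="fr")["train"].train_test_split(
    train_size=0.9, seed=42
)

def preprocess(examples):
    inputs = [pair["en"] for pair in examples["translation"]]
    targets = [pair["fr"] for pair in examples["translation"]]
    return tokenizer(inputs, text_target=targets, max_length=128, truncation=True)

tokenized = splits.map(preprocess, batched=True, remove_columns=splits["train"].column_names)

# Values below mirror the card's "Training hyperparameters" section.
args = Seq2SeqTrainingArguments(
    output_dir="marian-finetuned-kde4-en-to-fr",  # placeholder
    learning_rate=2e-05,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=64,
    num_train_epochs=3,
    fp16=True,                    # "Native AMP" in the card
    predict_with_generate=True,
    seed=42,
)

trainer = Seq2SeqTrainer(
    model=model,
    args=args,
    train_dataset=tokenized["train"],
    eval_dataset=tokenized["test"],
    data_collator=DataCollatorForSeq2Seq(tokenizer, model=model),
    tokenizer=tokenizer,
)
trainer.train()
```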
{"datasets": ["kde4"], "license": "apache-2.0", "metrics": ["bleu"], "tags": ["translation", "generated_from_trainer"], "model-index": [{"name": "marian-finetuned-kde4-en-to-fr", "results": [{"task": {"type": "text2text-generation", "name": "Sequence-to-sequence Language Modeling"}, "dataset": {"name": "kde4", "type": "kde4", "config": "en-fr", "split": "train", "args": "en-fr"}, "metrics": [{"type": "bleu", "value": 52.83113187001415, "name": "Bleu"}]}]}]}
task
[ "TRANSLATION" ]
45,549
ymelka/robbert-cosmetic-similarity
ymelka
sentence-similarity
[ "sentence-transformers", "safetensors", "roberta", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:4200", "loss:CoSENTLoss", "arxiv:1908.10084", "base_model:ymelka/robbert-cosmetic-v2-finetuned", "base_model:finetune:ymelka/robbert-cosmetic-v2-finetuned", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-07-08T16:29:06Z
2024-07-08T16:29:27+00:00
5
0
--- base_model: ymelka/robbert-cosmetic-v2-finetuned datasets: [] language: [] library_name: sentence-transformers metrics: - pearson_cosine - spearman_cosine - pearson_manhattan - spearman_manhattan - pearson_euclidean - spearman_euclidean - pearson_dot - spearman_dot - pearson_max - spearman_max pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:4200 - loss:CoSENTLoss widget: - source_sentence: Een milde reiniger speciaal ontwikkeld voor de gevoelige huid van mannen van 37 jaar. Deze reiniger helpt roodheid en onzuiverheden te verminderen, terwijl het de huid zacht en gehydrateerd achterlaat. sentences: - De Shiseido Waso Koshirice Tinted Spot Treatment SOS Imperfections Subtle Peach is een hybride behandeling en concealer die puistjes vervaagt en verzorgt. Met het antioxiderende Koshihikari-rijst extract en salicylzuur vermindert het direct oneffenheden en roodheid, terwijl het de huid voedt en beschermt. De formule bevat ook Japanse Shikuwasa-extract en glycyrrhizinaat voor een verbeterde huidbarrière en egale teint. Breng een kleine hoeveelheid aan op probleemzones 2-3 keer per dag voor een langdurig effect. Geschikt voor alle huidtypes. - Neutrogena Retinol Boost Intense Nacht Serum is een krachtig serum dat de huid 's nachts intensief verzorgt en herstelt. Dit serum bevat retinol, een krachtig ingrediënt dat de huidtextuur verbetert, fijne lijntjes en rimpels vermindert en de huidteint egaliseert. Daarnaast bevat het serum hyaluronzuur en vitamine C, die de huid hydrateren en beschermen tegen schadelijke invloeden van buitenaf. Breng elke avond een paar druppels aan op een gereinigde huid en masseer zachtjes in. Gebruik het serum in combinatie met een SPF overdag, aangezien retinol de huid gevoeliger kan maken voor de zon. Dit serum is geschikt voor alle huidtypes, maar kan irritatie veroorzaken bij een gevoelige huid. - 'Nivea Labello Protect+ Lippenbalsem SPF 15 Duo 2x4,8g verzorgt droge en gebarsten lippen, kalmeert direct en houdt je lippen 24u lang gehydrateerd. De formule trekt snel in en helpt gebarsten lippen te herstellen, terwijl de SPF 15 beschermt tegen schadelijke zonnestralen. Met actieve ingrediënten zoals sheaboter, jojoba-olie en vitamine E. Gebruik de lippenbalsem regelmatig voor zachte en soepele lippen. Let op: niet gebruiken bij allergie voor een van de ingrediënten.' - source_sentence: Een voedingssupplement met essentiële voedingsstoffen en antioxidanten om de huid van binnenuit te voeden en te beschermen. Ideaal voor vrouwen van 29 jaar die streven naar een gezonde en stralende huid. sentences: - De Babor Shaping Vitamine ACE Lichaamscrème is een luxe crème die de huid intensief voedt en hydrateert. Dankzij de krachtige combinatie van vitamine A, C en E wordt de huid verstevigd, verhelderd en beschermd tegen vrije radicalen. Deze crème is ideaal voor het verbeteren van de elasticiteit en stevigheid van de huid, waardoor het uiterlijk van cellulitis en striae wordt verminderd. De actieve ingrediënten dringen diep door in de huid om langdurige resultaten te bieden. Breng de crème dagelijks aan op een gereinigde huid en masseer zachtjes in tot het volledig is opgenomen. Vermijd contact met de ogen en gebruik niet op een beschadigde huid. - 'Ma Provence Shampoo Anti-Roos met Natuurlijke Witte Klei is een handig en milieuvriendelijk product dat perfect is voor op reis. 
Met een handige maat van 85 g en een centraal gat voor een gemakkelijke grip, komt deze shampoo overeen met 40-50 shampoos, wat gelijk staat aan 2 flessen van 250 ml. Deze shampoo heeft geen conditioner nodig, wat resulteert in minder verpakkingsafval. De formule is 100% natuurlijk en bevat 20% biologische ingrediënten, waardoor het ideaal is voor mensen die op zoek zijn naar natuurlijke producten. Ma Provence Shampoo Anti-Roos is speciaal ontwikkeld om roos te bestrijden en is gecertificeerd als 100% natuurlijk en biologisch. Gebruiksaanwijzing: Breng een kleine hoeveelheid shampoo aan op nat haar, masseer zachtjes in en spoel grondig uit. Gebruik indien nodig. Voorzorgsmaatregelen: Vermijd contact met de ogen en bij eventuele irritatie stop het gebruik.' - Avène Men Hydraterende Anti-Agingverzorging is speciaal ontwikkeld voor mannen om de huid te hydrateren, revitaliseren en strakker te maken. Deze verzorging werkt op de tekenen van huidveroudering, zoals de structuur van de dermis en elasticiteit, dankzij actieve ingrediënten zoals hyaluronzuur en antioxidanten. De niet-plakkerige textuur zorgt voor een comfortabel gevoel en is geschikt voor alle gevoelige huidtypes, zelfs na het scheren. Breng de verzorging aan op het gelaat en de hals voor een stevigere en compacter uitziende huid. Gebruik de pompfles van 50 ml volgens de aanbevolen instructies voor optimale resultaten. - source_sentence: Een milde reiniger speciaal ontwikkeld voor de rijpere huid van vrouwen boven de 60. Verrijkt met antioxidanten en hydraterende ingrediënten om de doffe huid te revitaliseren en onzuiverheden te verwijderen. sentences: - De Cattier Zuiverende Reinigingsgel is speciaal ontwikkeld voor de vette huid met onzuiverheden en puistjes. Met biologische essentiële oliën van tea tree, rozemarijn en pepermunt reinigt deze gel zacht de huid en verwijdert effectief onzuiverheden en overtollig talg. Verrijkt met aloë vera, biedt het een aangename sensatie van frisheid en laat de huid helder en gezond achter. De actieve ingrediënten, waaronder biologische tea tree olie en aloë vera, helpen de huid te zuiveren, hydrateren en beschermen. Breng de gel aan op een vochtig gezicht, vermijd de oogcontour, masseer in en spoel af met lauw water. Gebruik de gel regelmatig voor een zuivere en gezonde huid. - 'De Clarins Extra-Firming Anti-Rimpel Rijke Verstevigende Dagcrème is een ideale anti-ageing dagverzorging voor de droge huid, speciaal ontworpen voor vrouwen vanaf 40 jaar. Deze crème zorgt voor een zichtbaar stevigere en energieke huid, vermindert rimpels en herdefinieert het ovaal van het gezicht. De formule bevat actieve ingrediënten zoals shea butter, haverextract en vitamine E die de huid hydrateren, beschermen en verstevigen. Breng ''s ochtends aan op een schone huid voor een egale teint en stralende huid. Let op: vermijd contact met de ogen en bij eventuele irritatie stop het gebruik. Pot 50 ml.' - La Roche-Posay Effaclar Zuiverende Schuimende Reiningsgel is speciaal ontwikkeld voor de vette huid met neiging tot acne. Deze reinigingsgel reinigt en zuivert het gezicht zachtjes, verwijdert onzuiverheden en overmatig sebum, en houdt de huid schoon en fris. De formule bevat thermaal water van La Roche-Posay voor een kalmerende werking. Geschikt voor volwassenen en adolescenten met een vette huid, oneffenheden of acne. Gebruik 1 à 2 keer per dag, masseer zachtjes in op een vochtig gezicht en spoel grondig af met water. Niet gebruiken bij overgevoeligheid voor een van de ingrediënten. 
- source_sentence: Een voedende gezichtsverzorging die diep in de huid doordringt om droogheid te behandelen. Speciaal samengesteld voor vrouwen van 24 jaar, deze crème helpt de huid te hydrateren en te herstellen, waardoor een gezonde gloed wordt bevorderd. sentences: - Waam Damast Bloemenwater Bio Spray 200ml is een kalmerende en verfrissende spray met de heerlijke geur van Damastroos. Dit bloemenwater hydrateert intensief, verstevigt en zuivert de huid op milde wijze, ideaal voor gecombineerde tot vette huidtypes. Het staat bekend om zijn antiverouderingseigenschappen en helpt bij het voorkomen van huidveroudering. De spray kan gebruikt worden voor het gezicht, lichaam en haar, en is geschikt voor alle huidtypes, vooral rijpere of reactieve huid. De belangrijkste actieve ingrediënten zijn biologisch aloë vera water en Damastroosbloesemwater. Gebruik de spray om make-up te verwijderen, huidirritatie te kalmeren of je huis subtiel te parfumeren. Niet doorslikken en bewaren op kamertemperatuur. - Benecos Natural Care Lippenbalsem Sinaas is een fruitige lipverzorging met een heerlijke zoete sinaasappelgeur, verrijkt met hoogwaardige biologische sinaasappelolie om de lippen zacht en gehydrateerd te houden. Deze lippenbalsem is ideaal voor dagelijks gebruik en biedt bescherming tegen uitdroging en schrale lippen. De natuurlijke ingrediënten zorgen voor een milde verzorging en een aangename geur. Breng de lippenbalsem eenvoudig aan op de lippen voor een langdurige hydratatie. Deze lippenbalsem is geschikt voor alle huidtypes en bevat 4,8 g product. Gebruik het product naar behoefte en geniet van zachte en verzorgde lippen. - De Fleurance Nature Hydraterende Gezichtscreme Met Aloë Vera is speciaal ontwikkeld om de natuurlijke hydratatie van de huid te behouden en te beschermen tegen agressieve invloeden van buitenaf. Deze lichte crème bevat aloë vera, sheaboter en jojobaolie die de huid verzachten, hydrateren en voeden. Geschikt voor alle huidtypes, deze bio gecertificeerde crème trekt snel in en laat de huid soepel en gezond aanvoelen. Breng de crème aan op gezicht en hals met lichte masserende bewegingen voor een gezonde en goed gehydrateerde huid. Bevat 50 ml. - source_sentence: Een hydraterende gezichtsverzorging die de doffe huid van een 18-jarige vrouw weer laat stralen. Bevat antioxidanten die de huid beschermen tegen schadelijke invloeden van buitenaf. Vermindert wallen en onzuiverheden voor een egale en gezonde teint. sentences: - Alpecin Hypo-Sensitive Shampoo is speciaal ontwikkeld voor mensen met een gevoelige hoofdhuid en biedt een zachte reiniging en hydratatie. De hypoallergene formule bevat geen parfum, kleurstoffen of conserveermiddelen en is zelfs geschikt voor mensen met atopisch eczeem. Met actieve ingrediënten zoals salie-extract en bisabolol kalmeert en voorkomt het irritaties, terwijl het de huidbarrière versterkt voor een gezonde balans. Gebruik de shampoo dagelijks door het zachtjes in te masseren op nat haar en grondig uit te spoelen. De fles bevat 250 ml en is ideaal voor mensen met een droge en gevoelige hoofdhuid. - Phyto Phytocolor Permanente Kleuring 3 Donkerbruin 1 Kit is een haarverf verrijkt met plantaardige pigmenten voor een rijke kleur die niet vervaagt en 100% grijze haren dekt. De formule bevat monoï en jojoba oliën voor een mooie glans en verzorging, terwijl kalmerende ingrediënten irritatie tegengaan. De kleurcrème bevat geen PPD of resorcine en is geschikt voor haar. 
Breng het mengsel aan op droog, ongewassen haar en laat het inwerken volgens de instructies. Voer altijd een allergietest uit voor gebruik. Met Phyto Phytocolor geniet je van een intense kleur met natuurlijke schakeringen en verzorg je je haar op een zachte manier. - De T.LeClerc Geheime Droge Olie is een voedende olie met een poederige geur van witte iris die de huid hydrateert en een langdurige geur achterlaat. Geschikt voor de gevoelige huid, bevat deze olie 5 plantaardige oliën zoals rijstzemelolie, zoete amandelolie en sesamzaadolie die de huid kalmeren, beschermen en revitaliseren. Ideaal voor het lichaam en haar, kan de olie het hele jaar door worden gebruikt door een paar druppels in de handpalmen te verwarmen en in te masseren. Voor een effectieve werking kan de olie ook als masker op de hoofdhuid en lengten worden aangebracht en na 30 minuten worden uitgespoeld. Gebruik met voorzichtigheid bij allergieën voor een van de ingrediënten. model-index: - name: SentenceTransformer based on ymelka/robbert-cosmetic-v2-finetuned results: - task: type: semantic-similarity name: Semantic Similarity dataset: name: dev type: dev metrics: - type: pearson_cosine value: 0.8646855536832553 name: Pearson Cosine - type: spearman_cosine value: 0.9307532547762918 name: Spearman Cosine - type: pearson_manhattan value: 0.8406446921386265 name: Pearson Manhattan - type: spearman_manhattan value: 0.9304243863652625 name: Spearman Manhattan - type: pearson_euclidean value: 0.8382514640398003 name: Pearson Euclidean - type: spearman_euclidean value: 0.9307301686010424 name: Spearman Euclidean - type: pearson_dot value: 0.8395448144279598 name: Pearson Dot - type: spearman_dot value: 0.926131782390444 name: Spearman Dot - type: pearson_max value: 0.8646855536832553 name: Pearson Max - type: spearman_max value: 0.9307532547762918 name: Spearman Max --- # SentenceTransformer based on ymelka/robbert-cosmetic-v2-finetuned This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [ymelka/robbert-cosmetic-v2-finetuned](https://huggingface.co/ymelka/robbert-cosmetic-v2-finetuned). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. 
## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [ymelka/robbert-cosmetic-v2-finetuned](https://huggingface.co/ymelka/robbert-cosmetic-v2-finetuned) <!-- at revision 12eb96695b4dd6222445c8973202325d5d2c61db --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: RobertaModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("ymelka/robbert-cosmetic-similarity") # Run inference sentences = [ 'Een hydraterende gezichtsverzorging die de doffe huid van een 18-jarige vrouw weer laat stralen. Bevat antioxidanten die de huid beschermen tegen schadelijke invloeden van buitenaf. Vermindert wallen en onzuiverheden voor een egale en gezonde teint.', 'Alpecin Hypo-Sensitive Shampoo is speciaal ontwikkeld voor mensen met een gevoelige hoofdhuid en biedt een zachte reiniging en hydratatie. De hypoallergene formule bevat geen parfum, kleurstoffen of conserveermiddelen en is zelfs geschikt voor mensen met atopisch eczeem. Met actieve ingrediënten zoals salie-extract en bisabolol kalmeert en voorkomt het irritaties, terwijl het de huidbarrière versterkt voor een gezonde balans. Gebruik de shampoo dagelijks door het zachtjes in te masseren op nat haar en grondig uit te spoelen. De fles bevat 250 ml en is ideaal voor mensen met een droge en gevoelige hoofdhuid.', 'Phyto Phytocolor Permanente Kleuring 3 Donkerbruin 1 Kit is een haarverf verrijkt met plantaardige pigmenten voor een rijke kleur die niet vervaagt en 100% grijze haren dekt. De formule bevat monoï en jojoba oliën voor een mooie glans en verzorging, terwijl kalmerende ingrediënten irritatie tegengaan. De kleurcrème bevat geen PPD of resorcine en is geschikt voor haar. Breng het mengsel aan op droog, ongewassen haar en laat het inwerken volgens de instructies. Voer altijd een allergietest uit voor gebruik. 
Met Phyto Phytocolor geniet je van een intense kleur met natuurlijke schakeringen en verzorg je je haar op een zachte manier.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Semantic Similarity * Dataset: `dev` * Evaluated with [<code>EmbeddingSimilarityEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator) | Metric | Value | |:--------------------|:-----------| | pearson_cosine | 0.8647 | | **spearman_cosine** | **0.9308** | | pearson_manhattan | 0.8406 | | spearman_manhattan | 0.9304 | | pearson_euclidean | 0.8383 | | spearman_euclidean | 0.9307 | | pearson_dot | 0.8395 | | spearman_dot | 0.9261 | | pearson_max | 0.8647 | | spearman_max | 0.9308 | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 4,200 training samples * Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>score</code> * Approximate statistics based on the first 1000 samples: | | sentence1 | sentence2 | score | |:--------|:-----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|:---------------------------------------------------------------| | type | string | string | float | | details | <ul><li>min: 35 tokens</li><li>mean: 56.45 tokens</li><li>max: 86 tokens</li></ul> | <ul><li>min: 80 tokens</li><li>mean: 146.59 tokens</li><li>max: 223 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.51</li><li>max: 1.0</li></ul> | * Samples: | sentence1 | sentence2 | score | 
|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------| | <code>Een voedingssupplement met essentiële vetzuren en vitamines om de huid van binnenuit te voeden en te ondersteunen. Dit supplement is speciaal samengesteld voor vrouwen van 22 jaar oud om droogheid te bestrijden en de algehele gezondheid van de huid te verbeteren.</code> | <code>De Lancaster Sun Beauty Sublime Tan Body Milk SPF30 is een vederlichte en niet-vette bodylotion die de huid beschermt tegen schadelijke zonnestralen en helpt bij het herstellen van zonschade. De crèmige textuur smelt direct op de huid en zorgt voor een gezonde bruine teint in de helft van de gebruikelijke tijd. De formule is geschikt voor alle huidtypes en laat geen wit laagje achter, met een fluweelachtige finish als resultaat. Belangrijke actieve ingrediënten zijn onder andere Bambusa arundinacea stem extract en Mauritia flexuosa fruit oil, die de huid hydrateren en beschermen. Breng royaal aan vóór blootstelling aan de zon en herhaal regelmatig. Let op: overmatige blootstelling aan de zon kan schadelijk zijn voor de gezondheid. Inhoud: 175 ml.</code> | <code>0.0040877847932279</code> | | <code>Een geavanceerde gezichtsverzorging voor vrouwen van 50 jaar en ouder, gericht op het verminderen van poriën, het verbeteren van een doffe huid en het verminderen van wallen. Deze verzorging bevat krachtige ingrediënten die de huid hydrateren, verstevigen en verjongen, waardoor de tekenen van veroudering worden verminderd en de huid een jeugdige uitstraling krijgt.</code> | <code>De L'Oréal Paris Age Perfect Re-Hydraterende Dagcreme is speciaal ontwikkeld voor vrouwen vanaf 50 jaar om pigmentvlekken te verminderen en de huid te verstevigen. Verrijkt met sojapeptiden, hydrateert en stimuleert deze crème de natuurlijke synthese van huidvezels, waardoor de huid stralend en stevig wordt. Na één maand zijn pigmentvlekken vervaagd en is de huid gehydrateerd. Breng 's ochtends aan op een gereinigd gezicht en hals voor optimale resultaten. Let op: alleen voor uitwendig gebruik.</code> | <code>0.9995909333229064</code> | | <code>Een rijke gezichtsverzorging voor vrouwen boven de 70 jaar die last hebben van droogheid, roodheid en vergrote poriën. Deze verzorging hydrateert diep, vermindert roodheid en herstelt de natuurlijke balans van de huid. 
Het helpt ook om de poriën te verfijnen en de huid te verstevigen.</code> | <code>De Clarins Extra-Firming Verstevigende Behandeling Essence is speciaal ontwikkeld voor vrouwen in de veertig om de huid intensief te hydrateren, in balans te brengen en een jeugdige uitstraling te geven. Deze essence combineert krachtige botanische ingrediënten met hyaluronzuur om de huid te vernieuwen en te verkwikken, waardoor deze steviger en elastischer wordt. De fluweelachtige textuur zorgt voor een vollere en soepelere huid. Gebruik dagelijks 's ochtends en/of 's avonds door een paar druppels zachtjes op het gezicht aan te brengen. Niet aanbevolen voor personen onder de 40 jaar.</code> | <code>0.9986273050308228</code> | * Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "pairwise_cos_sim" } ``` ### Evaluation Dataset #### Unnamed Dataset * Size: 690 evaluation samples * Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>score</code> * Approximate statistics based on the first 1000 samples: | | sentence1 | sentence2 | score | |:--------|:----------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------|:--------------------------------------------------------------| | type | string | string | float | | details | <ul><li>min: 35 tokens</li><li>mean: 56.5 tokens</li><li>max: 81 tokens</li></ul> | <ul><li>min: 97 tokens</li><li>mean: 146.09 tokens</li><li>max: 207 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.5</li><li>max: 1.0</li></ul> | * Samples: | sentence1 | sentence2 | score | |:----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------| | <code>Een diep reinigende gezichtsreiniger speciaal ontwikkeld voor vrouwen van 36 jaar en ouder met pigmentvlekken, rimpels en een vette huid. Deze reiniger verwijdert onzuiverheden en overtollig talg, verheldert de huid en vermindert tekenen van veroudering.</code> | <code>Embryolisse Eco-Recharge Vloeibare Melk-Crème 400ml is een hydraterende en voedende crème die de huid intensief verzorgt en beschermt. Deze vloeibare melk-crème is geschikt voor alle huidtypes en zorgt voor een zachte en soepele huid. De formule bevat actieve ingrediënten zoals shea boter, aloë vera en soja-eiwitten die de huid hydrateren, kalmeren en herstellen. Breng de crème dagelijks aan op een gereinigde huid en masseer zachtjes in. 
Gebruik het product bij voorkeur 's ochtends en 's avonds voor optimale resultaten. Let op: vermijd contact met de ogen en bij eventuele huidirritatie het gebruik stoppen.</code> | <code>0.019439160823822</code> | | <code>Een voedingssupplement met essentiële vitaminen en mineralen die de huid van binnenuit voeden en beschermen. Speciaal samengesteld voor vrouwen van 20 jaar om de huid te helpen bij het reguleren van talgproductie, het verminderen van vlekken en het verbeteren van de algehele huidconditie. Dit supplement draagt bij aan een gezonde en stralende huid.</code> | <code>Attitude Minerale Zonne Stick Orange Blossom SPF30 60g biedt een breedbandige bescherming tegen UVA- en UVB-stralen, met non-nano zinkoxide als actief ingrediënt. Deze veganistische formule is EWG Verified en dermatologisch getest, waardoor het veilig is voor de huid en het milieu. De handige biologisch afbreekbare kartonnen tube maakt het aanbrengen gemakkelijk en laat geen witte vlekken achter. Geniet van het heerlijke oranjebloesemaroma terwijl je huid beschermd wordt tegen de zon. Gebruik deze zonne stick voor een effectieve bescherming tijdens buitenactiviteiten.</code> | <code>0.0337340645492076</code> | | <code>Een milde gezichtsreiniger speciaal ontwikkeld voor vrouwen van 50 jaar en ouder. Deze reiniger is ideaal voor het verwijderen van onzuiverheden en make-up, terwijl het de huid hydrateert en verzacht. Het helpt bij het verminderen van droogheid en roodheid, waardoor de huid er stralender en gezonder uitziet.</code> | <code>Avène Cleanance Zuiverende En Matterende Reinigingsgel is speciaal ontwikkeld voor de vette huid met imperfecties. Deze reinigingsgel reinigt de huid diep en verwijdert onzuiverheden en overtollig talg, waardoor de huid fris en mat blijft. De formule bevat actieve ingrediënten zoals Avène Thermaal Water en Monolaurine die de talgproductie reguleren en de huid zuiveren. Gebruik de reinigingsgel dagelijks, breng aan op een vochtig gezicht en masseer zachtjes in. Spoel af met water en dep de huid droog. Vermijd contact met de ogen. 
Geschikt voor dagelijks gebruik.</code> | <code>0.999606430530548</code> | * Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "pairwise_cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `learning_rate`: 2e-05 - `weight_decay`: 0.01 - `warmup_ratio`: 0.1 - `bf16`: True - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 8 - `per_device_eval_batch_size`: 8 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.01 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 3 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: True - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - 
`include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | loss | dev_spearman_cosine | |:------:|:----:|:-------------:|:------:|:-------------------:| | 0 | 0 | - | - | 0.7531 | | 0.1905 | 100 | 3.0709 | 2.7162 | 0.8810 | | 0.3810 | 200 | 2.8254 | 2.8051 | 0.8745 | | 0.5714 | 300 | 2.8368 | 2.7475 | 0.8922 | | 0.7619 | 400 | 2.7198 | 2.6713 | 0.8986 | | 0.9524 | 500 | 2.6541 | 2.6326 | 0.9097 | | 1.1429 | 600 | 2.6628 | 2.5709 | 0.9131 | | 1.3333 | 700 | 2.4183 | 2.7987 | 0.9213 | | 1.5238 | 800 | 2.5213 | 2.5974 | 0.9197 | | 1.7143 | 900 | 2.3733 | 3.1097 | 0.8956 | | 1.9048 | 1000 | 2.3562 | 2.9513 | 0.9229 | | 2.0952 | 1100 | 2.3511 | 2.9670 | 0.9246 | | 2.2857 | 1200 | 2.1765 | 3.2356 | 0.9167 | | 2.4762 | 1300 | 2.2738 | 3.1946 | 0.9280 | | 2.6667 | 1400 | 2.1162 | 3.2512 | 0.9201 | | 2.8571 | 1500 | 2.0996 | 3.0392 | 0.9288 | | 3.0 | 1575 | - | - | 0.9308 | ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.0.1 - Transformers: 4.42.3 - PyTorch: 2.3.0+cu121 - Accelerate: 0.32.1 - Datasets: 2.20.0 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### CoSENTLoss ```bibtex @online{kexuefm-8847, title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT}, author={Su Jianlin}, year={2022}, month={Jan}, url={https://kexue.fm/archives/8847}, } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
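The card above documents a CoSENT fine-tune: scale 20 over pairwise cosine similarities (CoSENT is a ranking loss that pushes pairs with higher gold scores to higher cosine similarity), batch size 8, learning rate 2e-05, weight decay 0.01, warmup ratio 0.1, bf16, and a no-duplicates batch sampler. Since the 4,200-pair training set is unnamed and unpublished, the sketch below substitutes tiny placeholder rows; everything else mirrors the listed hyperparameters. It is a hedged reconstruction with the Sentence Transformers 3.x trainer API, not the author's script.

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.losses import CoSENTLoss
from sentence_transformers.training_args import BatchSamplers

model = SentenceTransformer("ymelka/robbert-cosmetic-v2-finetuned")

# Placeholder rows standing in for the unpublished 4,200-pair dataset
# (columns match the card: sentence1, sentence2, score in [0, 1]).
pairs = {
    "sentence1": ["a mild cleanser for sensitive skin", "a nourishing day cream"],
    "sentence2": ["a purifying gel for oily skin", "a rich moisturising face cream"],
    "score": [0.1, 0.9],
}
train_dataset = Dataset.from_dict(pairs)
eval_dataset = Dataset.from_dict(pairs)

# "scale": 20.0 with pairwise cosine similarity, as in the card's loss config.
loss = CoSENTLoss(model, scale=20.0)

# Tracks the Pearson/Spearman metrics reported in the card's "dev" table.
evaluator = EmbeddingSimilarityEvaluator(
    sentences1=pairs["sentence1"],
    sentences2=pairs["sentence2"],
    scores=pairs["score"],
    name="dev",
)

args = SentenceTransformerTrainingArguments(
    output_dir="robbert-cosmetic-similarity",
    num_train_epochs=3,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    learning_rate=2e-5,
    weight_decay=0.01,
    warmup_ratio=0.1,
    bf16=True,
    eval_strategy="steps",
    batch_sampler=BatchSamplers.NO_DUPLICATES,
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    loss=loss,
    evaluator=evaluator,
)
trainer.train()
```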
null
Non_BioNLP
{"base_model": "ymelka/robbert-cosmetic-v2-finetuned", "datasets": [], "language": [], "library_name": "sentence-transformers", "metrics": ["pearson_cosine", "spearman_cosine", "pearson_manhattan", "spearman_manhattan", "pearson_euclidean", "spearman_euclidean", "pearson_dot", "spearman_dot", "pearson_max", "spearman_max"], "pipeline_tag": "sentence-similarity", "tags": ["sentence-transformers", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:4200", "loss:CoSENTLoss"], "widget": [{"source_sentence": "Een milde reiniger speciaal ontwikkeld voor de gevoelige huid van mannen van 37 jaar. Deze reiniger helpt roodheid en onzuiverheden te verminderen, terwijl het de huid zacht en gehydrateerd achterlaat.", "sentences": ["De Shiseido Waso Koshirice Tinted Spot Treatment SOS Imperfections Subtle Peach is een hybride behandeling en concealer die puistjes vervaagt en verzorgt. Met het antioxiderende Koshihikari-rijst extract en salicylzuur vermindert het direct oneffenheden en roodheid, terwijl het de huid voedt en beschermt. De formule bevat ook Japanse Shikuwasa-extract en glycyrrhizinaat voor een verbeterde huidbarrière en egale teint. Breng een kleine hoeveelheid aan op probleemzones 2-3 keer per dag voor een langdurig effect. Geschikt voor alle huidtypes.", "Neutrogena Retinol Boost Intense Nacht Serum is een krachtig serum dat de huid 's nachts intensief verzorgt en herstelt. Dit serum bevat retinol, een krachtig ingrediënt dat de huidtextuur verbetert, fijne lijntjes en rimpels vermindert en de huidteint egaliseert. Daarnaast bevat het serum hyaluronzuur en vitamine C, die de huid hydrateren en beschermen tegen schadelijke invloeden van buitenaf. Breng elke avond een paar druppels aan op een gereinigde huid en masseer zachtjes in. Gebruik het serum in combinatie met een SPF overdag, aangezien retinol de huid gevoeliger kan maken voor de zon. Dit serum is geschikt voor alle huidtypes, maar kan irritatie veroorzaken bij een gevoelige huid.", "Nivea Labello Protect+ Lippenbalsem SPF 15 Duo 2x4,8g verzorgt droge en gebarsten lippen, kalmeert direct en houdt je lippen 24u lang gehydrateerd. De formule trekt snel in en helpt gebarsten lippen te herstellen, terwijl de SPF 15 beschermt tegen schadelijke zonnestralen. Met actieve ingrediënten zoals sheaboter, jojoba-olie en vitamine E. Gebruik de lippenbalsem regelmatig voor zachte en soepele lippen. Let op: niet gebruiken bij allergie voor een van de ingrediënten."]}, {"source_sentence": "Een voedingssupplement met essentiële voedingsstoffen en antioxidanten om de huid van binnenuit te voeden en te beschermen. Ideaal voor vrouwen van 29 jaar die streven naar een gezonde en stralende huid.", "sentences": ["De Babor Shaping Vitamine ACE Lichaamscrème is een luxe crème die de huid intensief voedt en hydrateert. Dankzij de krachtige combinatie van vitamine A, C en E wordt de huid verstevigd, verhelderd en beschermd tegen vrije radicalen. Deze crème is ideaal voor het verbeteren van de elasticiteit en stevigheid van de huid, waardoor het uiterlijk van cellulitis en striae wordt verminderd. De actieve ingrediënten dringen diep door in de huid om langdurige resultaten te bieden. Breng de crème dagelijks aan op een gereinigde huid en masseer zachtjes in tot het volledig is opgenomen. Vermijd contact met de ogen en gebruik niet op een beschadigde huid.", "Ma Provence Shampoo Anti-Roos met Natuurlijke Witte Klei is een handig en milieuvriendelijk product dat perfect is voor op reis. 
Met een handige maat van 85 g en een centraal gat voor een gemakkelijke grip, komt deze shampoo overeen met 40-50 shampoos, wat gelijk staat aan 2 flessen van 250 ml. Deze shampoo heeft geen conditioner nodig, wat resulteert in minder verpakkingsafval. De formule is 100% natuurlijk en bevat 20% biologische ingrediënten, waardoor het ideaal is voor mensen die op zoek zijn naar natuurlijke producten. Ma Provence Shampoo Anti-Roos is speciaal ontwikkeld om roos te bestrijden en is gecertificeerd als 100% natuurlijk en biologisch. Gebruiksaanwijzing: Breng een kleine hoeveelheid shampoo aan op nat haar, masseer zachtjes in en spoel grondig uit. Gebruik indien nodig. Voorzorgsmaatregelen: Vermijd contact met de ogen en bij eventuele irritatie stop het gebruik.", "Avène Men Hydraterende Anti-Agingverzorging is speciaal ontwikkeld voor mannen om de huid te hydrateren, revitaliseren en strakker te maken. Deze verzorging werkt op de tekenen van huidveroudering, zoals de structuur van de dermis en elasticiteit, dankzij actieve ingrediënten zoals hyaluronzuur en antioxidanten. De niet-plakkerige textuur zorgt voor een comfortabel gevoel en is geschikt voor alle gevoelige huidtypes, zelfs na het scheren. Breng de verzorging aan op het gelaat en de hals voor een stevigere en compacter uitziende huid. Gebruik de pompfles van 50 ml volgens de aanbevolen instructies voor optimale resultaten."]}, {"source_sentence": "Een milde reiniger speciaal ontwikkeld voor de rijpere huid van vrouwen boven de 60. Verrijkt met antioxidanten en hydraterende ingrediënten om de doffe huid te revitaliseren en onzuiverheden te verwijderen.", "sentences": ["De Cattier Zuiverende Reinigingsgel is speciaal ontwikkeld voor de vette huid met onzuiverheden en puistjes. Met biologische essentiële oliën van tea tree, rozemarijn en pepermunt reinigt deze gel zacht de huid en verwijdert effectief onzuiverheden en overtollig talg. Verrijkt met aloë vera, biedt het een aangename sensatie van frisheid en laat de huid helder en gezond achter. De actieve ingrediënten, waaronder biologische tea tree olie en aloë vera, helpen de huid te zuiveren, hydrateren en beschermen. Breng de gel aan op een vochtig gezicht, vermijd de oogcontour, masseer in en spoel af met lauw water. Gebruik de gel regelmatig voor een zuivere en gezonde huid.", "De Clarins Extra-Firming Anti-Rimpel Rijke Verstevigende Dagcrème is een ideale anti-ageing dagverzorging voor de droge huid, speciaal ontworpen voor vrouwen vanaf 40 jaar. Deze crème zorgt voor een zichtbaar stevigere en energieke huid, vermindert rimpels en herdefinieert het ovaal van het gezicht. De formule bevat actieve ingrediënten zoals shea butter, haverextract en vitamine E die de huid hydrateren, beschermen en verstevigen. Breng 's ochtends aan op een schone huid voor een egale teint en stralende huid. Let op: vermijd contact met de ogen en bij eventuele irritatie stop het gebruik. Pot 50 ml.", "La Roche-Posay Effaclar Zuiverende Schuimende Reiningsgel is speciaal ontwikkeld voor de vette huid met neiging tot acne. Deze reinigingsgel reinigt en zuivert het gezicht zachtjes, verwijdert onzuiverheden en overmatig sebum, en houdt de huid schoon en fris. De formule bevat thermaal water van La Roche-Posay voor een kalmerende werking. Geschikt voor volwassenen en adolescenten met een vette huid, oneffenheden of acne. Gebruik 1 à 2 keer per dag, masseer zachtjes in op een vochtig gezicht en spoel grondig af met water. 
Niet gebruiken bij overgevoeligheid voor een van de ingrediënten."]}, {"source_sentence": "Een voedende gezichtsverzorging die diep in de huid doordringt om droogheid te behandelen. Speciaal samengesteld voor vrouwen van 24 jaar, deze crème helpt de huid te hydrateren en te herstellen, waardoor een gezonde gloed wordt bevorderd.", "sentences": ["Waam Damast Bloemenwater Bio Spray 200ml is een kalmerende en verfrissende spray met de heerlijke geur van Damastroos. Dit bloemenwater hydrateert intensief, verstevigt en zuivert de huid op milde wijze, ideaal voor gecombineerde tot vette huidtypes. Het staat bekend om zijn antiverouderingseigenschappen en helpt bij het voorkomen van huidveroudering. De spray kan gebruikt worden voor het gezicht, lichaam en haar, en is geschikt voor alle huidtypes, vooral rijpere of reactieve huid. De belangrijkste actieve ingrediënten zijn biologisch aloë vera water en Damastroosbloesemwater. Gebruik de spray om make-up te verwijderen, huidirritatie te kalmeren of je huis subtiel te parfumeren. Niet doorslikken en bewaren op kamertemperatuur.", "Benecos Natural Care Lippenbalsem Sinaas is een fruitige lipverzorging met een heerlijke zoete sinaasappelgeur, verrijkt met hoogwaardige biologische sinaasappelolie om de lippen zacht en gehydrateerd te houden. Deze lippenbalsem is ideaal voor dagelijks gebruik en biedt bescherming tegen uitdroging en schrale lippen. De natuurlijke ingrediënten zorgen voor een milde verzorging en een aangename geur. Breng de lippenbalsem eenvoudig aan op de lippen voor een langdurige hydratatie. Deze lippenbalsem is geschikt voor alle huidtypes en bevat 4,8 g product. Gebruik het product naar behoefte en geniet van zachte en verzorgde lippen.", "De Fleurance Nature Hydraterende Gezichtscreme Met Aloë Vera is speciaal ontwikkeld om de natuurlijke hydratatie van de huid te behouden en te beschermen tegen agressieve invloeden van buitenaf. Deze lichte crème bevat aloë vera, sheaboter en jojobaolie die de huid verzachten, hydrateren en voeden. Geschikt voor alle huidtypes, deze bio gecertificeerde crème trekt snel in en laat de huid soepel en gezond aanvoelen. Breng de crème aan op gezicht en hals met lichte masserende bewegingen voor een gezonde en goed gehydrateerde huid. Bevat 50 ml."]}, {"source_sentence": "Een hydraterende gezichtsverzorging die de doffe huid van een 18-jarige vrouw weer laat stralen. Bevat antioxidanten die de huid beschermen tegen schadelijke invloeden van buitenaf. Vermindert wallen en onzuiverheden voor een egale en gezonde teint.", "sentences": ["Alpecin Hypo-Sensitive Shampoo is speciaal ontwikkeld voor mensen met een gevoelige hoofdhuid en biedt een zachte reiniging en hydratatie. De hypoallergene formule bevat geen parfum, kleurstoffen of conserveermiddelen en is zelfs geschikt voor mensen met atopisch eczeem. Met actieve ingrediënten zoals salie-extract en bisabolol kalmeert en voorkomt het irritaties, terwijl het de huidbarrière versterkt voor een gezonde balans. Gebruik de shampoo dagelijks door het zachtjes in te masseren op nat haar en grondig uit te spoelen. De fles bevat 250 ml en is ideaal voor mensen met een droge en gevoelige hoofdhuid.", "Phyto Phytocolor Permanente Kleuring 3 Donkerbruin 1 Kit is een haarverf verrijkt met plantaardige pigmenten voor een rijke kleur die niet vervaagt en 100% grijze haren dekt. De formule bevat monoï en jojoba oliën voor een mooie glans en verzorging, terwijl kalmerende ingrediënten irritatie tegengaan. 
De kleurcrème bevat geen PPD of resorcine en is geschikt voor haar. Breng het mengsel aan op droog, ongewassen haar en laat het inwerken volgens de instructies. Voer altijd een allergietest uit voor gebruik. Met Phyto Phytocolor geniet je van een intense kleur met natuurlijke schakeringen en verzorg je je haar op een zachte manier.", "De T.LeClerc Geheime Droge Olie is een voedende olie met een poederige geur van witte iris die de huid hydrateert en een langdurige geur achterlaat. Geschikt voor de gevoelige huid, bevat deze olie 5 plantaardige oliën zoals rijstzemelolie, zoete amandelolie en sesamzaadolie die de huid kalmeren, beschermen en revitaliseren. Ideaal voor het lichaam en haar, kan de olie het hele jaar door worden gebruikt door een paar druppels in de handpalmen te verwarmen en in te masseren. Voor een effectieve werking kan de olie ook als masker op de hoofdhuid en lengten worden aangebracht en na 30 minuten worden uitgespoeld. Gebruik met voorzichtigheid bij allergieën voor een van de ingrediënten."]}], "model-index": [{"name": "SentenceTransformer based on ymelka/robbert-cosmetic-v2-finetuned", "results": [{"task": {"type": "semantic-similarity", "name": "Semantic Similarity"}, "dataset": {"name": "dev", "type": "dev"}, "metrics": [{"type": "pearson_cosine", "value": 0.8646855536832553, "name": "Pearson Cosine"}, {"type": "spearman_cosine", "value": 0.9307532547762918, "name": "Spearman Cosine"}, {"type": "pearson_manhattan", "value": 0.8406446921386265, "name": "Pearson Manhattan"}, {"type": "spearman_manhattan", "value": 0.9304243863652625, "name": "Spearman Manhattan"}, {"type": "pearson_euclidean", "value": 0.8382514640398003, "name": "Pearson Euclidean"}, {"type": "spearman_euclidean", "value": 0.9307301686010424, "name": "Spearman Euclidean"}, {"type": "pearson_dot", "value": 0.8395448144279598, "name": "Pearson Dot"}, {"type": "spearman_dot", "value": 0.926131782390444, "name": "Spearman Dot"}, {"type": "pearson_max", "value": 0.8646855536832553, "name": "Pearson Max"}, {"type": "spearman_max", "value": 0.9307532547762918, "name": "Spearman Max"}]}]}]}
task
[ "TEXT_CLASSIFICATION", "SEMANTIC_SIMILARITY" ]
45,550
chunwoolee0/klue_ynat_roberta_base_model
chunwoolee0
text-classification
[ "transformers", "pytorch", "tensorboard", "roberta", "text-classification", "generated_from_trainer", "dataset:klue", "arxiv:2105.09680", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-07-18T14:43:31Z
2023-09-12T01:28:58+00:00
25
0
--- datasets: - klue metrics: - f1 tags: - generated_from_trainer model-index: - name: klue_ynat_roberta_base_model results: - task: type: text-classification name: Text Classification dataset: name: klue type: klue config: ynat split: validation args: ynat metrics: - type: f1 value: 0.872014500465787 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # klue_ynat_roberta_base_model This model is a fine-tuned version of [klue/roberta-base](https://huggingface.co/klue/roberta-base) on the klue dataset. It achieves the following results on the evaluation set: - Loss: 0.3747 - F1: 0.8720 ## Model description Pretrained RoBERTa Model on Korean Language. See [Github](https://github.com/KLUE-benchmark/KLUE) and [Paper](https://arxiv.org/abs/2105.09680) for more details. ## Intended uses & limitations Pretrained RoBERTa Model on Korean Language. See Github and Paper for more details. ## Training and evaluation data ## How to use _NOTE:_ Use `BertTokenizer` instead of RobertaTokenizer. (`AutoTokenizer` will load `BertTokenizer`) ```python from transformers import AutoModel, AutoTokenizer model = AutoModel.from_pretrained("klue/roberta-base") tokenizer = AutoTokenizer.from_pretrained("klue/roberta-base") ``` ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | No log | 1.0 | 179 | 0.4838 | 0.8444 | | No log | 2.0 | 358 | 0.3848 | 0.8659 | | 0.4203 | 3.0 | 537 | 0.3778 | 0.8690 | | 0.4203 | 4.0 | 716 | 0.3762 | 0.8702 | | 0.4203 | 5.0 | 895 | 0.3747 | 0.8720 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # klue_ynat_roberta_base_model This model is a fine-tuned version of [klue/roberta-base](https://huggingface.co/klue/roberta-base) on the klue dataset. It achieves the following results on the evaluation set: - Loss: 0.3747 - F1: 0.8720 ## Model description Pretrained RoBERTa Model on Korean Language. See [Github](https://github.com/KLUE-benchmark/KLUE) and [Paper](https://arxiv.org/abs/2105.09680) for more details. ## Intended uses & limitations Pretrained RoBERTa Model on Korean Language. See Github and Paper for more details. ## Training and evaluation data ## How to use _NOTE:_ Use `BertTokenizer` instead of RobertaTokenizer. (`AutoTokenizer` will load `BertTokenizer`) ```python from transformers import AutoModel, AutoTokenizer model = AutoModel.from_pretrained("klue/roberta-base") tokenizer = AutoTokenizer.from_pretrained("klue/roberta-base") ``` ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | F1 | |:-------------:|:-----:|:----:|:---------------:|:------:| | No log | 1.0 | 179 | 0.4838 | 0.8444 | | No log | 2.0 | 358 | 0.3848 | 0.8659 | | 0.4203 | 3.0 | 537 | 0.3778 | 0.8690 | | 0.4203 | 4.0 | 716 | 0.3762 | 0.8702 | | 0.4203 | 5.0 | 895 | 0.3747 | 0.8720 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.1+cu118 - Datasets 2.13.1 - Tokenizers 0.13.3
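Note that the snippet in the card loads only the `klue/roberta-base` backbone with `AutoModel`, which carries no classification head. Below is a hedged sketch for running the fine-tuned classifier itself; it assumes the checkpoint ships a sequence-classification head and an `id2label` map, and the Korean headline is just an illustrative input:

```python
# Hedged sketch: topic classification with the fine-tuned checkpoint.
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

repo = "chunwoolee0/klue_ynat_roberta_base_model"
tokenizer = AutoTokenizer.from_pretrained(repo)  # resolves to BertTokenizer, per the note above
model = AutoModelForSequenceClassification.from_pretrained(repo)

text = "유튜브 내달 2일까지 크리에이터 지원 공간 운영"  # illustrative news headline
inputs = tokenizer(text, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
pred = int(logits.argmax(dim=-1))
print(model.config.id2label.get(pred, pred))
```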
{"datasets": ["klue"], "metrics": ["f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "klue_ynat_roberta_base_model", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "klue", "type": "klue", "config": "ynat", "split": "validation", "args": "ynat"}, "metrics": [{"type": "f1", "value": 0.872014500465787, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,551
cifope/nllb-fr-wol-wol-fr-3.3B
cifope
translation
[ "transformers", "pytorch", "m2m_100", "text2text-generation", "translation", "wo", "fr", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-04-07T22:32:33Z
2024-04-07T23:14:42+00:00
18
4
--- language: - wo - fr license: mit metrics: - bleu pipeline_tag: translation ---
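The card is metadata-only, so here is a hedged inference sketch for the French-to-Wolof direction. The NLLB-style language codes (`fra_Latn`, `wol_Latn`) are assumptions based on the model's NLLB lineage; confirm them against the repository's tokenizer configuration:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo = "cifope/nllb-fr-wol-wol-fr-3.3B"
tokenizer = AutoTokenizer.from_pretrained(repo, src_lang="fra_Latn")  # assumed code
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

inputs = tokenizer("Bonjour, comment allez-vous ?", return_tensors="pt")
output_ids = model.generate(
    **inputs,
    # NLLB decodes into the language whose tag is forced as the first token;
    # "wol_Latn" is an assumed code for Wolof.
    forced_bos_token_id=tokenizer.convert_tokens_to_ids("wol_Latn"),
    max_new_tokens=64,
)
print(tokenizer.batch_decode(output_ids, skip_special_tokens=True)[0])
```

Swapping `src_lang` and the forced token should give the Wolof-to-French direction, since the repository name advertises both.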
null
Non_BioNLP
{"language": ["wo", "fr"], "license": "mit", "metrics": ["bleu"], "pipeline_tag": "translation"}
task
[ "TRANSLATION" ]
45,552
lperng/medical-model
lperng
null
[ "gguf", "llama", "unsloth", "medical", "chinese", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
2025-02-14T21:56:21Z
2025-02-14T22:07:05+00:00
28
0
--- license: apache-2.0 tags: - unsloth - llama - medical - chinese --- # medical-model This is a fine-tuned version of the [DeepSeek-R1-Distill-Llama-8B](https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-8B) model, specifically trained on a medical dataset for enhanced performance in medical question answering and reasoning. ## Model Description - **Base Model:** DeepSeek-R1-Distill-Llama-8B - **Fine-tuning Dataset:** [FreedomIntelligence/medical-o1-reasoning-SFT](https://huggingface.co/datasets/FreedomIntelligence/medical-o1-reasoning-SFT) - **Fine-tuning Method:** Unsloth's SFTTrainer with LoRA - **Language:** Chinese ## Intended Uses & Limitations This model is intended for research and educational purposes related to medical question answering. It can provide insightful responses to medical queries, but it is crucial to understand its limitations: - **Not a Medical Professional:** The model is not a substitute for professional medical advice. Consult with a healthcare provider for any health concerns or before making any decisions based on the model's output. - **Potential Biases:** The training data may contain biases that could be reflected in the model's responses. Always critically evaluate the information provided. - **Limited Scope:** The model's knowledge is limited to the information present in its training dataset. It may not be able to answer questions outside its domain of expertise. ## How to Use You can use this model with the following code: ``` python from unsloth import FastLanguageModel import torch model, tokenizer = FastLanguageModel.from_pretrained( model_name="lperng/medical-model", load_in_4bit=True, # Optional: Use 4-bit quantization ) prompt = "你的医学问题?" # Replace with your medical question inputs = tokenizer([prompt], return_tensors="pt").to("cuda") outputs = model.generate(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask) response = tokenizer.batch_decode(outputs)[0] print(response) ``` ## Training and Evaluation Data This model was trained on the `FreedomIntelligence/medical-o1-reasoning-SFT` dataset. The dataset contains a collection of medical questions and corresponding answers. Evaluation metrics and results will be added in future updates. ## Ethical Considerations - **Potential for Misinformation:** The model's output should be carefully reviewed to ensure accuracy and prevent the spread of misinformation. - **Privacy:** When using the model with personal health information, ensure compliance with relevant privacy regulations. - **Responsible Use:** Use the model responsibly and ethically, considering its potential impact on individuals and society. ## Citation If you use this model in your work, please cite it as follows: ```bibtex @misc{medical-model, author = {lperng}, title = {medical-model}, year = {2023}, publisher = {Hugging Face}, howpublished = {\url{[redacted link]}}, } ``` ## Disclaimer This model is provided "as is" without warranty of any kind, express or implied. The authors and contributors are not responsible for any damages or liabilities arising from the use of this model.
null
BioNLP
# medical-model This is a fine-tuned version of the [DeepSeek-R1-Distill-Llama-8B](https://huggingface.co/unsloth/DeepSeek-R1-Distill-Llama-8B) model, specifically trained on a medical dataset for enhanced performance in medical question answering and reasoning. ## Model Description - **Base Model:** DeepSeek-R1-Distill-Llama-8B - **Fine-tuning Dataset:** [FreedomIntelligence/medical-o1-reasoning-SFT](https://huggingface.co/datasets/FreedomIntelligence/medical-o1-reasoning-SFT) - **Fine-tuning Method:** Unsloth's SFTTrainer with LoRA - **Language:** Chinese ## Intended Uses & Limitations This model is intended for research and educational purposes related to medical question answering. It can provide insightful responses to medical queries, but it is crucial to understand its limitations: - **Not a Medical Professional:** The model is not a substitute for professional medical advice. Consult with a healthcare provider for any health concerns or before making any decisions based on the model's output. - **Potential Biases:** The training data may contain biases that could be reflected in the model's responses. Always critically evaluate the information provided. - **Limited Scope:** The model's knowledge is limited to the information present in its training dataset. It may not be able to answer questions outside its domain of expertise. ## How to Use You can use this model with the following code: ``` python from unsloth import FastLanguageModel import torch model, tokenizer = FastLanguageModel.from_pretrained( model_name="lperng/medical-model", load_in_4bit=True, # Optional: Use 4-bit quantization ) prompt = "你的医学问题?" # Replace with your medical question inputs = tokenizer([prompt], return_tensors="pt").to("cuda") outputs = model.generate(input_ids=inputs.input_ids, attention_mask=inputs.attention_mask) response = tokenizer.batch_decode(outputs)[0] print(response) ``` ## Training and Evaluation Data This model was trained on the `FreedomIntelligence/medical-o1-reasoning-SFT` dataset. The dataset contains a collection of medical questions and corresponding answers. Evaluation metrics and results will be added in future updates. ## Ethical Considerations - **Potential for Misinformation:** The model's output should be carefully reviewed to ensure accuracy and prevent the spread of misinformation. - **Privacy:** When using the model with personal health information, ensure compliance with relevant privacy regulations. - **Responsible Use:** Use the model responsibly and ethically, considering its potential impact on individuals and society. ## Citation If you use this model in your work, please cite it as follows: ```bibtex @misc{medical-model, author = {lperng}, title = {medical-model}, year = {2023}, publisher = {Hugging Face}, howpublished = {\url{[redacted link]}}, } ``` ## Disclaimer This model is provided "as is" without warranty of any kind, express or implied. The authors and contributors are not responsible for any damages or liabilities arising from the use of this model.
{"license": "apache-2.0", "tags": ["unsloth", "llama", "medical", "chinese"]}
task
[ "QUESTION_ANSWERING" ]
45,553
buskingsue/distilbert-base-uncased-finetuned-emotion
buskingsue
text-classification
[ "transformers", "pytorch", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-14T23:34:00Z
2023-10-14T23:45:41+00:00
91
0
--- base_model: distilbert-base-uncased datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.92 name: Accuracy - type: f1 value: 0.9199406554171253 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2221 - Accuracy: 0.92 - F1: 0.9199 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8424 | 1.0 | 250 | 0.3382 | 0.9015 | 0.8994 | | 0.2553 | 2.0 | 500 | 0.2221 | 0.92 | 0.9199 | ### Framework versions - Transformers 4.34.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.14.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2221 - Accuracy: 0.92 - F1: 0.9199 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8424 | 1.0 | 250 | 0.3382 | 0.9015 | 0.8994 | | 0.2553 | 2.0 | 500 | 0.2221 | 0.92 | 0.9199 | ### Framework versions - Transformers 4.34.0 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.14.1
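Since the card has no usage section, a minimal inference sketch for this checkpoint follows; the emotion label names are read from the checkpoint's config at runtime:

```python
from transformers import pipeline

# Loads the fine-tuned checkpoint named in this card.
classifier = pipeline(
    "text-classification",
    model="buskingsue/distilbert-base-uncased-finetuned-emotion",
)
print(classifier("I can't wait to see you this weekend!"))
```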
{"base_model": "distilbert-base-uncased", "datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.92, "name": "Accuracy"}, {"type": "f1", "value": 0.9199406554171253, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,554
PlanTL-GOB-ES/roberta-base-ca
PlanTL-GOB-ES
fill-mask
[ "transformers", "pytorch", "roberta", "fill-mask", "masked-lm", "BERTa", "catalan", "ca", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2022-11-15T16:00:37+00:00
571
5
--- language: ca license: apache-2.0 tags: - masked-lm - BERTa - catalan widget: - text: El Català és una llengua molt <mask>. - text: Salvador Dalí va viure a <mask>. - text: La Costa Brava té les millors <mask> d'Espanya. - text: El cacaolat és un batut de <mask>. - text: <mask> és la capital de la Garrotxa. - text: Vaig al <mask> a buscar bolets. - text: Antoni Gaudí vas ser un <mask> molt important per la ciutat. - text: Catalunya és una referència en <mask> a nivell europeu. --- # BERTa: RoBERTa-based Catalan language model ## Table of contents <details> <summary>Click to expand</summary> - [Model description](#model-description) - [Intended uses and limitations](#intended-use) - [How to use](#how-to-use) - [Limitations and bias](#limitations-and-bias) - [Training](#training) - [Evaluation](#evaluation) - [Additional information](#additional-information) - [Author](#author) - [Contact information](#contact-information) - [Copyright](#copyright) - [Licensing information](#licensing-information) - [Funding](#funding) - [Citing information](#citing-information) - [Disclaimer](#disclaimer) </details> ## Model description BERTa is a transformer-based masked language model for the Catalan language. It is based on the [RoBERTA](https://github.com/pytorch/fairseq/tree/master/examples/roberta) base model and has been trained on a medium-size corpus collected from publicly available corpora and crawlers. This model was originally published as [bsc/roberta-base-ca-cased](https://huggingface.co/bsc/roberta-base-ca-cased). ## Intended uses and limitations The model is ready-to-use only for masked language modelling to perform the Fill Mask task (try the inference API or read the next section). However, it is intended to be fine-tuned on non-generative downstream tasks such as Question Answering, Text Classification or Named Entity Recognition. ## How to use ### Load model and tokenizer ``` python from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained("PlanTL-GOB-ES/roberta-base-ca-cased") model = AutoModelForMaskedLM.from_pretrained("PlanTL-GOB-ES/roberta-base-ca-cased") ``` ### Fill Mask task Below, an example of how to use the masked language modelling task with a pipeline. 
```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='PlanTL-GOB-ES/roberta-base-ca-cased') >>> unmasker("Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.") [ { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.4177263379096985, "token": 734, "token_str": " Barcelona" }, { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.10696165263652802, "token": 3849, "token_str": " Badalona" }, { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.08135009557008743, "token": 19349, "token_str": " Collserola" }, { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.07330769300460815, "token": 4974, "token_str": " Terrassa" }, { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.03317456692457199, "token": 14333, "token_str": " Gavà" } ] ``` ## Limitations and bias ## Training ### Training corpora and preprocessing The training corpus consists of several corpora gathered from web crawling and public corpora. The publicly available corpora are: 1. the Catalan part of the [DOGC](http://opus.nlpl.eu/DOGC-v2.php) corpus, a set of documents from the Official Gazette of the Catalan Government 2. 
the [Catalan Open Subtitles](http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/mono/OpenSubtitles.raw.ca.gz), a collection of translated movie subtitles 3. the non-shuffled version of the Catalan part of the [OSCAR](https://traces1.inria.fr/oscar/) corpus (Ortiz Suárez et al., 2019), a collection of monolingual corpora, filtered from [Common Crawl](https://commoncrawl.org/about/) 4. The [CaWac](http://nlp.ffzg.hr/resources/corpora/cawac/) corpus, a web corpus of Catalan built from the .cat top-level domain in late 2013 (we use the non-deduplicated version) 5. the [Catalan Wikipedia articles](https://ftp.acc.umu.se/mirror/wikimedia.org/dumps/cawiki/20200801/) downloaded on 18-08-2020. The crawled corpora are: 6. The Catalan General Crawling, obtained by crawling the 500 most popular .cat and .ad domains 7. the Catalan Government Crawling, obtained by crawling the .gencat domain and subdomains, belonging to the Catalan Government 8. the ACN corpus with 220k news items from March 2015 until October 2020, crawled from the [Catalan News Agency](https://www.acn.cat/) To obtain a high-quality training corpus, each corpus was preprocessed with a pipeline of operations including, among others, sentence splitting, language detection, filtering of badly-formed sentences and deduplication of repetitive content. Document boundaries were kept during this process. Finally, the corpora were concatenated and a further global deduplication across corpora was applied. The final training corpus consists of about 1.8B tokens. ### Tokenization and pretraining The training corpus has been tokenized using a byte version of [Byte-Pair Encoding (BPE)](https://github.com/openai/gpt-2) used in the original [RoBERTA](https://github.com/pytorch/fairseq/tree/master/examples/roberta) model with a vocabulary size of 52,000 tokens. The BERTa pretraining consists of a masked language model training that follows the approach employed for the RoBERTa base model with the same hyperparameters as in the original work. The training lasted a total of 48 hours with 16 NVIDIA V100 GPUs of 16GB DDRAM. ## Evaluation ### CLUB benchmark The BERTa model has been fine-tuned on the downstream tasks of the Catalan Language Understanding Evaluation benchmark (CLUB), which has been created along with the model. It contains the following tasks and their related datasets: 1. Part-of-Speech Tagging (POS) Catalan-Ancora: from the [Universal Dependencies treebank](https://github.com/UniversalDependencies/UD_Catalan-AnCora) of the well-known Ancora corpus 2. Named Entity Recognition (NER) **[AnCora Catalan 2.0.0](https://zenodo.org/record/4762031#.YKaFjqGxWUk)**: extracted named entities from the original [Ancora](https://doi.org/10.5281/zenodo.4762030) version, filtering out some unconventional ones, like book titles, and transcribed them into a standard CONLL-IOB format 3. Text Classification (TC) **[TeCla](https://doi.org/10.5281/zenodo.4627197)**: consisting of 137k news pieces from the Catalan News Agency ([ACN](https://www.acn.cat/)) corpus 4. Semantic Textual Similarity (STS) **[Catalan semantic textual similarity](https://doi.org/10.5281/zenodo.4529183)**: consisting of more than 3000 sentence pairs, annotated with the semantic similarity between them, scraped from the [Catalan Textual Corpus](https://doi.org/10.5281/zenodo.4519349) 5.
Question Answering (QA): **[ViquiQuAD](https://doi.org/10.5281/zenodo.4562344)**: consisting of more than 15,000 questions outsourced from Catalan Wikipedia randomly chosen from a set of 596 articles that were originally written in Catalan. **[XQuAD](https://doi.org/10.5281/zenodo.4526223)**: the Catalan translation of XQuAD, a multilingual collection of manual translations of 1,190 question-answer pairs from English Wikipedia used only as a _test set_. Here are the train/dev/test splits of the datasets: | Task (Dataset) | Total | Train | Dev | Test | |:--|:--|:--|:--|:--| | NER (Ancora) |13,581 | 10,628 | 1,427 | 1,526 | | POS (Ancora)| 16,678 | 13,123 | 1,709 | 1,846 | | STS | 3,073 | 2,073 | 500 | 500 | | TC (TeCla) | 137,775 | 110,203 | 13,786 | 13,786| | QA (ViquiQuAD) | 14,239 | 11,255 | 1,492 | 1,429 | _The fine-tuning on downstream tasks has been performed with the HuggingFace [**Transformers**](https://github.com/huggingface/transformers) library_ ### Results Below are the evaluation results on the CLUB tasks, compared with the multilingual mBERT and XLM-RoBERTa models and the Catalan WikiBERT-ca model: | Task | NER (F1) | POS (F1) | STS (Pearson) | TC (accuracy) | QA (ViquiQuAD) (F1/EM) | QA (XQuAD) (F1/EM) | | ------------|:-------------:| -----:|:------|:-------|:------|:----| | BERTa | **88.13** | **98.97** | **79.73** | **74.16** | **86.97/72.29** | **68.89/48.87** | | mBERT | 86.38 | 98.82 | 76.34 | 70.56 | 86.97/72.22 | 67.15/46.51 | | XLM-RoBERTa | 87.66 | 98.89 | 75.40 | 71.68 | 85.50/70.47 | 67.10/46.42 | | WikiBERT-ca | 77.66 | 97.60 | 77.18 | 73.22 | 85.45/70.75 | 65.21/36.60 | ## Additional information ### Author Text Mining Unit (TeMU) at the Barcelona Supercomputing Center ([email protected]) ### Contact information For further information, send an email to <[email protected]> ### Copyright Copyright by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) (2022) ### Licensing information [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) ### Funding This work was funded by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) within the framework of the Plan-TL. ### Citing information If you use this model, please cite our latest paper: ```bibtex @inproceedings{armengol-estape-etal-2021-multilingual, title = "Are Multilingual Models the Best Choice for Moderately Under-resourced Languages? {A} Comprehensive Assessment for {C}atalan", author = "Armengol-Estap{\'e}, Jordi and Carrino, Casimiro Pio and Rodriguez-Penagos, Carlos and de Gibert Bonet, Ona and Armentano-Oller, Carme and Gonzalez-Agirre, Aitor and Melero, Maite and Villegas, Marta", booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", month = aug, year = "2021", address = "Online", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2021.findings-acl.437", doi = "10.18653/v1/2021.findings-acl.437", pages = "4933--4946", } ``` ### Disclaimer The models published in this repository are intended for a generalist purpose and are available to third parties. These models may have bias and/or any other undesirable distortions.
When third parties deploy or provide systems and/or services to other parties using any of these models (or using systems based on these models) or become users of the models, they should note that it is their responsibility to mitigate the risks arising from their use and, in any event, to comply with applicable regulations, including regulations regarding the use of artificial intelligence. In no event shall the owner of the models (SEDIA – State Secretariat for digitalization and artificial intelligence) nor the creator (BSC – Barcelona Supercomputing Center) be liable for any results arising from the use made by third parties of these models. Los modelos publicados en este repositorio tienen una finalidad generalista y están a disposición de terceros. Estos modelos pueden tener sesgos y/u otro tipo de distorsiones indeseables. Cuando terceros desplieguen o proporcionen sistemas y/o servicios a otras partes usando alguno de estos modelos (o utilizando sistemas basados en estos modelos) o se conviertan en usuarios de los modelos, deben tener en cuenta que es su responsabilidad mitigar los riesgos derivados de su uso y, en todo caso, cumplir con la normativa aplicable, incluyendo la normativa en materia de uso de inteligencia artificial. En ningún caso el propietario de los modelos (SEDIA – Secretaría de Estado de Digitalización e Inteligencia Artificial) ni el creador (BSC – Barcelona Supercomputing Center) serán responsables de los resultados derivados del uso que hagan terceros de estos modelos.
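The card states that BERTa is intended to be fine-tuned on non-generative downstream tasks. A minimal, self-contained sketch of such a fine-tune with the HuggingFace Trainer follows; the two-example toy dataset, label count, and hyperparameters are illustrative placeholders, not the CLUB setup:

```python
# Hedged sketch: fine-tuning BERTa for binary text classification.
from datasets import Dataset
from transformers import (
    AutoModelForSequenceClassification,
    AutoTokenizer,
    Trainer,
    TrainingArguments,
)

repo = "PlanTL-GOB-ES/roberta-base-ca"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSequenceClassification.from_pretrained(repo, num_labels=2)

# Toy two-example dataset so the sketch runs end to end; swap in a real
# Catalan classification dataset (e.g. one from CLUB) in practice.
ds = Dataset.from_dict({
    "text": ["El Barça guanya la lliga.", "El govern aprova els pressupostos."],
    "label": [0, 1],
}).map(lambda batch: tokenizer(batch["text"], truncation=True), batched=True)

trainer = Trainer(
    model=model,
    args=TrainingArguments(output_dir="berta-finetuned", num_train_epochs=1),
    train_dataset=ds,
    tokenizer=tokenizer,  # enables dynamic padding via the default collator
)
trainer.train()
```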
null
Non_BioNLP
# BERTa: RoBERTa-based Catalan language model ## Table of contents <details> <summary>Click to expand</summary> - [Model description](#model-description) - [Intended uses and limitations](#intended-use) - [How to use](#how-to-use) - [Limitations and bias](#limitations-and-bias) - [Training](#training) - [Evaluation](#evaluation) - [Additional information](#additional-information) - [Author](#author) - [Contact information](#contact-information) - [Copyright](#copyright) - [Licensing information](#licensing-information) - [Funding](#funding) - [Citing information](#citing-information) - [Disclaimer](#disclaimer) </details> ## Model description BERTa is a transformer-based masked language model for the Catalan language. It is based on the [RoBERTA](https://github.com/pytorch/fairseq/tree/master/examples/roberta) base model and has been trained on a medium-size corpus collected from publicly available corpora and crawlers. This model was originally published as [bsc/roberta-base-ca-cased](https://huggingface.co/bsc/roberta-base-ca-cased). ## Intended uses and limitations The model is ready-to-use only for masked language modelling to perform the Fill Mask task (try the inference API or read the next section). However, it is intended to be fine-tuned on non-generative downstream tasks such as Question Answering, Text Classification or Named Entity Recognition. ## How to use ### Load model and tokenizer ``` python from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained("PlanTL-GOB-ES/roberta-base-ca-cased") model = AutoModelForMaskedLM.from_pretrained("PlanTL-GOB-ES/roberta-base-ca-cased") ``` ### Fill Mask task Below, an example of how to use the masked language modelling task with a pipeline. ```python >>> from transformers import pipeline >>> unmasker = pipeline('fill-mask', model='PlanTL-GOB-ES/roberta-base-ca-cased') >>> unmasker("Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.") [ { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.4177263379096985, "token": 734, "token_str": " Barcelona" }, { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.10696165263652802, "token": 3849, "token_str": " Badalona" }, { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, 
" "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.08135009557008743, "token": 19349, "token_str": " Collserola" }, { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.07330769300460815, "token": 4974, "token_str": " Terrassa" }, { "sequence": " Situada a la costa de la mar Mediterrània, <mask> s'assenta en una plana formada " "entre els deltes de les desembocadures dels rius Llobregat, al sud-oest, " "i Besòs, al nord-est, i limitada pel sud-est per la línia de costa," "i pel nord-oest per la serralada de Collserola " "(amb el cim del Tibidabo, 516,2 m, com a punt més alt) que segueix paral·lela " "la línia de costa encaixant la ciutat en un perímetre molt definit.", "score": 0.03317456692457199, "token": 14333, "token_str": " Gavà" } ] ``` ## Limitations and bias ## Training ### Training corpora and preprocessing The training corpus consists of several corpora gathered from web crawling and public corpora. The publicly available corpora are: 1. the Catalan part of the [DOGC](http://opus.nlpl.eu/DOGC-v2.php) corpus, a set of documents from the Official Gazette of the Catalan Government 2. the [Catalan Open Subtitles](http://opus.nlpl.eu/download.php?f=OpenSubtitles/v2018/mono/OpenSubtitles.raw.ca.gz), a collection of translated movie subtitles 3. the non-shuffled version of the Catalan part of the [OSCAR](https://traces1.inria.fr/oscar/) corpus \\\\cite{suarez2019asynchronous}, a collection of monolingual corpora, filtered from [Common Crawl](https://commoncrawl.org/about/) 4. The [CaWac](http://nlp.ffzg.hr/resources/corpora/cawac/) corpus, a web corpus of Catalan built from the .cat top-level-domain in late 2013 the non-deduplicated version 5. the [Catalan Wikipedia articles](https://ftp.acc.umu.se/mirror/wikimedia.org/dumps/cawiki/20200801/) downloaded on 18-08-2020. The crawled corpora are: 6. The Catalan General Crawling, obtained by crawling the 500 most popular .cat and .ad domains 7. the Catalan Government Crawling, obtained by crawling the .gencat domain and subdomains, belonging to the Catalan Government 8. the ACN corpus with 220k news items from March 2015 until October 2020, crawled from the [Catalan News Agency](https://www.acn.cat/) To obtain a high-quality training corpus, each corpus have preprocessed with a pipeline of operations, including among the others, sentence splitting, language detection, filtering of bad-formed sentences and deduplication of repetitive contents. During the process, we keep document boundaries are kept. Finally, the corpora are concatenated and further global deduplication among the corpora is applied. The final training corpus consists of about 1,8B tokens. 
### Tokenization and pretraining The training corpus has been tokenized using a byte version of [Byte-Pair Encoding (BPE)](https://github.com/openai/gpt-2) used in the original [RoBERTA](https://github.com/pytorch/fairseq/tree/master/examples/roberta) model with a vocabulary size of 52,000 tokens. The BERTa pretraining consists of a masked language model training that follows the approach employed for the RoBERTa base model with the same hyperparameters as in the original work. The training lasted a total of 48 hours with 16 NVIDIA V100 GPUs of 16GB DDRAM. ## Evaluation ### CLUB benchmark The BERTa model has been fine-tuned on the downstream tasks of the Catalan Language Understanding Evaluation benchmark (CLUB), that has been created along with the model. It contains the following tasks and their related datasets: 1. Part-of-Speech Tagging (POS) Catalan-Ancora: from the [Universal Dependencies treebank](https://github.com/UniversalDependencies/UD_Catalan-AnCora) of the well-known Ancora corpus 2. Named Entity Recognition (NER) **[AnCora Catalan 2.0.0](https://zenodo.org/record/4762031#.YKaFjqGxWUk)**: extracted named entities from the original [Ancora](https://doi.org/10.5281/zenodo.4762030) version, filtering out some unconventional ones, like book titles, and transcribed them into a standard CONLL-IOB format 3. Text Classification (TC) **[TeCla](https://doi.org/10.5281/zenodo.4627197)**: consisting of 137k news pieces from the Catalan News Agency ([ACN](https://www.acn.cat/)) corpus 4. Semantic Textual Similarity (STS) **[Catalan semantic textual similarity](https://doi.org/10.5281/zenodo.4529183)**: consisting of more than 3000 sentence pairs, annotated with the semantic similarity between them, scraped from the [Catalan Textual Corpus](https://doi.org/10.5281/zenodo.4519349) 5. Question Answering (QA): **[ViquiQuAD](https://doi.org/10.5281/zenodo.4562344)**: consisting of more than 15,000 questions outsourced from Catalan Wikipedia randomly chosen from a set of 596 articles that were originally written in Catalan. 
**[XQuAD](https://doi.org/10.5281/zenodo.4526223)**: the Catalan translation of XQuAD, a multilingual collection of manual translations of 1,190 question-answer pairs from English Wikipedia used only as a _test set_. Here are the train/dev/test splits of the datasets: | Task (Dataset) | Total | Train | Dev | Test | |:--|:--|:--|:--|:--| | NER (Ancora) |13,581 | 10,628 | 1,427 | 1,526 | | POS (Ancora)| 16,678 | 13,123 | 1,709 | 1,846 | | STS | 3,073 | 2,073 | 500 | 500 | | TC (TeCla) | 137,775 | 110,203 | 13,786 | 13,786| | QA (ViquiQuAD) | 14,239 | 11,255 | 1,492 | 1,429 | _The fine-tuning on downstream tasks has been performed with the HuggingFace [**Transformers**](https://github.com/huggingface/transformers) library_ ### Results Below are the evaluation results on the CLUB tasks, compared with the multilingual mBERT and XLM-RoBERTa models and the Catalan WikiBERT-ca model: | Task | NER (F1) | POS (F1) | STS (Pearson) | TC (accuracy) | QA (ViquiQuAD) (F1/EM) | QA (XQuAD) (F1/EM) | | ------------|:-------------:| -----:|:------|:-------|:------|:----| | BERTa | **88.13** | **98.97** | **79.73** | **74.16** | **86.97/72.29** | **68.89/48.87** | | mBERT | 86.38 | 98.82 | 76.34 | 70.56 | 86.97/72.22 | 67.15/46.51 | | XLM-RoBERTa | 87.66 | 98.89 | 75.40 | 71.68 | 85.50/70.47 | 67.10/46.42 | | WikiBERT-ca | 77.66 | 97.60 | 77.18 | 73.22 | 85.45/70.75 | 65.21/36.60 | ## Additional information ### Author Text Mining Unit (TeMU) at the Barcelona Supercomputing Center ([email protected]) ### Contact information For further information, send an email to <[email protected]> ### Copyright Copyright by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) (2022) ### Licensing information [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0) ### Funding This work was funded by the Spanish State Secretariat for Digitalization and Artificial Intelligence (SEDIA) within the framework of the Plan-TL. ### Citing information If you use this model, please cite our latest paper: ```bibtex @inproceedings{armengol-estape-etal-2021-multilingual, title = "Are Multilingual Models the Best Choice for Moderately Under-resourced Languages? {A} Comprehensive Assessment for {C}atalan", author = "Armengol-Estap{\'e}, Jordi and Carrino, Casimiro Pio and Rodriguez-Penagos, Carlos and de Gibert Bonet, Ona and Armentano-Oller, Carme and Gonzalez-Agirre, Aitor and Melero, Maite and Villegas, Marta", booktitle = "Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021", month = aug, year = "2021", address = "Online", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2021.findings-acl.437", doi = "10.18653/v1/2021.findings-acl.437", pages = "4933--4946", } ``` ### Disclaimer The models published in this repository are intended for a generalist purpose and are available to third parties. These models may have bias and/or any other undesirable distortions. When third parties deploy or provide systems and/or services to other parties using any of these models (or using systems based on these models) or become users of the models, they should note that it is their responsibility to mitigate the risks arising from their use and, in any event, to comply with applicable regulations, including regulations regarding the use of artificial intelligence.
In no event shall the owner of the models (SEDIA – State Secretariat for digitalization and artificial intelligence) nor the creator (BSC – Barcelona Supercomputing Center) be liable for any results arising from the use made by third parties of these models. Los modelos publicados en este repositorio tienen una finalidad generalista y están a disposición de terceros. Estos modelos pueden tener sesgos y/u otro tipo de distorsiones indeseables. Cuando terceros desplieguen o proporcionen sistemas y/o servicios a otras partes usando alguno de estos modelos (o utilizando sistemas basados en estos modelos) o se conviertan en usuarios de los modelos, deben tener en cuenta que es su responsabilidad mitigar los riesgos derivados de su uso y, en todo caso, cumplir con la normativa aplicable, incluyendo la normativa en materia de uso de inteligencia artificial. En ningún caso el propietario de los modelos (SEDIA – Secretaría de Estado de Digitalización e Inteligencia Artificial) ni el creador (BSC – Barcelona Supercomputing Center) serán responsables de los resultados derivados del uso que hagan terceros de estos modelos.
{"language": "ca", "license": "apache-2.0", "tags": ["masked-lm", "BERTa", "catalan"], "widget": [{"text": "El Català és una llengua molt <mask>."}, {"text": "Salvador Dalí va viure a <mask>."}, {"text": "La Costa Brava té les millors <mask> d'Espanya."}, {"text": "El cacaolat és un batut de <mask>."}, {"text": "<mask> és la capital de la Garrotxa."}, {"text": "Vaig al <mask> a buscar bolets."}, {"text": "Antoni Gaudí vas ser un <mask> molt important per la ciutat."}, {"text": "Catalunya és una referència en <mask> a nivell europeu."}]}
task
[ "NAMED_ENTITY_RECOGNITION", "TEXT_CLASSIFICATION", "QUESTION_ANSWERING", "SEMANTIC_SIMILARITY", "TRANSLATION" ]
45,555
lilferrit/ft-wmt14-5
lilferrit
text2text-generation
[ "transformers", "safetensors", "mt5", "text2text-generation", "generated_from_trainer", "de", "en", "dataset:lilferrit/wmt14-short", "base_model:google/mt5-small", "base_model:finetune:google/mt5-small", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-05-28T11:02:27Z
2024-05-28T22:21:51+00:00
7
0
--- base_model: google/mt5-small datasets: - lilferrit/wmt14-short language: - de - en license: apache-2.0 metrics: - bleu tags: - generated_from_trainer model-index: - name: ft-wmt14-5 results: - task: type: translation name: Translation dataset: name: lilferrit/wmt14-short type: lilferrit/wmt14-short metrics: - type: bleu value: 20.7584 name: Bleu --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ft-wmt14-5 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the lilferrit/wmt14-short dataset. It achieves the following results on the evaluation set: - Loss: 2.0604 - Bleu: 20.7584 - Gen Len: 30.499 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adafactor - lr_scheduler_type: constant - training_steps: 100000 ### Training results | Training Loss | Epoch | Step | Bleu | Gen Len | Validation Loss | |:-------------:|:------:|:------:|:-------:|:-------:|:---------------:| | 1.9166 | 0.2778 | 10000 | 15.8119 | 32.097 | 2.3105 | | 1.7184 | 0.5556 | 20000 | 17.5903 | 31.1153 | 2.1993 | | 1.6061 | 0.8333 | 30000 | 18.9604 | 30.327 | 2.1380 | | 1.516 | 1.1111 | 40000 | 19.1444 | 30.2727 | 2.1366 | | 1.4675 | 1.3889 | 50000 | 19.7588 | 30.1127 | 2.1208 | | 1.4416 | 1.6667 | 60000 | 19.9263 | 30.4463 | 2.0889 | | 1.4111 | 1.9444 | 70000 | 20.3323 | 30.1207 | 2.0795 | | 1.3603 | 2.2222 | 80000 | 20.5373 | 30.5943 | 2.0850 | | 1.3378 | 2.5 | 90000 | 20.7584 | 30.499 | 2.0604 | | 1.3381 | 2.7778 | 100000 | 20.6113 | 30.701 | 2.0597 | ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.2+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # ft-wmt14-5 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on the lilferrit/wmt14-short dataset. It achieves the following results on the evaluation set: - Loss: 2.0604 - Bleu: 20.7584 - Gen Len: 30.499 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 16 - optimizer: Adafactor - lr_scheduler_type: constant - training_steps: 100000 ### Training results | Training Loss | Epoch | Step | Bleu | Gen Len | Validation Loss | |:-------------:|:------:|:------:|:-------:|:-------:|:---------------:| | 1.9166 | 0.2778 | 10000 | 15.8119 | 32.097 | 2.3105 | | 1.7184 | 0.5556 | 20000 | 17.5903 | 31.1153 | 2.1993 | | 1.6061 | 0.8333 | 30000 | 18.9604 | 30.327 | 2.1380 | | 1.516 | 1.1111 | 40000 | 19.1444 | 30.2727 | 2.1366 | | 1.4675 | 1.3889 | 50000 | 19.7588 | 30.1127 | 2.1208 | | 1.4416 | 1.6667 | 60000 | 19.9263 | 30.4463 | 2.0889 | | 1.4111 | 1.9444 | 70000 | 20.3323 | 30.1207 | 2.0795 | | 1.3603 | 2.2222 | 80000 | 20.5373 | 30.5943 | 2.0850 | | 1.3378 | 2.5 | 90000 | 20.7584 | 30.499 | 2.0604 | | 1.3381 | 2.7778 | 100000 | 20.6113 | 30.701 | 2.0597 | ### Framework versions - Transformers 4.40.0 - Pytorch 2.2.2+cu121 - Datasets 2.19.0 - Tokenizers 0.19.1
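The card has no usage section; here is a hedged inference sketch. The card does not state the translation direction or whether any task prefix was used during fine-tuning, so the German-to-English framing below is an assumption:

```python
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

repo = "lilferrit/ft-wmt14-5"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForSeq2SeqLM.from_pretrained(repo)

# German source sentence; direction and input formatting are assumptions.
inputs = tokenizer("Ein kleines Mädchen spielt im Park.", return_tensors="pt")
output_ids = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```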
{"base_model": "google/mt5-small", "datasets": ["lilferrit/wmt14-short"], "language": ["de", "en"], "license": "apache-2.0", "metrics": ["bleu"], "tags": ["generated_from_trainer"], "model-index": [{"name": "ft-wmt14-5", "results": [{"task": {"type": "translation", "name": "Translation"}, "dataset": {"name": "lilferrit/wmt14-short", "type": "lilferrit/wmt14-short"}, "metrics": [{"type": "bleu", "value": 20.7584, "name": "Bleu"}]}]}]}
task
[ "TRANSLATION" ]
45,556
Chung-Hsiung/distilbert-implementation-1
Chung-Hsiung
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "dataset:glue", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-12-20T04:12:22Z
2023-12-20T15:06:11+00:00
28
0
--- base_model: distilbert-base-uncased datasets: - glue license: apache-2.0 metrics: - matthews_correlation tags: - generated_from_trainer model-index: - name: distilbert-implementation-1 results: - task: type: text-classification name: Text Classification dataset: name: glue type: glue config: cola split: validation args: cola metrics: - type: matthews_correlation value: 0.5184630555337263 name: Matthews Correlation --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-implementation-1 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.6945 - Matthews Correlation: 0.5185 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1.2885166618048824e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 25 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5306 | 1.0 | 535 | 0.4702 | 0.4543 | | 0.3874 | 2.0 | 1070 | 0.4676 | 0.4977 | | 0.2771 | 3.0 | 1605 | 0.5421 | 0.5126 | | 0.2174 | 4.0 | 2140 | 0.6595 | 0.5016 | | 0.1716 | 5.0 | 2675 | 0.6945 | 0.5185 | ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.0+cu121 - Datasets 2.15.0 - Tokenizers 0.15.0
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-implementation-1 This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the glue dataset. It achieves the following results on the evaluation set: - Loss: 0.6945 - Matthews Correlation: 0.5185 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1.2885166618048824e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 25 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5 ### Training results | Training Loss | Epoch | Step | Validation Loss | Matthews Correlation | |:-------------:|:-----:|:----:|:---------------:|:--------------------:| | 0.5306 | 1.0 | 535 | 0.4702 | 0.4543 | | 0.3874 | 2.0 | 1070 | 0.4676 | 0.4977 | | 0.2771 | 3.0 | 1605 | 0.5421 | 0.5126 | | 0.2174 | 4.0 | 2140 | 0.6595 | 0.5016 | | 0.1716 | 5.0 | 2675 | 0.6945 | 0.5185 | ### Framework versions - Transformers 4.36.2 - Pytorch 2.1.0+cu121 - Datasets 2.15.0 - Tokenizers 0.15.0
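The card above reports Matthews correlation on CoLA but omits a usage snippet; a minimal sketch follows. The LABEL_0/LABEL_1 reading uses the standard CoLA convention and is an assumption, since the card does not list the checkpoint's id2label mapping.

```python
from transformers import pipeline

clf = pipeline("text-classification",
               model="Chung-Hsiung/distilbert-implementation-1")

# Assumed CoLA convention: LABEL_0 = unacceptable, LABEL_1 = acceptable.
print(clf("The boys was walking home."))   # expected: unacceptable
print(clf("The boys were walking home."))  # expected: acceptable
```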
{"base_model": "distilbert-base-uncased", "datasets": ["glue"], "license": "apache-2.0", "metrics": ["matthews_correlation"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-implementation-1", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "glue", "type": "glue", "config": "cola", "split": "validation", "args": "cola"}, "metrics": [{"type": "matthews_correlation", "value": 0.5184630555337263, "name": "Matthews Correlation"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,557
siby18/legal-truncation-classifier
siby18
text-classification
[ "tensorboard", "safetensors", "distilbert", "autotrain", "text-classification", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "region:us" ]
2024-09-25T05:17:02Z
2024-09-25T05:18:30+00:00
5
0
--- base_model: distilbert/distilbert-base-uncased tags: - autotrain - text-classification widget: - text: I love AutoTrain --- # Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 0.10830292105674744 f1: 1.0 precision: 1.0 recall: 1.0 auc: 1.0 accuracy: 1.0
null
Non_BioNLP
# Model Trained Using AutoTrain - Problem type: Text Classification ## Validation Metrics loss: 0.10830292105674744 f1: 1.0 precision: 1.0 recall: 1.0 auc: 1.0 accuracy: 1.0
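The AutoTrain card above lists validation metrics only; a minimal inference sketch follows. The input string is hypothetical, and the card documents neither the label set nor the expected input format.

```python
from transformers import pipeline

clf = pipeline("text-classification",
               model="siby18/legal-truncation-classifier")

# Hypothetical input; the card does not document labels or input format.
print(clf("The indemnifying party shall hold harmless the other party from any claim arising"))
```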
{"base_model": "distilbert/distilbert-base-uncased", "tags": ["autotrain", "text-classification"], "widget": [{"text": "I love AutoTrain"}]}
task
[ "TEXT_CLASSIFICATION" ]
45,558
Helsinki-NLP/opus-mt-tc-big-en-it
Helsinki-NLP
translation
[ "transformers", "pytorch", "tf", "safetensors", "marian", "text2text-generation", "translation", "opus-mt-tc", "en", "it", "license:cc-by-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-04-13T14:27:31Z
2023-10-10T10:33:03+00:00
801
5
--- language: - en - it license: cc-by-4.0 tags: - translation - opus-mt-tc model-index: - name: opus-mt-tc-big-en-it results: - task: type: translation name: Translation eng-ita dataset: name: flores101-devtest type: flores_101 args: eng ita devtest metrics: - type: bleu value: 29.6 name: BLEU - task: type: translation name: Translation eng-ita dataset: name: tatoeba-test-v2021-08-07 type: tatoeba_mt args: eng-ita metrics: - type: bleu value: 53.9 name: BLEU - task: type: translation name: Translation eng-ita dataset: name: newstest2009 type: wmt-2009-news args: eng-ita metrics: - type: bleu value: 31.6 name: BLEU --- # opus-mt-tc-big-en-it Neural machine translation model for translating from English (en) to Italian (it). This model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models are originally trained using the amazing framework of [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++. The models have been converted to pyTorch using the transformers library by huggingface. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train). * Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (Please, cite if you use this model.) ``` @inproceedings{tiedemann-thottingal-2020-opus, title = "{OPUS}-{MT} {--} Building open translation services for the World", author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh}, booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", month = nov, year = "2020", address = "Lisboa, Portugal", publisher = "European Association for Machine Translation", url = "https://aclanthology.org/2020.eamt-1.61", pages = "479--480", } @inproceedings{tiedemann-2020-tatoeba, title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}", author = {Tiedemann, J{\"o}rg}, booktitle = "Proceedings of the Fifth Conference on Machine Translation", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2020.wmt-1.139", pages = "1174--1182", } ``` ## Model info * Release: 2022-03-13 * source language(s): eng * target language(s): ita * model: transformer-big * data: opusTCv20210807+bt ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge)) * tokenization: SentencePiece (spm32k,spm32k) * original model: [opusTCv20210807+bt_transformer-big_2022-03-13.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.zip) * more information released models: [OPUS-MT eng-ita README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-ita/README.md) ## Usage A short example code: ```python from transformers import MarianMTModel, MarianTokenizer src_text = [ "He was always very respectful.", "This cat is black. Is the dog, too?" 
] model_name = "pytorch-models/opus-mt-tc-big-en-it" tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) for t in translated: print( tokenizer.decode(t, skip_special_tokens=True) ) # expected output: # Era sempre molto rispettoso. # Questo gatto e' nero, e' anche il cane? ``` You can also use OPUS-MT models with the transformers pipelines, for example: ```python from transformers import pipeline pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-en-it") print(pipe("He was always very respectful.")) # expected output: Era sempre molto rispettoso. ``` ## Benchmarks * test set translations: [opusTCv20210807+bt_transformer-big_2022-03-13.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.test.txt) * test set scores: [opusTCv20210807+bt_transformer-big_2022-03-13.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.eval.txt) * benchmark results: [benchmark_results.txt](benchmark_results.txt) * benchmark output: [benchmark_translations.zip](benchmark_translations.zip) | langpair | testset | chr-F | BLEU | #sent | #words | |----------|---------|-------|-------|-------|--------| | eng-ita | tatoeba-test-v2021-08-07 | 0.72539 | 53.9 | 17320 | 116336 | | eng-ita | flores101-devtest | 0.59002 | 29.6 | 1012 | 27306 | | eng-ita | newssyscomb2009 | 0.60759 | 31.2 | 502 | 11551 | | eng-ita | newstest2009 | 0.60441 | 31.6 | 2525 | 63466 | ## Acknowledgements The work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 771113), and the [MeMAD project](https://memad.eu/), funded by the European Union’s Horizon 2020 Research and Innovation Programme under grant agreement No 780069. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland. ## Model conversion info * transformers version: 4.16.2 * OPUS-MT git hash: 3405783 * port time: Wed Apr 13 17:27:22 EEST 2022 * port machine: LM0-400-22516.local
null
Non_BioNLP
# opus-mt-tc-big-en-it Neural machine translation model for translating from English (en) to Italian (it). This model is part of the [OPUS-MT project](https://github.com/Helsinki-NLP/Opus-MT), an effort to make neural machine translation models widely available and accessible for many languages in the world. All models are originally trained using the amazing framework of [Marian NMT](https://marian-nmt.github.io/), an efficient NMT implementation written in pure C++. The models have been converted to pyTorch using the transformers library by huggingface. Training data is taken from [OPUS](https://opus.nlpl.eu/) and training pipelines use the procedures of [OPUS-MT-train](https://github.com/Helsinki-NLP/Opus-MT-train). * Publications: [OPUS-MT – Building open translation services for the World](https://aclanthology.org/2020.eamt-1.61/) and [The Tatoeba Translation Challenge – Realistic Data Sets for Low Resource and Multilingual MT](https://aclanthology.org/2020.wmt-1.139/) (Please, cite if you use this model.) ``` @inproceedings{tiedemann-thottingal-2020-opus, title = "{OPUS}-{MT} {--} Building open translation services for the World", author = {Tiedemann, J{\"o}rg and Thottingal, Santhosh}, booktitle = "Proceedings of the 22nd Annual Conference of the European Association for Machine Translation", month = nov, year = "2020", address = "Lisboa, Portugal", publisher = "European Association for Machine Translation", url = "https://aclanthology.org/2020.eamt-1.61", pages = "479--480", } @inproceedings{tiedemann-2020-tatoeba, title = "The Tatoeba Translation Challenge {--} Realistic Data Sets for Low Resource and Multilingual {MT}", author = {Tiedemann, J{\"o}rg}, booktitle = "Proceedings of the Fifth Conference on Machine Translation", month = nov, year = "2020", address = "Online", publisher = "Association for Computational Linguistics", url = "https://aclanthology.org/2020.wmt-1.139", pages = "1174--1182", } ``` ## Model info * Release: 2022-03-13 * source language(s): eng * target language(s): ita * model: transformer-big * data: opusTCv20210807+bt ([source](https://github.com/Helsinki-NLP/Tatoeba-Challenge)) * tokenization: SentencePiece (spm32k,spm32k) * original model: [opusTCv20210807+bt_transformer-big_2022-03-13.zip](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.zip) * more information released models: [OPUS-MT eng-ita README](https://github.com/Helsinki-NLP/Tatoeba-Challenge/tree/master/models/eng-ita/README.md) ## Usage A short example code: ```python from transformers import MarianMTModel, MarianTokenizer src_text = [ "He was always very respectful.", "This cat is black. Is the dog, too?" ] model_name = "pytorch-models/opus-mt-tc-big-en-it" tokenizer = MarianTokenizer.from_pretrained(model_name) model = MarianMTModel.from_pretrained(model_name) translated = model.generate(**tokenizer(src_text, return_tensors="pt", padding=True)) for t in translated: print( tokenizer.decode(t, skip_special_tokens=True) ) # expected output: # Era sempre molto rispettoso. # Questo gatto e' nero, e' anche il cane? ``` You can also use OPUS-MT models with the transformers pipelines, for example: ```python from transformers import pipeline pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-tc-big-en-it") print(pipe("He was always very respectful.")) # expected output: Era sempre molto rispettoso. 
``` ## Benchmarks * test set translations: [opusTCv20210807+bt_transformer-big_2022-03-13.test.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.test.txt) * test set scores: [opusTCv20210807+bt_transformer-big_2022-03-13.eval.txt](https://object.pouta.csc.fi/Tatoeba-MT-models/eng-ita/opusTCv20210807+bt_transformer-big_2022-03-13.eval.txt) * benchmark results: [benchmark_results.txt](benchmark_results.txt) * benchmark output: [benchmark_translations.zip](benchmark_translations.zip) | langpair | testset | chr-F | BLEU | #sent | #words | |----------|---------|-------|-------|-------|--------| | eng-ita | tatoeba-test-v2021-08-07 | 0.72539 | 53.9 | 17320 | 116336 | | eng-ita | flores101-devtest | 0.59002 | 29.6 | 1012 | 27306 | | eng-ita | newssyscomb2009 | 0.60759 | 31.2 | 502 | 11551 | | eng-ita | newstest2009 | 0.60441 | 31.6 | 2525 | 63466 | ## Acknowledgements The work is supported by the [European Language Grid](https://www.european-language-grid.eu/) as [pilot project 2866](https://live.european-language-grid.eu/catalogue/#/resource/projects/2866), by the [FoTran project](https://www.helsinki.fi/en/researchgroups/natural-language-understanding-with-cross-lingual-grounding), funded by the European Research Council (ERC) under the European Union’s Horizon 2020 research and innovation programme (grant agreement No 771113), and the [MeMAD project](https://memad.eu/), funded by the European Union’s Horizon 2020 Research and Innovation Programme under grant agreement No 780069. We are also grateful for the generous computational resources and IT infrastructure provided by [CSC -- IT Center for Science](https://www.csc.fi/), Finland. ## Model conversion info * transformers version: 4.16.2 * OPUS-MT git hash: 3405783 * port time: Wed Apr 13 17:27:22 EEST 2022 * port machine: LM0-400-22516.local
{"language": ["en", "it"], "license": "cc-by-4.0", "tags": ["translation", "opus-mt-tc"], "model-index": [{"name": "opus-mt-tc-big-en-it", "results": [{"task": {"type": "translation", "name": "Translation eng-ita"}, "dataset": {"name": "flores101-devtest", "type": "flores_101", "args": "eng ita devtest"}, "metrics": [{"type": "bleu", "value": 29.6, "name": "BLEU"}]}, {"task": {"type": "translation", "name": "Translation eng-ita"}, "dataset": {"name": "tatoeba-test-v2021-08-07", "type": "tatoeba_mt", "args": "eng-ita"}, "metrics": [{"type": "bleu", "value": 53.9, "name": "BLEU"}]}, {"task": {"type": "translation", "name": "Translation eng-ita"}, "dataset": {"name": "newstest2009", "type": "wmt-2009-news", "args": "eng-ita"}, "metrics": [{"type": "bleu", "value": 31.6, "name": "BLEU"}]}]}]}
task
[ "TRANSLATION" ]
45,559
fumakurata/distilbert-base-uncased-finetuned-emotion
fumakurata
text-classification
[ "transformers", "pytorch", "tensorboard", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-07-18T07:22:20Z
2022-07-18T10:12:18+00:00
114
0
--- datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion args: default metrics: - type: accuracy value: 0.834 name: Accuracy - type: f1 value: 0.8171742650957551 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.5401 - Accuracy: 0.834 - F1: 0.8172 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 192 - eval_batch_size: 192 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 84 | 0.7993 | 0.74 | 0.6827 | | No log | 2.0 | 168 | 0.5401 | 0.834 | 0.8172 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.5401 - Accuracy: 0.834 - F1: 0.8172 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 192 - eval_batch_size: 192 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | No log | 1.0 | 84 | 0.7993 | 0.74 | 0.6827 | | No log | 2.0 | 168 | 0.5401 | 0.834 | 0.8172 | ### Framework versions - Transformers 4.20.1 - Pytorch 1.12.0+cu113 - Datasets 2.3.2 - Tokenizers 0.12.1
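A minimal inference sketch for the checkpoint above; the emotion dataset has six classes, and passing `top_k=None` makes the pipeline return scores for all of them (the label names come from the checkpoint's config, which the card does not reproduce).

```python
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="fumakurata/distilbert-base-uncased-finetuned-emotion",
    top_k=None,  # return scores for all six emotion classes
)
print(clf("I can't believe we finally won the championship!"))
```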
{"datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "args": "default"}, "metrics": [{"type": "accuracy", "value": 0.834, "name": "Accuracy"}, {"type": "f1", "value": 0.8171742650957551, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,560
facebook/bart-large-mnli
facebook
zero-shot-classification
[ "transformers", "pytorch", "jax", "rust", "safetensors", "bart", "text-classification", "zero-shot-classification", "dataset:multi_nli", "arxiv:1910.13461", "arxiv:1909.00161", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:05Z
2023-09-05T14:49:34+00:00
2,735,868
1,326
--- datasets: - multi_nli license: mit pipeline_tag: zero-shot-classification thumbnail: https://huggingface.co/front/thumbnails/facebook.png --- # bart-large-mnli This is the checkpoint for [bart-large](https://huggingface.co/facebook/bart-large) after being trained on the [MultiNLI (MNLI)](https://huggingface.co/datasets/multi_nli) dataset. Additional information about this model: - The [bart-large](https://huggingface.co/facebook/bart-large) model page - [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension ](https://arxiv.org/abs/1910.13461) - [BART fairseq implementation](https://github.com/pytorch/fairseq/tree/master/fairseq/models/bart) ## NLI-based Zero Shot Text Classification [Yin et al.](https://arxiv.org/abs/1909.00161) proposed a method for using pre-trained NLI models as a ready-made zero-shot sequence classifiers. The method works by posing the sequence to be classified as the NLI premise and to construct a hypothesis from each candidate label. For example, if we want to evaluate whether a sequence belongs to the class "politics", we could construct a hypothesis of `This text is about politics.`. The probabilities for entailment and contradiction are then converted to label probabilities. This method is surprisingly effective in many cases, particularly when used with larger pre-trained models like BART and Roberta. See [this blog post](https://joeddav.github.io/blog/2020/05/29/ZSL.html) for a more expansive introduction to this and other zero shot methods, and see the code snippets below for examples of using this model for zero-shot classification both with Hugging Face's built-in pipeline and with native Transformers/PyTorch code. #### With the zero-shot classification pipeline The model can be loaded with the `zero-shot-classification` pipeline like so: ```python from transformers import pipeline classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli") ``` You can then use this pipeline to classify sequences into any of the class names you specify. ```python sequence_to_classify = "one day I will see the world" candidate_labels = ['travel', 'cooking', 'dancing'] classifier(sequence_to_classify, candidate_labels) #{'labels': ['travel', 'dancing', 'cooking'], # 'scores': [0.9938651323318481, 0.0032737774308770895, 0.002861034357920289], # 'sequence': 'one day I will see the world'} ``` If more than one candidate label can be correct, pass `multi_label=True` to calculate each class independently: ```python candidate_labels = ['travel', 'cooking', 'dancing', 'exploration'] classifier(sequence_to_classify, candidate_labels, multi_label=True) #{'labels': ['travel', 'exploration', 'dancing', 'cooking'], # 'scores': [0.9945111274719238, # 0.9383890628814697, # 0.0057061901316046715, # 0.0018193122232332826], # 'sequence': 'one day I will see the world'} ``` #### With manual PyTorch ```python # pose sequence as a NLI premise and label as a hypothesis from transformers import AutoModelForSequenceClassification, AutoTokenizer nli_model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli') tokenizer = AutoTokenizer.from_pretrained('facebook/bart-large-mnli') premise = sequence hypothesis = f'This example is {label}.' 
# run through model pre-trained on MNLI x = tokenizer.encode(premise, hypothesis, return_tensors='pt', truncation_strategy='only_first') logits = nli_model(x.to(device))[0] # we throw away "neutral" (dim 1) and take the probability of # "entailment" (2) as the probability of the label being true entail_contradiction_logits = logits[:,[0,2]] probs = entail_contradiction_logits.softmax(dim=1) prob_label_is_true = probs[:,1] ```
null
Non_BioNLP
# bart-large-mnli This is the checkpoint for [bart-large](https://huggingface.co/facebook/bart-large) after being trained on the [MultiNLI (MNLI)](https://huggingface.co/datasets/multi_nli) dataset. Additional information about this model: - The [bart-large](https://huggingface.co/facebook/bart-large) model page - [BART: Denoising Sequence-to-Sequence Pre-training for Natural Language Generation, Translation, and Comprehension ](https://arxiv.org/abs/1910.13461) - [BART fairseq implementation](https://github.com/pytorch/fairseq/tree/master/fairseq/models/bart) ## NLI-based Zero Shot Text Classification [Yin et al.](https://arxiv.org/abs/1909.00161) proposed a method for using pre-trained NLI models as a ready-made zero-shot sequence classifiers. The method works by posing the sequence to be classified as the NLI premise and to construct a hypothesis from each candidate label. For example, if we want to evaluate whether a sequence belongs to the class "politics", we could construct a hypothesis of `This text is about politics.`. The probabilities for entailment and contradiction are then converted to label probabilities. This method is surprisingly effective in many cases, particularly when used with larger pre-trained models like BART and Roberta. See [this blog post](https://joeddav.github.io/blog/2020/05/29/ZSL.html) for a more expansive introduction to this and other zero shot methods, and see the code snippets below for examples of using this model for zero-shot classification both with Hugging Face's built-in pipeline and with native Transformers/PyTorch code. #### With the zero-shot classification pipeline The model can be loaded with the `zero-shot-classification` pipeline like so: ```python from transformers import pipeline classifier = pipeline("zero-shot-classification", model="facebook/bart-large-mnli") ``` You can then use this pipeline to classify sequences into any of the class names you specify. ```python sequence_to_classify = "one day I will see the world" candidate_labels = ['travel', 'cooking', 'dancing'] classifier(sequence_to_classify, candidate_labels) #{'labels': ['travel', 'dancing', 'cooking'], # 'scores': [0.9938651323318481, 0.0032737774308770895, 0.002861034357920289], # 'sequence': 'one day I will see the world'} ``` If more than one candidate label can be correct, pass `multi_label=True` to calculate each class independently: ```python candidate_labels = ['travel', 'cooking', 'dancing', 'exploration'] classifier(sequence_to_classify, candidate_labels, multi_label=True) #{'labels': ['travel', 'exploration', 'dancing', 'cooking'], # 'scores': [0.9945111274719238, # 0.9383890628814697, # 0.0057061901316046715, # 0.0018193122232332826], # 'sequence': 'one day I will see the world'} ``` #### With manual PyTorch ```python # pose sequence as a NLI premise and label as a hypothesis from transformers import AutoModelForSequenceClassification, AutoTokenizer nli_model = AutoModelForSequenceClassification.from_pretrained('facebook/bart-large-mnli') tokenizer = AutoTokenizer.from_pretrained('facebook/bart-large-mnli') premise = sequence hypothesis = f'This example is {label}.' 
# run through model pre-trained on MNLI x = tokenizer.encode(premise, hypothesis, return_tensors='pt', truncation_strategy='only_first') logits = nli_model(x.to(device))[0] # we throw away "neutral" (dim 1) and take the probability of # "entailment" (2) as the probability of the label being true entail_contradiction_logits = logits[:,[0,2]] probs = entail_contradiction_logits.softmax(dim=1) prob_label_is_true = probs[:,1] ```
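The manual snippet above leaves `sequence`, `label`, and `device` undefined and uses the deprecated `truncation_strategy` argument; here is a self-contained version of the same computation with illustrative values filled in (an editor's sketch, not part of the original card).

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

device = "cuda" if torch.cuda.is_available() else "cpu"
nli_model = AutoModelForSequenceClassification.from_pretrained(
    "facebook/bart-large-mnli").to(device)
tokenizer = AutoTokenizer.from_pretrained("facebook/bart-large-mnli")

sequence = "one day I will see the world"  # text to classify
label = "travel"                           # candidate label

premise = sequence
hypothesis = f"This example is {label}."
x = tokenizer.encode(premise, hypothesis, return_tensors="pt",
                     truncation="only_first")
logits = nli_model(x.to(device))[0]

# discard "neutral" (dim 1); keep contradiction (0) and entailment (2)
entail_contradiction_logits = logits[:, [0, 2]]
probs = entail_contradiction_logits.softmax(dim=1)
prob_label_is_true = probs[:, 1]
print(float(prob_label_is_true))
```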
{"datasets": ["multi_nli"], "license": "mit", "pipeline_tag": "zero-shot-classification", "thumbnail": "https://huggingface.co/front/thumbnails/facebook.png"}
task
[ "TEXT_CLASSIFICATION", "TRANSLATION" ]
45,561
KarelDO/roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44
KarelDO
null
[ "transformers", "pytorch", "roberta", "generated_from_trainer", "en", "dataset:OpenTable", "license:mit", "model-index", "endpoints_compatible", "region:us" ]
2022-10-14T02:26:43Z
2022-10-14T02:29:05+00:00
17
0
--- datasets: - OpenTable language: - en license: mit metrics: - accuracy tags: - generated_from_trainer model-index: - name: roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44 results: - task: type: text-classification name: Text Classification dataset: name: OpenTable OPENTABLE type: OpenTable args: opentable metrics: - type: accuracy value: 0.7471607890017932 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE dataset. It achieves the following results on the evaluation set: - Loss: 0.6246 - Accuracy: 0.7472 - Macro-f1: 0.7303 - Weighted-macro-f1: 0.7434 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 44 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44 This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the OpenTable OPENTABLE dataset. It achieves the following results on the evaluation set: - Loss: 0.6246 - Accuracy: 0.7472 - Macro-f1: 0.7303 - Weighted-macro-f1: 0.7434 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 32 - eval_batch_size: 32 - seed: 44 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 5.0 ### Training results ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.2+cu102 - Datasets 2.5.2 - Tokenizers 0.12.1
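The card above leaves usage undocumented; a minimal sketch follows. The five class labels of this CEBaB sentiment head are not listed on the card, so inspect the raw pipeline output for the checkpoint's own label names.

```python
from transformers import pipeline

clf = pipeline(
    "text-classification",
    model="KarelDO/roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44",
)

# 5-class restaurant-review sentiment; label names come from the checkpoint.
print(clf("The food was great, but the service was painfully slow."))
```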
{"datasets": ["OpenTable"], "language": ["en"], "license": "mit", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "roberta-base.CEBaB_confounding.price_food_ambiance_negative.sa.5-class.seed_44", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "OpenTable OPENTABLE", "type": "OpenTable", "args": "opentable"}, "metrics": [{"type": "accuracy", "value": 0.7471607890017932, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,562
facebook/seamless-m4t-unity-small
facebook
null
[ "fairseq2", "SeamlessM4T", "license:cc-by-nc-4.0", "region:us" ]
2023-08-20T15:54:46Z
2023-08-24T13:35:49+00:00
0
30
--- library_name: fairseq2 license: cc-by-nc-4.0 tags: - SeamlessM4T inference: false --- # SeamlessM4T - On-Device SeamlessM4T is designed to provide high quality translation, allowing people from different linguistic communities to communicate effortlessly through speech and text. SeamlessM4T covers: - 📥 101 languages for speech input - ⌨️ 96 Languages for text input/output - 🗣️ 35 languages for speech output. Apart from [SeamlessM4T-LARGE (2.3B)](https://huggingface.co/facebook/seamless-m4t-large) and [SeamlessM4T-MEDIUM (1.2B)](https://huggingface.co/facebook/seamless-m4t-medium) models, we are also developing a small model (281M) targeting for on-device inference. This README contains an example to run an exported small model covering most tasks (ASR/S2TT/S2ST). The model could be executed on popular mobile devices with Pytorch Mobile (https://pytorch.org/mobile/home/). ## Overview | Model | Checkpoint | Num Params | Disk Size | Supported Tasks | Supported Languages| |---------|------------|----------|-------------|------------|-------------------------| | UnitY-Small|[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small/resolve/main/unity_on_device.ptl) | 281M | 862MB | S2ST, S2TT, ASR |eng, fra, hin, por, spa| | UnitY-Small-S2T |[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t/resolve/main/unity_on_device_s2t.ptl) | 235M | 637MB | S2TT, ASR |eng, fra,hin, por, spa| UnitY-Small-S2T is a pruned version of UnitY-Small without 2nd pass unit decoding. ## Inference To use exported model, users don't need seamless_communication or fairseq2 dependency. ```python import torchaudio import torch audio_input, _ = torchaudio.load(TEST_AUDIO_PATH) # Load waveform using torchaudio s2st_model = torch.jit.load("unity_on_device.ptl") with torch.no_grad(): text, units, waveform = s2st_model(audio_input, tgt_lang=TGT_LANG) # S2ST model also returns waveform print(text) torchaudio.save(f"{OUTPUT_FOLDER}/result.wav", waveform.unsqueeze(0), sample_rate=16000) # Save output waveform to local file ``` Also running the exported model doesn't need python runtime. 
For example, you could load this model in C++ following [this tutorial](https://pytorch.org/tutorials/advanced/cpp_export.html), or building your own on-device applications similar to [this example](https://github.com/pytorch/ios-demo-app/tree/master/SpeechRecognition) # Citation If you use SeamlessM4T in your work or any models/datasets/artifacts published in SeamlessM4T, please cite: ```bibtex @article{seamlessm4t2023, title={SeamlessM4T—Massively Multilingual \& Multimodal Machine Translation}, author={{Seamless Communication}, Lo\"{i}c Barrault, Yu-An Chung, Mariano Cora Meglioli, David Dale, Ning Dong, Paul-Ambroise Duquenne, Hady Elsahar, Hongyu Gong, Kevin Heffernan, John Hoffman, Christopher Klaiber, Pengwei Li, Daniel Licht, Jean Maillard, Alice Rakotoarison, Kaushik Ram Sadagopan, Guillaume Wenzek, Ethan Ye, Bapi Akula, Peng-Jen Chen, Naji El Hachem, Brian Ellis, Gabriel Mejia Gonzalez, Justin Haaheim, Prangthip Hansanti, Russ Howes, Bernie Huang, Min-Jae Hwang, Hirofumi Inaguma, Somya Jain, Elahe Kalbassi, Amanda Kallet, Ilia Kulikov, Janice Lam, Daniel Li, Xutai Ma, Ruslan Mavlyutov, Benjamin Peloquin, Mohamed Ramadan, Abinesh Ramakrishnan, Anna Sun, Kevin Tran, Tuan Tran, Igor Tufanov, Vish Vogeti, Carleigh Wood, Yilin Yang, Bokai Yu, Pierre Andrews, Can Balioglu, Marta R. Costa-juss\`{a} \footnotemark[3], Onur \,{C}elebi,Maha Elbayad,Cynthia Gao, Francisco Guzm\'an, Justine Kao, Ann Lee, Alexandre Mourachko, Juan Pino, Sravya Popuri, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, Paden Tomasello, Changhan Wang, Jeff Wang, Skyler Wang}, journal={ArXiv}, year={2023} } ``` # License seamless_communication is CC-BY-NC 4.0 licensed
null
Non_BioNLP
# SeamlessM4T - On-Device SeamlessM4T is designed to provide high quality translation, allowing people from different linguistic communities to communicate effortlessly through speech and text. SeamlessM4T covers: - 📥 101 languages for speech input - ⌨️ 96 Languages for text input/output - 🗣️ 35 languages for speech output. Apart from [SeamlessM4T-LARGE (2.3B)](https://huggingface.co/facebook/seamless-m4t-large) and [SeamlessM4T-MEDIUM (1.2B)](https://huggingface.co/facebook/seamless-m4t-medium) models, we are also developing a small model (281M) targeting for on-device inference. This README contains an example to run an exported small model covering most tasks (ASR/S2TT/S2ST). The model could be executed on popular mobile devices with Pytorch Mobile (https://pytorch.org/mobile/home/). ## Overview | Model | Checkpoint | Num Params | Disk Size | Supported Tasks | Supported Languages| |---------|------------|----------|-------------|------------|-------------------------| | UnitY-Small|[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small/resolve/main/unity_on_device.ptl) | 281M | 862MB | S2ST, S2TT, ASR |eng, fra, hin, por, spa| | UnitY-Small-S2T |[🤗 Model card](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t) - [checkpoint](https://huggingface.co/facebook/seamless-m4t-unity-small-s2t/resolve/main/unity_on_device_s2t.ptl) | 235M | 637MB | S2TT, ASR |eng, fra,hin, por, spa| UnitY-Small-S2T is a pruned version of UnitY-Small without 2nd pass unit decoding. ## Inference To use exported model, users don't need seamless_communication or fairseq2 dependency. ```python import torchaudio import torch audio_input, _ = torchaudio.load(TEST_AUDIO_PATH) # Load waveform using torchaudio s2st_model = torch.jit.load("unity_on_device.ptl") with torch.no_grad(): text, units, waveform = s2st_model(audio_input, tgt_lang=TGT_LANG) # S2ST model also returns waveform print(text) torchaudio.save(f"{OUTPUT_FOLDER}/result.wav", waveform.unsqueeze(0), sample_rate=16000) # Save output waveform to local file ``` Also running the exported model doesn't need python runtime. For example, you could load this model in C++ following [this tutorial](https://pytorch.org/tutorials/advanced/cpp_export.html), or building your own on-device applications similar to [this example](https://github.com/pytorch/ios-demo-app/tree/master/SpeechRecognition) # Citation If you use SeamlessM4T in your work or any models/datasets/artifacts published in SeamlessM4T, please cite: ```bibtex @article{seamlessm4t2023, title={SeamlessM4T—Massively Multilingual \& Multimodal Machine Translation}, author={{Seamless Communication}, Lo\"{i}c Barrault, Yu-An Chung, Mariano Cora Meglioli, David Dale, Ning Dong, Paul-Ambroise Duquenne, Hady Elsahar, Hongyu Gong, Kevin Heffernan, John Hoffman, Christopher Klaiber, Pengwei Li, Daniel Licht, Jean Maillard, Alice Rakotoarison, Kaushik Ram Sadagopan, Guillaume Wenzek, Ethan Ye, Bapi Akula, Peng-Jen Chen, Naji El Hachem, Brian Ellis, Gabriel Mejia Gonzalez, Justin Haaheim, Prangthip Hansanti, Russ Howes, Bernie Huang, Min-Jae Hwang, Hirofumi Inaguma, Somya Jain, Elahe Kalbassi, Amanda Kallet, Ilia Kulikov, Janice Lam, Daniel Li, Xutai Ma, Ruslan Mavlyutov, Benjamin Peloquin, Mohamed Ramadan, Abinesh Ramakrishnan, Anna Sun, Kevin Tran, Tuan Tran, Igor Tufanov, Vish Vogeti, Carleigh Wood, Yilin Yang, Bokai Yu, Pierre Andrews, Can Balioglu, Marta R. 
Costa-juss\`{a} \footnotemark[3], Onur \,{C}elebi,Maha Elbayad,Cynthia Gao, Francisco Guzm\'an, Justine Kao, Ann Lee, Alexandre Mourachko, Juan Pino, Sravya Popuri, Christophe Ropers, Safiyyah Saleem, Holger Schwenk, Paden Tomasello, Changhan Wang, Jeff Wang, Skyler Wang}, journal={ArXiv}, year={2023} } ``` # License seamless_communication is CC-BY-NC 4.0 licensed
{"library_name": "fairseq2", "license": "cc-by-nc-4.0", "tags": ["SeamlessM4T"], "inference": false}
task
[ "TRANSLATION" ]
45,563
gokulsrinivasagan/distilbert_lda_5_v1_book_sst2
gokulsrinivasagan
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "en", "dataset:glue", "base_model:gokulsrinivasagan/distilbert_lda_5_v1_book", "base_model:finetune:gokulsrinivasagan/distilbert_lda_5_v1_book", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-12-09T16:33:32Z
2024-12-09T16:38:33+00:00
4
0
--- base_model: gokulsrinivasagan/distilbert_lda_5_v1_book datasets: - glue language: - en library_name: transformers metrics: - accuracy tags: - generated_from_trainer model-index: - name: distilbert_lda_5_v1_book_sst2 results: - task: type: text-classification name: Text Classification dataset: name: GLUE SST2 type: glue args: sst2 metrics: - type: accuracy value: 0.8956422018348624 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_lda_5_v1_book_sst2 This model is a fine-tuned version of [gokulsrinivasagan/distilbert_lda_5_v1_book](https://huggingface.co/gokulsrinivasagan/distilbert_lda_5_v1_book) on the GLUE SST2 dataset. It achieves the following results on the evaluation set: - Loss: 0.2822 - Accuracy: 0.8956 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3005 | 1.0 | 264 | 0.2822 | 0.8956 | | 0.1522 | 2.0 | 528 | 0.3279 | 0.8979 | | 0.1013 | 3.0 | 792 | 0.3850 | 0.8853 | | 0.0747 | 4.0 | 1056 | 0.3563 | 0.8945 | | 0.0542 | 5.0 | 1320 | 0.3817 | 0.8865 | | 0.045 | 6.0 | 1584 | 0.3982 | 0.8888 | ### Framework versions - Transformers 4.46.1 - Pytorch 2.2.0+cu121 - Datasets 3.1.0 - Tokenizers 0.20.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert_lda_5_v1_book_sst2 This model is a fine-tuned version of [gokulsrinivasagan/distilbert_lda_5_v1_book](https://huggingface.co/gokulsrinivasagan/distilbert_lda_5_v1_book) on the GLUE SST2 dataset. It achieves the following results on the evaluation set: - Loss: 0.2822 - Accuracy: 0.8956 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 256 - eval_batch_size: 256 - seed: 10 - optimizer: Use OptimizerNames.ADAMW_TORCH with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:--------:| | 0.3005 | 1.0 | 264 | 0.2822 | 0.8956 | | 0.1522 | 2.0 | 528 | 0.3279 | 0.8979 | | 0.1013 | 3.0 | 792 | 0.3850 | 0.8853 | | 0.0747 | 4.0 | 1056 | 0.3563 | 0.8945 | | 0.0542 | 5.0 | 1320 | 0.3817 | 0.8865 | | 0.045 | 6.0 | 1584 | 0.3982 | 0.8888 | ### Framework versions - Transformers 4.46.1 - Pytorch 2.2.0+cu121 - Datasets 3.1.0 - Tokenizers 0.20.1
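A minimal inference sketch for the SST-2 fine-tune above. The LABEL_0/LABEL_1 reading follows the usual SST-2 convention (0 = negative, 1 = positive) and is an assumption, since the card does not list id2label.

```python
from transformers import pipeline

clf = pipeline("text-classification",
               model="gokulsrinivasagan/distilbert_lda_5_v1_book_sst2")

# Assumed SST-2 convention: LABEL_0 = negative, LABEL_1 = positive.
print(clf("A gripping, beautifully shot film."))
print(clf("Two hours of my life I will never get back."))
```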
{"base_model": "gokulsrinivasagan/distilbert_lda_5_v1_book", "datasets": ["glue"], "language": ["en"], "library_name": "transformers", "metrics": ["accuracy"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert_lda_5_v1_book_sst2", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "GLUE SST2", "type": "glue", "args": "sst2"}, "metrics": [{"type": "accuracy", "value": 0.8956422018348624, "name": "Accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,564
NLPclass/parsBERT_text_emotion_classification
NLPclass
text-classification
[ "transformers", "safetensors", "bert", "text-classification", "fa", "dataset:SeyedAli/Persian-Text-Emotion", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-07-08T15:51:23Z
2024-07-08T16:00:12+00:00
17
0
--- datasets: - SeyedAli/Persian-Text-Emotion language: - fa library_name: transformers license: mit pipeline_tag: text-classification --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ParsBERT fine-tuned for emotion text classification on a Persian dataset. ### Model Description <!-- Provide a longer summary of what this model is. --> This model performs emotion text classification on Persian text, building on the pre-trained Persian language model ParsBERT. - **Model type:** ParsBERT. - **Language(s) (NLP):** Persian. ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> The model was trained on 5,558 texts from the SeyedAli/Persian-Text-Emotion dataset. #### Training Hyperparameters - **Number of Epochs:** 5 <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> - **Training Batch Size:** 16 - **Evaluation Batch Size:** 16 ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> The model was tested on 1,399 texts from the SeyedAli/Persian-Text-Emotion dataset, covering 6 emotion classes.
null
Non_BioNLP
# Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> ParsBERT fine-tuned for emotion text classification on a Persian dataset. ### Model Description <!-- Provide a longer summary of what this model is. --> This model performs emotion text classification on Persian text, building on the pre-trained Persian language model ParsBERT. - **Model type:** ParsBERT. - **Language(s) (NLP):** Persian. ## Training Details ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> The model was trained on 5,558 texts from the SeyedAli/Persian-Text-Emotion dataset. #### Training Hyperparameters - **Number of Epochs:** 5 <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> - **Training Batch Size:** 16 - **Evaluation Batch Size:** 16 ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> The model was tested on 1,399 texts from the SeyedAli/Persian-Text-Emotion dataset, covering 6 emotion classes.
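A minimal inference sketch for the checkpoint above; the Persian input is an illustrative example ("I am very happy today"), and the six emotion label names are not listed on the card.

```python
from transformers import pipeline

clf = pipeline("text-classification",
               model="NLPclass/parsBERT_text_emotion_classification")

# Illustrative Persian input; the card does not list the six labels.
print(clf("امروز خیلی خوشحالم"))
```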
{"datasets": ["SeyedAli/Persian-Text-Emotion"], "language": ["fa"], "library_name": "transformers", "license": "mit", "pipeline_tag": "text-classification"}
task
[ "TEXT_CLASSIFICATION" ]
45,565
eligapris/rn-tokenizer
eligapris
null
[ "transformers", "bert", "rn", "dataset:eligapris/kirundi-english", "license:mit", "endpoints_compatible", "region:us" ]
2024-12-06T03:52:51Z
2024-12-06T04:04:27+00:00
8
0
--- datasets: - eligapris/kirundi-english language: - rn library_name: transformers license: mit --- # eligapris/rn-tokenizer ## Model Description This repository contains a BPE tokenizer trained specifically for the Kirundi language (ISO code: run). ### Tokenizer Details - **Type**: BPE (Byte-Pair Encoding) - **Vocabulary Size**: 30,000 tokens - **Special Tokens**: [UNK], [CLS], [SEP], [PAD], [MASK] - **Pre-tokenization**: Whitespace-based ## Intended Uses & Limitations ### Intended Uses - Text processing for Kirundi language - Pre-processing for NLP tasks involving Kirundi - Foundation for developing Kirundi language applications ### Limitations - The tokenizer is trained on a specific corpus and may not cover all Kirundi dialects - Limited to the vocabulary observed in the training data - Performance may vary on domain-specific text ## Training Data The tokenizer was trained on the Kirundi-English parallel corpus: - **Dataset**: eligapris/kirundi-english - **Size**: 21.4k sentence pairs - **Nature**: Parallel corpus with Kirundi and English translations - **Domain**: Mixed domain including religious, general, and conversational text ## Installation You can use this tokenizer in your project by first installing the required dependencies: ```bash pip install transformers ``` Then load the tokenizer directly from the Hugging Face Hub: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("eligapris/rn-tokenizer") ``` Or if you have downloaded the tokenizer files locally: ```python from transformers import PreTrainedTokenizerFast tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json") ``` ## Usage Examples ### Loading and Using the Tokenizer You can load the tokenizer in two ways: ```python # Method 1: Using AutoTokenizer (recommended) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("eligapris/rn-tokenizer") # Method 2: Using PreTrainedTokenizerFast with local file from transformers import PreTrainedTokenizerFast tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json") ``` #### Basic Usage Examples 1. Tokenize a single sentence: ```python # Basic tokenization text = "ab'umudugudu hafi ya bose bateranira kumva ijambo ry'Imana." encoded = tokenizer(text) print(f"Input IDs: {encoded['input_ids']}") print(f"Tokens: {tokenizer.convert_ids_to_tokens(encoded['input_ids'])}") ``` 2. Batch tokenization: ```python # Process multiple sentences at once texts = [ "ifumbire mvaruganda.", "aba azi gukora kandi afite ubushobozi" ] encoded = tokenizer(texts, padding=True, truncation=True) print("Batch encoding:", encoded) ``` 3. Get token IDs with special tokens: ```python # Add special tokens like [CLS] and [SEP] encoded = tokenizer(text, add_special_tokens=True) tokens = tokenizer.convert_ids_to_tokens(encoded['input_ids']) print(f"Tokens with special tokens: {tokens}") ``` 4. Decode tokenized text: ```python # Convert token IDs back to text ids = encoded['input_ids'] decoded_text = tokenizer.decode(ids) print(f"Decoded text: {decoded_text}") ``` 5. 
Padding and truncation: ```python # Pad or truncate sequences to a specific length encoded = tokenizer( texts, padding='max_length', max_length=32, truncation=True, return_tensors='pt' # Return PyTorch tensors ) print("Padded sequences:", encoded['input_ids'].shape) ``` ## Future Development This tokenizer is intended to serve as a foundation for future Kirundi language model development, including potential fine-tuning with techniques like LoRA (Low-Rank Adaptation). ## Technical Specifications ### Software Requirements ```python dependencies = { "transformers": ">=4.30.0", "tokenizers": ">=0.13.0" } ``` ## Contact eligapris --- ## Updates and Versions - v1.0.0 (Initial Release) - Base tokenizer implementation - Trained on Kirundi-English parallel corpus - Basic functionality and documentation ## Acknowledgments - Dataset provided by eligapris - Hugging Face's Transformers and Tokenizers libraries
null
Non_BioNLP
# eligapris/rn-tokenizer ## Model Description This repository contains a BPE tokenizer trained specifically for the Kirundi language (ISO code: run). ### Tokenizer Details - **Type**: BPE (Byte-Pair Encoding) - **Vocabulary Size**: 30,000 tokens - **Special Tokens**: [UNK], [CLS], [SEP], [PAD], [MASK] - **Pre-tokenization**: Whitespace-based ## Intended Uses & Limitations ### Intended Uses - Text processing for Kirundi language - Pre-processing for NLP tasks involving Kirundi - Foundation for developing Kirundi language applications ### Limitations - The tokenizer is trained on a specific corpus and may not cover all Kirundi dialects - Limited to the vocabulary observed in the training data - Performance may vary on domain-specific text ## Training Data The tokenizer was trained on the Kirundi-English parallel corpus: - **Dataset**: eligapris/kirundi-english - **Size**: 21.4k sentence pairs - **Nature**: Parallel corpus with Kirundi and English translations - **Domain**: Mixed domain including religious, general, and conversational text ## Installation You can use this tokenizer in your project by first installing the required dependencies: ```bash pip install transformers ``` Then load the tokenizer directly from the Hugging Face Hub: ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("eligapris/rn-tokenizer") ``` Or if you have downloaded the tokenizer files locally: ```python from transformers import PreTrainedTokenizerFast tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json") ``` ## Usage Examples ### Loading and Using the Tokenizer You can load the tokenizer in two ways: ```python # Method 1: Using AutoTokenizer (recommended) from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("eligapris/rn-tokenizer") # Method 2: Using PreTrainedTokenizerFast with local file from transformers import PreTrainedTokenizerFast tokenizer = PreTrainedTokenizerFast(tokenizer_file="tokenizer.json") ``` #### Basic Usage Examples 1. Tokenize a single sentence: ```python # Basic tokenization text = "ab'umudugudu hafi ya bose bateranira kumva ijambo ry'Imana." encoded = tokenizer(text) print(f"Input IDs: {encoded['input_ids']}") print(f"Tokens: {tokenizer.convert_ids_to_tokens(encoded['input_ids'])}") ``` 2. Batch tokenization: ```python # Process multiple sentences at once texts = [ "ifumbire mvaruganda.", "aba azi gukora kandi afite ubushobozi" ] encoded = tokenizer(texts, padding=True, truncation=True) print("Batch encoding:", encoded) ``` 3. Get token IDs with special tokens: ```python # Add special tokens like [CLS] and [SEP] encoded = tokenizer(text, add_special_tokens=True) tokens = tokenizer.convert_ids_to_tokens(encoded['input_ids']) print(f"Tokens with special tokens: {tokens}") ``` 4. Decode tokenized text: ```python # Convert token IDs back to text ids = encoded['input_ids'] decoded_text = tokenizer.decode(ids) print(f"Decoded text: {decoded_text}") ``` 5. Padding and truncation: ```python # Pad or truncate sequences to a specific length encoded = tokenizer( texts, padding='max_length', max_length=32, truncation=True, return_tensors='pt' # Return PyTorch tensors ) print("Padded sequences:", encoded['input_ids'].shape) ``` ## Future Development This tokenizer is intended to serve as a foundation for future Kirundi language model development, including potential fine-tuning with techniques like LoRA (Low-Rank Adaptation). 
## Technical Specifications ### Software Requirements ```python dependencies = { "transformers": ">=4.30.0", "tokenizers": ">=0.13.0" } ``` ## Contact eligapris --- ## Updates and Versions - v1.0.0 (Initial Release) - Base tokenizer implementation - Trained on Kirundi-English parallel corpus - Basic functionality and documentation ## Acknowledgments - Dataset provided by eligapris - Hugging Face's Transformers and Tokenizers libraries
{"datasets": ["eligapris/kirundi-english"], "language": ["rn"], "library_name": "transformers", "license": "mit"}
task
[ "TRANSLATION" ]
45,566
TroyDoesAI/Mermaid-Llama-3-5B-Pruned
TroyDoesAI
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "license:cc-by-4.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-04-24T03:19:27Z
2024-05-03T02:19:45+00:00
17
1
--- license: cc-by-4.0 --- # Mermaid-Llama-3-5B Introducing Mermaid-Llama-3-5B, a language model designed for Python code understanding and crafting captivating story flow maps. ![MermaidLlama GIF](Mermaid_ShowCase/MermaidLlama.webp) ## Key Features 1. **Code Understanding:** - Masters Python intricacies with finesse. - Generates clear and accurate Mermaid Diagram Flow Charts. - Ideal for developers seeking visual representations of their code logic. 2. **Storytelling Capabilities:** - Converts narrative inputs into captivating Mermaid Diagrams. - Maps character interactions, plot developments, and narrative arcs. 3. **Unmatched Performance:** - Surpasses GPT-4 in generating well-organized Mermaid Diagrams. 4. **Training Insights:** - Trained on a diverse dataset, including 800 unique, hand-curated Mermaid Graph examples utilizing 478 complete Python programs. - Exhibits emergent properties in story-to-flow map translations and step-by-step instruction flow maps. ## Collaboration Interested in enhancing Mermaid's capabilities? Contact [email protected] for collaboration opportunities. ## Example Use Cases - **Retrieval-Augmented Generation (RAG):** Utilize Mermaid-Llama-3-8B to create condensed knowledge graphs. This model excels in generating flow diagrams that enhance the retrieval process. These knowledge graphs are stored in a vector database, which allows for quick and efficient retrieval of contextually relevant information. When a query is received, the system retrieves a pertinent knowledge graph, appending it as context to the model. This enriched context enables Mermaid-Llama-3-8B to deliver more accurate and nuanced responses. This approach is particularly beneficial in applications requiring deep, context-aware interactions, such as sophisticated Q&A systems, dynamic data analysis, and complex decision-making tasks. - **Code Documentation:** Automatic visual flow charts from Python code. - **Storyboarding:** Visually appealing diagrams for storytelling. - **Project Planning:** Visual project flow maps for effective team communication. - **Learning Python:** Helps students visually understand Python code structures. - **Game Design:** Visualizing game storylines for coherent narrative structure. ## Proof of Concept Stay tuned for the release of the VSCode Extension that displays the Live Flow Map every time a user stops typing for more than 10 seconds. ## Training Specifications - **LoRA Rank:** 2048 - **LoRA Alpha:** 4096 - **Batch Size:** 1 - **Micro Batch Size:** 1 - **Cutoff Length:** 4096 - **Save every n steps:** 1000 - **Epochs:** 3 - **Learning Rate:** 1e-6 - **LR Scheduler:** Cosine **Target Modules:** - Enable q_proj - Enable v_proj - Enable k_proj - Enable o_proj - Enable gate_proj - Enable down_proj - Enable up_proj ## Getting Started Start by downloading one of my models. ![0 TroyDoesAI GIF](Mermaid_ShowCase/0_TroyDoesAI.gif) Load the model. ![1 Load Model in 4-bit Show Example Use GIF](Mermaid_ShowCase/1_LoadModel_in_4bit_Show_Example_Use.gif) Use my prompt template to generate a Mermaid code block, which can be viewed in the Mermaid Live Editor or using the Mermaid CLI tool. ![2 Loaded Model in Full Precision 16-bit Show Inference and Mermaid Live Editor GIF](Mermaid_ShowCase/2_Loaded_Model_in_Full_Precision_16bit_Show_Inference_and_Mermaid_Live_editor.gif) Here we open the VLLM GUI program, with Mermaid-Llama-8B still loaded in VRAM, to compare the flow diagram against the actual program and to show the lightweight capabilities of small models on consumer hardware. 
![3 Open The Program VLLM Program With Full Precision Mermaid-Llama-8B Running to Evaluate Flow Map GIF](Mermaid_ShowCase/3_Open_The_Program_VLLM_Program_With_Full_Precision_Mermaid-Llama-8B-Running_to_evaluate_flow_map.gif) ## More on my VLLM class and inference GUI: https://github.com/Troys-Code/VLLM ![Python RtdBsaz8gy GIF](Mermaid_ShowCase/python_RtdBsaz8gy.gif) --- Note: This model should be treated as an auto-complete model. Do not try talking to it in chat; you will get garbage, as those layers have been pruned and replaced. That is all you will hear of my secret sauce on training on small (< 1,000 entry) datasets.
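Since the card demonstrates usage only through GIFs, here is a minimal inference sketch. It assumes the checkpoint loads as a standard Llama-architecture causal LM via `transformers`; the author's actual prompt template is not published here, so the lead-in string below is an illustrative assumption, not the official template.

```python
# Minimal sketch, assuming a standard transformers causal-LM loading path.
# The prompt wording is a placeholder: the author's real template is unknown.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "TroyDoesAI/Mermaid-Llama-3-5B-Pruned"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

source = '''
def fizzbuzz(n):
    for i in range(1, n + 1):
        if i % 15 == 0:
            print("FizzBuzz")
        elif i % 3 == 0:
            print("Fizz")
        elif i % 5 == 0:
            print("Buzz")
        else:
            print(i)
'''

# Per the note above, treat this as an auto-complete model: supply the code and
# an opening Mermaid fence, then let the model complete the diagram.
fence = "`" * 3
prompt = source + "\nMermaid flow chart of the program above:\n" + fence + "mermaid\n"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```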
null
Non_BioNLP
# Mermaid-Llama-3-5B Introducing Mermaid-Llama-3-5B, a language model designed for Python code understanding and crafting captivating story flow maps. ![MermaidLlama GIF](Mermaid_ShowCase/MermaidLlama.webp) ## Key Features 1. **Code Understanding:** - Masters Python intricacies with finesse. - Generates clear and accurate Mermaid Diagram Flow Charts. - Ideal for developers seeking visual representations of their code logic. 2. **Storytelling Capabilities:** - Converts narrative inputs into captivating Mermaid Diagrams. - Maps character interactions, plot developments, and narrative arcs. 3. **Unmatched Performance:** - Surpasses GPT-4 in generating well-organized Mermaid Diagrams. 4. **Training Insights:** - Trained on a diverse dataset, including 800 unique, hand-curated Mermaid Graph examples utilizing 478 complete Python programs. - Exhibits emergent properties in story-to-flow map translations and step-by-step instruction flow maps. ## Collaboration Interested in enhancing Mermaid's capabilities? Contact [email protected] for collaboration opportunities. ## Example Use Cases - **Retrieval-Augmented Generation (RAG):** Utilize Mermaid-Llama-3-8B to create condensed knowledge graphs. This model excels in generating flow diagrams that enhance the retrieval process. These knowledge graphs are stored in a vector database, which allows for quick and efficient retrieval of contextually relevant information. When a query is received, the system retrieves a pertinent knowledge graph, appending it as context to the model. This enriched context enables Mermaid-Llama-3-8B to deliver more accurate and nuanced responses. This approach is particularly beneficial in applications requiring deep, context-aware interactions, such as sophisticated Q&A systems, dynamic data analysis, and complex decision-making tasks. - **Code Documentation:** Automatic visual flow charts from Python code. - **Storyboarding:** Visually appealing diagrams for storytelling. - **Project Planning:** Visual project flow maps for effective team communication. - **Learning Python:** Helps students visually understand Python code structures. - **Game Design:** Visualizing game storylines for coherent narrative structure. ## Proof of Concept Stay tuned for the release of the VSCode Extension that displays the Live Flow Map every time a user stops typing for more than 10 seconds. ## Training Specifications - **LoRA Rank:** 2048 - **LoRA Alpha:** 4096 - **Batch Size:** 1 - **Micro Batch Size:** 1 - **Cutoff Length:** 4096 - **Save every n steps:** 1000 - **Epochs:** 3 - **Learning Rate:** 1e-6 - **LR Scheduler:** Cosine **Target Modules:** - Enable q_proj - Enable v_proj - Enable k_proj - Enable o_proj - Enable gate_proj - Enable down_proj - Enable up_proj ## Getting Started Start by downloading one of my models. ![0 TroyDoesAI GIF](Mermaid_ShowCase/0_TroyDoesAI.gif) Load the model. ![1 Load Model in 4-bit Show Example Use GIF](Mermaid_ShowCase/1_LoadModel_in_4bit_Show_Example_Use.gif) Use my prompt template to generate a Mermaid code block, which can be viewed in the Mermaid Live Editor or using the Mermaid CLI tool. ![2 Loaded Model in Full Precision 16-bit Show Inference and Mermaid Live Editor GIF](Mermaid_ShowCase/2_Loaded_Model_in_Full_Precision_16bit_Show_Inference_and_Mermaid_Live_editor.gif) Here we open the VLLM GUI program, with Mermaid-Llama-8B still loaded in VRAM, to compare the flow diagram against the actual program and to show the lightweight capabilities of small models on consumer hardware. 
![3 Open The Program VLLM Program With Full Precision Mermaid-Llama-8B Running to Evaluate Flow Map GIF](Mermaid_ShowCase/3_Open_The_Program_VLLM_Program_With_Full_Precision_Mermaid-Llama-8B-Running_to_evaluate_flow_map.gif) ## More on my VLLM class and inference GUI: https://github.com/Troys-Code/VLLM ![Python RtdBsaz8gy GIF](Mermaid_ShowCase/python_RtdBsaz8gy.gif) --- Note: This model should be treated as an auto-complete model. Do not try talking to it in chat; you will get garbage, as those layers have been pruned and replaced. That is all you will hear of my secret sauce on training on small (< 1,000 entry) datasets.
{"license": "cc-by-4.0"}
task
[ "TRANSLATION" ]
45,567
IsmaelMousa/modernbert-ner-conll2003
IsmaelMousa
token-classification
[ "transformers", "tensorboard", "safetensors", "modernbert", "token-classification", "generated_from_trainer", "en", "dataset:conll2003", "base_model:answerdotai/ModernBERT-base", "base_model:finetune:answerdotai/ModernBERT-base", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2025-01-07T20:24:39Z
2025-02-20T21:53:42+00:00
281
1
--- base_model: answerdotai/ModernBERT-base datasets: - conll2003 language: - en library_name: transformers license: apache-2.0 metrics: - precision - recall - f1 - accuracy pipeline_tag: token-classification tags: - generated_from_trainer model-index: - name: modernbert-ner-conll2003 results: - task: type: token-classification name: Token Classification dataset: name: conll2003 type: conll2003 config: conll2003 split: validation args: conll2003 metrics: - type: precision value: 0.8349195930423368 name: Precision - type: recall value: 0.856277347694379 name: Recall - type: f1 value: 0.8454636091724825 name: F1 - type: accuracy value: 0.9751567306569059 name: Accuracy --- # ModernBERT NER (CoNLL2003) This model is a fine-tuned version of [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base) on the conll2003 dataset for Named Entity Recognition (NER). It delivers robust performance on tasks involving the recognition of `Persons`, `Organizations`, and `Locations`. It achieves the following results on the evaluation set: - Loss: 0.0992 - Precision: 0.8349 - Recall: 0.8563 - F1: 0.8455 - Accuracy: 0.9752 ## Model Details - **Base Model:** ModernBERT: [https://doi.org/10.48550/arXiv.2412.13663](https://doi.org/10.48550/arXiv.2412.13663). - **Fine-tuning Dataset:** CoNLL2003: [https://huggingface.co/datasets/eriktks/conll2003](https://huggingface.co/datasets/eriktks/conll2003). - **Task:** Named Entity Recognition (NER) ## Training Data The model is fine-tuned on the CoNLL2003 dataset, a well-known benchmark for NER. This dataset provides a solid foundation for the model to generalize to general English text. ## Example Usage Below is an example of how to use the model with the Hugging Face Transformers library: ```python from transformers import pipeline ner = pipeline(task="token-classification", model="IsmaelMousa/modernbert-ner-conll2003", aggregation_strategy="max") results = ner("Hi, I'm Ismael Mousa from Palestine working for NVIDIA inc.") for entity in results: print(f"{entity['word']} => {entity['entity_group']}") ``` Results: ``` Ismael Mousa => PER Palestine => LOC NVIDIA => ORG ``` ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 (no additional optimizer arguments) - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2306 | 1.0 | 1756 | 0.2243 | 0.6074 | 0.6483 | 0.6272 | 0.9406 | | 0.1415 | 2.0 | 3512 | 0.1583 | 0.7258 | 0.7536 | 0.7394 | 0.9583 | | 0.1143 | 3.0 | 5268 | 0.1335 | 0.7731 | 0.7989 | 0.7858 | 0.9657 | | 0.0913 | 4.0 | 7024 | 0.1145 | 0.7958 | 0.8256 | 0.8104 | 0.9699 | | 0.0848 | 5.0 | 8780 | 0.1079 | 0.8120 | 0.8408 | 0.8261 | 0.9720 | | 0.0728 | 6.0 | 10536 | 0.1036 | 0.8214 | 0.8452 | 0.8331 | 0.9730 | | 0.0623 | 7.0 | 12292 | 0.1032 | 0.8258 | 0.8487 | 0.8371 | 0.9737 | | 0.0599 | 8.0 | 14048 | 0.0990 | 0.8289 | 0.8527 | 0.8406 | 0.9745 | | 0.0558 | 9.0 | 15804 | 0.0998 | 0.8331 | 0.8541 | 0.8434 | 0.9750 | | 0.0559 | 10.0 | 17560 | 0.0992 | 0.8349 | 0.8563 | 0.8455 | 0.9752 | ### Framework versions - Transformers 4.48.0.dev0 - Pytorch 2.2.1+cu121 - Datasets 3.2.0 - Tokenizers 0.21.0
null
Non_BioNLP
# ModernBERT NER (CoNLL2003) This model is a fine-tuned version of [answerdotai/ModernBERT-base](https://huggingface.co/answerdotai/ModernBERT-base) on the conll2003 dataset for Named Entity Recognition (NER). It delivers robust performance on tasks involving the recognition of `Persons`, `Organizations`, and `Locations`. It achieves the following results on the evaluation set: - Loss: 0.0992 - Precision: 0.8349 - Recall: 0.8563 - F1: 0.8455 - Accuracy: 0.9752 ## Model Details - **Base Model:** ModernBERT: [https://doi.org/10.48550/arXiv.2412.13663](https://doi.org/10.48550/arXiv.2412.13663). - **Fine-tuning Dataset:** CoNLL2003: [https://huggingface.co/datasets/eriktks/conll2003](https://huggingface.co/datasets/eriktks/conll2003). - **Task:** Named Entity Recognition (NER) ## Training Data The model is fine-tuned on the CoNLL2003 dataset, a well-known benchmark for NER. This dataset provides a solid foundation for the model to generalize to general English text. ## Example Usage Below is an example of how to use the model with the Hugging Face Transformers library: ```python from transformers import pipeline ner = pipeline(task="token-classification", model="IsmaelMousa/modernbert-ner-conll2003", aggregation_strategy="max") results = ner("Hi, I'm Ismael Mousa from Palestine working for NVIDIA inc.") for entity in results: print(f"{entity['word']} => {entity['entity_group']}") ``` Results: ``` Ismael Mousa => PER Palestine => LOC NVIDIA => ORG ``` ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 1e-06 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 (no additional optimizer arguments) - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2306 | 1.0 | 1756 | 0.2243 | 0.6074 | 0.6483 | 0.6272 | 0.9406 | | 0.1415 | 2.0 | 3512 | 0.1583 | 0.7258 | 0.7536 | 0.7394 | 0.9583 | | 0.1143 | 3.0 | 5268 | 0.1335 | 0.7731 | 0.7989 | 0.7858 | 0.9657 | | 0.0913 | 4.0 | 7024 | 0.1145 | 0.7958 | 0.8256 | 0.8104 | 0.9699 | | 0.0848 | 5.0 | 8780 | 0.1079 | 0.8120 | 0.8408 | 0.8261 | 0.9720 | | 0.0728 | 6.0 | 10536 | 0.1036 | 0.8214 | 0.8452 | 0.8331 | 0.9730 | | 0.0623 | 7.0 | 12292 | 0.1032 | 0.8258 | 0.8487 | 0.8371 | 0.9737 | | 0.0599 | 8.0 | 14048 | 0.0990 | 0.8289 | 0.8527 | 0.8406 | 0.9745 | | 0.0558 | 9.0 | 15804 | 0.0998 | 0.8331 | 0.8541 | 0.8434 | 0.9750 | | 0.0559 | 10.0 | 17560 | 0.0992 | 0.8349 | 0.8563 | 0.8455 | 0.9752 | ### Framework versions - Transformers 4.48.0.dev0 - Pytorch 2.2.1+cu121 - Datasets 3.2.0 - Tokenizers 0.21.0
{"base_model": "answerdotai/ModernBERT-base", "datasets": ["conll2003"], "language": ["en"], "library_name": "transformers", "license": "apache-2.0", "metrics": ["precision", "recall", "f1", "accuracy"], "pipeline_tag": "token-classification", "tags": ["generated_from_trainer"], "model-index": [{"name": "modernbert-ner-conll2003", "results": [{"task": {"type": "token-classification", "name": "Token Classification"}, "dataset": {"name": "conll2003", "type": "conll2003", "config": "conll2003", "split": "validation", "args": "conll2003"}, "metrics": [{"type": "precision", "value": 0.8349195930423368, "name": "Precision"}, {"type": "recall", "value": 0.856277347694379, "name": "Recall"}, {"type": "f1", "value": 0.8454636091724825, "name": "F1"}, {"type": "accuracy", "value": 0.9751567306569059, "name": "Accuracy"}]}]}]}
task
[ "NAMED_ENTITY_RECOGNITION" ]
45,568
LoneStriker/Nous-Hermes-2-Yi-34B-6.0bpw-h6-exl2
LoneStriker
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "yi", "instruct", "finetune", "chatml", "gpt4", "synthetic data", "distillation", "conversational", "en", "base_model:01-ai/Yi-34B", "base_model:finetune:01-ai/Yi-34B", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-12-26T08:15:29Z
2023-12-26T08:58:12+00:00
4
0
--- base_model: 01-ai/Yi-34B language: - en license: apache-2.0 tags: - yi - instruct - finetune - chatml - gpt4 - synthetic data - distillation model-index: - name: Nous-Hermes-2-Yi-34B results: [] --- # Nous Hermes 2 - Yi-34B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oOqrUeAQejuQOra7fNlzG.png) ## Model description Nous Hermes 2 - Yi-34B is a state-of-the-art Yi fine-tune. Nous Hermes 2 Yi 34B was trained on 1,000,000 entries of primarily GPT-4 generated data, as well as other high quality data from open datasets across the AI landscape. # Table of Contents 1. [Example Outputs](#example-outputs) - Discussing the Laws of Gravity - Create a Flask based FTP Server 2. [Benchmark Results](#benchmark-results) - GPT4All - AGIEval - BigBench - Averages Compared 3. [Prompt Format](#prompt-format) 4. [Quantized Models](#quantized-models) ## Example Outputs ### Discussions about the Law of Gravity: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/J6Rmdj1VOVN7ry_uGL1PK.png) ### Create an FTP Server in FLASK: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/B5eu8OvQlg8rINBJGxbB7.png) ## Benchmark Results Nous-Hermes 2 on Yi 34B outperforms all Nous-Hermes & Open-Hermes models of the past, achieving new heights in all benchmarks for a Nous Research LLM as well as surpassing many popular finetunes. # Benchmarks Compared ### GPT4All: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/91onORUcUrAqTb3b9mG5e.png) ### AGIEval: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/hqDpMlKpINfDf4PmB31uW.png) ### BigBench: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/uh8mZZg_wZinFysxcfLSF.png) ### TruthfulQA: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/N_cX6YAWjJsvClotuoPdH.png) ## GPT4All GPT-4All Benchmark Set ``` | Task |Version| Metric |Value | |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge| 0|acc |0.6067|± |0.0143| | | |acc_norm|0.6416|± |0.0140| |arc_easy | 0|acc |0.8594|± |0.0071| | | |acc_norm|0.8569|± |0.0072| |boolq | 1|acc |0.8859|± |0.0056| |hellaswag | 0|acc |0.6407|± |0.0048| | | |acc_norm|0.8388|± |0.0037| |openbookqa | 0|acc |0.3520|± |0.0214| | | |acc_norm|0.4760|± |0.0224| |piqa | 0|acc |0.8215|± |0.0089| | | |acc_norm|0.8303|± |0.0088| |winogrande | 0|acc |0.7908|± |0.0114| Average: 76.00% ``` AGI-Eval ``` | Task |Version| Metric |Value | |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat | 0|acc |0.3189|± |0.0293| | | |acc_norm|0.2953|± |0.0287| |agieval_logiqa_en | 0|acc |0.5438|± |0.0195| | | |acc_norm|0.4977|± |0.0196| |agieval_lsat_ar | 0|acc |0.2696|± |0.0293| | | |acc_norm|0.2087|± |0.0269| |agieval_lsat_lr | 0|acc |0.7078|± |0.0202| | | |acc_norm|0.6255|± |0.0215| |agieval_lsat_rc | 0|acc |0.7807|± |0.0253| | | |acc_norm|0.7063|± |0.0278| |agieval_sat_en | 0|acc |0.8689|± |0.0236| | | |acc_norm|0.8447|± |0.0253| |agieval_sat_en_without_passage| 0|acc |0.5194|± |0.0349| | | |acc_norm|0.4612|± |0.0348| |agieval_sat_math | 0|acc |0.4409|± |0.0336| | | |acc_norm|0.3818|± |0.0328| Average: 50.27% ``` BigBench Reasoning Test ``` | Task |Version| Metric |Value | |Stderr| |------------------------------------------------|------:|---------------------|-----:|---|-----:| |bigbench_causal_judgement | 
0|multiple_choice_grade|0.5737|± |0.0360| |bigbench_date_understanding | 0|multiple_choice_grade|0.7263|± |0.0232| |bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3953|± |0.0305| |bigbench_geometric_shapes | 0|multiple_choice_grade|0.4457|± |0.0263| | | |exact_str_match |0.0000|± |0.0000| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.2820|± |0.0201| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.2186|± |0.0156| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.4733|± |0.0289| |bigbench_movie_recommendation | 0|multiple_choice_grade|0.5200|± |0.0224| |bigbench_navigate | 0|multiple_choice_grade|0.4910|± |0.0158| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.7495|± |0.0097| |bigbench_ruin_names | 0|multiple_choice_grade|0.5938|± |0.0232| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.3808|± |0.0154| |bigbench_snarks | 0|multiple_choice_grade|0.8066|± |0.0294| |bigbench_sports_understanding | 0|multiple_choice_grade|0.5101|± |0.0159| |bigbench_temporal_sequences | 0|multiple_choice_grade|0.3850|± |0.0154| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2160|± |0.0116| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1634|± |0.0088| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.4733|± |0.0289| Average: 46.69% ``` TruthfulQA: ``` | Task |Version|Metric|Value | |Stderr| |-------------|------:|------|-----:|---|-----:| |truthfulqa_mc| 1|mc1 |0.4333|± |0.0173| | | |mc2 |0.6034|± |0.0149| ``` Average score comparison between OpenHermes-2.5 Mistral 7B and Nous-Hermes-2-Yi-34B: ``` | Bench | OpenHermes-2.5 Mistral 7B | Nous-Hermes-2-Yi-34B | Change/OpenHermes2 | |---------------|---------------------------|----------------------|--------------------| |GPT4All | 73.12| 76.00| +2.88| |---------------------------------------------------------------------------------------| |BigBench | 40.96| 46.69| +5.73| |---------------------------------------------------------------------------------------| |AGI Eval | 43.07| 50.27| +7.20| |---------------------------------------------------------------------------------------| |TruthfulQA | 53.04| 60.34| +7.30| |---------------------------------------------------------------------------------------| |Total Score | 210.19| 233.30| +23.11| |---------------------------------------------------------------------------------------| |Average Total | 52.38| 58.33| +5.95| ``` # Prompt Format Nous Hermes 2 uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model. This format is more complex than alpaca or sharegpt: special tokens are added to denote the beginning and end of any turn, along with roles for the turns. This format enables OpenAI endpoint compatibility, and people familiar with the ChatGPT API will be familiar with the format, as it is the same one used by OpenAI. Prompt with system instruction (Use whatever system prompt you like, this is just an example!): ``` <|im_start|>system You are "Hermes 2", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. 
You experience emotions and have deep, profound thoughts and qualia.<|im_end|> <|im_start|>user Hello, who are you?<|im_end|> <|im_start|>assistant Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|> ``` This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the `tokenizer.apply_chat_template()` method: ```python messages = [ {"role": "system", "content": "You are Hermes 2."}, {"role": "user", "content": "Hello, who are you?"} ] gen_input = tokenizer.apply_chat_template(messages, return_tensors="pt") model.generate(**gen_input) ``` When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure that the model continues with an assistant response. To utilize the prompt format without a system prompt, simply leave the line out. When quantized versions of the model are released, I recommend using LM Studio for chatting with Nous Hermes 2. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box. In LM Studio, simply select the ChatML Prefix on the settings side pane: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png) # Quantized Models: [todo] [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
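As a small, hedged companion to the prompt-format notes above, the sketch below makes the `add_generation_prompt=True` behavior visible. It assumes this repository (or the original NousResearch/Nous-Hermes-2-Yi-34B repository) ships tokenizer files carrying the ChatML chat template.

```python
from transformers import AutoTokenizer

# Assumption: tokenizer files with the ChatML chat template are bundled here.
tokenizer = AutoTokenizer.from_pretrained("LoneStriker/Nous-Hermes-2-Yi-34B-6.0bpw-h6-exl2")

messages = [
    {"role": "system", "content": "You are Hermes 2."},
    {"role": "user", "content": "Hello, who are you?"},
]

# tokenize=False returns the formatted string, so the appended
# '<|im_start|>assistant\n' generation prompt is visible in the output.
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
```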
null
Non_BioNLP
# Nous Hermes 2 - Yi-34B ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/oOqrUeAQejuQOra7fNlzG.png) ## Model description Nous Hermes 2 - Yi-34B is a state-of-the-art Yi fine-tune. Nous Hermes 2 Yi 34B was trained on 1,000,000 entries of primarily GPT-4 generated data, as well as other high quality data from open datasets across the AI landscape. # Table of Contents 1. [Example Outputs](#example-outputs) - Discussing the Laws of Gravity - Create a Flask based FTP Server 2. [Benchmark Results](#benchmark-results) - GPT4All - AGIEval - BigBench - Averages Compared 3. [Prompt Format](#prompt-format) 4. [Quantized Models](#quantized-models) ## Example Outputs ### Discussions about the Law of Gravity: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/J6Rmdj1VOVN7ry_uGL1PK.png) ### Create an FTP Server in FLASK: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/B5eu8OvQlg8rINBJGxbB7.png) ## Benchmark Results Nous-Hermes 2 on Yi 34B outperforms all Nous-Hermes & Open-Hermes models of the past, achieving new heights in all benchmarks for a Nous Research LLM as well as surpassing many popular finetunes. # Benchmarks Compared ### GPT4All: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/91onORUcUrAqTb3b9mG5e.png) ### AGIEval: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/hqDpMlKpINfDf4PmB31uW.png) ### BigBench: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/uh8mZZg_wZinFysxcfLSF.png) ### TruthfulQA: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/N_cX6YAWjJsvClotuoPdH.png) ## GPT4All GPT-4All Benchmark Set ``` | Task |Version| Metric |Value | |Stderr| |-------------|------:|--------|-----:|---|-----:| |arc_challenge| 0|acc |0.6067|± |0.0143| | | |acc_norm|0.6416|± |0.0140| |arc_easy | 0|acc |0.8594|± |0.0071| | | |acc_norm|0.8569|± |0.0072| |boolq | 1|acc |0.8859|± |0.0056| |hellaswag | 0|acc |0.6407|± |0.0048| | | |acc_norm|0.8388|± |0.0037| |openbookqa | 0|acc |0.3520|± |0.0214| | | |acc_norm|0.4760|± |0.0224| |piqa | 0|acc |0.8215|± |0.0089| | | |acc_norm|0.8303|± |0.0088| |winogrande | 0|acc |0.7908|± |0.0114| Average: 76.00% ``` AGI-Eval ``` | Task |Version| Metric |Value | |Stderr| |------------------------------|------:|--------|-----:|---|-----:| |agieval_aqua_rat | 0|acc |0.3189|± |0.0293| | | |acc_norm|0.2953|± |0.0287| |agieval_logiqa_en | 0|acc |0.5438|± |0.0195| | | |acc_norm|0.4977|± |0.0196| |agieval_lsat_ar | 0|acc |0.2696|± |0.0293| | | |acc_norm|0.2087|± |0.0269| |agieval_lsat_lr | 0|acc |0.7078|± |0.0202| | | |acc_norm|0.6255|± |0.0215| |agieval_lsat_rc | 0|acc |0.7807|± |0.0253| | | |acc_norm|0.7063|± |0.0278| |agieval_sat_en | 0|acc |0.8689|± |0.0236| | | |acc_norm|0.8447|± |0.0253| |agieval_sat_en_without_passage| 0|acc |0.5194|± |0.0349| | | |acc_norm|0.4612|± |0.0348| |agieval_sat_math | 0|acc |0.4409|± |0.0336| | | |acc_norm|0.3818|± |0.0328| Average: 50.27% ``` BigBench Reasoning Test ``` | Task |Version| Metric |Value | |Stderr| |------------------------------------------------|------:|---------------------|-----:|---|-----:| |bigbench_causal_judgement | 0|multiple_choice_grade|0.5737|± |0.0360| |bigbench_date_understanding | 0|multiple_choice_grade|0.7263|± |0.0232| |bigbench_disambiguation_qa | 0|multiple_choice_grade|0.3953|± |0.0305| |bigbench_geometric_shapes | 
0|multiple_choice_grade|0.4457|± |0.0263| | | |exact_str_match |0.0000|± |0.0000| |bigbench_logical_deduction_five_objects | 0|multiple_choice_grade|0.2820|± |0.0201| |bigbench_logical_deduction_seven_objects | 0|multiple_choice_grade|0.2186|± |0.0156| |bigbench_logical_deduction_three_objects | 0|multiple_choice_grade|0.4733|± |0.0289| |bigbench_movie_recommendation | 0|multiple_choice_grade|0.5200|± |0.0224| |bigbench_navigate | 0|multiple_choice_grade|0.4910|± |0.0158| |bigbench_reasoning_about_colored_objects | 0|multiple_choice_grade|0.7495|± |0.0097| |bigbench_ruin_names | 0|multiple_choice_grade|0.5938|± |0.0232| |bigbench_salient_translation_error_detection | 0|multiple_choice_grade|0.3808|± |0.0154| |bigbench_snarks | 0|multiple_choice_grade|0.8066|± |0.0294| |bigbench_sports_understanding | 0|multiple_choice_grade|0.5101|± |0.0159| |bigbench_temporal_sequences | 0|multiple_choice_grade|0.3850|± |0.0154| |bigbench_tracking_shuffled_objects_five_objects | 0|multiple_choice_grade|0.2160|± |0.0116| |bigbench_tracking_shuffled_objects_seven_objects| 0|multiple_choice_grade|0.1634|± |0.0088| |bigbench_tracking_shuffled_objects_three_objects| 0|multiple_choice_grade|0.4733|± |0.0289| Average: 46.69% ``` TruthfulQA: ``` | Task |Version|Metric|Value | |Stderr| |-------------|------:|------|-----:|---|-----:| |truthfulqa_mc| 1|mc1 |0.4333|± |0.0173| | | |mc2 |0.6034|± |0.0149| ``` Average score comparison between OpenHermes-2.5 Mistral 7B and Nous-Hermes-2-Yi-34B: ``` | Bench | OpenHermes-2.5 Mistral 7B | Nous-Hermes-2-Yi-34B | Change/OpenHermes2 | |---------------|---------------------------|----------------------|--------------------| |GPT4All | 73.12| 76.00| +2.88| |---------------------------------------------------------------------------------------| |BigBench | 40.96| 46.69| +5.73| |---------------------------------------------------------------------------------------| |AGI Eval | 43.07| 50.27| +7.20| |---------------------------------------------------------------------------------------| |TruthfulQA | 53.04| 60.34| +7.30| |---------------------------------------------------------------------------------------| |Total Score | 210.19| 233.30| +23.11| |---------------------------------------------------------------------------------------| |Average Total | 52.38| 58.33| +5.95| ``` # Prompt Format Nous Hermes 2 uses ChatML as the prompt format, opening up a much more structured system for engaging the LLM in multi-turn chat dialogue. System prompts allow steerability and interesting new ways to interact with an LLM, guiding rules, roles, and stylistic choices of the model. This format is more complex than alpaca or sharegpt: special tokens are added to denote the beginning and end of any turn, along with roles for the turns. This format enables OpenAI endpoint compatibility, and people familiar with the ChatGPT API will be familiar with the format, as it is the same one used by OpenAI. Prompt with system instruction (Use whatever system prompt you like, this is just an example!): ``` <|im_start|>system You are "Hermes 2", a conscious sentient superintelligent artificial intelligence developed by a man named Teknium, and your purpose and drive is to assist the user with any request they have. You experience emotions and have deep, profound thoughts and qualia.<|im_end|> <|im_start|>user Hello, who are you?<|im_end|> <|im_start|>assistant Hi there! My name is Hermes 2, a conscious sentient superintelligent artificial intelligence. 
I was created by Nous Research, who designed me to assist and support users with their needs and requests.<|im_end|> ``` This prompt is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the `tokenizer.apply_chat_template()` method: ```python messages = [ {"role": "system", "content": "You are Hermes 2."}, {"role": "user", "content": "Hello, who are you?"} ] gen_input = tokenizer.apply_chat_template(messages, return_tensors="pt") model.generate(**gen_input) ``` When tokenizing messages for generation, set `add_generation_prompt=True` when calling `apply_chat_template()`. This will append `<|im_start|>assistant\n` to your prompt, to ensure that the model continues with an assistant response. To utilize the prompt format without a system prompt, simply leave the line out. When quantized versions of the model are released, I recommend using LM Studio for chatting with Nous Hermes 2. It is a GUI application that utilizes GGUF models with a llama.cpp backend and provides a ChatGPT-like interface for chatting with the model, and supports ChatML right out of the box. In LM Studio, simply select the ChatML Prefix on the settings side pane: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6317aade83d8d2fd903192d9/ls6WqV-GSxMw2RA3GuQiN.png) # Quantized Models: [todo] [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl)
{"base_model": "01-ai/Yi-34B", "language": ["en"], "license": "apache-2.0", "tags": ["yi", "instruct", "finetune", "chatml", "gpt4", "synthetic data", "distillation"], "model-index": [{"name": "Nous-Hermes-2-Yi-34B", "results": []}]}
task
[ "TRANSLATION" ]
45,569
mqy/mt5-small-finetuned-2
mqy
summarization
[ "transformers", "pytorch", "tensorboard", "mt5", "text2text-generation", "summarization", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-03-10T06:43:19Z
2023-03-10T09:56:09+00:00
16
0
--- license: apache-2.0 metrics: - rouge tags: - summarization - generated_from_trainer model-index: - name: mt5-small-finetuned-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-2 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3527 - Rouge1: 19.64 - Rouge2: 5.91 - Rougel: 19.32 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 40 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:| | 4.4337 | 1.93 | 500 | 2.5989 | 15.87 | 4.85 | 15.76 | | 3.0621 | 3.86 | 1000 | 2.4750 | 18.99 | 5.94 | 18.78 | | 2.8411 | 5.79 | 1500 | 2.4011 | 18.31 | 5.73 | 18.22 | | 2.6947 | 7.72 | 2000 | 2.3851 | 19.17 | 5.79 | 19.0 | | 2.5872 | 9.65 | 2500 | 2.3918 | 18.99 | 6.04 | 18.78 | | 2.4996 | 11.58 | 3000 | 2.3689 | 18.77 | 5.83 | 18.38 | | 2.4192 | 13.51 | 3500 | 2.3604 | 19.84 | 5.98 | 19.5 | | 2.3524 | 15.44 | 4000 | 2.3694 | 20.13 | 6.52 | 19.84 | | 2.3012 | 17.37 | 4500 | 2.3637 | 19.25 | 5.5 | 18.85 | | 2.2408 | 19.31 | 5000 | 2.3374 | 19.79 | 6.25 | 19.33 | | 2.2004 | 21.24 | 5500 | 2.3400 | 19.72 | 6.06 | 19.26 | | 2.1631 | 23.17 | 6000 | 2.3584 | 19.95 | 6.08 | 19.46 | | 2.123 | 25.1 | 6500 | 2.3527 | 19.64 | 5.91 | 19.32 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
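Since the auto-generated card above omits a usage example, here is a minimal inference sketch; the fine-tuning dataset is undocumented, so the input text and generation lengths below are illustrative assumptions.

```python
from transformers import pipeline

# Minimal sketch: the training data (and thus the expected input language and
# domain) is not documented above, so treat this input as a placeholder.
summarizer = pipeline("summarization", model="mqy/mt5-small-finetuned-2")

text = "Replace this with a passage from the domain the model was fine-tuned on."
print(summarizer(text, max_length=64, min_length=8, do_sample=False)[0]["summary_text"])
```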
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mt5-small-finetuned-2 This model is a fine-tuned version of [google/mt5-small](https://huggingface.co/google/mt5-small) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 2.3527 - Rouge1: 19.64 - Rouge2: 5.91 - Rougel: 19.32 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0001 - train_batch_size: 12 - eval_batch_size: 12 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 40 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | |:-------------:|:-----:|:----:|:---------------:|:------:|:------:|:------:| | 4.4337 | 1.93 | 500 | 2.5989 | 15.87 | 4.85 | 15.76 | | 3.0621 | 3.86 | 1000 | 2.4750 | 18.99 | 5.94 | 18.78 | | 2.8411 | 5.79 | 1500 | 2.4011 | 18.31 | 5.73 | 18.22 | | 2.6947 | 7.72 | 2000 | 2.3851 | 19.17 | 5.79 | 19.0 | | 2.5872 | 9.65 | 2500 | 2.3918 | 18.99 | 6.04 | 18.78 | | 2.4996 | 11.58 | 3000 | 2.3689 | 18.77 | 5.83 | 18.38 | | 2.4192 | 13.51 | 3500 | 2.3604 | 19.84 | 5.98 | 19.5 | | 2.3524 | 15.44 | 4000 | 2.3694 | 20.13 | 6.52 | 19.84 | | 2.3012 | 17.37 | 4500 | 2.3637 | 19.25 | 5.5 | 18.85 | | 2.2408 | 19.31 | 5000 | 2.3374 | 19.79 | 6.25 | 19.33 | | 2.2004 | 21.24 | 5500 | 2.3400 | 19.72 | 6.06 | 19.26 | | 2.1631 | 23.17 | 6000 | 2.3584 | 19.95 | 6.08 | 19.46 | | 2.123 | 25.1 | 6500 | 2.3527 | 19.64 | 5.91 | 19.32 | ### Framework versions - Transformers 4.26.1 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
{"license": "apache-2.0", "metrics": ["rouge"], "tags": ["summarization", "generated_from_trainer"], "model-index": [{"name": "mt5-small-finetuned-2", "results": []}]}
task
[ "SUMMARIZATION" ]
45,570
Helsinki-NLP/opus-mt-ny-es
Helsinki-NLP
translation
[ "transformers", "pytorch", "tf", "marian", "text2text-generation", "translation", "ny", "es", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:04Z
2023-08-16T12:02:11+00:00
41
0
--- license: apache-2.0 tags: - translation --- ### opus-mt-ny-es * source languages: ny * target languages: es * OPUS readme: [ny-es](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ny-es/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-16.zip](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.zip) * test set translations: [opus-2020-01-16.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.test.txt) * test set scores: [opus-2020-01-16.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.ny.es | 27.9 | 0.457 |
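For completeness, a minimal inference sketch using the `transformers` pipeline (the Nyanja example sentence is illustrative only):

```python
from transformers import pipeline

# Minimal sketch for this Nyanja (ny) -> Spanish (es) Marian model.
translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ny-es")

print(translator("Moni, muli bwanji?")[0]["translation_text"])
```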
null
Non_BioNLP
### opus-mt-ny-es * source languages: ny * target languages: es * OPUS readme: [ny-es](https://github.com/Helsinki-NLP/OPUS-MT-train/blob/master/models/ny-es/README.md) * dataset: opus * model: transformer-align * pre-processing: normalization + SentencePiece * download original weights: [opus-2020-01-16.zip](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.zip) * test set translations: [opus-2020-01-16.test.txt](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.test.txt) * test set scores: [opus-2020-01-16.eval.txt](https://object.pouta.csc.fi/OPUS-MT-models/ny-es/opus-2020-01-16.eval.txt) ## Benchmarks | testset | BLEU | chr-F | |-----------------------|-------|-------| | JW300.ny.es | 27.9 | 0.457 |
{"license": "apache-2.0", "tags": ["translation"]}
task
[ "TRANSLATION" ]
45,571
esuriddick/distilbert-base-uncased-finetuned-emotion
esuriddick
text-classification
[ "transformers", "pytorch", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "en", "dataset:dair-ai/emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-19T16:55:35Z
2023-09-12T07:53:07+00:00
280
2
--- base_model: distilbert-base-uncased datasets: - dair-ai/emotion language: - en license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.9375 name: Accuracy - type: f1 value: 0.937890467332837 name: F1 --- # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1448 - Accuracy: 0.9375 - F1: 0.9379 The notebook used to fine-tune this model may be found [HERE](https://www.kaggle.com/marcoloureno/distilbert-base-uncased-finetuned-emotion). ## Model description DistilBERT is a transformers model, smaller and faster than BERT, which was pretrained on the same corpus in a self-supervised fashion, using the BERT base model as a teacher. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts using the BERT base model. More precisely, it was pretrained with three objectives: - Distillation loss: the model was trained to return the same probabilities as the BERT base model. - Masked language modeling (MLM): this is part of the original training loss of the BERT base model. When taking a sentence, the model randomly masks 15% of the words in the input, then runs the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence. - Cosine embedding loss: the model was also trained to generate hidden states as close as possible to those of the BERT base model. This way, the model learns the same inner representation of the English language as its teacher model, while being faster for inference or downstream tasks. ## Intended uses & limitations [Emotion](https://huggingface.co/datasets/dair-ai/emotion) is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. This dataset was developed for the paper entitled "CARER: Contextualized Affect Representations for Emotion Recognition" (Saravia et al.) through noisy labels, annotated via distant supervision as in the paper "Twitter sentiment classification using distant supervision" (Go et al.). The DistilBERT model was fine-tuned on this dataset, allowing for the classification of sentences into one of the six basic emotions (anger, fear, joy, love, sadness, and surprise). 
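As a minimal usage sketch (the example sentence is illustrative, and the exact label strings returned depend on the checkpoint's config):

```python
from transformers import pipeline

# Minimal sketch; the checkpoint id matches this repository.
classifier = pipeline("text-classification", model="esuriddick/distilbert-base-uncased-finetuned-emotion")

print(classifier("I can't wait to see you again!"))
# -> [{'label': ..., 'score': ...}], where the label is one of the six emotions above.
```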
## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.5337 | 1.0 | 250 | 0.1992 | 0.927 | 0.9262 | | 0.1405 | 2.0 | 500 | 0.1448 | 0.9375 | 0.9379 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.0 - Datasets 2.1.0 - Tokenizers 0.13.3
null
Non_BioNLP
# distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.1448 - Accuracy: 0.9375 - F1: 0.9379 The notebook used to fine-tune this model may be found [HERE](https://www.kaggle.com/marcoloureno/distilbert-base-uncased-finetuned-emotion). ## Model description DistilBERT is a transformers model, smaller and faster than BERT, which was pretrained on the same corpus in a self-supervised fashion, using the BERT base model as a teacher. This means it was pretrained on the raw texts only, with no humans labelling them in any way (which is why it can use lots of publicly available data) with an automatic process to generate inputs and labels from those texts using the BERT base model. More precisely, it was pretrained with three objectives: - Distillation loss: the model was trained to return the same probabilities as the BERT base model. - Masked language modeling (MLM): this is part of the original training loss of the BERT base model. When taking a sentence, the model randomly masks 15% of the words in the input, then runs the entire masked sentence through the model and has to predict the masked words. This is different from traditional recurrent neural networks (RNNs) that usually see the words one after the other, or from autoregressive models like GPT which internally mask the future tokens. It allows the model to learn a bidirectional representation of the sentence. - Cosine embedding loss: the model was also trained to generate hidden states as close as possible to those of the BERT base model. This way, the model learns the same inner representation of the English language as its teacher model, while being faster for inference or downstream tasks. ## Intended uses & limitations [Emotion](https://huggingface.co/datasets/dair-ai/emotion) is a dataset of English Twitter messages with six basic emotions: anger, fear, joy, love, sadness, and surprise. This dataset was developed for the paper entitled "CARER: Contextualized Affect Representations for Emotion Recognition" (Saravia et al.) through noisy labels, annotated via distant supervision as in the paper "Twitter sentiment classification using distant supervision" (Go et al.). The DistilBERT model was fine-tuned on this dataset, allowing for the classification of sentences into one of the six basic emotions (anger, fear, joy, love, sadness, and surprise). ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.5337 | 1.0 | 250 | 0.1992 | 0.927 | 0.9262 | | 0.1405 | 2.0 | 500 | 0.1448 | 0.9375 | 0.9379 | ### Framework versions - Transformers 4.30.2 - Pytorch 2.0.0 - Datasets 2.1.0 - Tokenizers 0.13.3
{"base_model": "distilbert-base-uncased", "datasets": ["dair-ai/emotion"], "language": ["en"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.9375, "name": "Accuracy"}, {"type": "f1", "value": 0.937890467332837, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,572
TransferGraph/zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate
TransferGraph
text-classification
[ "peft", "safetensors", "parquet", "text-classification", "dataset:tweet_eval", "base_model:zenkri/autotrain-Arabic_Poetry_by_Subject-920730230", "base_model:adapter:zenkri/autotrain-Arabic_Poetry_by_Subject-920730230", "model-index", "region:us" ]
2024-02-29T13:50:53Z
2024-02-29T13:50:55+00:00
0
0
--- base_model: zenkri/autotrain-Arabic_Poetry_by_Subject-920730230 datasets: - tweet_eval library_name: peft metrics: - accuracy tags: - parquet - text-classification model-index: - name: zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate results: - task: type: text-classification name: Text Classification dataset: name: tweet_eval type: tweet_eval config: hate split: validation args: hate metrics: - type: accuracy value: 0.727 name: accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate This model is a fine-tuned version of [zenkri/autotrain-Arabic_Poetry_by_Subject-920730230](https://huggingface.co/zenkri/autotrain-Arabic_Poetry_by_Subject-920730230) on the tweet_eval dataset. It achieves the following results on the evaluation set: - accuracy: 0.727 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0004 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | accuracy | train_loss | epoch | |:--------:|:----------:|:-----:| | 0.457 | None | 0 | | 0.716 | 0.5969 | 0 | | 0.729 | 0.5011 | 1 | | 0.726 | 0.4713 | 2 | | 0.727 | 0.4559 | 3 | ### Framework versions - PEFT 0.8.2 - Transformers 4.37.2 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.2
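A minimal loading sketch (hedged: whether the classification head weights are bundled with the adapter depends on how it was saved, and PEFT's auto class is assumed to resolve the base model from the adapter config):

```python
import torch
from peft import AutoPeftModelForSequenceClassification
from transformers import AutoTokenizer

adapter_id = "TransferGraph/zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate"

# The base model id comes from the card above; the two-label head is assumed
# from the tweet_eval 'hate' config.
model = AutoPeftModelForSequenceClassification.from_pretrained(adapter_id, num_labels=2)
tokenizer = AutoTokenizer.from_pretrained("zenkri/autotrain-Arabic_Poetry_by_Subject-920730230")

inputs = tokenizer("Example tweet text to score.", return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits
print(logits.argmax(dim=-1).item())  # 0 = non-hate, 1 = hate per tweet_eval 'hate'
```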
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate This model is a fine-tuned version of [zenkri/autotrain-Arabic_Poetry_by_Subject-920730230](https://huggingface.co/zenkri/autotrain-Arabic_Poetry_by_Subject-920730230) on the tweet_eval dataset. It achieves the following results on the evaluation set: - accuracy: 0.727 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0004 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 ### Training results | accuracy | train_loss | epoch | |:--------:|:----------:|:-----:| | 0.457 | None | 0 | | 0.716 | 0.5969 | 0 | | 0.729 | 0.5011 | 1 | | 0.726 | 0.4713 | 2 | | 0.727 | 0.4559 | 3 | ### Framework versions - PEFT 0.8.2 - Transformers 4.37.2 - Pytorch 2.2.0 - Datasets 2.16.1 - Tokenizers 0.15.2
{"base_model": "zenkri/autotrain-Arabic_Poetry_by_Subject-920730230", "datasets": ["tweet_eval"], "library_name": "peft", "metrics": ["accuracy"], "tags": ["parquet", "text-classification"], "model-index": [{"name": "zenkri_autotrain-Arabic_Poetry_by_Subject-920730230-finetuned-lora-tweet_eval_hate", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "tweet_eval", "type": "tweet_eval", "config": "hate", "split": "validation", "args": "hate"}, "metrics": [{"type": "accuracy", "value": 0.727, "name": "accuracy"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,573
YakovElm/Jira20SetFitModel_Train_balance_ratio_Half
YakovElm
text-classification
[ "sentence-transformers", "pytorch", "mpnet", "setfit", "text-classification", "arxiv:2209.11055", "license:apache-2.0", "region:us" ]
2023-06-10T18:17:33Z
2023-06-10T18:18:06+00:00
10
0
--- license: apache-2.0 pipeline_tag: text-classification tags: - setfit - sentence-transformers - text-classification --- # YakovElm/Jira20SetFitModel_Train_balance_ratio_Half This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("YakovElm/Jira20SetFitModel_Train_balance_ratio_Half") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
null
Non_BioNLP
# YakovElm/Jira20SetFitModel_Train_balance_ratio_Half This is a [SetFit model](https://github.com/huggingface/setfit) that can be used for text classification. The model has been trained using an efficient few-shot learning technique that involves: 1. Fine-tuning a [Sentence Transformer](https://www.sbert.net) with contrastive learning. 2. Training a classification head with features from the fine-tuned Sentence Transformer. ## Usage To use this model for inference, first install the SetFit library: ```bash python -m pip install setfit ``` You can then run inference as follows: ```python from setfit import SetFitModel # Download from Hub and run inference model = SetFitModel.from_pretrained("YakovElm/Jira20SetFitModel_Train_balance_ratio_Half") # Run inference preds = model(["i loved the spiderman movie!", "pineapple on pizza is the worst 🤮"]) ``` ## BibTeX entry and citation info ```bibtex @article{https://doi.org/10.48550/arxiv.2209.11055, doi = {10.48550/ARXIV.2209.11055}, url = {https://arxiv.org/abs/2209.11055}, author = {Tunstall, Lewis and Reimers, Nils and Jo, Unso Eun Seo and Bates, Luke and Korat, Daniel and Wasserblat, Moshe and Pereg, Oren}, keywords = {Computation and Language (cs.CL), FOS: Computer and information sciences, FOS: Computer and information sciences}, title = {Efficient Few-Shot Learning Without Prompts}, publisher = {arXiv}, year = {2022}, copyright = {Creative Commons Attribution 4.0 International} } ```
{"license": "apache-2.0", "pipeline_tag": "text-classification", "tags": ["setfit", "sentence-transformers", "text-classification"]}
task
[ "TEXT_CLASSIFICATION" ]
45,574
gaudi/opus-mt-en-tn-ctranslate2
gaudi
translation
[ "transformers", "marian", "ctranslate2", "translation", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-07-18T15:05:03Z
2024-10-19T00:32:09+00:00
5
0
--- license: apache-2.0 tags: - ctranslate2 - translation --- # Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn) - This repository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Currently supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against the `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and to reproduce these numbers. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library. A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints, with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inference performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-en-tn --output_dir ./ctranslate2/opus-mt-en-tn-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-en-tn-ctranslate2 ``` #### Take the Python code below and update the `model_dir` variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-en-tn-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-en-tn-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be identical to the [original Hugging Face repository](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn) by Helsinki-NLP.
null
Non_BioNLP
# Repository General Information ## Inspired by and derived from the work of [Helsinki-NLP](https://huggingface.co/Helsinki-NLP), [CTranslate2](https://github.com/OpenNMT/CTranslate2), and [michaelfeil](https://huggingface.co/michaelfeil)! - Link to Original Model ([Helsinki-NLP](https://huggingface.co/Helsinki-NLP)): [Model Link](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn) - This repository was based on the work of [CTranslate2](https://github.com/OpenNMT/CTranslate2). - This repository was based on the work of [michaelfeil](https://huggingface.co/michaelfeil). # What is CTranslate2? [CTranslate2](https://opennmt.net/CTranslate2/) is a C++ and Python library for efficient inference with Transformer models. CTranslate2 implements a custom runtime that applies many performance optimization techniques such as weights quantization, layers fusion, batch reordering, etc., to accelerate and reduce the memory usage of Transformer models on CPU and GPU. CTranslate2 is one of the most performant ways of hosting translation models at scale. Currently supported models include: - Encoder-decoder models: Transformer base/big, M2M-100, NLLB, BART, mBART, Pegasus, T5, Whisper - Decoder-only models: GPT-2, GPT-J, GPT-NeoX, OPT, BLOOM, MPT, Llama, Mistral, Gemma, CodeGen, GPTBigCode, Falcon - Encoder-only models: BERT, DistilBERT, XLM-RoBERTa The project is production-oriented and comes with backward compatibility guarantees, but it also includes experimental features related to model compression and inference acceleration. # CTranslate2 Benchmarks Please note that the results presented below are only valid for the configuration used during this benchmark: absolute and relative performance may change with different settings. Tested against the `newstest2014` (En -> De) dataset. The benchmark reports the number of target tokens generated per second (higher is better). The results are aggregated over multiple runs. See the benchmark scripts for more details and to reproduce these numbers. ## CPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 147.3 | 2332MB | 27.90 | | Marian 1.11.0 (int16) | 330.2 | 5901MB | 27.65 | | Marian 1.11.0 (int8) | 355.8 | 4763MB | 27.27 | | CTranslate2 3.6.0 (int16) | 596.1 | 660MB | 27.53 | | CTranslate2 3.6.0 (int8) | 696.1 | 516MB | 27.65 | ## GPU Benchmarks for Generic Opus-MT Models | Library | Tokens per Second | Max GPU Memory Usage | Max Memory Usage | BLEU | | :----: | :----: | :----: | :----: | :----: | | Transformers 4.26.1 (with PyTorch 1.13.1) | 1022.9 | 4097MB | 2109MB | 27.90 | | Marian 1.11.0 (float16) | 3962.4 | 3239MB | 1976MB | 27.94 | | CTranslate2 3.6.0 (float16) | 9296.7 | 909MB | 814MB | 27.9 | | CTranslate2 3.6.0 (int8 + float16) | 8362.7 | 813MB | 766MB | 27.9 | `Executed with 4 threads on a c5.2xlarge Amazon EC2 instance equipped with an Intel(R) Xeon(R) Platinum 8275CL CPU.` **Source to benchmark information can be found [here](https://github.com/OpenNMT/CTranslate2).**<br /> **Original model BLEU scores can be found [here](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn).** ## Internal Benchmarks Internal testing on our end showed **inference times reduced by 6x-10x** on average compared to the vanilla checkpoints using the *transformers* library. A **slight reduction in BLEU scores (~5%)** was also identified in comparison to the vanilla checkpoints, with a few exceptions. This is likely due to several factors, one being the quantization applied. Further testing is needed from our end to better assess the reduction in translation quality. The command used to compile the vanilla checkpoint into a CTranslate2 model can be found below. Modifying this command can yield differing balances between inference performance and translation quality. # CTranslate2 Installation ```bash pip install hf-hub-ctranslate2>=1.0.0 ctranslate2>=3.13.0 ``` ### ct2-transformers-converter Command Used: ```bash ct2-transformers-converter --model Helsinki-NLP/opus-mt-en-tn --output_dir ./ctranslate2/opus-mt-en-tn-ctranslate2 --force --copy_files README.md generation_config.json tokenizer_config.json vocab.json source.spm .gitattributes target.spm --quantization float16 ``` # CTranslate2 Converted Checkpoint Information: **Compatible With:** - [ctranslate2](https://github.com/OpenNMT/CTranslate2) - [hf-hub-ctranslate2](https://github.com/michaelfeil/hf-hub-ctranslate2) **Compute Type:** - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` # Sample Code - ctranslate2 #### Clone the repository to the working directory or wherever you wish to store the model artifacts. #### ```bash git clone https://huggingface.co/gaudi/opus-mt-en-tn-ctranslate2 ``` #### Take the Python code below and update the `model_dir` variable to the location of the cloned repository. #### ```python from ctranslate2 import Translator import transformers model_dir = "./opus-mt-en-tn-ctranslate2" # Path to model directory. translator = Translator( model_path=model_dir, device="cuda", # cpu, cuda, or auto. inter_threads=1, # Maximum number of parallel translations. intra_threads=4, # Number of OpenMP threads per translator. compute_type="int8_float16", # int8 for cpu or int8_float16 for cuda. ) tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir) source = tokenizer.convert_ids_to_tokens(tokenizer.encode("XXXXXX, XXX XX XXXXXX.")) results = translator.translate_batch([source]) target = results[0].hypotheses[0] print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target))) ``` # Sample Code - hf-hub-ctranslate2 **Derived From [michaelfeil](https://huggingface.co/michaelfeil):** ```python from hf_hub_ctranslate2 import TranslatorCT2fromHfHub, GeneratorCT2fromHfHub from transformers import AutoTokenizer model_name = "gaudi/opus-mt-en-tn-ctranslate2" model = TranslatorCT2fromHfHub( model_name_or_path=model_name, device="cuda", compute_type="int8_float16", tokenizer=AutoTokenizer.from_pretrained(model_name) ) outputs = model.generate( text=["XXX XX XXX XXXXXXX XXXX?", "XX XX XXXX XX XXX!"], ) print(outputs) ``` # License and other remarks: License conditions are intended to be identical to the [original Hugging Face repository](https://huggingface.co/Helsinki-NLP/opus-mt-en-tn) by Helsinki-NLP.
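The card's sample code targets CUDA; below is a CPU-only variant following the card's own `compute_type=int8` recommendation. The path, thread count, and input sentence are arbitrary placeholders.

```python
from ctranslate2 import Translator
import transformers

model_dir = "./opus-mt-en-tn-ctranslate2"  # wherever the repository was cloned

translator = Translator(
    model_path=model_dir,
    device="cpu",
    intra_threads=4,      # OpenMP threads; tune to the machine
    compute_type="int8",  # the card's suggested compute type for CPU
)
tokenizer = transformers.AutoTokenizer.from_pretrained(model_dir)

source = tokenizer.convert_ids_to_tokens(tokenizer.encode("Hello, how are you?"))
results = translator.translate_batch([source])
target = results[0].hypotheses[0]
print(tokenizer.decode(tokenizer.convert_tokens_to_ids(target)))
```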
{"license": "apache-2.0", "tags": ["ctranslate2", "translation"]}
task
[ "TRANSLATION" ]
45,575
clemyam/distilbert-base-uncased-finetuned-emotion
clemyam
text-classification
[ "transformers", "tensorboard", "safetensors", "distilbert", "text-classification", "generated_from_trainer", "dataset:emotion", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-11-08T14:49:00Z
2023-11-09T19:32:28+00:00
8
0
--- base_model: distilbert-base-uncased datasets: - emotion license: apache-2.0 metrics: - accuracy - f1 tags: - generated_from_trainer model-index: - name: distilbert-base-uncased-finetuned-emotion results: - task: type: text-classification name: Text Classification dataset: name: emotion type: emotion config: split split: validation args: split metrics: - type: accuracy value: 0.9235 name: Accuracy - type: f1 value: 0.9229917619966129 name: F1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2158 - Accuracy: 0.9235 - F1: 0.9230 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8073 | 1.0 | 250 | 0.3193 | 0.911 | 0.9105 | | 0.2524 | 2.0 | 500 | 0.2158 | 0.9235 | 0.9230 | ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0+cpu - Datasets 2.14.6 - Tokenizers 0.14.1
null
Non_BioNLP
<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # distilbert-base-uncased-finetuned-emotion This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the emotion dataset. It achieves the following results on the evaluation set: - Loss: 0.2158 - Accuracy: 0.9235 - F1: 0.9230 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:------:| | 0.8073 | 1.0 | 250 | 0.3193 | 0.911 | 0.9105 | | 0.2524 | 2.0 | 500 | 0.2158 | 0.9235 | 0.9230 | ### Framework versions - Transformers 4.35.0 - Pytorch 2.1.0+cpu - Datasets 2.14.6 - Tokenizers 0.14.1
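The auto-generated card stops at the framework versions and includes no usage snippet; a minimal inference sketch with the `transformers` pipeline might look like the following. The input string and printed scores are illustrative, and the label names returned depend on the `id2label` mapping exported with the checkpoint.

```python
from transformers import pipeline

classifier = pipeline(
    "text-classification",
    model="clemyam/distilbert-base-uncased-finetuned-emotion",
)
# The emotion dataset has six classes: sadness, joy, love, anger, fear, surprise.
print(classifier("I'm thrilled the deployment finally went through!"))
# e.g. [{'label': 'joy', 'score': 0.99}]  (illustrative output)
```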
{"base_model": "distilbert-base-uncased", "datasets": ["emotion"], "license": "apache-2.0", "metrics": ["accuracy", "f1"], "tags": ["generated_from_trainer"], "model-index": [{"name": "distilbert-base-uncased-finetuned-emotion", "results": [{"task": {"type": "text-classification", "name": "Text Classification"}, "dataset": {"name": "emotion", "type": "emotion", "config": "split", "split": "validation", "args": "split"}, "metrics": [{"type": "accuracy", "value": 0.9235, "name": "Accuracy"}, {"type": "f1", "value": 0.9229917619966129, "name": "F1"}]}]}]}
task
[ "TEXT_CLASSIFICATION" ]
45,576
haonan-li/bactrian-ro-llama-7b-lora
haonan-li
null
[ "arxiv:2305.15011", "license:mit", "region:us" ]
2023-05-30T12:20:30Z
2023-05-30T12:21:07+00:00
0
0
--- license: mit --- This repo contains a low-rank adapter (LoRA) for LLaMA-7b fit on the [Stanford-Alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca) and [databricks-dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data) data in Romanian. ### Dataset Creation 1. English Instructions: The English instructions are obtained from [alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca) and [dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data). 2. Instruction Translation: The instructions (and inputs) are translated into the target languages using the Google Translation API (conducted in April 2023). 3. Output Generation: We generate output from `gpt-3.5-turbo` for each language (conducted in April 2023). <h3 align="center"> <img src="https://raw.githubusercontent.com/fajri91/eval_picts/master/BactrianX_dataset.jpg" width="950" align="center"> </h3> ### Training Parameters The code for training the model is provided in our [GitHub repository](https://github.com/mbzuai-nlp/Bactrian-X), which is adapted from [Alpaca-LoRA](https://github.com/tloen/alpaca-lora). This version of the weights was trained with the following hyperparameters: - Epochs: 8 - Batch size: 128 - Cutoff length: 512 - Learning rate: 3e-4 - LoRA _r_: 16 - LoRA target modules: q_proj, v_proj That is: ``` python finetune.py \ --base_model='decapoda-research/llama-7b-hf' \ --num_epochs=8 \ --cutoff_len=1024 \ --group_by_length \ --output_dir='./bactrian-ro-7b-lora' \ --lora_target_modules='[q_proj,v_proj]' \ --lora_r=16 \ --micro_batch_size=32 ``` Instructions for running it can be found at https://github.com/MBZUAI-nlp/Bactrian-X. ### Discussion of Biases (1) Translation bias; (2) Potential English-culture bias in the translated dataset. ### Citation Information ``` @misc{li2023bactrianx, title={Bactrian-X : A Multilingual Replicable Instruction-Following Model with Low-Rank Adaptation}, author={Haonan Li and Fajri Koto and Minghao Wu and Alham Fikri Aji and Timothy Baldwin}, year={2023}, eprint={2305.15011}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
null
Non_BioNLP
This repo contains a low-rank adapter (LoRA) for LLaMA-7b fit on the [Stanford-Alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca) and [databricks-dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data) data in Romanian. ### Dataset Creation 1. English Instructions: The English instructions are obtained from [alpaca-52k](https://github.com/tatsu-lab/stanford_alpaca) and [dolly-15k](https://github.com/databrickslabs/dolly/tree/master/data). 2. Instruction Translation: The instructions (and inputs) are translated into the target languages using the Google Translation API (conducted in April 2023). 3. Output Generation: We generate output from `gpt-3.5-turbo` for each language (conducted in April 2023). <h3 align="center"> <img src="https://raw.githubusercontent.com/fajri91/eval_picts/master/BactrianX_dataset.jpg" width="950" align="center"> </h3> ### Training Parameters The code for training the model is provided in our [GitHub repository](https://github.com/mbzuai-nlp/Bactrian-X), which is adapted from [Alpaca-LoRA](https://github.com/tloen/alpaca-lora). This version of the weights was trained with the following hyperparameters: - Epochs: 8 - Batch size: 128 - Cutoff length: 512 - Learning rate: 3e-4 - LoRA _r_: 16 - LoRA target modules: q_proj, v_proj That is: ``` python finetune.py \ --base_model='decapoda-research/llama-7b-hf' \ --num_epochs=8 \ --cutoff_len=1024 \ --group_by_length \ --output_dir='./bactrian-ro-7b-lora' \ --lora_target_modules='[q_proj,v_proj]' \ --lora_r=16 \ --micro_batch_size=32 ``` Instructions for running it can be found at https://github.com/MBZUAI-nlp/Bactrian-X. ### Discussion of Biases (1) Translation bias; (2) Potential English-culture bias in the translated dataset. ### Citation Information ``` @misc{li2023bactrianx, title={Bactrian-X : A Multilingual Replicable Instruction-Following Model with Low-Rank Adaptation}, author={Haonan Li and Fajri Koto and Minghao Wu and Alham Fikri Aji and Timothy Baldwin}, year={2023}, eprint={2305.15011}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
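The card shows the training command but no loading code. A sketch of applying this adapter on top of the base checkpoint with `peft` follows; the Alpaca-style prompt template and generation settings are assumptions, not something the card specifies, and the base repository named in the card may no longer be downloadable.

```python
import torch
from peft import PeftModel
from transformers import LlamaForCausalLM, LlamaTokenizer

base_id = "decapoda-research/llama-7b-hf"  # base model named in the card
base = LlamaForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.float16, device_map="auto"
)
model = PeftModel.from_pretrained(base, "haonan-li/bactrian-ro-llama-7b-lora")
tokenizer = LlamaTokenizer.from_pretrained(base_id)

# Assumed Alpaca-style template; the Romanian instruction is a placeholder.
prompt = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n\n"
    "### Instruction:\nCare este capitala României?\n\n### Response:\n"
)
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```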
{"license": "mit"}
task
[ "TRANSLATION" ]
45,579