{ // 获取包含Hugging Face文本的span元素 const spans = link.querySelectorAll('span.whitespace-nowrap, span.hidden.whitespace-nowrap'); spans.forEach(span => { if (span.textContent && span.textContent.trim().match(/Hugging\s*Face/i)) { span.textContent = 'AI快站'; } }); }); // 替换logo图片的alt属性 document.querySelectorAll('img[alt*="Hugging"], img[alt*="Face"]').forEach(img => { if (img.alt.match(/Hugging\s*Face/i)) { img.alt = 'AI快站 logo'; } }); } // 替换导航栏中的链接 function replaceNavigationLinks() { // 已替换标记,防止重复运行 if (window._navLinksReplaced) { return; } // 已经替换过的链接集合,防止重复替换 const replacedLinks = new Set(); // 只在导航栏区域查找和替换链接 const headerArea = document.querySelector('header') || document.querySelector('nav'); if (!headerArea) { return; } // 在导航区域内查找链接 const navLinks = headerArea.querySelectorAll('a'); navLinks.forEach(link => { // 如果已经替换过,跳过 if (replacedLinks.has(link)) return; const linkText = link.textContent.trim(); const linkHref = link.getAttribute('href') || ''; // 替换Spaces链接 - 仅替换一次 if ( (linkHref.includes('/spaces') || linkHref === '/spaces' || linkText === 'Spaces' || linkText.match(/^s*Spacess*$/i)) && linkText !== 'GitHub加速' && linkText !== 'GitHub加速' ) { link.textContent = 'GitHub加速'; link.href = 'https://githubproxy.cc'; link.setAttribute('target', '_blank'); link.setAttribute('rel', 'noopener noreferrer'); replacedLinks.add(link); } // 删除Posts链接 else if ( (linkHref.includes('/posts') || linkHref === '/posts' || linkText === 'Posts' || linkText.match(/^s*Postss*$/i)) ) { if (link.parentNode) { link.parentNode.removeChild(link); } replacedLinks.add(link); } // 替换Docs链接 - 仅替换一次 else if ( (linkHref.includes('/docs') || linkHref === '/docs' || linkText === 'Docs' || linkText.match(/^s*Docss*$/i)) && linkText !== 'Vibevoice' ) { link.textContent = 'Vibevoice'; link.href = 'https://vibevoice.info/'; replacedLinks.add(link); } // 替换Pricing链接 - 仅替换一次 else if ( (linkHref.includes('/pricing') || linkHref === '/pricing' || linkText === 'Pricing' || linkText.match(/^s*Pricings*$/i)) 
&& linkText !== 'VoxCPM' ) { link.textContent = 'VoxCPM'; link.href = 'https://voxcpm.net/'; replacedLinks.add(link); } // 替换Enterprise链接 else if ( (linkHref.includes('/enterprise') || linkHref === '/enterprise' || linkText === 'Enterprise' || linkText.match(/^s*Enterprises*$/i)) && linkText !== 'IndexTTS2' ) { link.textContent = 'IndexTTS2'; link.href = 'https://vibevoice.info/indextts2'; replacedLinks.add(link); } }); // 查找可能嵌套的Spaces和Posts文本 const textNodes = []; function findTextNodes(element) { if (element.nodeType === Node.TEXT_NODE) { const text = element.textContent.trim(); if (text === 'Spaces' || text === 'Posts' || text === 'Enterprise') { textNodes.push(element); } } else { for (const child of element.childNodes) { findTextNodes(child); } } } // 只在导航区域内查找文本节点 findTextNodes(headerArea); // 替换找到的文本节点 textNodes.forEach(node => { const text = node.textContent.trim(); if (text === 'Spaces') { node.textContent = node.textContent.replace(/Spaces/g, 'GitHub加速'); } else if (text === 'Posts') { // 删除Posts文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } else if (text === 'Enterprise') { // 删除Enterprise文本节点 if (node.parentNode) { node.parentNode.removeChild(node); } } }); // 标记已替换完成 window._navLinksReplaced = true; } // 替换代码区域中的域名 function replaceCodeDomains() { // 特别处理span.hljs-string和span.njs-string元素 document.querySelectorAll('span.hljs-string, span.njs-string, span[class*="hljs-string"], span[class*="njs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换hljs-string类的span中的域名(移除多余的转义符号) document.querySelectorAll('span.hljs-string, span[class*="hljs-string"]').forEach(span => { if (span.textContent && span.textContent.includes('huggingface.co')) { span.textContent = span.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 替换pre和code标签中包含git clone命令的域名 document.querySelectorAll('pre, 
code').forEach(element => { if (element.textContent && element.textContent.includes('git clone')) { const text = element.innerHTML; if (text.includes('huggingface.co')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 处理特定的命令行示例 document.querySelectorAll('pre, code').forEach(element => { const text = element.innerHTML; if (text.includes('huggingface.co')) { // 针对git clone命令的专门处理 if (text.includes('git clone') || text.includes('GIT_LFS_SKIP_SMUDGE=1')) { element.innerHTML = text.replace(/huggingface.co/g, 'aifasthub.com'); } } }); // 特别处理模型下载页面上的代码片段 document.querySelectorAll('.flex.border-t, .svelte_hydrator, .inline-block').forEach(container => { const content = container.innerHTML; if (content && content.includes('huggingface.co')) { container.innerHTML = content.replace(/huggingface.co/g, 'aifasthub.com'); } }); // 特别处理模型仓库克隆对话框中的代码片段 try { // 查找包含"Clone this model repository"标题的对话框 const cloneDialog = document.querySelector('.svelte_hydration_boundary, [data-target="MainHeader"]'); if (cloneDialog) { // 查找对话框中所有的代码片段和命令示例 const codeElements = cloneDialog.querySelectorAll('pre, code, span'); codeElements.forEach(element => { if (element.textContent && element.textContent.includes('huggingface.co')) { if (element.innerHTML.includes('huggingface.co')) { element.innerHTML = element.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { element.textContent = element.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); } // 更精确地定位克隆命令中的域名 document.querySelectorAll('[data-target]').forEach(container => { const codeBlocks = container.querySelectorAll('pre, code, span.hljs-string'); codeBlocks.forEach(block => { if (block.textContent && block.textContent.includes('huggingface.co')) { if (block.innerHTML.includes('huggingface.co')) { block.innerHTML = block.innerHTML.replace(/huggingface.co/g, 'aifasthub.com'); } else { block.textContent = block.textContent.replace(/huggingface.co/g, 'aifasthub.com'); } } }); }); } 
catch (e) { // 错误处理但不打印日志 } } // 当DOM加载完成后执行替换 if (document.readyState === 'loading') { document.addEventListener('DOMContentLoaded', () => { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); }); } else { replaceHeaderBranding(); replaceNavigationLinks(); replaceCodeDomains(); // 只在必要时执行替换 - 3秒后再次检查 setTimeout(() => { if (!window._navLinksReplaced) { console.log('[Client] 3秒后重新检查导航链接'); replaceNavigationLinks(); } }, 3000); } // 增加一个MutationObserver来处理可能的动态元素加载 const observer = new MutationObserver(mutations => { // 检查是否导航区域有变化 const hasNavChanges = mutations.some(mutation => { // 检查是否存在header或nav元素变化 return Array.from(mutation.addedNodes).some(node => { if (node.nodeType === Node.ELEMENT_NODE) { // 检查是否是导航元素或其子元素 if (node.tagName === 'HEADER' || node.tagName === 'NAV' || node.querySelector('header, nav')) { return true; } // 检查是否在导航元素内部 let parent = node.parentElement; while (parent) { if (parent.tagName === 'HEADER' || parent.tagName === 'NAV') { return true; } parent = parent.parentElement; } } return false; }); }); // 只在导航区域有变化时执行替换 if (hasNavChanges) { // 重置替换状态,允许再次替换 window._navLinksReplaced = false; replaceHeaderBranding(); replaceNavigationLinks(); } }); // 开始观察document.body的变化,包括子节点 if (document.body) { observer.observe(document.body, { childList: true, subtree: true }); } else { document.addEventListener('DOMContentLoaded', () => { observer.observe(document.body, { childList: true, subtree: true }); }); } })(); \n\nMore information can be found here and below: https://huggingface.co/datasets/JosephusCheung/GuanacoDataset\nBelow is a description of Guanaco from https://guanaco-model.github.io/:\n\nGuanaco is an advanced instruction-following language model built on Meta's LLaMA 13B model. 
Expanding upon the initial 52K dataset from the Alpaca model, an additional 534,530 entries have been incorporated, covering English, Simplified Chinese, Traditional Chinese (Taiwan), Traditional Chinese (Hong Kong), Japanese, Deutsch, and various linguistic and grammatical tasks. This wealth of data enables Guanaco to perform exceptionally well in multilingual environments.\n\nIn an effort to foster openness and replicability in research, we have made the [Guanaco Dataset](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset) publicly accessible and released the [model weights](https://huggingface.co/JosephusCheung/Guanaco). By providing these resources, we aim to inspire more researchers to pursue related research and collectively advance the development of instruction-following language models.\n\nWhen utilizing the Guanaco model, please bear in mind the following points:\n\n* The Guanaco model has not been filtered for harmful, biased, or explicit content. As a result, outputs that do not adhere to ethical norms may be generated during use. Please exercise caution when using the model in research or practical applications.\n\n1\\. Improved context and prompt role support:\n---------------------------------------------\n\nThe new format is designed to be similar to ChatGPT, allowing for better integration with the Alpaca format and enhancing the overall user experience.\n\nInstruction is utilized as a few-shot context to support diverse inputs and responses, making it easier for the model to understand and provide accurate responses to user queries.\n\nThe format is as follows:\n\n ### Instruction:\n User: History User Input\n Assistant: History Assistant Answer\n ### Input:\n System: Knowledge\n User: New User Input\n ### Response:\n New Assistant Answer\n \n\nThis structured format allows for easier tracking of the conversation history and maintaining context throughout a multi-turn dialogue.\n\n2\\. 
Role-playing support:\n-------------------------\n\nGuanaco now offers advanced role-playing support, similar to Character.AI, in English, Simplified Chinese, Traditional Chinese, Japanese, and Deutsch, making it more versatile for users from different linguistic backgrounds.\n\nUsers can instruct the model to assume specific roles, historical figures, or fictional characters, as well as personalities based on their input. This allows for more engaging and immersive conversations.\n\nThe model can use various sources of information to provide knowledge and context for the character's background and behavior, such as encyclopedic entries, first-person narrations, or a list of personality traits.\n\nThe model will consistently output responses in the format \"Character Name: Reply\" to maintain the chosen role throughout the conversation, enhancing the user's experience.\n\n3\\. Rejection of answers and avoidance of erroneous responses:\n--------------------------------------------------------------\n\nThe model has been updated to handle situations where it lacks sufficient knowledge or is unable to provide a valid response more effectively.\n\nReserved keywords have been introduced to indicate different scenarios and provide clearer communication with the user:\n\n* NO IDEA: Indicates that the model lacks the necessary knowledge to provide an accurate answer, and will explain this to the user, encouraging them to seek alternative sources.\n* FORBIDDEN: Indicates that the model refuses to answer due to specific reasons (e.g., legal, ethical, or safety concerns), which will be inferred based on the context of the query.\n* SFW: Indicates that the model refuses to answer a question because it has been filtered for NSFW content, ensuring a safer and more appropriate user experience.\n\n4\\. 
Continuation of responses for ongoing topics:\n-------------------------------------------------\n\nThe Guanaco model can now continue answering questions or discussing topics upon the user's request, making it more adaptable and better suited for extended conversations.\n\nThe contextual structure consisting of System, Assistant, and User roles allows the model to engage in multi-turn dialogues, maintain context-aware conversations, and provide more coherent responses.\n\nThe model can now accommodate role specification and character settings, providing a more immersive and tailored conversational experience based on the user's preferences.\n\nIt is important to remember that Guanaco is a 7B-parameter model, and any knowledge-based content should be considered potentially inaccurate. We strongly recommend providing verifiable sources, such as Wikipedia, for knowledge-based answers. In the absence of sources, it is crucial to inform users of this limitation to prevent the dissemination of false information and to maintain transparency.\n\n5\\. Multimodal Visual Question Answering (VQA) Support:\n-------------------------------------------------------\n\nGuanaco expands its capabilities into the realm of multimodal interactions, now offering support for Visual Question Answering (VQA). The model achieves this by integrating data from the blip2-flan-t5-xxl for multilingual VQA tasks, marking a significant milestone in the development of multimodal chatbots.\n\nThis new feature allows the model to interpret and respond to queries that involve both text and visual inputs, providing a richer, more interactive, and comprehensive user experience. 
Users can now ask questions about an image, and the model will analyze the visual content in conjunction with the textual query to provide a response.\n\nA noteworthy addition is the [Guanaco VQA Dataset](https://huggingface.co/datasets/JosephusCheung/GuanacoVQADataset), publicly accessible now.\n\nNow as a multimodal chatbot, Guanaco can bridge the gap between visual and linguistic understanding, making it an incredibly versatile tool for a wide array of applications.\n\nHowever, as always, we encourage responsible and ethical use of this model. Please note that while Guanaco strives to provide accurate and helpful responses, it is still crucial to cross-verify the information from reliable sources for knowledge-based queries.\n"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2450,"cells":{"id":{"kind":"string","value":"AntoineBlanot/roberta-nli"},"author":{"kind":"string","value":"AntoineBlanot"},"task_category":{"kind":"string","value":"zero-shot-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","roberta","feature-extraction","zero-shot-classification","en","dataset:multi_nli","dataset:snli","dataset:scitail","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"roberta\",\n \"feature-extraction\",\n \"zero-shot-classification\",\n \"en\",\n \"dataset:multi_nli\",\n \"dataset:snli\",\n \"dataset:scitail\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-05-25T02:29:17Z","string":"2023-05-25T02:29:17Z"},"last_modified":{"kind":"string","value":"2023-06-05T08:41:52+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- multi_nli\n- snli\n- scitail\nlanguage:\n- en\nmetrics:\n- accuracy\n- f1\npipeline_tag: zero-shot-classification\n---\n# RoBERTa NLI (Natural Language Inference) \nThis model is 
a fine-tuned model of [roberta-large](https://huggingface.co/roberta-large) after being trained on a **mixture of NLI datasets**.\n\nThis model can classify a pair of sentence (a premise and a claim) into 3 classes:\n- 'entailment': the claim can logically be inferred from the premise\n- 'contradiction': the claim contradicts the premise\n- 'neutral': the premise is unrelated or do not provide sufficient information to validate the claim\n\nThis model can also be used for **zero-shot classification tasks** !\nPlease take a look at this [repo](https://github.com/AntoineBlanot/zero-nlp) for more information on zero-shot classification tasks.\n\n# Usage\nThis model has been trained in an efficient way and thus cannot be load directly from HuggingFace's hub. To use that model, please follow instructions on this [repo](https://github.com/AntoineBlanot/efficient-llm).\n\nFor **zero-shot classification** tasks, please take a look at this [repo](https://github.com/AntoineBlanot/zero-nlp).\n\n# Data used for training\n- multi_nli\n- snli\n- scitail\n\n# Evaluation results\n\n| Data | Accuracy |\n|:---:|:---------:|\n| MNLI (val. m) | 0.894 |\n| MNLI (val. mm) | 0.895 |\n| SNLI (val.) | 0.920 |\n| SciTail (val.) 
| 0.934 |"},"matched_bigbio_names":{"kind":"list like","value":["SCITAIL"],"string":"[\n \"SCITAIL\"\n]"}}},{"rowIdx":2451,"cells":{"id":{"kind":"string","value":"openaccess-ai-collective/pythia-6.9b-deduped-8k"},"author":{"kind":"string","value":"openaccess-ai-collective"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","gpt_neox","text-generation","causal-lm","pythia","en","dataset:EleutherAI/the_pile_deduplicated","arxiv:2101.00027","arxiv:2201.07311","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"gpt_neox\",\n \"text-generation\",\n \"causal-lm\",\n \"pythia\",\n \"en\",\n \"dataset:EleutherAI/the_pile_deduplicated\",\n \"arxiv:2101.00027\",\n \"arxiv:2201.07311\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-06-01T04:29:40Z","string":"2023-06-01T04:29:40Z"},"last_modified":{"kind":"string","value":"2023-06-01T07:23:38+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\ndatasets:\n- EleutherAI/the_pile_deduplicated\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- pytorch\n- causal-lm\n- pythia\n---\n\n[\"Built](https://github.com/OpenAccess-AI-Collective/axolotl)\n\nThe *Pythia Scaling Suite* is a collection of models developed to facilitate \ninterpretability research. It contains two sets of eight models of sizes \n70M, 160M, 410M, 1B, 1.4B, 2.8B, 6.9B, and 12B. For each size, there are two \nmodels: one trained on the Pile, and one trained on the Pile after the dataset \nhas been globally deduplicated. All 8 model sizes are trained on the exact \nsame data, in the exact same order. 
We also provide 154 intermediate \ncheckpoints per model, hosted on Hugging Face as branches.\n\nThe Pythia model suite was designed to promote scientific \nresearch on large language models, especially interpretability research. \nDespite not centering downstream performance as a design goal, we find the \nmodels match or exceed the performance of \nsimilar and same-sized models, such as those in the OPT and GPT-Neo suites.\n\n
\n Details on previous early release and naming convention.\n\nPreviously, we released an early version of the Pythia suite to the public. \nHowever, we decided to retrain the model suite to address a few hyperparameter \ndiscrepancies. This model card lists the changes; \nsee appendix B in the Pythia paper for further discussion. We found no \ndifference in benchmark performance between the two Pythia versions. \nThe old models are \n[still available](https://huggingface.co/models?other=pythia_v0), but we \nsuggest the retrained suite if you are just starting to use Pythia.
\n**This is the current release.**\n\nPlease note that all models in the *Pythia* suite were renamed in January \n2023. For clarity, a table \ncomparing the old and new names is provided in this model card, together \nwith exact parameter counts.\n
\n
\n\n# Pythia-6.9B-deduped - 8K Context Window\n\n## Model Details\n\n- Developed by: [EleutherAI](http://eleuther.ai)\n- Model type: Transformer-based Language Model\n- Language: English\n- Learn more: [Pythia's GitHub repository](https://github.com/EleutherAI/pythia)\n for training procedure, config files, and details on how to use.\n- Library: [GPT-NeoX](https://github.com/EleutherAI/gpt-neox)\n- License: Apache 2.0\n- Contact: to ask questions about this model, join the [EleutherAI \nDiscord](https://discord.gg/zBGx3azzUn), and post them in `#release-discussion`.\n Please read the existing *Pythia* documentation before asking about it in the \n EleutherAI Discord. For general correspondence: [contact@eleuther.\n ai](mailto:contact@eleuther.ai).\n\n
\n\n| Pythia model | Non-Embedding Params | Layers | Model Dim | Heads | Batch Size | Learning Rate | Equivalent Models |\n| -----------: | -------------------: | :----: | :-------: | :---: | :--------: | :-------------------: | :--------------------: |\n| 70M | 18,915,328 | 6 | 512 | 8 | 2M | 1.0 x 10-3 | — |\n| 160M | 85,056,000 | 12 | 768 | 12 | 4M | 6.0 x 10-4 | GPT-Neo 125M, OPT-125M |\n| 410M | 302,311,424 | 24 | 1024 | 16 | 4M | 3.0 x 10-4 | OPT-350M |\n| 1.0B | 805,736,448 | 16 | 2048 | 8 | 2M | 3.0 x 10-4 | — |\n| 1.4B | 1,208,602,624 | 24 | 2048 | 16 | 4M | 2.0 x 10-4 | GPT-Neo 1.3B, OPT-1.3B |\n| 2.8B | 2,517,652,480 | 32 | 2560 | 32 | 2M | 1.6 x 10-4 | GPT-Neo 2.7B, OPT-2.7B |\n| 6.9B | 6,444,163,072 | 32 | 4096 | 32 | 2M | 1.2 x 10-4 | OPT-6.7B |\n| 12B | 11,327,027,200 | 36 | 5120 | 40 | 2M | 1.2 x 10-4 | — |\n
Engineering details for the Pythia Suite. Deduped and \nnon-deduped models of a given size have the same hyperparameters. “Equivalent” \nmodels have exactly the same architecture, and the same number of \nnon-embedding parameters.
\n
\n\n## Uses and Limitations\n\n### Intended Use\n\nThe primary intended use of Pythia is research on the behavior, functionality, \nand limitations of large language models. This suite is intended to provide \na controlled setting for performing scientific experiments. We also provide \n154 checkpoints per model: initial `step0`, 10 log-spaced checkpoints \n`step{1,2,4...512}`, and 143 evenly-spaced checkpoints from `step1000` to \n`step143000`. These checkpoints are hosted on Hugging Face as branches. Note \nthat branch `143000` corresponds exactly to the model checkpoint on the `main` \nbranch of each model.\n\nYou may also further fine-tune and adapt Pythia-6.9B-deduped for deployment, \nas long as your use is in accordance with the Apache 2.0 license. Pythia \nmodels work with the Hugging Face [Transformers \nLibrary](https://huggingface.co/docs/transformers/index). If you decide to use \npre-trained Pythia-6.9B-deduped as a basis for your fine-tuned model, please \nconduct your own risk and bias assessment. \n\n### Out-of-scope use\n\nThe Pythia Suite is **not** intended for deployment. It is not a in itself \na product and cannot be used for human-facing interactions. For example, \nthe model may generate harmful or offensive text. Please evaluate the risks\nassociated with your particular use case.\n\nPythia models are English-language only, and are not suitable for translation \nor generating text in other languages.\n\nPythia-6.9B-deduped has not been fine-tuned for downstream contexts in which \nlanguage models are commonly deployed, such as writing genre prose, \nor commercial chatbots. This means Pythia-6.9B-deduped will **not** \nrespond to a given prompt the way a product like ChatGPT does. 
This is because,\n unlike this model, ChatGPT was fine-tuned using methods such as Reinforcement \nLearning from Human Feedback (RLHF) to better “follow” human instructions.\n\n### Limitations and biases\n\nThe core functionality of a large language model is to take a string of text \nand predict the next token. The token used by the model need not produce the \nmost “accurate” text. Never rely on Pythia-6.9B-deduped to produce factually accurate \noutput.\n\nThis model was trained on [the Pile](https://pile.eleuther.ai/), a dataset \nknown to contain profanity and texts that are lewd or otherwise offensive. \nSee [Section 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a \ndiscussion of documented biases with regards to gender, religion, and race. \nPythia-6.9B-deduped may produce socially unacceptable or undesirable text, *even if* \nthe prompt itself does not include anything explicitly offensive. \n\nIf you plan on using text generated through, for example, the Hosted Inference \nAPI, we recommend having a human curate the outputs of this language model \nbefore presenting it to other people. Please inform your audience that the \ntext was generated by Pythia-6.9B-deduped.\n\n### Quickstart\n\nPythia models can be loaded and used via the following code, demonstrated here \nfor the third `pythia-70m-deduped` checkpoint:\n\n```python\nfrom transformers import GPTNeoXForCausalLM, AutoTokenizer\n\nmodel = GPTNeoXForCausalLM.from_pretrained(\n \"EleutherAI/pythia-70m-deduped\",\n revision=\"step3000\",\n cache_dir=\"./pythia-70m-deduped/step3000\",\n)\n\ntokenizer = AutoTokenizer.from_pretrained(\n \"EleutherAI/pythia-70m-deduped\",\n revision=\"step3000\",\n cache_dir=\"./pythia-70m-deduped/step3000\",\n)\n\ninputs = tokenizer(\"Hello, I am\", return_tensors=\"pt\")\ntokens = model.generate(**inputs)\ntokenizer.decode(tokens[0])\n```\n\nRevision/branch `step143000` corresponds exactly to the model checkpoint on \nthe `main` branch of each model.
\nFor more information on how to use all Pythia models, see [documentation on \nGitHub](https://github.com/EleutherAI/pythia).\n\n## Training\n\n### Training data\n\nPythia-6.9B-deduped was trained on the Pile **after the dataset has been globally \ndeduplicated**.
\n[The Pile](https://pile.eleuther.ai/) is a 825GiB general-purpose dataset in \nEnglish. It was created by EleutherAI specifically for training large language \nmodels. It contains texts from 22 diverse sources, roughly broken down into \nfive categories: academic writing (e.g. arXiv), internet (e.g. CommonCrawl), \nprose (e.g. Project Gutenberg), dialogue (e.g. YouTube subtitles), and \nmiscellaneous (e.g. GitHub, Enron Emails). See [the Pile \npaper](https://arxiv.org/abs/2101.00027) for a breakdown of all data sources, \nmethodology, and a discussion of ethical implications. Consult [the \ndatasheet](https://arxiv.org/abs/2201.07311) for more detailed documentation \nabout the Pile and its component datasets. The Pile can be downloaded from \nthe [official website](https://pile.eleuther.ai/), or from a [community \nmirror](https://the-eye.eu/public/AI/pile/).\n\n### Training procedure\n\nAll models were trained on the exact same data, in the exact same order. Each \nmodel saw 299,892,736,000 tokens during training, and 143 checkpoints for each \nmodel are saved every 2,097,152,000 tokens, spaced evenly throughout training, \nfrom `step1000` to `step143000` (which is the same as `main`). In addition, we \nalso provide frequent early checkpoints: `step0` and `step{1,2,4...512}`.\nThis corresponds to training for just under 1 epoch on the Pile for \nnon-deduplicated models, and about 1.5 epochs on the deduplicated Pile.\n\nAll *Pythia* models trained for 143000 steps at a batch size \nof 2M (2,097,152 tokens).
\nSee [GitHub](https://github.com/EleutherAI/pythia) for more details on training\n procedure, including [how to reproduce \n it](https://github.com/EleutherAI/pythia/blob/main/README.md#reproducing-training).
\nPythia uses the same tokenizer as [GPT-NeoX-\n20B](https://huggingface.co/EleutherAI/gpt-neox-20b).\n\n## Evaluations\n\nAll 16 *Pythia* models were evaluated using the [LM Evaluation \nHarness](https://github.com/EleutherAI/lm-evaluation-harness). You can access \nthe results by model and step at `results/json/*` in the [GitHub \nrepository](https://github.com/EleutherAI/pythia/tree/main/results/json/).
\nExpand the sections below to see plots of evaluation results for all \nPythia and Pythia-deduped models compared with OPT and BLOOM.\n\n
\n LAMBADA – OpenAI\n \n
\n\n
\n Physical Interaction: Question Answering (PIQA)\n \n
\n\n
\n WinoGrande\n \n
\n\n
\n AI2 Reasoning Challenge—Easy Set\n \n
\n\n
\n SciQ\n \n
\n\n## Changelog\n\nThis section compares differences between previously released \n[Pythia v0](https://huggingface.co/models?other=pythia_v0) and the current \nmodels. See Appendix B of the Pythia paper for further discussion of these \nchanges and the motivation behind them. We found that retraining Pythia had no \nimpact on benchmark performance.\n\n- All model sizes are now trained with uniform batch size of 2M tokens. \nPreviously, the models of size 160M, 410M, and 1.4B parameters were trained \nwith batch sizes of 4M tokens.\n- We added checkpoints at initialization (step 0) and steps {1,2,4,8,16,32,64,\n128,256,512} in addition to every 1000 training steps.\n- Flash Attention was used in the new retrained suite.\n- We remedied a minor inconsistency that existed in the original suite: all \nmodels of size 2.8B parameters or smaller had a learning rate (LR) schedule \nwhich decayed to a minimum LR of 10% the starting LR rate, but the 6.9B and \n12B models all used an LR schedule which decayed to a minimum LR of 0. In \nthe redone training runs, we rectified this inconsistency: all models now were \ntrained with LR decaying to a minimum of 0.1× their maximum LR.\n\n### Naming convention and parameter count\n\n*Pythia* models were renamed in January 2023. It is possible that the old \nnaming convention still persists in some documentation by accident. The \ncurrent naming convention (70M, 160M, etc.) is based on total parameter count. \n\n
\n \n| current Pythia suffix | old suffix | total params | non-embedding params |\n| --------------------: | ---------: | -------------: | -------------------: |\n| 70M | 19M | 70,426,624 | 18,915,328 |\n| 160M | 125M | 162,322,944 | 85,056,000 |\n| 410M | 350M | 405,334,016 | 302,311,424 |\n| 1B | 800M | 1,011,781,632 | 805,736,448 |\n| 1.4B | 1.3B | 1,414,647,808 | 1,208,602,624 |\n| 2.8B | 2.7B | 2,775,208,960 | 2,517,652,480 |\n| 6.9B | 6.7B | 6,857,302,016 | 6,444,163,072 |\n| 12B | 13B | 11,846,072,320 | 11,327,027,200 |\n
"},"matched_bigbio_names":{"kind":"list like","value":["SCIQ"],"string":"[\n \"SCIQ\"\n]"}}},{"rowIdx":2452,"cells":{"id":{"kind":"string","value":"Tune-A-Video-library/df-cpt-mo-di-bear-guitar"},"author":{"kind":"string","value":"Tune-A-Video-library"},"task_category":{"kind":"string","value":"text-to-video"},"tags":{"kind":"list like","value":["diffusers","tune-a-video","text-to-video","arxiv:2212.11565","arxiv:2112.10752","base_model:nitrosocke/mo-di-diffusion","base_model:finetune:nitrosocke/mo-di-diffusion","license:creativeml-openrail-m","diffusers:TuneAVideoPipeline","region:us"],"string":"[\n \"diffusers\",\n \"tune-a-video\",\n \"text-to-video\",\n \"arxiv:2212.11565\",\n \"arxiv:2112.10752\",\n \"base_model:nitrosocke/mo-di-diffusion\",\n \"base_model:finetune:nitrosocke/mo-di-diffusion\",\n \"license:creativeml-openrail-m\",\n \"diffusers:TuneAVideoPipeline\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-06-09T10:05:29Z","string":"2023-06-09T10:05:29Z"},"last_modified":{"kind":"string","value":"2023-09-24T03:58:29+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":2,"string":"2"},"README":{"kind":"string","value":"---\nbase_model: nitrosocke/mo-di-diffusion\nlicense: creativeml-openrail-m\ntags:\n- tune-a-video\n- text-to-video\n- diffusers\ntraining_prompt: A bear is playing guitar.\ninference: false\n---\n\n# Tune-A-Video - Modern Disney\n\n## Model Description\nThis is a diffusers compatible checkpoint. 
When used with DiffusionPipeline, returns an instance of TuneAVideoPipeline\n\n>df-cpt is used to indicate that its a diffusers compatible equivalent of Tune-A-Video-library/mo-di-bear-guitar .\n\n- Base model: [nitrosocke/mo-di-diffusion](https://huggingface.co/nitrosocke/mo-di-diffusion)\n- Training prompt: a bear is playing guitar.\n![sample-train](samples/train.gif)\n\n## Samples\n\n![sample-500](samples/princess.gif)\nTest prompt: \"A princess playing a guitar, modern disney style\"\n\n## Usage\n\n### Loading with a pre-existing Text2Image checkpoint\n```python\nimport torch\nfrom diffusers import TuneAVideoPipeline, DDIMScheduler, UNet3DConditionModel\nfrom diffusers.utils import export_to_video\nfrom PIL import Image\n\n# Use any pretrained Text2Image checkpoint based on stable diffusion\npretrained_model_path = \"nitrosocke/mo-di-diffusion\"\nunet = UNet3DConditionModel.from_pretrained(\n \"Tune-A-Video-library/df-cpt-mo-di-bear-guitar\", subfolder=\"unet\", torch_dtype=torch.float16\n).to(\"cuda\")\n\npipe = TuneAVideoPipeline.from_pretrained(pretrained_model_path, unet=unet, torch_dtype=torch.float16).to(\"cuda\")\n\nprompt = \"A princess playing a guitar, modern disney style\"\ngenerator = torch.Generator(device=\"cuda\").manual_seed(42)\n\nvideo_frames = pipe(prompt, video_length=3, generator=generator, num_inference_steps=50, output_type=\"np\").frames\n\n# Saving to gif.\npil_frames = [Image.fromarray(frame) for frame in video_frames]\nduration = len(pil_frames) / 8\npil_frames[0].save(\n \"animation.gif\",\n save_all=True,\n append_images=pil_frames[1:], # append rest of the images\n duration=duration * 1000, # in milliseconds\n loop=0,\n)\n\n# Saving to video\nvideo_path = export_to_video(video_frames)\n```\n### Loading a saved Tune-A-Video checkpoint\n```python\nimport torch\nfrom diffusers import DiffusionPipeline, DDIMScheduler\nfrom diffusers.utils import export_to_video\nfrom PIL import Image\n\npipe = DiffusionPipeline.from_pretrained(\n 
\"Tune-A-Video-library/df-cpt-mo-di-bear-guitar\", torch_dtype=torch.float16\n).to(\"cuda\")\n\nprompt = \"A princess playing a guitar, modern disney style\"\ngenerator = torch.Generator(device=\"cuda\").manual_seed(42)\n\nvideo_frames = pipe(prompt, video_length=3, generator=generator, num_inference_steps=50, output_type=\"np\").frames\n\n# Saving to gif.\npil_frames = [Image.fromarray(frame) for frame in video_frames]\nduration = len(pil_frames) / 8\npil_frames[0].save(\n \"animation.gif\",\n save_all=True,\n append_images=pil_frames[1:], # append rest of the images\n duration=duration * 1000, # in milliseconds\n loop=0,\n)\n\n# Saving to video\nvideo_path = export_to_video(video_frames)\n```\n\n## Related Papers:\n- [Tune-A-Video](https://arxiv.org/abs/2212.11565): One-Shot Tuning of Image Diffusion Models for Text-to-Video Generation\n- [Stable Diffusion](https://arxiv.org/abs/2112.10752): High-Resolution Image Synthesis with Latent Diffusion Models\n"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2453,"cells":{"id":{"kind":"string","value":"IIC/roberta-large-bne-meddocan"},"author":{"kind":"string","value":"IIC"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","safetensors","roberta","text-classification","biomedical","clinical","spanish","roberta-large-bne","token-classification","es","dataset:bigbio/meddocan","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"safetensors\",\n \"roberta\",\n \"text-classification\",\n \"biomedical\",\n \"clinical\",\n \"spanish\",\n \"roberta-large-bne\",\n \"token-classification\",\n \"es\",\n \"dataset:bigbio/meddocan\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-06-21T15:44:31Z","string":"2023-06-21T15:44:31Z"},"last_modified":{"kind":"string","value":"2023-07-18T07:10:39+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- bigbio/meddocan\nlanguage: es\nlicense: apache-2.0\nmetrics:\n- f1\npipeline_tag: token-classification\ntags:\n- biomedical\n- clinical\n- spanish\n- roberta-large-bne\nmodel-index:\n- name: IIC/roberta-large-bne-meddocan\n results:\n - task:\n type: token-classification\n dataset:\n name: meddocan\n type: bigbio/meddocan\n split: test\n metrics:\n - type: f1\n value: 0.977\n name: f1\n---\n\n# roberta-large-bne-meddocan\n\nThis model is a finetuned version of roberta-large-bne for the meddocan dataset used in a benchmark in the paper TODO. The model has a F1 of 0.977\n\nPlease refer to the original publication for more information TODO LINK\n\n## Parameters used\n\n| parameter | Value |\n|-------------------------|:-----:|\n| batch size | 16 |\n| learning rate | 3e-05 |\n| classifier dropout | 0.2 |\n| warmup ratio | 0 |\n| warmup steps | 0 |\n| weight decay | 0 |\n| optimizer | AdamW |\n| epochs | 10 |\n| early stopping patience | 3 |\n\n\n## BibTeX entry and citation info\n\n```bibtex\nTODO\n```\n\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDDOCAN"],"string":"[\n \"MEDDOCAN\"\n]"}}},{"rowIdx":2454,"cells":{"id":{"kind":"string","value":"IIC/XLM_R_Galen-meddocan"},"author":{"kind":"string","value":"IIC"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","xlm-roberta","text-classification","biomedical","clinical","spanish","XLM_R_Galen","token-classification","es","dataset:bigbio/meddocan","license:mit","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n 
\"xlm-roberta\",\n \"text-classification\",\n \"biomedical\",\n \"clinical\",\n \"spanish\",\n \"XLM_R_Galen\",\n \"token-classification\",\n \"es\",\n \"dataset:bigbio/meddocan\",\n \"license:mit\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-06-21T15:51:34Z","string":"2023-06-21T15:51:34Z"},"last_modified":{"kind":"string","value":"2023-06-21T15:53:44+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- bigbio/meddocan\nlanguage: es\nlicense: mit\nmetrics:\n- f1\npipeline_tag: token-classification\ntags:\n- biomedical\n- clinical\n- spanish\n- XLM_R_Galen\nmodel-index:\n- name: IIC/XLM_R_Galen-meddocan\n results:\n - task:\n type: token-classification\n dataset:\n name: meddocan\n type: bigbio/meddocan\n split: test\n metrics:\n - type: f1\n value: 0.947\n name: f1\n---\n\n# XLM_R_Galen-meddocan\n\nThis model is a finetuned version of XLM_R_Galen for the meddocan dataset used in a benchmark in the paper TODO. 
The model has a F1 of 0.947\n\nPlease refer to the original publication for more information TODO LINK\n\n## Parameters used\n\n| parameter | Value |\n|-------------------------|:-----:|\n| batch size | 16 |\n| learning rate | 4e-05 |\n| classifier dropout | 0.2 |\n| warmup ratio | 0 |\n| warmup steps | 0 |\n| weight decay | 0 |\n| optimizer | AdamW |\n| epochs | 10 |\n| early stopping patience | 3 |\n\n\n## BibTeX entry and citation info\n\n```bibtex\nTODO\n```\n\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDDOCAN"],"string":"[\n \"MEDDOCAN\"\n]"}}},{"rowIdx":2455,"cells":{"id":{"kind":"string","value":"zwellington/bart-pubhealth-expanded-hi-grad"},"author":{"kind":"string","value":"zwellington"},"task_category":{"kind":"string","value":"text2text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","bart","text2text-generation","generated_from_trainer","dataset:clupubhealth","base_model:facebook/bart-large","base_model:finetune:facebook/bart-large","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"bart\",\n \"text2text-generation\",\n \"generated_from_trainer\",\n \"dataset:clupubhealth\",\n \"base_model:facebook/bart-large\",\n \"base_model:finetune:facebook/bart-large\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-08-08T15:25:28Z","string":"2023-08-08T15:25:28Z"},"last_modified":{"kind":"string","value":"2023-08-09T12:17:02+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: facebook/bart-large\ndatasets:\n- clupubhealth\nlicense: apache-2.0\nmetrics:\n- rouge\ntags:\n- generated_from_trainer\nmodel-index:\n- name: bart-pubhealth-expanded-hi-grad\n results:\n - task:\n type: text2text-generation\n 
name: Sequence-to-sequence Language Modeling\n dataset:\n name: clupubhealth\n type: clupubhealth\n config: expanded\n split: test\n args: expanded\n metrics:\n - type: rouge\n value: 30.2592\n name: Rouge1\n---\n\n\n\n# bart-pubhealth-expanded-hi-grad\n\nThis model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on the clupubhealth dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 2.0581\n- Rouge1: 30.2592\n- Rouge2: 11.7027\n- Rougel: 24.1706\n- Rougelsum: 24.3596\n- Gen Len: 19.95\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 16\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 950\n- total_train_batch_size: 15200\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 10\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |\n|:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|\n| 3.7893 | 0.49 | 2 | 2.3943 | 20.5187 | 5.4764 | 15.9378 | 16.2797 | 20.0 |\n| 3.4045 | 0.98 | 4 | 2.1599 | 24.0858 | 7.8207 | 19.0412 | 19.1609 | 19.88 |\n| 3.2488 | 1.47 | 6 | 2.1026 | 27.3466 | 9.369 | 21.1419 | 21.3136 | 19.865 |\n| 3.1823 | 1.96 | 8 | 2.1324 | 28.825 | 9.6007 | 22.0963 | 22.3776 | 19.82 |\n| 3.1263 | 2.44 | 10 | 2.1105 | 29.2694 | 10.5001 | 23.2842 | 23.5473 | 19.85 |\n| 3.0834 | 2.93 | 12 | 2.0837 | 28.5975 | 10.2016 | 22.048 | 22.1341 | 19.915 |\n| 3.0283 | 3.42 | 14 | 2.0773 | 28.5813 | 10.447 | 22.7456 | 22.8496 | 19.91 |\n| 3.0301 | 3.91 | 16 | 2.0730 | 30.1049 | 11.4375 | 24.083 | 24.3045 | 19.945 |\n| 2.9851 | 4.4 | 18 | 
2.0775 | 29.2224 | 10.2722 | 22.7019 | 23.0038 | 19.95 |\n| 2.9769 | 4.89 | 20 | 2.0777 | 29.6981 | 10.7044 | 23.2487 | 23.5232 | 19.96 |\n| 2.9623 | 5.38 | 22 | 2.0711 | 29.0438 | 10.5105 | 23.1751 | 23.415 | 19.92 |\n| 2.9421 | 5.87 | 24 | 2.0676 | 29.096 | 10.6599 | 23.1381 | 23.3765 | 19.985 |\n| 2.9234 | 6.36 | 26 | 2.0646 | 29.6561 | 10.9096 | 23.2384 | 23.4265 | 19.985 |\n| 2.9107 | 6.85 | 28 | 2.0616 | 29.7134 | 11.1686 | 23.272 | 23.4475 | 19.985 |\n| 2.9077 | 7.33 | 30 | 2.0593 | 29.5055 | 11.0256 | 23.4406 | 23.6653 | 19.955 |\n| 2.9072 | 7.82 | 32 | 2.0585 | 30.0504 | 11.433 | 23.9176 | 24.1728 | 19.95 |\n| 2.8951 | 8.31 | 34 | 2.0583 | 29.9401 | 11.602 | 23.948 | 24.1323 | 19.95 |\n| 2.8955 | 8.8 | 36 | 2.0584 | 30.1158 | 11.4745 | 24.0509 | 24.2465 | 19.94 |\n| 2.8774 | 9.29 | 38 | 2.0582 | 30.0476 | 11.4465 | 23.8956 | 24.0527 | 19.945 |\n| 2.8851 | 9.78 | 40 | 2.0581 | 30.2592 | 11.7027 | 24.1706 | 24.3596 | 19.95 |\n\n\n### Framework versions\n\n- Transformers 4.31.0\n- Pytorch 2.0.1+cu117\n- Datasets 2.7.1\n- Tokenizers 0.13.2\n"},"matched_bigbio_names":{"kind":"list like","value":["PUBHEALTH"],"string":"[\n \"PUBHEALTH\"\n]"}}},{"rowIdx":2456,"cells":{"id":{"kind":"string","value":"jncraton/gte-small-ct2-int8"},"author":{"kind":"string","value":"jncraton"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","mteb","sentence-similarity","Sentence Transformers","en","arxiv:2308.03281","license:mit","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"mteb\",\n \"sentence-similarity\",\n \"Sentence Transformers\",\n \"en\",\n \"arxiv:2308.03281\",\n \"license:mit\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-08-16T20:38:41Z","string":"2023-08-16T20:38:41Z"},"last_modified":{"kind":"string","value":"2023-08-16T20:48:49+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlanguage:\n- en\nlicense: mit\ntags:\n- mteb\n- sentence-similarity\n- sentence-transformers\n- Sentence Transformers\nmodel-index:\n- name: gte-small\n results:\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (en)\n type: mteb/amazon_counterfactual\n config: en\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 73.22388059701493\n - type: ap\n value: 36.09895941426988\n - type: f1\n value: 67.3205651539195\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonPolarityClassification\n type: mteb/amazon_polarity\n config: default\n split: test\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\n metrics:\n - type: accuracy\n value: 91.81894999999999\n - type: ap\n value: 88.5240138417305\n - type: f1\n value: 91.80367382706962\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (en)\n type: mteb/amazon_reviews_multi\n config: en\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 48.032\n - type: f1\n value: 47.4490665674719\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna\n type: arguana\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 30.725\n - type: map_at_10\n value: 46.604\n - type: map_at_100\n value: 47.535\n - type: map_at_1000\n value: 47.538000000000004\n - type: map_at_3\n value: 41.833\n - type: map_at_5\n value: 44.61\n - type: mrr_at_1\n value: 31.223\n - type: mrr_at_10\n value: 46.794000000000004\n - type: mrr_at_100\n value: 47.725\n - type: mrr_at_1000\n value: 
47.727000000000004\n - type: mrr_at_3\n value: 42.07\n - type: mrr_at_5\n value: 44.812000000000005\n - type: ndcg_at_1\n value: 30.725\n - type: ndcg_at_10\n value: 55.440999999999995\n - type: ndcg_at_100\n value: 59.134\n - type: ndcg_at_1000\n value: 59.199\n - type: ndcg_at_3\n value: 45.599000000000004\n - type: ndcg_at_5\n value: 50.637\n - type: precision_at_1\n value: 30.725\n - type: precision_at_10\n value: 8.364\n - type: precision_at_100\n value: 0.991\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_3\n value: 18.848000000000003\n - type: precision_at_5\n value: 13.77\n - type: recall_at_1\n value: 30.725\n - type: recall_at_10\n value: 83.64200000000001\n - type: recall_at_100\n value: 99.14699999999999\n - type: recall_at_1000\n value: 99.644\n - type: recall_at_3\n value: 56.543\n - type: recall_at_5\n value: 68.848\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: v_measure\n value: 47.90178078197678\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: v_measure\n value: 40.25728393431922\n - task:\n type: Reranking\n dataset:\n name: MTEB AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: map\n value: 61.720297062897764\n - type: mrr\n value: 75.24139295607439\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cos_sim_pearson\n value: 89.43527309184616\n - type: cos_sim_spearman\n value: 88.17128615100206\n - type: euclidean_pearson\n value: 87.89922623089282\n - 
type: euclidean_spearman\n value: 87.96104039655451\n - type: manhattan_pearson\n value: 87.9818290932077\n - type: manhattan_spearman\n value: 88.00923426576885\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 84.0844155844156\n - type: f1\n value: 84.01485017302213\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: v_measure\n value: 38.36574769259432\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: v_measure\n value: 35.4857033165287\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackAndroidRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 30.261\n - type: map_at_10\n value: 42.419000000000004\n - type: map_at_100\n value: 43.927\n - type: map_at_1000\n value: 44.055\n - type: map_at_3\n value: 38.597\n - type: map_at_5\n value: 40.701\n - type: mrr_at_1\n value: 36.91\n - type: mrr_at_10\n value: 48.02\n - type: mrr_at_100\n value: 48.658\n - type: mrr_at_1000\n value: 48.708\n - type: mrr_at_3\n value: 44.945\n - type: mrr_at_5\n value: 46.705000000000005\n - type: ndcg_at_1\n value: 36.91\n - type: ndcg_at_10\n value: 49.353\n - type: ndcg_at_100\n value: 54.456\n - type: ndcg_at_1000\n value: 56.363\n - type: ndcg_at_3\n value: 43.483\n - type: ndcg_at_5\n value: 46.150999999999996\n - type: precision_at_1\n value: 36.91\n - type: precision_at_10\n value: 9.700000000000001\n - type: precision_at_100\n value: 1.557\n - type: precision_at_1000\n value: 0.202\n - type: 
precision_at_3\n value: 21.078\n - type: precision_at_5\n value: 15.421999999999999\n - type: recall_at_1\n value: 30.261\n - type: recall_at_10\n value: 63.242\n - type: recall_at_100\n value: 84.09100000000001\n - type: recall_at_1000\n value: 96.143\n - type: recall_at_3\n value: 46.478\n - type: recall_at_5\n value: 53.708\n - type: map_at_1\n value: 31.145\n - type: map_at_10\n value: 40.996\n - type: map_at_100\n value: 42.266999999999996\n - type: map_at_1000\n value: 42.397\n - type: map_at_3\n value: 38.005\n - type: map_at_5\n value: 39.628\n - type: mrr_at_1\n value: 38.344\n - type: mrr_at_10\n value: 46.827000000000005\n - type: mrr_at_100\n value: 47.446\n - type: mrr_at_1000\n value: 47.489\n - type: mrr_at_3\n value: 44.448\n - type: mrr_at_5\n value: 45.747\n - type: ndcg_at_1\n value: 38.344\n - type: ndcg_at_10\n value: 46.733000000000004\n - type: ndcg_at_100\n value: 51.103\n - type: ndcg_at_1000\n value: 53.075\n - type: ndcg_at_3\n value: 42.366\n - type: ndcg_at_5\n value: 44.242\n - type: precision_at_1\n value: 38.344\n - type: precision_at_10\n value: 8.822000000000001\n - type: precision_at_100\n value: 1.417\n - type: precision_at_1000\n value: 0.187\n - type: precision_at_3\n value: 20.403\n - type: precision_at_5\n value: 14.306\n - type: recall_at_1\n value: 31.145\n - type: recall_at_10\n value: 56.909\n - type: recall_at_100\n value: 75.274\n - type: recall_at_1000\n value: 87.629\n - type: recall_at_3\n value: 43.784\n - type: recall_at_5\n value: 49.338\n - type: map_at_1\n value: 38.83\n - type: map_at_10\n value: 51.553000000000004\n - type: map_at_100\n value: 52.581\n - type: map_at_1000\n value: 52.638\n - type: map_at_3\n value: 48.112\n - type: map_at_5\n value: 50.095\n - type: mrr_at_1\n value: 44.513999999999996\n - type: mrr_at_10\n value: 54.998000000000005\n - type: mrr_at_100\n value: 55.650999999999996\n - type: mrr_at_1000\n value: 55.679\n - type: mrr_at_3\n value: 52.602000000000004\n - type: mrr_at_5\n value: 
53.931\n - type: ndcg_at_1\n value: 44.513999999999996\n - type: ndcg_at_10\n value: 57.67400000000001\n - type: ndcg_at_100\n value: 61.663999999999994\n - type: ndcg_at_1000\n value: 62.743\n - type: ndcg_at_3\n value: 51.964\n - type: ndcg_at_5\n value: 54.773\n - type: precision_at_1\n value: 44.513999999999996\n - type: precision_at_10\n value: 9.423\n - type: precision_at_100\n value: 1.2309999999999999\n - type: precision_at_1000\n value: 0.13699999999999998\n - type: precision_at_3\n value: 23.323\n - type: precision_at_5\n value: 16.163\n - type: recall_at_1\n value: 38.83\n - type: recall_at_10\n value: 72.327\n - type: recall_at_100\n value: 89.519\n - type: recall_at_1000\n value: 97.041\n - type: recall_at_3\n value: 57.206\n - type: recall_at_5\n value: 63.88399999999999\n - type: map_at_1\n value: 25.484\n - type: map_at_10\n value: 34.527\n - type: map_at_100\n value: 35.661\n - type: map_at_1000\n value: 35.739\n - type: map_at_3\n value: 32.199\n - type: map_at_5\n value: 33.632\n - type: mrr_at_1\n value: 27.458\n - type: mrr_at_10\n value: 36.543\n - type: mrr_at_100\n value: 37.482\n - type: mrr_at_1000\n value: 37.543\n - type: mrr_at_3\n value: 34.256\n - type: mrr_at_5\n value: 35.618\n - type: ndcg_at_1\n value: 27.458\n - type: ndcg_at_10\n value: 39.396\n - type: ndcg_at_100\n value: 44.742\n - type: ndcg_at_1000\n value: 46.708\n - type: ndcg_at_3\n value: 34.817\n - type: ndcg_at_5\n value: 37.247\n - type: precision_at_1\n value: 27.458\n - type: precision_at_10\n value: 5.976999999999999\n - type: precision_at_100\n value: 0.907\n - type: precision_at_1000\n value: 0.11100000000000002\n - type: precision_at_3\n value: 14.878\n - type: precision_at_5\n value: 10.35\n - type: recall_at_1\n value: 25.484\n - type: recall_at_10\n value: 52.317\n - type: recall_at_100\n value: 76.701\n - type: recall_at_1000\n value: 91.408\n - type: recall_at_3\n value: 40.043\n - type: recall_at_5\n value: 45.879\n - type: map_at_1\n value: 16.719\n - 
type: map_at_10\n value: 25.269000000000002\n - type: map_at_100\n value: 26.442\n - type: map_at_1000\n value: 26.557\n - type: map_at_3\n value: 22.56\n - type: map_at_5\n value: 24.082\n - type: mrr_at_1\n value: 20.896\n - type: mrr_at_10\n value: 29.982999999999997\n - type: mrr_at_100\n value: 30.895\n - type: mrr_at_1000\n value: 30.961\n - type: mrr_at_3\n value: 27.239\n - type: mrr_at_5\n value: 28.787000000000003\n - type: ndcg_at_1\n value: 20.896\n - type: ndcg_at_10\n value: 30.814000000000004\n - type: ndcg_at_100\n value: 36.418\n - type: ndcg_at_1000\n value: 39.182\n - type: ndcg_at_3\n value: 25.807999999999996\n - type: ndcg_at_5\n value: 28.143\n - type: precision_at_1\n value: 20.896\n - type: precision_at_10\n value: 5.821\n - type: precision_at_100\n value: 0.991\n - type: precision_at_1000\n value: 0.136\n - type: precision_at_3\n value: 12.562000000000001\n - type: precision_at_5\n value: 9.254\n - type: recall_at_1\n value: 16.719\n - type: recall_at_10\n value: 43.155\n - type: recall_at_100\n value: 67.831\n - type: recall_at_1000\n value: 87.617\n - type: recall_at_3\n value: 29.259\n - type: recall_at_5\n value: 35.260999999999996\n - type: map_at_1\n value: 29.398999999999997\n - type: map_at_10\n value: 39.876\n - type: map_at_100\n value: 41.205999999999996\n - type: map_at_1000\n value: 41.321999999999996\n - type: map_at_3\n value: 36.588\n - type: map_at_5\n value: 38.538\n - type: mrr_at_1\n value: 35.9\n - type: mrr_at_10\n value: 45.528\n - type: mrr_at_100\n value: 46.343\n - type: mrr_at_1000\n value: 46.388\n - type: mrr_at_3\n value: 42.862\n - type: mrr_at_5\n value: 44.440000000000005\n - type: ndcg_at_1\n value: 35.9\n - type: ndcg_at_10\n value: 45.987\n - type: ndcg_at_100\n value: 51.370000000000005\n - type: ndcg_at_1000\n value: 53.400000000000006\n - type: ndcg_at_3\n value: 40.841\n - type: ndcg_at_5\n value: 43.447\n - type: precision_at_1\n value: 35.9\n - type: precision_at_10\n value: 8.393\n - type: 
precision_at_100\n value: 1.283\n - type: precision_at_1000\n value: 0.166\n - type: precision_at_3\n value: 19.538\n - type: precision_at_5\n value: 13.975000000000001\n - type: recall_at_1\n value: 29.398999999999997\n - type: recall_at_10\n value: 58.361\n - type: recall_at_100\n value: 81.081\n - type: recall_at_1000\n value: 94.004\n - type: recall_at_3\n value: 43.657000000000004\n - type: recall_at_5\n value: 50.519999999999996\n - type: map_at_1\n value: 21.589\n - type: map_at_10\n value: 31.608999999999998\n - type: map_at_100\n value: 33.128\n - type: map_at_1000\n value: 33.247\n - type: map_at_3\n value: 28.671999999999997\n - type: map_at_5\n value: 30.233999999999998\n - type: mrr_at_1\n value: 26.712000000000003\n - type: mrr_at_10\n value: 36.713\n - type: mrr_at_100\n value: 37.713\n - type: mrr_at_1000\n value: 37.771\n - type: mrr_at_3\n value: 34.075\n - type: mrr_at_5\n value: 35.451\n - type: ndcg_at_1\n value: 26.712000000000003\n - type: ndcg_at_10\n value: 37.519999999999996\n - type: ndcg_at_100\n value: 43.946000000000005\n - type: ndcg_at_1000\n value: 46.297\n - type: ndcg_at_3\n value: 32.551\n - type: ndcg_at_5\n value: 34.660999999999994\n - type: precision_at_1\n value: 26.712000000000003\n - type: precision_at_10\n value: 7.066\n - type: precision_at_100\n value: 1.216\n - type: precision_at_1000\n value: 0.157\n - type: precision_at_3\n value: 15.906\n - type: precision_at_5\n value: 11.437999999999999\n - type: recall_at_1\n value: 21.589\n - type: recall_at_10\n value: 50.090999999999994\n - type: recall_at_100\n value: 77.43900000000001\n - type: recall_at_1000\n value: 93.35900000000001\n - type: recall_at_3\n value: 36.028999999999996\n - type: recall_at_5\n value: 41.698\n - type: map_at_1\n value: 25.121666666666663\n - type: map_at_10\n value: 34.46258333333334\n - type: map_at_100\n value: 35.710499999999996\n - type: map_at_1000\n value: 35.82691666666666\n - type: map_at_3\n value: 31.563249999999996\n - type: 
map_at_5\n value: 33.189750000000004\n - type: mrr_at_1\n value: 29.66441666666667\n - type: mrr_at_10\n value: 38.5455\n - type: mrr_at_100\n value: 39.39566666666667\n - type: mrr_at_1000\n value: 39.45325\n - type: mrr_at_3\n value: 36.003333333333345\n - type: mrr_at_5\n value: 37.440916666666666\n - type: ndcg_at_1\n value: 29.66441666666667\n - type: ndcg_at_10\n value: 39.978416666666675\n - type: ndcg_at_100\n value: 45.278666666666666\n - type: ndcg_at_1000\n value: 47.52275\n - type: ndcg_at_3\n value: 35.00058333333334\n - type: ndcg_at_5\n value: 37.34908333333333\n - type: precision_at_1\n value: 29.66441666666667\n - type: precision_at_10\n value: 7.094500000000001\n - type: precision_at_100\n value: 1.1523333333333332\n - type: precision_at_1000\n value: 0.15358333333333332\n - type: precision_at_3\n value: 16.184166666666663\n - type: precision_at_5\n value: 11.6005\n - type: recall_at_1\n value: 25.121666666666663\n - type: recall_at_10\n value: 52.23975000000001\n - type: recall_at_100\n value: 75.48408333333333\n - type: recall_at_1000\n value: 90.95316666666668\n - type: recall_at_3\n value: 38.38458333333333\n - type: recall_at_5\n value: 44.39933333333333\n - type: map_at_1\n value: 23.569000000000003\n - type: map_at_10\n value: 30.389\n - type: map_at_100\n value: 31.396\n - type: map_at_1000\n value: 31.493\n - type: map_at_3\n value: 28.276\n - type: map_at_5\n value: 29.459000000000003\n - type: mrr_at_1\n value: 26.534000000000002\n - type: mrr_at_10\n value: 33.217999999999996\n - type: mrr_at_100\n value: 34.054\n - type: mrr_at_1000\n value: 34.12\n - type: mrr_at_3\n value: 31.058000000000003\n - type: mrr_at_5\n value: 32.330999999999996\n - type: ndcg_at_1\n value: 26.534000000000002\n - type: ndcg_at_10\n value: 34.608\n - type: ndcg_at_100\n value: 39.391999999999996\n - type: ndcg_at_1000\n value: 41.837999999999994\n - type: ndcg_at_3\n value: 30.564999999999998\n - type: ndcg_at_5\n value: 32.509\n - type: precision_at_1\n 
value: 26.534000000000002\n - type: precision_at_10\n value: 5.414\n - type: precision_at_100\n value: 0.847\n - type: precision_at_1000\n value: 0.11399999999999999\n - type: precision_at_3\n value: 12.986\n - type: precision_at_5\n value: 9.202\n - type: recall_at_1\n value: 23.569000000000003\n - type: recall_at_10\n value: 44.896\n - type: recall_at_100\n value: 66.476\n - type: recall_at_1000\n value: 84.548\n - type: recall_at_3\n value: 33.79\n - type: recall_at_5\n value: 38.512\n - type: map_at_1\n value: 16.36\n - type: map_at_10\n value: 23.57\n - type: map_at_100\n value: 24.698999999999998\n - type: map_at_1000\n value: 24.834999999999997\n - type: map_at_3\n value: 21.093\n - type: map_at_5\n value: 22.418\n - type: mrr_at_1\n value: 19.718\n - type: mrr_at_10\n value: 27.139999999999997\n - type: mrr_at_100\n value: 28.097\n - type: mrr_at_1000\n value: 28.177999999999997\n - type: mrr_at_3\n value: 24.805\n - type: mrr_at_5\n value: 26.121\n - type: ndcg_at_1\n value: 19.718\n - type: ndcg_at_10\n value: 28.238999999999997\n - type: ndcg_at_100\n value: 33.663\n - type: ndcg_at_1000\n value: 36.763\n - type: ndcg_at_3\n value: 23.747\n - type: ndcg_at_5\n value: 25.796000000000003\n - type: precision_at_1\n value: 19.718\n - type: precision_at_10\n value: 5.282\n - type: precision_at_100\n value: 0.9390000000000001\n - type: precision_at_1000\n value: 0.13899999999999998\n - type: precision_at_3\n value: 11.264000000000001\n - type: precision_at_5\n value: 8.341\n - type: recall_at_1\n value: 16.36\n - type: recall_at_10\n value: 38.669\n - type: recall_at_100\n value: 63.184\n - type: recall_at_1000\n value: 85.33800000000001\n - type: recall_at_3\n value: 26.214\n - type: recall_at_5\n value: 31.423000000000002\n - type: map_at_1\n value: 25.618999999999996\n - type: map_at_10\n value: 34.361999999999995\n - type: map_at_100\n value: 35.534\n - type: map_at_1000\n value: 35.634\n - type: map_at_3\n value: 31.402\n - type: map_at_5\n value: 
32.815\n - type: mrr_at_1\n value: 30.037000000000003\n - type: mrr_at_10\n value: 38.284\n - type: mrr_at_100\n value: 39.141999999999996\n - type: mrr_at_1000\n value: 39.2\n - type: mrr_at_3\n value: 35.603\n - type: mrr_at_5\n value: 36.867\n - type: ndcg_at_1\n value: 30.037000000000003\n - type: ndcg_at_10\n value: 39.87\n - type: ndcg_at_100\n value: 45.243\n - type: ndcg_at_1000\n value: 47.507\n - type: ndcg_at_3\n value: 34.371\n - type: ndcg_at_5\n value: 36.521\n - type: precision_at_1\n value: 30.037000000000003\n - type: precision_at_10\n value: 6.819\n - type: precision_at_100\n value: 1.0699999999999998\n - type: precision_at_1000\n value: 0.13699999999999998\n - type: precision_at_3\n value: 15.392\n - type: precision_at_5\n value: 10.821\n - type: recall_at_1\n value: 25.618999999999996\n - type: recall_at_10\n value: 52.869\n - type: recall_at_100\n value: 76.395\n - type: recall_at_1000\n value: 92.19500000000001\n - type: recall_at_3\n value: 37.943\n - type: recall_at_5\n value: 43.342999999999996\n - type: map_at_1\n value: 23.283\n - type: map_at_10\n value: 32.155\n - type: map_at_100\n value: 33.724\n - type: map_at_1000\n value: 33.939\n - type: map_at_3\n value: 29.018\n - type: map_at_5\n value: 30.864000000000004\n - type: mrr_at_1\n value: 28.063\n - type: mrr_at_10\n value: 36.632\n - type: mrr_at_100\n value: 37.606\n - type: mrr_at_1000\n value: 37.671\n - type: mrr_at_3\n value: 33.992\n - type: mrr_at_5\n value: 35.613\n - type: ndcg_at_1\n value: 28.063\n - type: ndcg_at_10\n value: 38.024\n - type: ndcg_at_100\n value: 44.292\n - type: ndcg_at_1000\n value: 46.818\n - type: ndcg_at_3\n value: 32.965\n - type: ndcg_at_5\n value: 35.562\n - type: precision_at_1\n value: 28.063\n - type: precision_at_10\n value: 7.352\n - type: precision_at_100\n value: 1.514\n - type: precision_at_1000\n value: 0.23800000000000002\n - type: precision_at_3\n value: 15.481\n - type: precision_at_5\n value: 11.542\n - type: recall_at_1\n value: 
23.283\n - type: recall_at_10\n value: 49.756\n - type: recall_at_100\n value: 78.05\n - type: recall_at_1000\n value: 93.854\n - type: recall_at_3\n value: 35.408\n - type: recall_at_5\n value: 42.187000000000005\n - type: map_at_1\n value: 19.201999999999998\n - type: map_at_10\n value: 26.826\n - type: map_at_100\n value: 27.961000000000002\n - type: map_at_1000\n value: 28.066999999999997\n - type: map_at_3\n value: 24.237000000000002\n - type: map_at_5\n value: 25.811\n - type: mrr_at_1\n value: 20.887\n - type: mrr_at_10\n value: 28.660000000000004\n - type: mrr_at_100\n value: 29.660999999999998\n - type: mrr_at_1000\n value: 29.731\n - type: mrr_at_3\n value: 26.155\n - type: mrr_at_5\n value: 27.68\n - type: ndcg_at_1\n value: 20.887\n - type: ndcg_at_10\n value: 31.523\n - type: ndcg_at_100\n value: 37.055\n - type: ndcg_at_1000\n value: 39.579\n - type: ndcg_at_3\n value: 26.529000000000003\n - type: ndcg_at_5\n value: 29.137\n - type: precision_at_1\n value: 20.887\n - type: precision_at_10\n value: 5.065\n - type: precision_at_100\n value: 0.856\n - type: precision_at_1000\n value: 0.11900000000000001\n - type: precision_at_3\n value: 11.399\n - type: precision_at_5\n value: 8.392\n - type: recall_at_1\n value: 19.201999999999998\n - type: recall_at_10\n value: 44.285000000000004\n - type: recall_at_100\n value: 69.768\n - type: recall_at_1000\n value: 88.302\n - type: recall_at_3\n value: 30.804\n - type: recall_at_5\n value: 37.039\n - task:\n type: Retrieval\n dataset:\n name: MTEB ClimateFEVER\n type: climate-fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 11.244\n - type: map_at_10\n value: 18.956\n - type: map_at_100\n value: 20.674\n - type: map_at_1000\n value: 20.863\n - type: map_at_3\n value: 15.923000000000002\n - type: map_at_5\n value: 17.518\n - type: mrr_at_1\n value: 25.080999999999996\n - type: mrr_at_10\n value: 35.94\n - type: mrr_at_100\n value: 36.969\n - type: mrr_at_1000\n value: 
37.013\n - type: mrr_at_3\n value: 32.617000000000004\n - type: mrr_at_5\n value: 34.682\n - type: ndcg_at_1\n value: 25.080999999999996\n - type: ndcg_at_10\n value: 26.539\n - type: ndcg_at_100\n value: 33.601\n - type: ndcg_at_1000\n value: 37.203\n - type: ndcg_at_3\n value: 21.695999999999998\n - type: ndcg_at_5\n value: 23.567\n - type: precision_at_1\n value: 25.080999999999996\n - type: precision_at_10\n value: 8.143\n - type: precision_at_100\n value: 1.5650000000000002\n - type: precision_at_1000\n value: 0.22300000000000003\n - type: precision_at_3\n value: 15.983\n - type: precision_at_5\n value: 12.417\n - type: recall_at_1\n value: 11.244\n - type: recall_at_10\n value: 31.457\n - type: recall_at_100\n value: 55.92\n - type: recall_at_1000\n value: 76.372\n - type: recall_at_3\n value: 19.784\n - type: recall_at_5\n value: 24.857000000000003\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia\n type: dbpedia-entity\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 8.595\n - type: map_at_10\n value: 18.75\n - type: map_at_100\n value: 26.354\n - type: map_at_1000\n value: 27.912\n - type: map_at_3\n value: 13.794\n - type: map_at_5\n value: 16.021\n - type: mrr_at_1\n value: 65.75\n - type: mrr_at_10\n value: 73.837\n - type: mrr_at_100\n value: 74.22800000000001\n - type: mrr_at_1000\n value: 74.234\n - type: mrr_at_3\n value: 72.5\n - type: mrr_at_5\n value: 73.387\n - type: ndcg_at_1\n value: 52.625\n - type: ndcg_at_10\n value: 39.101\n - type: ndcg_at_100\n value: 43.836000000000006\n - type: ndcg_at_1000\n value: 51.086\n - type: ndcg_at_3\n value: 44.229\n - type: ndcg_at_5\n value: 41.555\n - type: precision_at_1\n value: 65.75\n - type: precision_at_10\n value: 30.45\n - type: precision_at_100\n value: 9.81\n - type: precision_at_1000\n value: 2.045\n - type: precision_at_3\n value: 48.667\n - type: precision_at_5\n value: 40.8\n - type: recall_at_1\n value: 8.595\n - type: recall_at_10\n value: 
24.201\n - type: recall_at_100\n value: 50.096\n - type: recall_at_1000\n value: 72.677\n - type: recall_at_3\n value: 15.212\n - type: recall_at_5\n value: 18.745\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: accuracy\n value: 46.565\n - type: f1\n value: 41.49914329345582\n - task:\n type: Retrieval\n dataset:\n name: MTEB FEVER\n type: fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 66.60000000000001\n - type: map_at_10\n value: 76.838\n - type: map_at_100\n value: 77.076\n - type: map_at_1000\n value: 77.09\n - type: map_at_3\n value: 75.545\n - type: map_at_5\n value: 76.39\n - type: mrr_at_1\n value: 71.707\n - type: mrr_at_10\n value: 81.514\n - type: mrr_at_100\n value: 81.64099999999999\n - type: mrr_at_1000\n value: 81.645\n - type: mrr_at_3\n value: 80.428\n - type: mrr_at_5\n value: 81.159\n - type: ndcg_at_1\n value: 71.707\n - type: ndcg_at_10\n value: 81.545\n - type: ndcg_at_100\n value: 82.477\n - type: ndcg_at_1000\n value: 82.73899999999999\n - type: ndcg_at_3\n value: 79.292\n - type: ndcg_at_5\n value: 80.599\n - type: precision_at_1\n value: 71.707\n - type: precision_at_10\n value: 10.035\n - type: precision_at_100\n value: 1.068\n - type: precision_at_1000\n value: 0.11100000000000002\n - type: precision_at_3\n value: 30.918\n - type: precision_at_5\n value: 19.328\n - type: recall_at_1\n value: 66.60000000000001\n - type: recall_at_10\n value: 91.353\n - type: recall_at_100\n value: 95.21\n - type: recall_at_1000\n value: 96.89999999999999\n - type: recall_at_3\n value: 85.188\n - type: recall_at_5\n value: 88.52\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA2018\n type: fiqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 19.338\n - type: map_at_10\n value: 31.752000000000002\n - type: 
map_at_100\n value: 33.516\n - type: map_at_1000\n value: 33.694\n - type: map_at_3\n value: 27.716\n - type: map_at_5\n value: 29.67\n - type: mrr_at_1\n value: 38.117000000000004\n - type: mrr_at_10\n value: 47.323\n - type: mrr_at_100\n value: 48.13\n - type: mrr_at_1000\n value: 48.161\n - type: mrr_at_3\n value: 45.062000000000005\n - type: mrr_at_5\n value: 46.358\n - type: ndcg_at_1\n value: 38.117000000000004\n - type: ndcg_at_10\n value: 39.353\n - type: ndcg_at_100\n value: 46.044000000000004\n - type: ndcg_at_1000\n value: 49.083\n - type: ndcg_at_3\n value: 35.891\n - type: ndcg_at_5\n value: 36.661\n - type: precision_at_1\n value: 38.117000000000004\n - type: precision_at_10\n value: 11.187999999999999\n - type: precision_at_100\n value: 1.802\n - type: precision_at_1000\n value: 0.234\n - type: precision_at_3\n value: 24.126\n - type: precision_at_5\n value: 17.562\n - type: recall_at_1\n value: 19.338\n - type: recall_at_10\n value: 45.735\n - type: recall_at_100\n value: 71.281\n - type: recall_at_1000\n value: 89.537\n - type: recall_at_3\n value: 32.525\n - type: recall_at_5\n value: 37.671\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA\n type: hotpotqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 36.995\n - type: map_at_10\n value: 55.032000000000004\n - type: map_at_100\n value: 55.86\n - type: map_at_1000\n value: 55.932\n - type: map_at_3\n value: 52.125\n - type: map_at_5\n value: 53.884\n - type: mrr_at_1\n value: 73.991\n - type: mrr_at_10\n value: 80.096\n - type: mrr_at_100\n value: 80.32000000000001\n - type: mrr_at_1000\n value: 80.331\n - type: mrr_at_3\n value: 79.037\n - type: mrr_at_5\n value: 79.719\n - type: ndcg_at_1\n value: 73.991\n - type: ndcg_at_10\n value: 63.786\n - type: ndcg_at_100\n value: 66.78\n - type: ndcg_at_1000\n value: 68.255\n - type: ndcg_at_3\n value: 59.501000000000005\n - type: ndcg_at_5\n value: 61.82299999999999\n - type: precision_at_1\n value: 
73.991\n - type: precision_at_10\n value: 13.157\n - type: precision_at_100\n value: 1.552\n - type: precision_at_1000\n value: 0.17500000000000002\n - type: precision_at_3\n value: 37.519999999999996\n - type: precision_at_5\n value: 24.351\n - type: recall_at_1\n value: 36.995\n - type: recall_at_10\n value: 65.78699999999999\n - type: recall_at_100\n value: 77.583\n - type: recall_at_1000\n value: 87.421\n - type: recall_at_3\n value: 56.279999999999994\n - type: recall_at_5\n value: 60.878\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 86.80239999999999\n - type: ap\n value: 81.97305141128378\n - type: f1\n value: 86.76976305549273\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO\n type: msmarco\n config: default\n split: dev\n revision: None\n metrics:\n - type: map_at_1\n value: 21.166\n - type: map_at_10\n value: 33.396\n - type: map_at_100\n value: 34.588\n - type: map_at_1000\n value: 34.637\n - type: map_at_3\n value: 29.509999999999998\n - type: map_at_5\n value: 31.719\n - type: mrr_at_1\n value: 21.762\n - type: mrr_at_10\n value: 33.969\n - type: mrr_at_100\n value: 35.099000000000004\n - type: mrr_at_1000\n value: 35.141\n - type: mrr_at_3\n value: 30.148000000000003\n - type: mrr_at_5\n value: 32.324000000000005\n - type: ndcg_at_1\n value: 21.776999999999997\n - type: ndcg_at_10\n value: 40.306999999999995\n - type: ndcg_at_100\n value: 46.068\n - type: ndcg_at_1000\n value: 47.3\n - type: ndcg_at_3\n value: 32.416\n - type: ndcg_at_5\n value: 36.345\n - type: precision_at_1\n value: 21.776999999999997\n - type: precision_at_10\n value: 6.433\n - type: precision_at_100\n value: 0.932\n - type: precision_at_1000\n value: 0.104\n - type: precision_at_3\n value: 13.897\n - type: precision_at_5\n value: 10.324\n - type: recall_at_1\n value: 21.166\n - type: recall_at_10\n 
value: 61.587\n - type: recall_at_100\n value: 88.251\n - type: recall_at_1000\n value: 97.727\n - type: recall_at_3\n value: 40.196\n - type: recall_at_5\n value: 49.611\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 93.04605563155496\n - type: f1\n value: 92.78007303978372\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 69.65116279069767\n - type: f1\n value: 52.75775172527262\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 70.34633490248822\n - type: f1\n value: 68.15345065392562\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n type: mteb/amazon_massive_scenario\n config: en\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 75.63887020847343\n - type: f1\n value: 76.08074680233685\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: v_measure\n value: 33.77933406071333\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: v_measure\n value: 32.06504927238196\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: default\n split: test\n 
revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69\n metrics:\n - type: map\n value: 32.20682480490871\n - type: mrr\n value: 33.41462721527003\n - task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus\n type: nfcorpus\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 5.548\n - type: map_at_10\n value: 13.086999999999998\n - type: map_at_100\n value: 16.698\n - type: map_at_1000\n value: 18.151999999999997\n - type: map_at_3\n value: 9.576\n - type: map_at_5\n value: 11.175\n - type: mrr_at_1\n value: 44.272\n - type: mrr_at_10\n value: 53.635999999999996\n - type: mrr_at_100\n value: 54.228\n - type: mrr_at_1000\n value: 54.26499999999999\n - type: mrr_at_3\n value: 51.754\n - type: mrr_at_5\n value: 53.086\n - type: ndcg_at_1\n value: 42.724000000000004\n - type: ndcg_at_10\n value: 34.769\n - type: ndcg_at_100\n value: 32.283\n - type: ndcg_at_1000\n value: 40.843\n - type: ndcg_at_3\n value: 39.852\n - type: ndcg_at_5\n value: 37.858999999999995\n - type: precision_at_1\n value: 44.272\n - type: precision_at_10\n value: 26.068\n - type: precision_at_100\n value: 8.328000000000001\n - type: precision_at_1000\n value: 2.1\n - type: precision_at_3\n value: 37.874\n - type: precision_at_5\n value: 33.065\n - type: recall_at_1\n value: 5.548\n - type: recall_at_10\n value: 16.936999999999998\n - type: recall_at_100\n value: 33.72\n - type: recall_at_1000\n value: 64.348\n - type: recall_at_3\n value: 10.764999999999999\n - type: recall_at_5\n value: 13.361\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ\n type: nq\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 28.008\n - type: map_at_10\n value: 42.675000000000004\n - type: map_at_100\n value: 43.85\n - type: map_at_1000\n value: 43.884\n - type: map_at_3\n value: 38.286\n - type: map_at_5\n value: 40.78\n - type: mrr_at_1\n value: 31.518\n - type: mrr_at_10\n value: 45.015\n - type: mrr_at_100\n value: 45.924\n - type: 
mrr_at_1000\n value: 45.946999999999996\n - type: mrr_at_3\n value: 41.348\n - type: mrr_at_5\n value: 43.428\n - type: ndcg_at_1\n value: 31.489\n - type: ndcg_at_10\n value: 50.285999999999994\n - type: ndcg_at_100\n value: 55.291999999999994\n - type: ndcg_at_1000\n value: 56.05\n - type: ndcg_at_3\n value: 41.976\n - type: ndcg_at_5\n value: 46.103\n - type: precision_at_1\n value: 31.489\n - type: precision_at_10\n value: 8.456\n - type: precision_at_100\n value: 1.125\n - type: precision_at_1000\n value: 0.12\n - type: precision_at_3\n value: 19.09\n - type: precision_at_5\n value: 13.841000000000001\n - type: recall_at_1\n value: 28.008\n - type: recall_at_10\n value: 71.21499999999999\n - type: recall_at_100\n value: 92.99\n - type: recall_at_1000\n value: 98.578\n - type: recall_at_3\n value: 49.604\n - type: recall_at_5\n value: 59.094\n - task:\n type: Retrieval\n dataset:\n name: MTEB QuoraRetrieval\n type: quora\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 70.351\n - type: map_at_10\n value: 84.163\n - type: map_at_100\n value: 84.785\n - type: map_at_1000\n value: 84.801\n - type: map_at_3\n value: 81.16\n - type: map_at_5\n value: 83.031\n - type: mrr_at_1\n value: 80.96\n - type: mrr_at_10\n value: 87.241\n - type: mrr_at_100\n value: 87.346\n - type: mrr_at_1000\n value: 87.347\n - type: mrr_at_3\n value: 86.25699999999999\n - type: mrr_at_5\n value: 86.907\n - type: ndcg_at_1\n value: 80.97\n - type: ndcg_at_10\n value: 88.017\n - type: ndcg_at_100\n value: 89.241\n - type: ndcg_at_1000\n value: 89.34299999999999\n - type: ndcg_at_3\n value: 85.053\n - type: ndcg_at_5\n value: 86.663\n - type: precision_at_1\n value: 80.97\n - type: precision_at_10\n value: 13.358\n - type: precision_at_100\n value: 1.525\n - type: precision_at_1000\n value: 0.157\n - type: precision_at_3\n value: 37.143\n - type: precision_at_5\n value: 24.451999999999998\n - type: recall_at_1\n value: 70.351\n - type: recall_at_10\n 
value: 95.39800000000001\n - type: recall_at_100\n value: 99.55199999999999\n - type: recall_at_1000\n value: 99.978\n - type: recall_at_3\n value: 86.913\n - type: recall_at_5\n value: 91.448\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: v_measure\n value: 55.62406719814139\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\n metrics:\n - type: v_measure\n value: 61.386700035141736\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS\n type: scidocs\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 4.618\n - type: map_at_10\n value: 12.920000000000002\n - type: map_at_100\n value: 15.304\n - type: map_at_1000\n value: 15.656999999999998\n - type: map_at_3\n value: 9.187\n - type: map_at_5\n value: 10.937\n - type: mrr_at_1\n value: 22.8\n - type: mrr_at_10\n value: 35.13\n - type: mrr_at_100\n value: 36.239\n - type: mrr_at_1000\n value: 36.291000000000004\n - type: mrr_at_3\n value: 31.917\n - type: mrr_at_5\n value: 33.787\n - type: ndcg_at_1\n value: 22.8\n - type: ndcg_at_10\n value: 21.382\n - type: ndcg_at_100\n value: 30.257\n - type: ndcg_at_1000\n value: 36.001\n - type: ndcg_at_3\n value: 20.43\n - type: ndcg_at_5\n value: 17.622\n - type: precision_at_1\n value: 22.8\n - type: precision_at_10\n value: 11.26\n - type: precision_at_100\n value: 2.405\n - type: precision_at_1000\n value: 0.377\n - type: precision_at_3\n value: 19.633\n - type: precision_at_5\n value: 15.68\n - type: recall_at_1\n value: 4.618\n - type: recall_at_10\n value: 22.811999999999998\n - type: recall_at_100\n value: 48.787000000000006\n - type: recall_at_1000\n value: 76.63799999999999\n - type: recall_at_3\n value: 11.952\n - type: 
recall_at_5\n value: 15.892000000000001\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\n metrics:\n - type: cos_sim_pearson\n value: 84.01529458252244\n - type: cos_sim_spearman\n value: 77.92985224770254\n - type: euclidean_pearson\n value: 81.04251429422487\n - type: euclidean_spearman\n value: 77.92838490549133\n - type: manhattan_pearson\n value: 80.95892251458979\n - type: manhattan_spearman\n value: 77.81028089705941\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cos_sim_pearson\n value: 83.97885282534388\n - type: cos_sim_spearman\n value: 75.1221970851712\n - type: euclidean_pearson\n value: 80.34455956720097\n - type: euclidean_spearman\n value: 74.5894274239938\n - type: manhattan_pearson\n value: 80.38999766325465\n - type: manhattan_spearman\n value: 74.68524557166975\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - type: cos_sim_pearson\n value: 82.95746064915672\n - type: cos_sim_spearman\n value: 85.08683458043946\n - type: euclidean_pearson\n value: 84.56699492836385\n - type: euclidean_spearman\n value: 85.66089116133713\n - type: manhattan_pearson\n value: 84.47553323458541\n - type: manhattan_spearman\n value: 85.56142206781472\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: cos_sim_pearson\n value: 82.71377893595067\n - type: cos_sim_spearman\n value: 81.03453291428589\n - type: euclidean_pearson\n value: 82.57136298308613\n - type: euclidean_spearman\n value: 81.15839961890875\n - type: manhattan_pearson\n value: 82.55157879373837\n - type: 
manhattan_spearman\n value: 81.1540163767054\n - task:\n type: STS\n dataset:\n name: MTEB STS15\n type: mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cos_sim_pearson\n value: 86.64197832372373\n - type: cos_sim_spearman\n value: 88.31966852492485\n - type: euclidean_pearson\n value: 87.98692129976983\n - type: euclidean_spearman\n value: 88.6247340837856\n - type: manhattan_pearson\n value: 87.90437827826412\n - type: manhattan_spearman\n value: 88.56278787131457\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cos_sim_pearson\n value: 81.84159950146693\n - type: cos_sim_spearman\n value: 83.90678384140168\n - type: euclidean_pearson\n value: 83.19005018860221\n - type: euclidean_spearman\n value: 84.16260415876295\n - type: manhattan_pearson\n value: 83.05030612994494\n - type: manhattan_spearman\n value: 83.99605629718336\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 87.49935350176666\n - type: cos_sim_spearman\n value: 87.59086606735383\n - type: euclidean_pearson\n value: 88.06537181129983\n - type: euclidean_spearman\n value: 87.6687448086014\n - type: manhattan_pearson\n value: 87.96599131972935\n - type: manhattan_spearman\n value: 87.63295748969642\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 67.68232799482763\n - type: cos_sim_spearman\n value: 67.99930378085793\n - type: euclidean_pearson\n value: 68.50275360001696\n - type: euclidean_spearman\n value: 67.81588179309259\n - type: manhattan_pearson\n 
value: 68.5892154749763\n - type: manhattan_spearman\n value: 67.84357259640682\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmark\n type: mteb/stsbenchmark-sts\n config: default\n split: test\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\n metrics:\n - type: cos_sim_pearson\n value: 84.37049618406554\n - type: cos_sim_spearman\n value: 85.57014313159492\n - type: euclidean_pearson\n value: 85.57469513908282\n - type: euclidean_spearman\n value: 85.661948135258\n - type: manhattan_pearson\n value: 85.36866831229028\n - type: manhattan_spearman\n value: 85.5043455368843\n - task:\n type: Reranking\n dataset:\n name: MTEB SciDocsRR\n type: mteb/scidocs-reranking\n config: default\n split: test\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\n metrics:\n - type: map\n value: 84.83259065376154\n - type: mrr\n value: 95.58455433455433\n - task:\n type: Retrieval\n dataset:\n name: MTEB SciFact\n type: scifact\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 58.817\n - type: map_at_10\n value: 68.459\n - type: map_at_100\n value: 68.951\n - type: map_at_1000\n value: 68.979\n - type: map_at_3\n value: 65.791\n - type: map_at_5\n value: 67.583\n - type: mrr_at_1\n value: 61.667\n - type: mrr_at_10\n value: 69.368\n - type: mrr_at_100\n value: 69.721\n - type: mrr_at_1000\n value: 69.744\n - type: mrr_at_3\n value: 67.278\n - type: mrr_at_5\n value: 68.611\n - type: ndcg_at_1\n value: 61.667\n - type: ndcg_at_10\n value: 72.70100000000001\n - type: ndcg_at_100\n value: 74.928\n - type: ndcg_at_1000\n value: 75.553\n - type: ndcg_at_3\n value: 68.203\n - type: ndcg_at_5\n value: 70.804\n - type: precision_at_1\n value: 61.667\n - type: precision_at_10\n value: 9.533\n - type: precision_at_100\n value: 1.077\n - type: precision_at_1000\n value: 0.11299999999999999\n - type: precision_at_3\n value: 26.444000000000003\n - type: precision_at_5\n value: 17.599999999999998\n - type: recall_at_1\n value: 58.817\n - type: 
recall_at_10\n value: 84.789\n - type: recall_at_100\n value: 95.0\n - type: recall_at_1000\n value: 99.667\n - type: recall_at_3\n value: 72.8\n - type: recall_at_5\n value: 79.294\n - task:\n type: PairClassification\n dataset:\n name: MTEB SprintDuplicateQuestions\n type: mteb/sprintduplicatequestions-pairclassification\n config: default\n split: test\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\n metrics:\n - type: cos_sim_accuracy\n value: 99.8108910891089\n - type: cos_sim_ap\n value: 95.5743678558349\n - type: cos_sim_f1\n value: 90.43133366385722\n - type: cos_sim_precision\n value: 89.67551622418878\n - type: cos_sim_recall\n value: 91.2\n - type: dot_accuracy\n value: 99.75841584158415\n - type: dot_ap\n value: 94.00786363627253\n - type: dot_f1\n value: 87.51910341314316\n - type: dot_precision\n value: 89.20041536863967\n - type: dot_recall\n value: 85.9\n - type: euclidean_accuracy\n value: 99.81485148514851\n - type: euclidean_ap\n value: 95.4752113136905\n - type: euclidean_f1\n value: 90.44334975369456\n - type: euclidean_precision\n value: 89.126213592233\n - type: euclidean_recall\n value: 91.8\n - type: manhattan_accuracy\n value: 99.81584158415842\n - type: manhattan_ap\n value: 95.5163172682464\n - type: manhattan_f1\n value: 90.51987767584097\n - type: manhattan_precision\n value: 92.3076923076923\n - type: manhattan_recall\n value: 88.8\n - type: max_accuracy\n value: 99.81584158415842\n - type: max_ap\n value: 95.5743678558349\n - type: max_f1\n value: 90.51987767584097\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClustering\n type: mteb/stackexchange-clustering\n config: default\n split: test\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\n metrics:\n - type: v_measure\n value: 62.63235986949449\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClusteringP2P\n type: mteb/stackexchange-clustering-p2p\n config: default\n split: test\n revision: 815ca46b2622cec33ccafc3735d572c266efdb44\n 
metrics:\n - type: v_measure\n value: 36.334795589585575\n - task:\n type: Reranking\n dataset:\n name: MTEB StackOverflowDupQuestions\n type: mteb/stackoverflowdupquestions-reranking\n config: default\n split: test\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\n metrics:\n - type: map\n value: 52.02955214518782\n - type: mrr\n value: 52.8004838298956\n - task:\n type: Summarization\n dataset:\n name: MTEB SummEval\n type: mteb/summeval\n config: default\n split: test\n revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c\n metrics:\n - type: cos_sim_pearson\n value: 30.63769566275453\n - type: cos_sim_spearman\n value: 30.422379185989335\n - type: dot_pearson\n value: 26.88493071882256\n - type: dot_spearman\n value: 26.505249740971305\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID\n type: trec-covid\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 0.21\n - type: map_at_10\n value: 1.654\n - type: map_at_100\n value: 10.095\n - type: map_at_1000\n value: 25.808999999999997\n - type: map_at_3\n value: 0.594\n - type: map_at_5\n value: 0.9289999999999999\n - type: mrr_at_1\n value: 78.0\n - type: mrr_at_10\n value: 87.019\n - type: mrr_at_100\n value: 87.019\n - type: mrr_at_1000\n value: 87.019\n - type: mrr_at_3\n value: 86.333\n - type: mrr_at_5\n value: 86.733\n - type: ndcg_at_1\n value: 73.0\n - type: ndcg_at_10\n value: 66.52900000000001\n - type: ndcg_at_100\n value: 53.433\n - type: ndcg_at_1000\n value: 51.324000000000005\n - type: ndcg_at_3\n value: 72.02199999999999\n - type: ndcg_at_5\n value: 69.696\n - type: precision_at_1\n value: 78.0\n - type: precision_at_10\n value: 70.39999999999999\n - type: precision_at_100\n value: 55.46\n - type: precision_at_1000\n value: 22.758\n - type: precision_at_3\n value: 76.667\n - type: precision_at_5\n value: 74.0\n - type: recall_at_1\n value: 0.21\n - type: recall_at_10\n value: 1.8849999999999998\n - type: recall_at_100\n value: 13.801\n - type: 
recall_at_1000\n value: 49.649\n - type: recall_at_3\n value: 0.632\n - type: recall_at_5\n value: 1.009\n - task:\n type: Retrieval\n dataset:\n name: MTEB Touche2020\n type: webis-touche2020\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 1.797\n - type: map_at_10\n value: 9.01\n - type: map_at_100\n value: 14.682\n - type: map_at_1000\n value: 16.336000000000002\n - type: map_at_3\n value: 4.546\n - type: map_at_5\n value: 5.9270000000000005\n - type: mrr_at_1\n value: 24.490000000000002\n - type: mrr_at_10\n value: 41.156\n - type: mrr_at_100\n value: 42.392\n - type: mrr_at_1000\n value: 42.408\n - type: mrr_at_3\n value: 38.775999999999996\n - type: mrr_at_5\n value: 40.102\n - type: ndcg_at_1\n value: 21.429000000000002\n - type: ndcg_at_10\n value: 22.222\n - type: ndcg_at_100\n value: 34.405\n - type: ndcg_at_1000\n value: 46.599000000000004\n - type: ndcg_at_3\n value: 25.261\n - type: ndcg_at_5\n value: 22.695999999999998\n - type: precision_at_1\n value: 24.490000000000002\n - type: precision_at_10\n value: 19.796\n - type: precision_at_100\n value: 7.306\n - type: precision_at_1000\n value: 1.5350000000000001\n - type: precision_at_3\n value: 27.211000000000002\n - type: precision_at_5\n value: 22.857\n - type: recall_at_1\n value: 1.797\n - type: recall_at_10\n value: 15.706000000000001\n - type: recall_at_100\n value: 46.412\n - type: recall_at_1000\n value: 83.159\n - type: recall_at_3\n value: 6.1370000000000005\n - type: recall_at_5\n value: 8.599\n - task:\n type: Classification\n dataset:\n name: MTEB ToxicConversationsClassification\n type: mteb/toxic_conversations_50k\n config: default\n split: test\n revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c\n metrics:\n - type: accuracy\n value: 70.3302\n - type: ap\n value: 14.169121204575601\n - type: f1\n value: 54.229345975274235\n - task:\n type: Classification\n dataset:\n name: MTEB TweetSentimentExtractionClassification\n type: 
mteb/tweet_sentiment_extraction\n config: default\n split: test\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\n metrics:\n - type: accuracy\n value: 58.22297679683077\n - type: f1\n value: 58.62984908377875\n - task:\n type: Clustering\n dataset:\n name: MTEB TwentyNewsgroupsClustering\n type: mteb/twentynewsgroups-clustering\n config: default\n split: test\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\n metrics:\n - type: v_measure\n value: 49.952922428464255\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterSemEval2015\n type: mteb/twittersemeval2015-pairclassification\n config: default\n split: test\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\n metrics:\n - type: cos_sim_accuracy\n value: 84.68140907194373\n - type: cos_sim_ap\n value: 70.12180123666836\n - type: cos_sim_f1\n value: 65.77501791258658\n - type: cos_sim_precision\n value: 60.07853403141361\n - type: cos_sim_recall\n value: 72.66490765171504\n - type: dot_accuracy\n value: 81.92167848840674\n - type: dot_ap\n value: 60.49837581423469\n - type: dot_f1\n value: 58.44186046511628\n - type: dot_precision\n value: 52.24532224532224\n - type: dot_recall\n value: 66.3060686015831\n - type: euclidean_accuracy\n value: 84.73505394289802\n - type: euclidean_ap\n value: 70.3278904593286\n - type: euclidean_f1\n value: 65.98851124940161\n - type: euclidean_precision\n value: 60.38107752956636\n - type: euclidean_recall\n value: 72.74406332453826\n - type: manhattan_accuracy\n value: 84.73505394289802\n - type: manhattan_ap\n value: 70.00737738537337\n - type: manhattan_f1\n value: 65.80150784822642\n - type: manhattan_precision\n value: 61.892583120204606\n - type: manhattan_recall\n value: 70.23746701846966\n - type: max_accuracy\n value: 84.73505394289802\n - type: max_ap\n value: 70.3278904593286\n - type: max_f1\n value: 65.98851124940161\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterURLCorpus\n type: mteb/twitterurlcorpus-pairclassification\n 
config: default\n split: test\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\n metrics:\n - type: cos_sim_accuracy\n value: 88.44258159661582\n - type: cos_sim_ap\n value: 84.91926704880888\n - type: cos_sim_f1\n value: 77.07651086632926\n - type: cos_sim_precision\n value: 74.5894554883319\n - type: cos_sim_recall\n value: 79.73514012935017\n - type: dot_accuracy\n value: 85.88116583226608\n - type: dot_ap\n value: 78.9753854779923\n - type: dot_f1\n value: 72.17757637979255\n - type: dot_precision\n value: 66.80647486729143\n - type: dot_recall\n value: 78.48783492454572\n - type: euclidean_accuracy\n value: 88.5299025885823\n - type: euclidean_ap\n value: 85.08006075642194\n - type: euclidean_f1\n value: 77.29637336504163\n - type: euclidean_precision\n value: 74.69836253950014\n - type: euclidean_recall\n value: 80.08161379735141\n - type: manhattan_accuracy\n value: 88.55124771995187\n - type: manhattan_ap\n value: 85.00941529932851\n - type: manhattan_f1\n value: 77.33100233100232\n - type: manhattan_precision\n value: 73.37572573956317\n - type: manhattan_recall\n value: 81.73698798891284\n - type: max_accuracy\n value: 88.55124771995187\n - type: max_ap\n value: 85.08006075642194\n - type: max_f1\n value: 77.33100233100232\n---\n\n# gte-small\n\nGeneral Text Embeddings (GTE) model. [Towards General Text Embeddings with Multi-stage Contrastive Learning](https://arxiv.org/abs/2308.03281)\n\nThe GTE models are trained by Alibaba DAMO Academy. They are mainly based on the BERT framework and currently offer three different sizes of models, including [GTE-large](https://huggingface.co/thenlper/gte-large), [GTE-base](https://huggingface.co/thenlper/gte-base), and [GTE-small](https://huggingface.co/thenlper/gte-small). The GTE models are trained on a large-scale corpus of relevance text pairs, covering a wide range of domains and scenarios. 
This enables the GTE models to be applied to various downstream tasks of text embeddings, including **information retrieval**, **semantic textual similarity**, **text reranking**, etc.\n\n## Metrics\n\nWe compared the performance of the GTE models with other popular text embedding models on the MTEB benchmark. For more detailed comparison results, please refer to the [MTEB leaderboard](https://huggingface.co/spaces/mteb/leaderboard).\n\n\n\n| Model Name | Model Size (GB) | Dimension | Sequence Length | Average (56) | Clustering (11) | Pair Classification (3) | Reranking (4) | Retrieval (15) | STS (10) | Summarization (1) | Classification (12) |\n|:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\n| [**gte-large**](https://huggingface.co/thenlper/gte-large) | 0.67 | 1024 | 512 | **63.13** | 46.84 | 85.00 | 59.13 | 52.22 | 83.35 | 31.66 | 73.33 |\n| [**gte-base**](https://huggingface.co/thenlper/gte-base) \t| 0.22 | 768 | 512 | **62.39** | 46.2 | 84.57 | 58.61 | 51.14 | 82.3 | 31.17 | 73.01 |\n| [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1.34 | 1024| 512 | 62.25 | 44.49 | 86.03 | 56.61 | 50.56 | 82.05 | 30.19 | 75.24 |\n| [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 0.44 | 768 | 512 | 61.5 | 43.80 | 85.73 | 55.91 | 50.29 | 81.05 | 30.28 | 73.84 |\n| [**gte-small**](https://huggingface.co/thenlper/gte-small) | 0.07 | 384 | 512 | **61.36** | 44.89 | 83.54 | 57.7 | 49.46 | 82.07 | 30.42 | 72.31 |\n| [text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | - | 1536 | 8192 | 60.99 | 45.9 | 84.89 | 56.32 | 49.25 | 80.97 | 30.8 | 70.93 |\n| [e5-small-v2](https://huggingface.co/intfloat/e5-base-v2) | 0.13 | 384 | 512 | 59.93 | 39.92 | 84.67 | 54.32 | 49.04 | 80.39 | 31.16 | 72.94 |\n| [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 9.73 | 768 | 512 | 59.51 | 43.72 | 85.06 | 56.42 | 42.24 | 82.63 | 30.08 | 73.42 |\n| 
[all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) \t| 0.44 | 768 | 514 \t| 57.78 | 43.69 | 83.04 | 59.36 | 43.81 | 80.28 | 27.49 | 65.07 |\n| [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) \t| 28.27 | 4096 | 2048 | 57.59 | 38.93 | 81.9 | 55.65 | 48.22 | 77.74 | 33.6 | 66.19 |\n| [all-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2) \t| 0.13 | 384 | 512 \t| 56.53 | 41.81 | 82.41 | 58.44 | 42.69 | 79.8 | 27.9 | 63.21 |\n| [all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) \t| 0.09 | 384 | 512 \t| 56.26 | 42.35 | 82.37 | 58.04 | 41.95 | 78.9 | 30.81 | 63.05 |\n| [contriever-base-msmarco](https://huggingface.co/nthakur/contriever-base-msmarco) \t| 0.44 | 768 | 512 \t| 56.00 | 41.1 \t| 82.54 | 53.14 | 41.88 | 76.51 | 30.36 | 66.68 |\n| [sentence-t5-base](https://huggingface.co/sentence-transformers/sentence-t5-base) \t| 0.22 | 768 | 512 \t| 55.27 | 40.21 | 85.18 | 53.09 | 33.63 | 81.14 | 31.39 | 69.81 |\n\n\n## Usage\n\nCode example\n\n```python\nimport torch.nn.functional as F\nfrom torch import Tensor\nfrom transformers import AutoTokenizer, AutoModel\n\ndef average_pool(last_hidden_states: Tensor,\n attention_mask: Tensor) -> Tensor:\n last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)\n return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]\n\ninput_texts = [\n \"what is the capital of China?\",\n \"how to implement quick sort in python?\",\n \"Beijing\",\n \"sorting algorithms\"\n]\n\ntokenizer = AutoTokenizer.from_pretrained(\"thenlper/gte-small\")\nmodel = AutoModel.from_pretrained(\"thenlper/gte-small\")\n\n# Tokenize the input texts\nbatch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt')\n\noutputs = model(**batch_dict)\nembeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])\n\n# (Optionally) normalize 
embeddings\nembeddings = F.normalize(embeddings, p=2, dim=1)\nscores = (embeddings[:1] @ embeddings[1:].T) * 100\nprint(scores.tolist())\n```\n\nUse with sentence-transformers:\n```python\nfrom sentence_transformers import SentenceTransformer\nfrom sentence_transformers.util import cos_sim\n\nsentences = ['That is a happy person', 'That is a very happy person']\n\nmodel = SentenceTransformer('thenlper/gte-large')\nembeddings = model.encode(sentences)\nprint(cos_sim(embeddings[0], embeddings[1]))\n```\n\n### Limitation\n\nThis model exclusively caters to English texts, and any lengthy texts will be truncated to a maximum of 512 tokens.\n\n### Citation\n\nIf you find our paper or models helpful, please consider citing them as follows:\n\n```\n@misc{li2023general,\n title={Towards General Text Embeddings with Multi-stage Contrastive Learning}, \n author={Zehan Li and Xin Zhang and Yanzhao Zhang and Dingkun Long and Pengjun Xie and Meishan Zhang},\n year={2023},\n eprint={2308.03281},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n```\n"},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n \"SCIFACT\"\n]"}}},{"rowIdx":2457,"cells":{"id":{"kind":"string","value":"vectoriseai/bge-small-en"},"author":{"kind":"string","value":"vectoriseai"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","pytorch","onnx","safetensors","bert","mteb","sentence transformers","sentence-similarity","en","license:mit","model-index","autotrain_compatible","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"pytorch\",\n \"onnx\",\n \"safetensors\",\n \"bert\",\n \"mteb\",\n \"sentence transformers\",\n \"sentence-similarity\",\n \"en\",\n \"license:mit\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-08-25T13:00:02Z","string":"2023-08-25T13:00:02Z"},"last_modified":{"kind":"string","value":"2023-08-28T14:17:25+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlanguage:\n- en\nlibrary_name: sentence-transformers\nlicense: mit\npipeline_tag: sentence-similarity\ntags:\n- mteb\n- sentence transformers\nmodel-index:\n- name: bge-small-en\n results:\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (en)\n type: mteb/amazon_counterfactual\n config: en\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 74.34328358208955\n - type: ap\n value: 37.59947775195661\n - type: f1\n value: 68.548415491933\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonPolarityClassification\n type: mteb/amazon_polarity\n config: default\n split: test\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\n metrics:\n - type: accuracy\n value: 93.04527499999999\n - type: ap\n value: 89.60696356772135\n - type: f1\n value: 93.03361469382438\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (en)\n type: mteb/amazon_reviews_multi\n config: en\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 46.08\n - type: f1\n value: 45.66249835363254\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna\n type: arguana\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 35.205999999999996\n - type: map_at_10\n value: 50.782000000000004\n - type: map_at_100\n value: 51.547\n - type: map_at_1000\n value: 51.554\n - type: map_at_3\n value: 46.515\n - type: map_at_5\n value: 49.296\n - type: mrr_at_1\n value: 35.632999999999996\n - type: mrr_at_10\n value: 50.958999999999996\n - type: mrr_at_100\n value: 
51.724000000000004\n - type: mrr_at_1000\n value: 51.731\n - type: mrr_at_3\n value: 46.669\n - type: mrr_at_5\n value: 49.439\n - type: ndcg_at_1\n value: 35.205999999999996\n - type: ndcg_at_10\n value: 58.835\n - type: ndcg_at_100\n value: 62.095\n - type: ndcg_at_1000\n value: 62.255\n - type: ndcg_at_3\n value: 50.255\n - type: ndcg_at_5\n value: 55.296\n - type: precision_at_1\n value: 35.205999999999996\n - type: precision_at_10\n value: 8.421\n - type: precision_at_100\n value: 0.984\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_3\n value: 20.365\n - type: precision_at_5\n value: 14.680000000000001\n - type: recall_at_1\n value: 35.205999999999996\n - type: recall_at_10\n value: 84.211\n - type: recall_at_100\n value: 98.43499999999999\n - type: recall_at_1000\n value: 99.644\n - type: recall_at_3\n value: 61.095\n - type: recall_at_5\n value: 73.4\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: v_measure\n value: 47.52644476278646\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: v_measure\n value: 39.973045724188964\n - task:\n type: Reranking\n dataset:\n name: MTEB AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: map\n value: 62.28285314871488\n - type: mrr\n value: 74.52743701358659\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cos_sim_pearson\n value: 80.09041909160327\n - type: cos_sim_spearman\n value: 79.96266537706944\n - type: euclidean_pearson\n 
value: 79.50774978162241\n - type: euclidean_spearman\n value: 79.9144715078551\n - type: manhattan_pearson\n value: 79.2062139879302\n - type: manhattan_spearman\n value: 79.35000081468212\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 85.31493506493506\n - type: f1\n value: 85.2704557977762\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: v_measure\n value: 39.6837242810816\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: v_measure\n value: 35.38881249555897\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackAndroidRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 27.884999999999998\n - type: map_at_10\n value: 39.574\n - type: map_at_100\n value: 40.993\n - type: map_at_1000\n value: 41.129\n - type: map_at_3\n value: 36.089\n - type: map_at_5\n value: 38.191\n - type: mrr_at_1\n value: 34.477999999999994\n - type: mrr_at_10\n value: 45.411\n - type: mrr_at_100\n value: 46.089999999999996\n - type: mrr_at_1000\n value: 46.147\n - type: mrr_at_3\n value: 42.346000000000004\n - type: mrr_at_5\n value: 44.292\n - type: ndcg_at_1\n value: 34.477999999999994\n - type: ndcg_at_10\n value: 46.123999999999995\n - type: ndcg_at_100\n value: 51.349999999999994\n - type: ndcg_at_1000\n value: 53.578\n - type: ndcg_at_3\n value: 40.824\n - type: ndcg_at_5\n value: 43.571\n - type: precision_at_1\n value: 34.477999999999994\n - type: precision_at_10\n value: 8.841000000000001\n - type: 
precision_at_100\n value: 1.4460000000000002\n - type: precision_at_1000\n value: 0.192\n - type: precision_at_3\n value: 19.742\n - type: precision_at_5\n value: 14.421000000000001\n - type: recall_at_1\n value: 27.884999999999998\n - type: recall_at_10\n value: 59.087\n - type: recall_at_100\n value: 80.609\n - type: recall_at_1000\n value: 95.054\n - type: recall_at_3\n value: 44.082\n - type: recall_at_5\n value: 51.593999999999994\n - type: map_at_1\n value: 30.639\n - type: map_at_10\n value: 40.047\n - type: map_at_100\n value: 41.302\n - type: map_at_1000\n value: 41.425\n - type: map_at_3\n value: 37.406\n - type: map_at_5\n value: 38.934000000000005\n - type: mrr_at_1\n value: 37.707\n - type: mrr_at_10\n value: 46.082\n - type: mrr_at_100\n value: 46.745\n - type: mrr_at_1000\n value: 46.786\n - type: mrr_at_3\n value: 43.980999999999995\n - type: mrr_at_5\n value: 45.287\n - type: ndcg_at_1\n value: 37.707\n - type: ndcg_at_10\n value: 45.525\n - type: ndcg_at_100\n value: 49.976\n - type: ndcg_at_1000\n value: 51.94499999999999\n - type: ndcg_at_3\n value: 41.704\n - type: ndcg_at_5\n value: 43.596000000000004\n - type: precision_at_1\n value: 37.707\n - type: precision_at_10\n value: 8.465\n - type: precision_at_100\n value: 1.375\n - type: precision_at_1000\n value: 0.183\n - type: precision_at_3\n value: 19.979\n - type: precision_at_5\n value: 14.115\n - type: recall_at_1\n value: 30.639\n - type: recall_at_10\n value: 54.775\n - type: recall_at_100\n value: 73.678\n - type: recall_at_1000\n value: 86.142\n - type: recall_at_3\n value: 43.230000000000004\n - type: recall_at_5\n value: 48.622\n - type: map_at_1\n value: 38.038\n - type: map_at_10\n value: 49.922\n - type: map_at_100\n value: 51.032\n - type: map_at_1000\n value: 51.085\n - type: map_at_3\n value: 46.664\n - type: map_at_5\n value: 48.588\n - type: mrr_at_1\n value: 43.95\n - type: mrr_at_10\n value: 53.566\n - type: mrr_at_100\n value: 54.318999999999996\n - type: mrr_at_1000\n 
value: 54.348\n - type: mrr_at_3\n value: 51.066\n - type: mrr_at_5\n value: 52.649\n - type: ndcg_at_1\n value: 43.95\n - type: ndcg_at_10\n value: 55.676\n - type: ndcg_at_100\n value: 60.126000000000005\n - type: ndcg_at_1000\n value: 61.208\n - type: ndcg_at_3\n value: 50.20400000000001\n - type: ndcg_at_5\n value: 53.038\n - type: precision_at_1\n value: 43.95\n - type: precision_at_10\n value: 8.953\n - type: precision_at_100\n value: 1.2109999999999999\n - type: precision_at_1000\n value: 0.135\n - type: precision_at_3\n value: 22.256999999999998\n - type: precision_at_5\n value: 15.524\n - type: recall_at_1\n value: 38.038\n - type: recall_at_10\n value: 69.15\n - type: recall_at_100\n value: 88.31599999999999\n - type: recall_at_1000\n value: 95.993\n - type: recall_at_3\n value: 54.663\n - type: recall_at_5\n value: 61.373\n - type: map_at_1\n value: 24.872\n - type: map_at_10\n value: 32.912\n - type: map_at_100\n value: 33.972\n - type: map_at_1000\n value: 34.046\n - type: map_at_3\n value: 30.361\n - type: map_at_5\n value: 31.704\n - type: mrr_at_1\n value: 26.779999999999998\n - type: mrr_at_10\n value: 34.812\n - type: mrr_at_100\n value: 35.754999999999995\n - type: mrr_at_1000\n value: 35.809000000000005\n - type: mrr_at_3\n value: 32.335\n - type: mrr_at_5\n value: 33.64\n - type: ndcg_at_1\n value: 26.779999999999998\n - type: ndcg_at_10\n value: 37.623\n - type: ndcg_at_100\n value: 42.924\n - type: ndcg_at_1000\n value: 44.856\n - type: ndcg_at_3\n value: 32.574\n - type: ndcg_at_5\n value: 34.842\n - type: precision_at_1\n value: 26.779999999999998\n - type: precision_at_10\n value: 5.729\n - type: precision_at_100\n value: 0.886\n - type: precision_at_1000\n value: 0.109\n - type: precision_at_3\n value: 13.559\n - type: precision_at_5\n value: 9.469\n - type: recall_at_1\n value: 24.872\n - type: recall_at_10\n value: 50.400999999999996\n - type: recall_at_100\n value: 74.954\n - type: recall_at_1000\n value: 89.56\n - type: recall_at_3\n 
value: 36.726\n - type: recall_at_5\n value: 42.138999999999996\n - type: map_at_1\n value: 16.803\n - type: map_at_10\n value: 24.348\n - type: map_at_100\n value: 25.56\n - type: map_at_1000\n value: 25.668000000000003\n - type: map_at_3\n value: 21.811\n - type: map_at_5\n value: 23.287\n - type: mrr_at_1\n value: 20.771\n - type: mrr_at_10\n value: 28.961\n - type: mrr_at_100\n value: 29.979\n - type: mrr_at_1000\n value: 30.046\n - type: mrr_at_3\n value: 26.555\n - type: mrr_at_5\n value: 28.060000000000002\n - type: ndcg_at_1\n value: 20.771\n - type: ndcg_at_10\n value: 29.335\n - type: ndcg_at_100\n value: 35.188\n - type: ndcg_at_1000\n value: 37.812\n - type: ndcg_at_3\n value: 24.83\n - type: ndcg_at_5\n value: 27.119\n - type: precision_at_1\n value: 20.771\n - type: precision_at_10\n value: 5.4350000000000005\n - type: precision_at_100\n value: 0.9480000000000001\n - type: precision_at_1000\n value: 0.13\n - type: precision_at_3\n value: 11.982\n - type: precision_at_5\n value: 8.831\n - type: recall_at_1\n value: 16.803\n - type: recall_at_10\n value: 40.039\n - type: recall_at_100\n value: 65.83200000000001\n - type: recall_at_1000\n value: 84.478\n - type: recall_at_3\n value: 27.682000000000002\n - type: recall_at_5\n value: 33.535\n - type: map_at_1\n value: 28.345\n - type: map_at_10\n value: 37.757000000000005\n - type: map_at_100\n value: 39.141\n - type: map_at_1000\n value: 39.262\n - type: map_at_3\n value: 35.183\n - type: map_at_5\n value: 36.592\n - type: mrr_at_1\n value: 34.649\n - type: mrr_at_10\n value: 43.586999999999996\n - type: mrr_at_100\n value: 44.481\n - type: mrr_at_1000\n value: 44.542\n - type: mrr_at_3\n value: 41.29\n - type: mrr_at_5\n value: 42.642\n - type: ndcg_at_1\n value: 34.649\n - type: ndcg_at_10\n value: 43.161\n - type: ndcg_at_100\n value: 48.734\n - type: ndcg_at_1000\n value: 51.046\n - type: ndcg_at_3\n value: 39.118\n - type: ndcg_at_5\n value: 41.022\n - type: precision_at_1\n value: 34.649\n - type: 
precision_at_10\n value: 7.603\n - type: precision_at_100\n value: 1.209\n - type: precision_at_1000\n value: 0.157\n - type: precision_at_3\n value: 18.319\n - type: precision_at_5\n value: 12.839\n - type: recall_at_1\n value: 28.345\n - type: recall_at_10\n value: 53.367\n - type: recall_at_100\n value: 76.453\n - type: recall_at_1000\n value: 91.82000000000001\n - type: recall_at_3\n value: 41.636\n - type: recall_at_5\n value: 46.760000000000005\n - type: map_at_1\n value: 22.419\n - type: map_at_10\n value: 31.716\n - type: map_at_100\n value: 33.152\n - type: map_at_1000\n value: 33.267\n - type: map_at_3\n value: 28.74\n - type: map_at_5\n value: 30.48\n - type: mrr_at_1\n value: 28.310999999999996\n - type: mrr_at_10\n value: 37.039\n - type: mrr_at_100\n value: 38.09\n - type: mrr_at_1000\n value: 38.145\n - type: mrr_at_3\n value: 34.437\n - type: mrr_at_5\n value: 36.024\n - type: ndcg_at_1\n value: 28.310999999999996\n - type: ndcg_at_10\n value: 37.41\n - type: ndcg_at_100\n value: 43.647999999999996\n - type: ndcg_at_1000\n value: 46.007\n - type: ndcg_at_3\n value: 32.509\n - type: ndcg_at_5\n value: 34.943999999999996\n - type: precision_at_1\n value: 28.310999999999996\n - type: precision_at_10\n value: 6.963\n - type: precision_at_100\n value: 1.1860000000000002\n - type: precision_at_1000\n value: 0.154\n - type: precision_at_3\n value: 15.867999999999999\n - type: precision_at_5\n value: 11.507000000000001\n - type: recall_at_1\n value: 22.419\n - type: recall_at_10\n value: 49.28\n - type: recall_at_100\n value: 75.802\n - type: recall_at_1000\n value: 92.032\n - type: recall_at_3\n value: 35.399\n - type: recall_at_5\n value: 42.027\n - type: map_at_1\n value: 24.669249999999998\n - type: map_at_10\n value: 33.332583333333325\n - type: map_at_100\n value: 34.557833333333335\n - type: map_at_1000\n value: 34.67141666666666\n - type: map_at_3\n value: 30.663166666666662\n - type: map_at_5\n value: 32.14883333333333\n - type: mrr_at_1\n value: 
29.193833333333334\n - type: mrr_at_10\n value: 37.47625\n - type: mrr_at_100\n value: 38.3545\n - type: mrr_at_1000\n value: 38.413166666666676\n - type: mrr_at_3\n value: 35.06741666666667\n - type: mrr_at_5\n value: 36.450666666666656\n - type: ndcg_at_1\n value: 29.193833333333334\n - type: ndcg_at_10\n value: 38.505416666666676\n - type: ndcg_at_100\n value: 43.81125\n - type: ndcg_at_1000\n value: 46.09558333333333\n - type: ndcg_at_3\n value: 33.90916666666667\n - type: ndcg_at_5\n value: 36.07666666666666\n - type: precision_at_1\n value: 29.193833333333334\n - type: precision_at_10\n value: 6.7251666666666665\n - type: precision_at_100\n value: 1.1058333333333332\n - type: precision_at_1000\n value: 0.14833333333333332\n - type: precision_at_3\n value: 15.554166666666665\n - type: precision_at_5\n value: 11.079250000000002\n - type: recall_at_1\n value: 24.669249999999998\n - type: recall_at_10\n value: 49.75583333333332\n - type: recall_at_100\n value: 73.06908333333332\n - type: recall_at_1000\n value: 88.91316666666667\n - type: recall_at_3\n value: 36.913250000000005\n - type: recall_at_5\n value: 42.48641666666666\n - type: map_at_1\n value: 24.044999999999998\n - type: map_at_10\n value: 30.349999999999998\n - type: map_at_100\n value: 31.273\n - type: map_at_1000\n value: 31.362000000000002\n - type: map_at_3\n value: 28.508\n - type: map_at_5\n value: 29.369\n - type: mrr_at_1\n value: 26.994\n - type: mrr_at_10\n value: 33.12\n - type: mrr_at_100\n value: 33.904\n - type: mrr_at_1000\n value: 33.967000000000006\n - type: mrr_at_3\n value: 31.365\n - type: mrr_at_5\n value: 32.124\n - type: ndcg_at_1\n value: 26.994\n - type: ndcg_at_10\n value: 34.214\n - type: ndcg_at_100\n value: 38.681\n - type: ndcg_at_1000\n value: 40.926\n - type: ndcg_at_3\n value: 30.725\n - type: ndcg_at_5\n value: 31.967000000000002\n - type: precision_at_1\n value: 26.994\n - type: precision_at_10\n value: 5.215\n - type: precision_at_100\n value: 0.807\n - type: 
precision_at_1000\n value: 0.108\n - type: precision_at_3\n value: 12.986\n - type: precision_at_5\n value: 8.712\n - type: recall_at_1\n value: 24.044999999999998\n - type: recall_at_10\n value: 43.456\n - type: recall_at_100\n value: 63.675000000000004\n - type: recall_at_1000\n value: 80.05499999999999\n - type: recall_at_3\n value: 33.561\n - type: recall_at_5\n value: 36.767\n - type: map_at_1\n value: 15.672\n - type: map_at_10\n value: 22.641\n - type: map_at_100\n value: 23.75\n - type: map_at_1000\n value: 23.877000000000002\n - type: map_at_3\n value: 20.219\n - type: map_at_5\n value: 21.648\n - type: mrr_at_1\n value: 18.823\n - type: mrr_at_10\n value: 26.101999999999997\n - type: mrr_at_100\n value: 27.038\n - type: mrr_at_1000\n value: 27.118\n - type: mrr_at_3\n value: 23.669\n - type: mrr_at_5\n value: 25.173000000000002\n - type: ndcg_at_1\n value: 18.823\n - type: ndcg_at_10\n value: 27.176000000000002\n - type: ndcg_at_100\n value: 32.42\n - type: ndcg_at_1000\n value: 35.413\n - type: ndcg_at_3\n value: 22.756999999999998\n - type: ndcg_at_5\n value: 25.032\n - type: precision_at_1\n value: 18.823\n - type: precision_at_10\n value: 5.034000000000001\n - type: precision_at_100\n value: 0.895\n - type: precision_at_1000\n value: 0.132\n - type: precision_at_3\n value: 10.771\n - type: precision_at_5\n value: 8.1\n - type: recall_at_1\n value: 15.672\n - type: recall_at_10\n value: 37.296\n - type: recall_at_100\n value: 60.863\n - type: recall_at_1000\n value: 82.234\n - type: recall_at_3\n value: 25.330000000000002\n - type: recall_at_5\n value: 30.964000000000002\n - type: map_at_1\n value: 24.633\n - type: map_at_10\n value: 32.858\n - type: map_at_100\n value: 34.038000000000004\n - type: map_at_1000\n value: 34.141\n - type: map_at_3\n value: 30.209000000000003\n - type: map_at_5\n value: 31.567\n - type: mrr_at_1\n value: 28.358\n - type: mrr_at_10\n value: 36.433\n - type: mrr_at_100\n value: 37.352000000000004\n - type: mrr_at_1000\n 
value: 37.41\n - type: mrr_at_3\n value: 34.033\n - type: mrr_at_5\n value: 35.246\n - type: ndcg_at_1\n value: 28.358\n - type: ndcg_at_10\n value: 37.973\n - type: ndcg_at_100\n value: 43.411\n - type: ndcg_at_1000\n value: 45.747\n - type: ndcg_at_3\n value: 32.934999999999995\n - type: ndcg_at_5\n value: 35.013\n - type: precision_at_1\n value: 28.358\n - type: precision_at_10\n value: 6.418\n - type: precision_at_100\n value: 1.02\n - type: precision_at_1000\n value: 0.133\n - type: precision_at_3\n value: 14.677000000000001\n - type: precision_at_5\n value: 10.335999999999999\n - type: recall_at_1\n value: 24.633\n - type: recall_at_10\n value: 50.048\n - type: recall_at_100\n value: 73.821\n - type: recall_at_1000\n value: 90.046\n - type: recall_at_3\n value: 36.284\n - type: recall_at_5\n value: 41.370000000000005\n - type: map_at_1\n value: 23.133\n - type: map_at_10\n value: 31.491999999999997\n - type: map_at_100\n value: 33.062000000000005\n - type: map_at_1000\n value: 33.256\n - type: map_at_3\n value: 28.886\n - type: map_at_5\n value: 30.262\n - type: mrr_at_1\n value: 28.063\n - type: mrr_at_10\n value: 36.144\n - type: mrr_at_100\n value: 37.14\n - type: mrr_at_1000\n value: 37.191\n - type: mrr_at_3\n value: 33.762\n - type: mrr_at_5\n value: 34.997\n - type: ndcg_at_1\n value: 28.063\n - type: ndcg_at_10\n value: 36.951\n - type: ndcg_at_100\n value: 43.287\n - type: ndcg_at_1000\n value: 45.777\n - type: ndcg_at_3\n value: 32.786\n - type: ndcg_at_5\n value: 34.65\n - type: precision_at_1\n value: 28.063\n - type: precision_at_10\n value: 7.055\n - type: precision_at_100\n value: 1.476\n - type: precision_at_1000\n value: 0.22899999999999998\n - type: precision_at_3\n value: 15.481\n - type: precision_at_5\n value: 11.186\n - type: recall_at_1\n value: 23.133\n - type: recall_at_10\n value: 47.285\n - type: recall_at_100\n value: 76.176\n - type: recall_at_1000\n value: 92.176\n - type: recall_at_3\n value: 35.223\n - type: recall_at_5\n 
value: 40.142\n - type: map_at_1\n value: 19.547\n - type: map_at_10\n value: 26.374\n - type: map_at_100\n value: 27.419\n - type: map_at_1000\n value: 27.539\n - type: map_at_3\n value: 23.882\n - type: map_at_5\n value: 25.163999999999998\n - type: mrr_at_1\n value: 21.442\n - type: mrr_at_10\n value: 28.458\n - type: mrr_at_100\n value: 29.360999999999997\n - type: mrr_at_1000\n value: 29.448999999999998\n - type: mrr_at_3\n value: 25.97\n - type: mrr_at_5\n value: 27.273999999999997\n - type: ndcg_at_1\n value: 21.442\n - type: ndcg_at_10\n value: 30.897000000000002\n - type: ndcg_at_100\n value: 35.99\n - type: ndcg_at_1000\n value: 38.832\n - type: ndcg_at_3\n value: 25.944\n - type: ndcg_at_5\n value: 28.126\n - type: precision_at_1\n value: 21.442\n - type: precision_at_10\n value: 4.9910000000000005\n - type: precision_at_100\n value: 0.8109999999999999\n - type: precision_at_1000\n value: 0.11800000000000001\n - type: precision_at_3\n value: 11.029\n - type: precision_at_5\n value: 7.911\n - type: recall_at_1\n value: 19.547\n - type: recall_at_10\n value: 42.886\n - type: recall_at_100\n value: 66.64999999999999\n - type: recall_at_1000\n value: 87.368\n - type: recall_at_3\n value: 29.143\n - type: recall_at_5\n value: 34.544000000000004\n - task:\n type: Retrieval\n dataset:\n name: MTEB ClimateFEVER\n type: climate-fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 15.572\n - type: map_at_10\n value: 25.312\n - type: map_at_100\n value: 27.062\n - type: map_at_1000\n value: 27.253\n - type: map_at_3\n value: 21.601\n - type: map_at_5\n value: 23.473\n - type: mrr_at_1\n value: 34.984\n - type: mrr_at_10\n value: 46.406\n - type: mrr_at_100\n value: 47.179\n - type: mrr_at_1000\n value: 47.21\n - type: mrr_at_3\n value: 43.485\n - type: mrr_at_5\n value: 45.322\n - type: ndcg_at_1\n value: 34.984\n - type: ndcg_at_10\n value: 34.344\n - type: ndcg_at_100\n value: 41.015\n - type: ndcg_at_1000\n value: 
44.366\n - type: ndcg_at_3\n value: 29.119\n - type: ndcg_at_5\n value: 30.825999999999997\n - type: precision_at_1\n value: 34.984\n - type: precision_at_10\n value: 10.358\n - type: precision_at_100\n value: 1.762\n - type: precision_at_1000\n value: 0.23900000000000002\n - type: precision_at_3\n value: 21.368000000000002\n - type: precision_at_5\n value: 15.948\n - type: recall_at_1\n value: 15.572\n - type: recall_at_10\n value: 39.367999999999995\n - type: recall_at_100\n value: 62.183\n - type: recall_at_1000\n value: 80.92200000000001\n - type: recall_at_3\n value: 26.131999999999998\n - type: recall_at_5\n value: 31.635999999999996\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia\n type: dbpedia-entity\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 8.848\n - type: map_at_10\n value: 19.25\n - type: map_at_100\n value: 27.193\n - type: map_at_1000\n value: 28.721999999999998\n - type: map_at_3\n value: 13.968\n - type: map_at_5\n value: 16.283\n - type: mrr_at_1\n value: 68.75\n - type: mrr_at_10\n value: 76.25\n - type: mrr_at_100\n value: 76.534\n - type: mrr_at_1000\n value: 76.53999999999999\n - type: mrr_at_3\n value: 74.667\n - type: mrr_at_5\n value: 75.86699999999999\n - type: ndcg_at_1\n value: 56.00000000000001\n - type: ndcg_at_10\n value: 41.426\n - type: ndcg_at_100\n value: 45.660000000000004\n - type: ndcg_at_1000\n value: 53.02\n - type: ndcg_at_3\n value: 46.581\n - type: ndcg_at_5\n value: 43.836999999999996\n - type: precision_at_1\n value: 68.75\n - type: precision_at_10\n value: 32.800000000000004\n - type: precision_at_100\n value: 10.440000000000001\n - type: precision_at_1000\n value: 1.9980000000000002\n - type: precision_at_3\n value: 49.667\n - type: precision_at_5\n value: 42.25\n - type: recall_at_1\n value: 8.848\n - type: recall_at_10\n value: 24.467\n - type: recall_at_100\n value: 51.344\n - type: recall_at_1000\n value: 75.235\n - type: recall_at_3\n value: 15.329\n - type: 
recall_at_5\n value: 18.892999999999997\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: accuracy\n value: 48.95\n - type: f1\n value: 43.44563593360779\n - task:\n type: Retrieval\n dataset:\n name: MTEB FEVER\n type: fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 78.036\n - type: map_at_10\n value: 85.639\n - type: map_at_100\n value: 85.815\n - type: map_at_1000\n value: 85.829\n - type: map_at_3\n value: 84.795\n - type: map_at_5\n value: 85.336\n - type: mrr_at_1\n value: 84.353\n - type: mrr_at_10\n value: 90.582\n - type: mrr_at_100\n value: 90.617\n - type: mrr_at_1000\n value: 90.617\n - type: mrr_at_3\n value: 90.132\n - type: mrr_at_5\n value: 90.447\n - type: ndcg_at_1\n value: 84.353\n - type: ndcg_at_10\n value: 89.003\n - type: ndcg_at_100\n value: 89.60000000000001\n - type: ndcg_at_1000\n value: 89.836\n - type: ndcg_at_3\n value: 87.81400000000001\n - type: ndcg_at_5\n value: 88.478\n - type: precision_at_1\n value: 84.353\n - type: precision_at_10\n value: 10.482\n - type: precision_at_100\n value: 1.099\n - type: precision_at_1000\n value: 0.11399999999999999\n - type: precision_at_3\n value: 33.257999999999996\n - type: precision_at_5\n value: 20.465\n - type: recall_at_1\n value: 78.036\n - type: recall_at_10\n value: 94.517\n - type: recall_at_100\n value: 96.828\n - type: recall_at_1000\n value: 98.261\n - type: recall_at_3\n value: 91.12\n - type: recall_at_5\n value: 92.946\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA2018\n type: fiqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 20.191\n - type: map_at_10\n value: 32.369\n - type: map_at_100\n value: 34.123999999999995\n - type: map_at_1000\n value: 34.317\n - type: map_at_3\n value: 28.71\n - type: map_at_5\n value: 30.607\n - type: 
mrr_at_1\n value: 40.894999999999996\n - type: mrr_at_10\n value: 48.842\n - type: mrr_at_100\n value: 49.599\n - type: mrr_at_1000\n value: 49.647000000000006\n - type: mrr_at_3\n value: 46.785\n - type: mrr_at_5\n value: 47.672\n - type: ndcg_at_1\n value: 40.894999999999996\n - type: ndcg_at_10\n value: 39.872\n - type: ndcg_at_100\n value: 46.126\n - type: ndcg_at_1000\n value: 49.476\n - type: ndcg_at_3\n value: 37.153000000000006\n - type: ndcg_at_5\n value: 37.433\n - type: precision_at_1\n value: 40.894999999999996\n - type: precision_at_10\n value: 10.818\n - type: precision_at_100\n value: 1.73\n - type: precision_at_1000\n value: 0.231\n - type: precision_at_3\n value: 25.051000000000002\n - type: precision_at_5\n value: 17.531\n - type: recall_at_1\n value: 20.191\n - type: recall_at_10\n value: 45.768\n - type: recall_at_100\n value: 68.82000000000001\n - type: recall_at_1000\n value: 89.133\n - type: recall_at_3\n value: 33.296\n - type: recall_at_5\n value: 38.022\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA\n type: hotpotqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 39.257\n - type: map_at_10\n value: 61.467000000000006\n - type: map_at_100\n value: 62.364\n - type: map_at_1000\n value: 62.424\n - type: map_at_3\n value: 58.228\n - type: map_at_5\n value: 60.283\n - type: mrr_at_1\n value: 78.515\n - type: mrr_at_10\n value: 84.191\n - type: mrr_at_100\n value: 84.378\n - type: mrr_at_1000\n value: 84.385\n - type: mrr_at_3\n value: 83.284\n - type: mrr_at_5\n value: 83.856\n - type: ndcg_at_1\n value: 78.515\n - type: ndcg_at_10\n value: 69.78999999999999\n - type: ndcg_at_100\n value: 72.886\n - type: ndcg_at_1000\n value: 74.015\n - type: ndcg_at_3\n value: 65.23\n - type: ndcg_at_5\n value: 67.80199999999999\n - type: precision_at_1\n value: 78.515\n - type: precision_at_10\n value: 14.519000000000002\n - type: precision_at_100\n value: 1.694\n - type: precision_at_1000\n value: 
0.184\n - type: precision_at_3\n value: 41.702\n - type: precision_at_5\n value: 27.046999999999997\n - type: recall_at_1\n value: 39.257\n - type: recall_at_10\n value: 72.59299999999999\n - type: recall_at_100\n value: 84.679\n - type: recall_at_1000\n value: 92.12\n - type: recall_at_3\n value: 62.552\n - type: recall_at_5\n value: 67.616\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 91.5152\n - type: ap\n value: 87.64584669595709\n - type: f1\n value: 91.50605576428437\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO\n type: msmarco\n config: default\n split: dev\n revision: None\n metrics:\n - type: map_at_1\n value: 21.926000000000002\n - type: map_at_10\n value: 34.049\n - type: map_at_100\n value: 35.213\n - type: map_at_1000\n value: 35.265\n - type: map_at_3\n value: 30.309\n - type: map_at_5\n value: 32.407000000000004\n - type: mrr_at_1\n value: 22.55\n - type: mrr_at_10\n value: 34.657\n - type: mrr_at_100\n value: 35.760999999999996\n - type: mrr_at_1000\n value: 35.807\n - type: mrr_at_3\n value: 30.989\n - type: mrr_at_5\n value: 33.039\n - type: ndcg_at_1\n value: 22.55\n - type: ndcg_at_10\n value: 40.842\n - type: ndcg_at_100\n value: 46.436\n - type: ndcg_at_1000\n value: 47.721999999999994\n - type: ndcg_at_3\n value: 33.209\n - type: ndcg_at_5\n value: 36.943\n - type: precision_at_1\n value: 22.55\n - type: precision_at_10\n value: 6.447\n - type: precision_at_100\n value: 0.9249999999999999\n - type: precision_at_1000\n value: 0.104\n - type: precision_at_3\n value: 14.136000000000001\n - type: precision_at_5\n value: 10.381\n - type: recall_at_1\n value: 21.926000000000002\n - type: recall_at_10\n value: 61.724999999999994\n - type: recall_at_100\n value: 87.604\n - type: recall_at_1000\n value: 97.421\n - type: recall_at_3\n value: 40.944\n - type: 
recall_at_5\n value: 49.915\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 93.54765161878704\n - type: f1\n value: 93.3298945415573\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 75.71591427268582\n - type: f1\n value: 59.32113870474471\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 75.83053127101547\n - type: f1\n value: 73.60757944876475\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n type: mteb/amazon_massive_scenario\n config: en\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 78.72562205783457\n - type: f1\n value: 78.63761662505502\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: v_measure\n value: 33.37935633767996\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: v_measure\n value: 31.55270546130387\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: default\n split: test\n revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69\n metrics:\n - type: map\n value: 30.462692753143834\n - type: mrr\n value: 31.497569753511563\n - 
task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus\n type: nfcorpus\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 5.646\n - type: map_at_10\n value: 12.498\n - type: map_at_100\n value: 15.486\n - type: map_at_1000\n value: 16.805999999999997\n - type: map_at_3\n value: 9.325\n - type: map_at_5\n value: 10.751\n - type: mrr_at_1\n value: 43.034\n - type: mrr_at_10\n value: 52.662\n - type: mrr_at_100\n value: 53.189\n - type: mrr_at_1000\n value: 53.25\n - type: mrr_at_3\n value: 50.929\n - type: mrr_at_5\n value: 51.92\n - type: ndcg_at_1\n value: 41.796\n - type: ndcg_at_10\n value: 33.477000000000004\n - type: ndcg_at_100\n value: 29.996000000000002\n - type: ndcg_at_1000\n value: 38.864\n - type: ndcg_at_3\n value: 38.940000000000005\n - type: ndcg_at_5\n value: 36.689\n - type: precision_at_1\n value: 43.034\n - type: precision_at_10\n value: 24.799\n - type: precision_at_100\n value: 7.432999999999999\n - type: precision_at_1000\n value: 1.9929999999999999\n - type: precision_at_3\n value: 36.842000000000006\n - type: precision_at_5\n value: 32.135999999999996\n - type: recall_at_1\n value: 5.646\n - type: recall_at_10\n value: 15.963\n - type: recall_at_100\n value: 29.492\n - type: recall_at_1000\n value: 61.711000000000006\n - type: recall_at_3\n value: 10.585\n - type: recall_at_5\n value: 12.753999999999998\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ\n type: nq\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 27.602\n - type: map_at_10\n value: 41.545\n - type: map_at_100\n value: 42.644999999999996\n - type: map_at_1000\n value: 42.685\n - type: map_at_3\n value: 37.261\n - type: map_at_5\n value: 39.706\n - type: mrr_at_1\n value: 31.141000000000002\n - type: mrr_at_10\n value: 44.139\n - type: mrr_at_100\n value: 44.997\n - type: mrr_at_1000\n value: 45.025999999999996\n - type: mrr_at_3\n value: 40.503\n - type: mrr_at_5\n value: 42.64\n - type: 
ndcg_at_1\n value: 31.141000000000002\n - type: ndcg_at_10\n value: 48.995\n - type: ndcg_at_100\n value: 53.788000000000004\n - type: ndcg_at_1000\n value: 54.730000000000004\n - type: ndcg_at_3\n value: 40.844\n - type: ndcg_at_5\n value: 44.955\n - type: precision_at_1\n value: 31.141000000000002\n - type: precision_at_10\n value: 8.233\n - type: precision_at_100\n value: 1.093\n - type: precision_at_1000\n value: 0.11800000000000001\n - type: precision_at_3\n value: 18.579\n - type: precision_at_5\n value: 13.533999999999999\n - type: recall_at_1\n value: 27.602\n - type: recall_at_10\n value: 69.216\n - type: recall_at_100\n value: 90.252\n - type: recall_at_1000\n value: 97.27\n - type: recall_at_3\n value: 47.987\n - type: recall_at_5\n value: 57.438\n - task:\n type: Retrieval\n dataset:\n name: MTEB QuoraRetrieval\n type: quora\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 70.949\n - type: map_at_10\n value: 84.89999999999999\n - type: map_at_100\n value: 85.531\n - type: map_at_1000\n value: 85.548\n - type: map_at_3\n value: 82.027\n - type: map_at_5\n value: 83.853\n - type: mrr_at_1\n value: 81.69999999999999\n - type: mrr_at_10\n value: 87.813\n - type: mrr_at_100\n value: 87.917\n - type: mrr_at_1000\n value: 87.91799999999999\n - type: mrr_at_3\n value: 86.938\n - type: mrr_at_5\n value: 87.53999999999999\n - type: ndcg_at_1\n value: 81.75\n - type: ndcg_at_10\n value: 88.55499999999999\n - type: ndcg_at_100\n value: 89.765\n - type: ndcg_at_1000\n value: 89.871\n - type: ndcg_at_3\n value: 85.905\n - type: ndcg_at_5\n value: 87.41\n - type: precision_at_1\n value: 81.75\n - type: precision_at_10\n value: 13.403\n - type: precision_at_100\n value: 1.528\n - type: precision_at_1000\n value: 0.157\n - type: precision_at_3\n value: 37.597\n - type: precision_at_5\n value: 24.69\n - type: recall_at_1\n value: 70.949\n - type: recall_at_10\n value: 95.423\n - type: recall_at_100\n value: 99.509\n - type: 
recall_at_1000\n value: 99.982\n - type: recall_at_3\n value: 87.717\n - type: recall_at_5\n value: 92.032\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: v_measure\n value: 51.76962893449579\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\n metrics:\n - type: v_measure\n value: 62.32897690686379\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS\n type: scidocs\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 4.478\n - type: map_at_10\n value: 11.994\n - type: map_at_100\n value: 13.977\n - type: map_at_1000\n value: 14.295\n - type: map_at_3\n value: 8.408999999999999\n - type: map_at_5\n value: 10.024\n - type: mrr_at_1\n value: 22.1\n - type: mrr_at_10\n value: 33.526\n - type: mrr_at_100\n value: 34.577000000000005\n - type: mrr_at_1000\n value: 34.632000000000005\n - type: mrr_at_3\n value: 30.217\n - type: mrr_at_5\n value: 31.962000000000003\n - type: ndcg_at_1\n value: 22.1\n - type: ndcg_at_10\n value: 20.191\n - type: ndcg_at_100\n value: 27.954\n - type: ndcg_at_1000\n value: 33.491\n - type: ndcg_at_3\n value: 18.787000000000003\n - type: ndcg_at_5\n value: 16.378999999999998\n - type: precision_at_1\n value: 22.1\n - type: precision_at_10\n value: 10.69\n - type: precision_at_100\n value: 2.1919999999999997\n - type: precision_at_1000\n value: 0.35200000000000004\n - type: precision_at_3\n value: 17.732999999999997\n - type: precision_at_5\n value: 14.499999999999998\n - type: recall_at_1\n value: 4.478\n - type: recall_at_10\n value: 21.657\n - type: recall_at_100\n value: 44.54\n - type: recall_at_1000\n value: 71.542\n - type: recall_at_3\n value: 10.778\n - type: recall_at_5\n value: 14.687\n - task:\n 
type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\n metrics:\n - type: cos_sim_pearson\n value: 82.82325259156718\n - type: cos_sim_spearman\n value: 79.2463589100662\n - type: euclidean_pearson\n value: 80.48318380496771\n - type: euclidean_spearman\n value: 79.34451935199979\n - type: manhattan_pearson\n value: 80.39041824178759\n - type: manhattan_spearman\n value: 79.23002892700211\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cos_sim_pearson\n value: 85.74130231431258\n - type: cos_sim_spearman\n value: 78.36856568042397\n - type: euclidean_pearson\n value: 82.48301631890303\n - type: euclidean_spearman\n value: 78.28376980722732\n - type: manhattan_pearson\n value: 82.43552075450525\n - type: manhattan_spearman\n value: 78.22702443947126\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - type: cos_sim_pearson\n value: 79.96138619461459\n - type: cos_sim_spearman\n value: 81.85436343502379\n - type: euclidean_pearson\n value: 81.82895226665367\n - type: euclidean_spearman\n value: 82.22707349602916\n - type: manhattan_pearson\n value: 81.66303369445873\n - type: manhattan_spearman\n value: 82.05030197179455\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: cos_sim_pearson\n value: 80.05481244198648\n - type: cos_sim_spearman\n value: 80.85052504637808\n - type: euclidean_pearson\n value: 80.86728419744497\n - type: euclidean_spearman\n value: 81.033786401512\n - type: manhattan_pearson\n value: 80.90107531061103\n - type: manhattan_spearman\n value: 81.11374116827795\n - task:\n 
type: STS\n dataset:\n name: MTEB STS15\n type: mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cos_sim_pearson\n value: 84.615220756399\n - type: cos_sim_spearman\n value: 86.46858500002092\n - type: euclidean_pearson\n value: 86.08307800247586\n - type: euclidean_spearman\n value: 86.72691443870013\n - type: manhattan_pearson\n value: 85.96155594487269\n - type: manhattan_spearman\n value: 86.605909505275\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cos_sim_pearson\n value: 82.14363913634436\n - type: cos_sim_spearman\n value: 84.48430226487102\n - type: euclidean_pearson\n value: 83.75303424801902\n - type: euclidean_spearman\n value: 84.56762380734538\n - type: manhattan_pearson\n value: 83.6135447165928\n - type: manhattan_spearman\n value: 84.39898212616731\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 85.09909252554525\n - type: cos_sim_spearman\n value: 85.70951402743276\n - type: euclidean_pearson\n value: 87.1991936239908\n - type: euclidean_spearman\n value: 86.07745840612071\n - type: manhattan_pearson\n value: 87.25039137549952\n - type: manhattan_spearman\n value: 85.99938746659761\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 63.529332093413615\n - type: cos_sim_spearman\n value: 65.38177340147439\n - type: euclidean_pearson\n value: 66.35278011412136\n - type: euclidean_spearman\n value: 65.47147267032997\n - type: manhattan_pearson\n value: 66.71804682408693\n - type: manhattan_spearman\n value: 
65.67406521423597\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmark\n type: mteb/stsbenchmark-sts\n config: default\n split: test\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\n metrics:\n - type: cos_sim_pearson\n value: 82.45802942885662\n - type: cos_sim_spearman\n value: 84.8853341842566\n - type: euclidean_pearson\n value: 84.60915021096707\n - type: euclidean_spearman\n value: 85.11181242913666\n - type: manhattan_pearson\n value: 84.38600521210364\n - type: manhattan_spearman\n value: 84.89045417981723\n - task:\n type: Reranking\n dataset:\n name: MTEB SciDocsRR\n type: mteb/scidocs-reranking\n config: default\n split: test\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\n metrics:\n - type: map\n value: 85.92793380635129\n - type: mrr\n value: 95.85834191226348\n - task:\n type: Retrieval\n dataset:\n name: MTEB SciFact\n type: scifact\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 55.74400000000001\n - type: map_at_10\n value: 65.455\n - type: map_at_100\n value: 66.106\n - type: map_at_1000\n value: 66.129\n - type: map_at_3\n value: 62.719\n - type: map_at_5\n value: 64.441\n - type: mrr_at_1\n value: 58.667\n - type: mrr_at_10\n value: 66.776\n - type: mrr_at_100\n value: 67.363\n - type: mrr_at_1000\n value: 67.384\n - type: mrr_at_3\n value: 64.889\n - type: mrr_at_5\n value: 66.122\n - type: ndcg_at_1\n value: 58.667\n - type: ndcg_at_10\n value: 69.904\n - type: ndcg_at_100\n value: 72.807\n - type: ndcg_at_1000\n value: 73.423\n - type: ndcg_at_3\n value: 65.405\n - type: ndcg_at_5\n value: 67.86999999999999\n - type: precision_at_1\n value: 58.667\n - type: precision_at_10\n value: 9.3\n - type: precision_at_100\n value: 1.08\n - type: precision_at_1000\n value: 0.11299999999999999\n - type: precision_at_3\n value: 25.444\n - type: precision_at_5\n value: 17\n - type: recall_at_1\n value: 55.74400000000001\n - type: recall_at_10\n value: 82.122\n - type: recall_at_100\n value: 95.167\n 
- type: recall_at_1000\n value: 100\n - type: recall_at_3\n value: 70.14399999999999\n - type: recall_at_5\n value: 76.417\n - task:\n type: PairClassification\n dataset:\n name: MTEB SprintDuplicateQuestions\n type: mteb/sprintduplicatequestions-pairclassification\n config: default\n split: test\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\n metrics:\n - type: cos_sim_accuracy\n value: 99.86534653465347\n - type: cos_sim_ap\n value: 96.54142419791388\n - type: cos_sim_f1\n value: 93.07535641547861\n - type: cos_sim_precision\n value: 94.81327800829875\n - type: cos_sim_recall\n value: 91.4\n - type: dot_accuracy\n value: 99.86435643564356\n - type: dot_ap\n value: 96.53682260449868\n - type: dot_f1\n value: 92.98515104966718\n - type: dot_precision\n value: 95.27806925498426\n - type: dot_recall\n value: 90.8\n - type: euclidean_accuracy\n value: 99.86336633663366\n - type: euclidean_ap\n value: 96.5228676185697\n - type: euclidean_f1\n value: 92.9735234215886\n - type: euclidean_precision\n value: 94.70954356846472\n - type: euclidean_recall\n value: 91.3\n - type: manhattan_accuracy\n value: 99.85841584158416\n - type: manhattan_ap\n value: 96.50392760934032\n - type: manhattan_f1\n value: 92.84642321160581\n - type: manhattan_precision\n value: 92.8928928928929\n - type: manhattan_recall\n value: 92.80000000000001\n - type: max_accuracy\n value: 99.86534653465347\n - type: max_ap\n value: 96.54142419791388\n - type: max_f1\n value: 93.07535641547861\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClustering\n type: mteb/stackexchange-clustering\n config: default\n split: test\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\n metrics:\n - type: v_measure\n value: 61.08285408766616\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClusteringP2P\n type: mteb/stackexchange-clustering-p2p\n config: default\n split: test\n revision: 815ca46b2622cec33ccafc3735d572c266efdb44\n metrics:\n - type: v_measure\n value: 
35.640675309010604\n - task:\n type: Reranking\n dataset:\n name: MTEB StackOverflowDupQuestions\n type: mteb/stackoverflowdupquestions-reranking\n config: default\n split: test\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\n metrics:\n - type: map\n value: 53.20333913710715\n - type: mrr\n value: 54.088813555725324\n - task:\n type: Summarization\n dataset:\n name: MTEB SummEval\n type: mteb/summeval\n config: default\n split: test\n revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c\n metrics:\n - type: cos_sim_pearson\n value: 30.79465221925075\n - type: cos_sim_spearman\n value: 30.530816059163634\n - type: dot_pearson\n value: 31.364837244718043\n - type: dot_spearman\n value: 30.79726823684003\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID\n type: trec-covid\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 0.22599999999999998\n - type: map_at_10\n value: 1.735\n - type: map_at_100\n value: 8.978\n - type: map_at_1000\n value: 20.851\n - type: map_at_3\n value: 0.613\n - type: map_at_5\n value: 0.964\n - type: mrr_at_1\n value: 88\n - type: mrr_at_10\n value: 92.867\n - type: mrr_at_100\n value: 92.867\n - type: mrr_at_1000\n value: 92.867\n - type: mrr_at_3\n value: 92.667\n - type: mrr_at_5\n value: 92.667\n - type: ndcg_at_1\n value: 82\n - type: ndcg_at_10\n value: 73.164\n - type: ndcg_at_100\n value: 51.878\n - type: ndcg_at_1000\n value: 44.864\n - type: ndcg_at_3\n value: 79.184\n - type: ndcg_at_5\n value: 76.39\n - type: precision_at_1\n value: 88\n - type: precision_at_10\n value: 76.2\n - type: precision_at_100\n value: 52.459999999999994\n - type: precision_at_1000\n value: 19.692\n - type: precision_at_3\n value: 82.667\n - type: precision_at_5\n value: 80\n - type: recall_at_1\n value: 0.22599999999999998\n - type: recall_at_10\n value: 1.942\n - type: recall_at_100\n value: 12.342\n - type: recall_at_1000\n value: 41.42\n - type: recall_at_3\n value: 0.637\n - type: recall_at_5\n 
value: 1.034\n - task:\n type: Retrieval\n dataset:\n name: MTEB Touche2020\n type: webis-touche2020\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 3.567\n - type: map_at_10\n value: 13.116\n - type: map_at_100\n value: 19.39\n - type: map_at_1000\n value: 20.988\n - type: map_at_3\n value: 7.109\n - type: map_at_5\n value: 9.950000000000001\n - type: mrr_at_1\n value: 42.857\n - type: mrr_at_10\n value: 57.404999999999994\n - type: mrr_at_100\n value: 58.021\n - type: mrr_at_1000\n value: 58.021\n - type: mrr_at_3\n value: 54.762\n - type: mrr_at_5\n value: 56.19\n - type: ndcg_at_1\n value: 38.775999999999996\n - type: ndcg_at_10\n value: 30.359\n - type: ndcg_at_100\n value: 41.284\n - type: ndcg_at_1000\n value: 52.30200000000001\n - type: ndcg_at_3\n value: 36.744\n - type: ndcg_at_5\n value: 34.326\n - type: precision_at_1\n value: 42.857\n - type: precision_at_10\n value: 26.122\n - type: precision_at_100\n value: 8.082\n - type: precision_at_1000\n value: 1.559\n - type: precision_at_3\n value: 40.136\n - type: precision_at_5\n value: 35.510000000000005\n - type: recall_at_1\n value: 3.567\n - type: recall_at_10\n value: 19.045\n - type: recall_at_100\n value: 49.979\n - type: recall_at_1000\n value: 84.206\n - type: recall_at_3\n value: 8.52\n - type: recall_at_5\n value: 13.103000000000002\n - task:\n type: Classification\n dataset:\n name: MTEB ToxicConversationsClassification\n type: mteb/toxic_conversations_50k\n config: default\n split: test\n revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c\n metrics:\n - type: accuracy\n value: 68.8394\n - type: ap\n value: 13.454399712443099\n - type: f1\n value: 53.04963076364322\n - task:\n type: Classification\n dataset:\n name: MTEB TweetSentimentExtractionClassification\n type: mteb/tweet_sentiment_extraction\n config: default\n split: test\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\n metrics:\n - type: accuracy\n value: 60.546123372948514\n - type: f1\n 
value: 60.86952793277713\n - task:\n type: Clustering\n dataset:\n name: MTEB TwentyNewsgroupsClustering\n type: mteb/twentynewsgroups-clustering\n config: default\n split: test\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\n metrics:\n - type: v_measure\n value: 49.10042955060234\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterSemEval2015\n type: mteb/twittersemeval2015-pairclassification\n config: default\n split: test\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\n metrics:\n - type: cos_sim_accuracy\n value: 85.03308100375514\n - type: cos_sim_ap\n value: 71.08284605869684\n - type: cos_sim_f1\n value: 65.42539436255494\n - type: cos_sim_precision\n value: 64.14807302231237\n - type: cos_sim_recall\n value: 66.75461741424802\n - type: dot_accuracy\n value: 84.68736961316088\n - type: dot_ap\n value: 69.20524036530992\n - type: dot_f1\n value: 63.54893953365829\n - type: dot_precision\n value: 63.45698500394633\n - type: dot_recall\n value: 63.641160949868066\n - type: euclidean_accuracy\n value: 85.07480479227513\n - type: euclidean_ap\n value: 71.14592761009864\n - type: euclidean_f1\n value: 65.43814432989691\n - type: euclidean_precision\n value: 63.95465994962216\n - type: euclidean_recall\n value: 66.99208443271768\n - type: manhattan_accuracy\n value: 85.06288370984085\n - type: manhattan_ap\n value: 71.07289742593868\n - type: manhattan_f1\n value: 65.37585421412301\n - type: manhattan_precision\n value: 62.816147859922175\n - type: manhattan_recall\n value: 68.15303430079156\n - type: max_accuracy\n value: 85.07480479227513\n - type: max_ap\n value: 71.14592761009864\n - type: max_f1\n value: 65.43814432989691\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterURLCorpus\n type: mteb/twitterurlcorpus-pairclassification\n config: default\n split: test\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\n metrics:\n - type: cos_sim_accuracy\n value: 87.79058485659952\n - type: cos_sim_ap\n value: 
83.7183187008759\n - type: cos_sim_f1\n value: 75.86921142180798\n - type: cos_sim_precision\n value: 73.00683371298405\n - type: cos_sim_recall\n value: 78.96519864490298\n - type: dot_accuracy\n value: 87.0085768618776\n - type: dot_ap\n value: 81.87467488474279\n - type: dot_f1\n value: 74.04188363990559\n - type: dot_precision\n value: 72.10507114191901\n - type: dot_recall\n value: 76.08561749307053\n - type: euclidean_accuracy\n value: 87.8332751193387\n - type: euclidean_ap\n value: 83.83585648120315\n - type: euclidean_f1\n value: 76.02582177042369\n - type: euclidean_precision\n value: 73.36388371759989\n - type: euclidean_recall\n value: 78.88820449645827\n - type: manhattan_accuracy\n value: 87.87208444910156\n - type: manhattan_ap\n value: 83.8101950642973\n - type: manhattan_f1\n value: 75.90454195535027\n - type: manhattan_precision\n value: 72.44419564761039\n - type: manhattan_recall\n value: 79.71204188481676\n - type: max_accuracy\n value: 87.87208444910156\n - type: max_ap\n value: 83.83585648120315\n - type: max_f1\n value: 76.02582177042369\n---\n\n\n

FlagEmbedding

\n\n\n

\n

\n Model List | \n Usage |\n Evaluation |\n Train |\n Contact |\n License \n

\n

\n\nFor more details, please refer to our GitHub: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding).\n\n[English](README.md) | [中文](https://github.com/FlagOpen/FlagEmbedding/blob/master/README_zh.md)\n\nFlagEmbedding can map any text to a low-dimensional dense vector which can be used for tasks like retrieval, classification, clustering, or semantic search.\nIt can also be used in vector databases for LLMs.\n\n************* 🌟**Updates**🌟 *************\n- 08/09/2023: BGE Models are integrated into **Langchain**, you can use it like [**this**](#using-langchain); the C-MTEB **leaderboard** is [available](https://huggingface.co/spaces/mteb/leaderboard). \n- 08/05/2023: Release base-scale and small-scale models, **best performance among the models of the same size 🤗**\n- 08/02/2023: Release `bge-large-*`(short for BAAI General Embedding) Models, **rank 1st on MTEB and C-MTEB benchmark!** \n- 08/01/2023: We release the [Chinese Massive Text Embedding Benchmark](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB) (**C-MTEB**), consisting of 31 test datasets. 
\n\n\n## Model List\n\n`bge` is short for `BAAI general embedding`.\n\n| Model | Language | Description | query instruction for retrieval\\* |\n|:-------------------------------|:--------:| :--------:| :--------:|\n| [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en) | English | rank **1st** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` |\n| [BAAI/bge-base-en](https://huggingface.co/BAAI/bge-base-en) | English | rank **2nd** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` |\n| [BAAI/bge-small-en](https://huggingface.co/BAAI/bge-small-en) | English | a small-scale model but with competitive performance | `Represent this sentence for searching relevant passages: ` |\n| [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | Chinese | rank **1st** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | `为这个句子生成表示以用于检索相关文章:` |\n| [BAAI/bge-large-zh-noinstruct](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | Chinese | This model is trained without instruction, and rank **2nd** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | |\n| [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | Chinese | a base-scale model but has similar ability with `bge-large-zh` | `为这个句子生成表示以用于检索相关文章:` |\n| [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | Chinese | a small-scale model but with competitive performance | `为这个句子生成表示以用于检索相关文章:` |\n\n\\*: If you need to search the **long** relevant passages to a **short** query (s2p retrieval task), you need to add the instruction to the query; in other cases, no instruction is needed, just use the original query directly. 
In all cases, **no instruction** need to be added to passages.\n\n## Usage \n\nHere are some examples to use `bge` models with \n[FlagEmbedding](#using-flagembedding), [Sentence-Transformers](#using-sentence-transformers), [Langchain](#using-langchain), or [Huggingface Transformers](#using-huggingface-transformers).\n\n#### Using FlagEmbedding\n```\npip install -U FlagEmbedding\n```\nIf it doesn't work for you, you can see [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md) for more methods to install FlagEmbedding.\n\n```python\nfrom FlagEmbedding import FlagModel\nsentences = [\"样例数据-1\", \"样例数据-2\"]\nmodel = FlagModel('BAAI/bge-large-zh', query_instruction_for_retrieval=\"为这个句子生成表示以用于检索相关文章:\")\nembeddings_1 = model.encode(sentences)\nembeddings_2 = model.encode(sentences)\nsimilarity = embeddings_1 @ embeddings_2.T\nprint(similarity)\n\n# for s2p(short query to long passage) retrieval task, please use encode_queries() which will automatically add the instruction to each query\n# corpus in retrieval task can still use encode() or encode_corpus(), since they don't need instruction\nqueries = ['query_1', 'query_2']\npassages = [\"样例文档-1\", \"样例文档-2\"]\nq_embeddings = model.encode_queries(queries)\np_embeddings = model.encode(passages)\nscores = q_embeddings @ p_embeddings.T\n```\nThe value of argument `query_instruction_for_retrieval` see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list). 
\n\nFlagModel will use all available GPUs when encoding, please set `os.environ[\"CUDA_VISIBLE_DEVICES\"]` to choose GPU.\n\n\n#### Using Sentence-Transformers\n\nUsing this model also is easy when you have [sentence-transformers](https://www.SBERT.net) installed:\n\n```\npip install -U sentence-transformers\n```\n```python\nfrom sentence_transformers import SentenceTransformer\nsentences = [\"样例数据-1\", \"样例数据-2\"]\nmodel = SentenceTransformer('BAAI/bge-large-zh')\nembeddings_1 = model.encode(sentences, normalize_embeddings=True)\nembeddings_2 = model.encode(sentences, normalize_embeddings=True)\nsimilarity = embeddings_1 @ embeddings_2.T\nprint(similarity)\n```\nFor s2p(short query to long passage) retrieval task, \neach short query should start with an instruction (instructions see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list)). \nBut the instruction is not needed for passages.\n```python\nfrom sentence_transformers import SentenceTransformer\nqueries = ['query_1', 'query_2']\npassages = [\"样例文档-1\", \"样例文档-2\"]\ninstruction = \"为这个句子生成表示以用于检索相关文章:\"\n\nmodel = SentenceTransformer('BAAI/bge-large-zh')\nq_embeddings = model.encode([instruction+q for q in queries], normalize_embeddings=True)\np_embeddings = model.encode(passages, normalize_embeddings=True)\nscores = q_embeddings @ p_embeddings.T\n```\n\n#### Using Langchain \n\nYou can use `bge` in langchain like this:\n```python\nfrom langchain.embeddings import HuggingFaceBgeEmbeddings\nmodel_name = \"BAAI/bge-small-en\"\nmodel_kwargs = {'device': 'cuda'}\nencode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity\nmodel_norm = HuggingFaceBgeEmbeddings(\n model_name=model_name,\n model_kwargs=model_kwargs,\n encode_kwargs=encode_kwargs\n)\n```\n\n\n#### Using HuggingFace Transformers\n\nWith transformers package, you can use the model like this: First, you pass your input through the transformer model, then you select the last hidden state of first 
token (i.e., [CLS]) as the sentence embedding.\n\n```python\nfrom transformers import AutoTokenizer, AutoModel\nimport torch\n# Sentences we want sentence embeddings for\nsentences = [\"样例数据-1\", \"样例数据-2\"]\n\n# Load model from HuggingFace Hub\ntokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh')\nmodel = AutoModel.from_pretrained('BAAI/bge-large-zh')\n\n# Tokenize sentences\nencoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt')\n# for s2p(short query to long passage) retrieval task, add an instruction to query (not add instruction for passages)\n# encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt')\n\n# Compute token embeddings\nwith torch.no_grad():\n model_output = model(**encoded_input)\n # Perform pooling. In this case, cls pooling.\n sentence_embeddings = model_output[0][:, 0]\n# normalize embeddings\nsentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1)\nprint(\"Sentence embeddings:\", sentence_embeddings)\n```\n\n\n## Evaluation \n`baai-general-embedding` models achieve **state-of-the-art performance on both MTEB and C-MTEB leaderboard!**\nMore details and evaluation tools see our [scripts](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md). 
\n\n- **MTEB**: \n\n| Model Name | Dimension | Sequence Length | Average (56) | Retrieval (15) |Clustering (11) | Pair Classification (3) | Reranking (4) | STS (10) | Summarization (1) | Classification (12) |\n|:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\n| [**bge-large-en**](https://huggingface.co/BAAI/bge-large-en) | 1024 | 512 | **63.98** | **53.9** | **46.98** | 85.8 | **59.48** | 81.56 | 32.06 | **76.21** | \n| [**bge-base-en**](https://huggingface.co/BAAI/bge-base-en) | 768 | 512 | 63.36 | 53.0 | 46.32 | 85.86 | 58.7 | 81.84 | 29.27 | 75.27 | \n| [gte-large](https://huggingface.co/thenlper/gte-large) | 1024 | 512 | 63.13 | 52.22 | 46.84 | 85.00 | 59.13 | 83.35 | 31.66 | 73.33 |\n| [gte-base](https://huggingface.co/thenlper/gte-base) \t| 768 | 512 | 62.39 | 51.14 | 46.2 | 84.57 | 58.61 | 82.3 | 31.17 | 73.01 |\n| [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1024| 512 | 62.25 | 50.56 | 44.49 | 86.03 | 56.61 | 82.05 | 30.19 | 75.24 |\n| [**bge-small-en**](https://huggingface.co/BAAI/bge-small-en) | 384 | 512 | 62.11 | 51.82 | 44.31 | 83.78 | 57.97 | 80.72 | 30.53 | 74.37 | \n| [instructor-xl](https://huggingface.co/hkunlp/instructor-xl) | 768 | 512 | 61.79 | 49.26 | 44.74 | 86.62 | 57.29 | 83.06 | 32.32 | 61.79 |\n| [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 768 | 512 | 61.5 | 50.29 | 43.80 | 85.73 | 55.91 | 81.05 | 30.28 | 73.84 |\n| [gte-small](https://huggingface.co/thenlper/gte-small) | 384 | 512 | 61.36 | 49.46 | 44.89 | 83.54 | 57.7 | 82.07 | 30.42 | 72.31 |\n| [text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | 1536 | 8192 | 60.99 | 49.25 | 45.9 | 84.89 | 56.32 | 80.97 | 30.8 | 70.93 |\n| [e5-small-v2](https://huggingface.co/intfloat/e5-base-v2) | 384 | 512 | 59.93 | 49.04 | 39.92 | 84.67 | 54.32 | 80.39 | 31.16 | 72.94 |\n| [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 768 | 512 | 59.51 | 42.24 | 43.72 | 85.06 | 56.42 | 82.63 | 30.08 | 
73.42 |\n| [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) \t| 768 | 514 \t| 57.78 | 43.81 | 43.69 | 83.04 | 59.36 | 80.28 | 27.49 | 65.07 |\n| [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) \t| 4096 | 2048 | 57.59 | 48.22 | 38.93 | 81.9 | 55.65 | 77.74 | 33.6 | 66.19 |\n| [all-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2) \t| 384 | 512 \t| 56.53 | 42.69 | 41.81 | 82.41 | 58.44 | 79.8 | 27.9 | 63.21 |\n| [all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) \t| 384 | 512 \t| 56.26 | 41.95 | 42.35 | 82.37 | 58.04 | 78.9 | 30.81 | 63.05 |\n| [contriever-base-msmarco](https://huggingface.co/nthakur/contriever-base-msmarco) \t| 768 | 512 \t| 56.00 | 41.88 | 41.1 \t| 82.54 | 53.14 | 76.51 | 30.36 | 66.68 |\n| [sentence-t5-base](https://huggingface.co/sentence-transformers/sentence-t5-base) \t| 768 | 512 \t| 55.27 | 33.63 | 40.21 | 85.18 | 53.09 | 81.14 | 31.39 | 69.81 |\n\n\n\n- **C-MTEB**: \nWe create a benchmark C-MTEB for chinese text embedding which consists of 31 datasets from 6 tasks. 
\nPlease refer to [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md) for a detailed introduction.\n \n| Model | Embedding dimension | Avg | Retrieval | STS | PairClassification | Classification | Reranking | Clustering |\n|:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|\n| [**bge-large-zh**](https://huggingface.co/BAAI/bge-large-zh) | 1024 | **64.20** | **71.53** | **53.23** | **78.94** | 72.26 | **65.11** | 48.39 | \n| [**bge-large-zh-noinstruct**](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | 1024 | 63.53 | 70.55 | 50.98 | 76.77 | **72.49** | 64.91 | **50.01** | \n| [**BAAI/bge-base-zh**](https://huggingface.co/BAAI/bge-base-zh) | 768 | 62.96 | 69.53 | 52.05 | 77.5 | 70.98 | 64.91 | 47.63 | \n| [**BAAI/bge-small-zh**](https://huggingface.co/BAAI/bge-small-zh) | 512 | 58.27 | 63.07 | 46.87 | 70.35 | 67.78 | 61.48 | 45.09 | \n| [m3e-base](https://huggingface.co/moka-ai/m3e-base) | 768 | 57.10 |56.91 | 48.15 | 63.99 | 70.28 | 59.34 | 47.68 | \n| [m3e-large](https://huggingface.co/moka-ai/m3e-large) | 1024 | 57.05 |54.75 | 48.64 | 64.3 | 71.22 | 59.66 | 48.88 | \n| [text-embedding-ada-002(OpenAI)](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings) | 1536 | 53.02 | 52.0 | 40.61 | 69.56 | 67.38 | 54.28 | 45.68 | \n| [luotuo](https://huggingface.co/silk-road/luotuo-bert-medium) | 1024 | 49.37 | 44.4 | 39.41 | 66.62 | 65.29 | 49.25 | 44.39 | \n| [text2vec](https://huggingface.co/shibing624/text2vec-base-chinese) | 768 | 47.63 | 38.79 | 41.71 | 67.41 | 65.18 | 49.45 | 37.66 | \n| [text2vec-large](https://huggingface.co/GanymedeNil/text2vec-large-chinese) | 1024 | 47.36 | 41.94 | 41.98 | 70.86 | 63.42 | 49.16 | 30.02 | \n\n\n\n## Train\nThis section will introduce the way we used to train the general embedding. 
\nThe training scripts are in [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md), \nand we provide some examples to do [pre-train](https://github.com/FlagOpen/FlagEmbedding/blob/master/examples/pretrain/README.md) and [fine-tune](https://github.com/FlagOpen/FlagEmbedding/blob/master/examples/finetune/README.md).\n\n\n**1. RetroMAE Pre-train** \nWe pre-train the model following the method [retromae](https://github.com/staoxiao/RetroMAE), \nwhich shows promising improvement in retrieval task ([paper](https://aclanthology.org/2022.emnlp-main.35.pdf)). \nThe pre-training was conducted on 24 A100(40G) GPUs with a batch size of 720. \nIn retromae, the mask ratio of encoder and decoder are 0.3, 0.5 respectively.\nWe used the AdamW optimizer and the learning rate is 2e-5.\n\n**Pre-training data**:\n- English: \n - [Pile](https://pile.eleuther.ai/)\n - [wikipedia](https://huggingface.co/datasets/wikipedia)\n - [msmarco](https://huggingface.co/datasets/Tevatron/msmarco-passage-corpus)\n- Chinese: \n - [wudao](https://github.com/BAAI-WuDao/Data)\n\n\n**2. Finetune** \nWe fine-tune the model using a contrastive objective. \nThe format of input data is a triple`(query, positive, negative)`. \nBesides the negative in the triple, we also adopt in-batch negatives strategy. \nWe employ the cross-device negatives sharing method to share negatives among different GPUs, \nwhich can dramatically **increase the number of negatives**.\n\nWe trained our model on 48 A100(40G) GPUs with a large batch size of 32,768 (so there are **65,535** negatives for each query in a batch). \nWe used the AdamW optimizer and the learning rate is 1e-5.\nThe temperature for contrastive loss is 0.01.\n\nBesides, we add instruction to the query for s2p(short query to long passage) retrieval task in the training (add nothing to passages). 
\nFor English, the instruction is `Represent this sentence for searching relevant passages: `;\nFor Chinese, the instruction is `为这个句子生成表示以用于检索相关文章:`.\nIn the evaluation, the instruction should be added for queries in retrieval task, not be added for other tasks.\nNoted that the instruction is not needed for passages.\n\nThe finetune script is accessible in this repository: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md). \nYou can easily finetune your model with it.\n\n**Training data**:\n\n- For English, we collect 230M text pairs from [wikipedia](https://huggingface.co/datasets/wikipedia), [cc-net](https://github.com/facebookresearch/cc_net), and so on.\n\n- For chinese, we collect 120M text pairs from [wudao](https://github.com/BAAI-WuDao/Data), [simclue](https://github.com/CLUEbenchmark/SimCLUE) and so on.\n\n**The data collection is to be released in the future.**\n\nWe will continually update the embedding models and training codes, \nhoping to promote the development of the embedding model community.\n\n\n\n## License\nFlagEmbedding is licensed under [MIT License](https://github.com/FlagOpen/FlagEmbedding/blob/master/LICENSE). 
The released models can be used for commercial purposes free of charge."},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n \"SCIFACT\"\n]"}}},{"rowIdx":2458,"cells":{"id":{"kind":"string","value":"mhenrichsen/context-aware-splitter-7b"},"author":{"kind":"string","value":"mhenrichsen"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","llama","text-generation","da","dataset:mhenrichsen/context-aware-splits","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"llama\",\n \"text-generation\",\n \"da\",\n \"dataset:mhenrichsen/context-aware-splits\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-09-18T19:17:46Z","string":"2023-09-18T19:17:46Z"},"last_modified":{"kind":"string","value":"2023-09-19T10:58:01+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":9,"string":"9"},"README":{"kind":"string","value":"---\ndatasets:\n- mhenrichsen/context-aware-splits\nlanguage:\n- da\nlicense: apache-2.0\n---\n# Context Aware Splitter\n1b model available [here](https://huggingface.co/mhenrichsen/context-aware-splitter-1b).\n\nCAS is a text splitter for Retrieval Augmented Generation.\nIt's trained on 12.3k danish texts with a token count of 13.4m.\n\n## What does it do?\nCAS takes a text (str), reads and understands the contexts and then provides the best splits based on a defined word count.\n\nIt returns a dict with the keys:\n- splits: list[str]\n- topic: str\n\n## Code example\n```python\nfrom transformers import AutoTokenizer, TextStreamer, AutoModelForCausalLM\n\nmodel = AutoModelForCausalLM.from_pretrained(\"mhenrichsen/context-aware-splitter-7b\")\ntokenizer = 
AutoTokenizer.from_pretrained(\"mhenrichsen/context-aware-splitter-7b\")\nstreamer = TextStreamer(tokenizer, skip_special_tokens=True)\n\nWORD_SPLIT_COUNT = 50\n\nprompt_template = \"\"\"### Instruction:\nDin opgave er at segmentere en given tekst i separate dele, så hver del giver mening og kan læses uafhængigt af de andre. Hvis det giver mening, må der kan være et overlap mellem delene. Hver del skal ideelt indeholde {word_count} ord.\n\n### Input:\n{text}\n\n### Response:\n\"\"\"\n\nartikel = \"\"\"Kina er stærkt utilfreds med, at Tysklands udenrigsminister, Annalena Baerbock, har omtalt den kinesiske præsident Xi Jinping som en diktator.\n\n- Bemærkningerne fra Tyskland er ekstremt absurde, krænker Kinas politiske værdighed alvorligt og er en åben politisk provokation, udtalte talsperson fra det kinesiske udenrigsministerium Mao Ning i går ifølge CNN.\n\nBemærkningen fra udenrigsminister Annalena Baerbock faldt i et interview om krigen i Ukraine med Fox News i sidste uge.\n\n- Hvis Putin skulle vinde denne krig, hvilket signal ville det så sende til andre diktatorer i verden, som Xi, som den kinesiske præsident?, sagde hun.\n\nTysklands ambassadør i Kina, Patricia Flor, har som konsekvens af udtalelsen været til en kammeratlig samtale, oplyser det tyske udenrigsministerium til CNN.\"\"\"\n\ntokens = tokenizer(\n prompt_template.format(text=artikel, word_count=WORD_SPLIT_COUNT), \n return_tensors='pt'\n)['input_ids']\n\n# Generate output\ngeneration_output = model.generate(\n tokens,\n streamer=streamer,\n max_length = 8194,\n eos_token_id = 29913\n)\n```\n\nExample:\n```\n### Instruction:\nDin opgave er at segmentere en given tekst i separate dele, så hver del giver mening og kan læses uafhængigt af de andre. Hvis det giver mening, må der kan være et overlap mellem delene. 
Hver del skal ideelt indeholde 50 ord.\n\n### Input:\nMunkebjerg er et overvejende middelklassekvarter beliggende i det centrale Odense Munkebjerg grænser op til Hunderup i vest, hvor det afgrænses af Hjallesevej, og byens centrum i nord. Kvarteret har status som et familievenligt boligkvarter med både lejligheder (i området omkring H.C Andersensgade) og parcelhuse som på og omkring Munkebjergvej og Munkebjergskolen. Socialdemokratiet står traditionelt set stærkt i området, som det også ses på resultaterne af stemmer afgivet ved valgstedet Munkebjergskolen fra folketingsvalget i 2011, hvor partiet fik 24,8% af stemmerne. Dog vinder partiet Venstre samt Det Radikale Venstre også bred opbakning i kvarteret med henholdsvis 20,7 og 12,6% af stemmerne ligeledes fra valget i 2011. De fleste af kvarterets børn går på den lokale Munkebjergskolen, mens enkelte går på Odense Friskole og/eller Giersings Realskole. Munkebjergkvarteret er desuden hjemsted for fodboldklubben OKS. Munkebjergkvarteret kaldes i dagligtale for \"Munken\".\n\n### Response:\n```\nThis returns the following dictionary:\n```\n{'splits': ['Munkebjerg er et overvejende middelklassekvarter beliggende i det centrale Odense. Munkebjerg grænser op til Hunderup i vest, hvor det afgrænses af Hjallesevej, og byens centrum i nord. Kvarteret har status som et familievenligt boligkvarter med både lejligheder (i området omkring H.C Andersensgade) og parcelhuse som på og omkring Munkebjergvej og Munkebjergskolen.', 'Socialdemokratiet står traditionelt set stærkt i området, som det også ses på resultaterne af stemmer afgivet ved valgstedet Munkebjergskolen fra folketingsvalget i 2011, hvor partiet fik 24,8% af stemmerne. Dog vinder partiet Venstre samt Det Radikale Venstre også bred opbakning i kvarteret med henholdsvis 20,7 og 12,6% af stemmerne ligeledes fra valget i 2011.', \"De fleste af kvarterets børn går på den lokale Munkebjergskolen, mens enkelte går på Odense Friskole og/eller Giersings Realskole. 
Munkebjergkvarteret er desuden hjemsted for fodboldklubben OKS. Munkebjergkvarteret kaldes i dagligtale for 'Munken'.\"], 'topic': 'Beskrivelse af Munkebjergkvarteret i Odense.'}\n```\n\n## Prompt format\nThe model follows alpaca format.\n```\n### Instruction:\nDin opgave er at segmentere en given tekst i separate dele, så hver del giver mening og kan læses uafhængigt af de andre. Hvis det giver mening, må der kan være et overlap mellem delene. Hver del skal ideelt indeholde {WORD_COUNT} ord.\n\n### Input:\n{TEXT}\n\n### Response:\n```"},"matched_bigbio_names":{"kind":"list like","value":["CAS"],"string":"[\n \"CAS\"\n]"}}},{"rowIdx":2459,"cells":{"id":{"kind":"string","value":"medspaner/xlm-roberta-large-spanish-trials-cases-neg-spec"},"author":{"kind":"string","value":"medspaner"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","xlm-roberta","token-classification","generated_from_trainer","license:cc-by-nc-4.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"xlm-roberta\",\n \"token-classification\",\n \"generated_from_trainer\",\n \"license:cc-by-nc-4.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-10-02T16:36:42Z","string":"2023-10-02T16:36:42Z"},"last_modified":{"kind":"string","value":"2024-10-01T06:32:31+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: cc-by-nc-4.0\nmetrics:\n- precision\n- recall\n- f1\n- accuracy\ntags:\n- generated_from_trainer\nwidget:\n- text: Pacientes sanos, sin ninguna enfermedad, que no tomen tengan ningún tratamiento\nmodel-index:\n- name: xlm-roberta-large-spanish-trials-cases-neg-spec\n results: []\n---\n\n\n\n# xlm-roberta-large-spanish-trials-cases-neg-spec\n\nThis named entity recognition model 
detects negation and speculation entities, and negated and speculated concepts:\n- Neg_cue: negation cue (e.g. *no*, *sin*)\n- Negated: negated entity or event (e.g. *sin **dolor***)\n- Spec_cue: speculation cue (e.g. *posiblemente*)\n- Speculated: speculated entity or event (e.g. *posiblemente **sobreviva***)\n\nThe model achieves the following results on the test set (results are averaged over 5 evaluation rounds):\n- Precision: 0.871 (±0.006)\n- Recall: 0.874 (±0.009)\n- F1: 0.873 (±0.004)\n- Accuracy: 0.984 (±0.001)\n\n\n## Model description\n\nThis model adapts the pre-trained model [xlm-roberta-large-spanish-clinical](https://huggingface.co/llange/xlm-roberta-large-spanish-clinical), presented in [Lange et al. (2022)](https://academic.oup.com/bioinformatics/article/38/12/3267/6575884). \nIt is fine-tuned to conduct medical named entity recognition on texts about in Spanish. \nThe model is fine-tuned on the [CT-EBM-ES corpus (Campillos-Llanos et al. 2021)](https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-021-01395-z) and 100 clinical cases with Creative Commons License.\n\nIf you use this model, please, cite as follows:\n\n```\n@article{campillosetal2024,\n        title = {{Hybrid tool for semantic annotation and concept extraction of medical texts in Spanish}},\n        author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\\'o}n, Adri{\\'a}n},\n        journal = {BMC Bioinformatics},\n year={2024},\n publisher={BioMed Central}\n}\n```\n\n## Intended uses & limitations\n\n**Disclosure**: *This model is under development and needs to be improved. 
It should not be used for medical decision making without human assistance and supervision*\n\nThis model is intended for a generalist purpose, and may have bias and/or any other undesirable distortions.\n\nThird parties who deploy or provide systems and/or services using any of these models (or using systems based on these models) should note that it is their responsibility to mitigate the risks arising from their use. Third parties, in any event, need to comply with applicable regulations, including regulations concerning the use of artificial intelligence.\n\nThe owner or creator of the models will in no event be liable for any results arising from the use made by third parties of these models.\n\n**Descargo de responsabilidad**: *Esta herramienta se encuentra en desarrollo y no debe ser empleada para la toma de decisiones médicas*\n\nLa finalidad de este modelo es generalista, y se advierte que puede tener sesgos y/u otro tipo de distorsiones indeseables.\n\nTerceras partes que desplieguen o proporcionen sistemas y/o servicios usando alguno de estos modelos (o utilizando sistemas basados en estos modelos) han tener presente que es su responsabilidad abordar y minimizar los riesgos derivados de su uso. Las terceras partes, en cualquier circunstancia, deben cumplir con la normativa aplicable, incluyendo la normativa que concierne al uso de la inteligencia artificial.\n\nEl propietario o creador de los modelos de ningún modo será responsable de los resultados derivados del uso que las terceras partes hagan de estos modelos.\n\n\n## Training and evaluation data\n\nThe model is fine-tuned on the [NUBEs corpus (Lima et al. 
2020)](https://aclanthology.org/2020.lrec-1.708/), 100 clinical cases with Creative Commons licence and the [Clinical Trials for Evidence-Based-Medicine in Spanish (CT-EBM-SP) corpus](http://www.lllf.uam.es/ESP/nlpdata/wp2/).\nThe CT-EBM-SP corpus is a collection of 1200 texts about clinical trials studies and clinical trials announcements:\n- 500 abstracts from journals published under a Creative Commons license, e.g. available in PubMed or the Scientific Electronic Library Online (SciELO)\n- 700 clinical trials announcements published in the European Clinical Trials Register and Repositorio Español de Estudios Clínicos\n\nIf you use the CT-EBM-ES resource, please, cite as follows:\n\n```\n@article{campillosetal-midm2021,\n        title = {A clinical trials corpus annotated with UMLS© entities to enhance the access to Evidence-Based Medicine},\n        author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\\'o}n, Adri{\\'a}n and Moreno-Sandoval, Antonio},\n        journal = {BMC Medical Informatics and Decision Making},\n        volume={21},\n number={1},\n pages={1--19},\n year={2021},\n publisher={BioMed Central}\n}\n```\n\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: we used different seeds for 5 evaluation rounds, and uploaded the model with the best results\n- optimizer: Adam \n- num_epochs: average 19.6 epochs (±7.09); trained with early stopping if no improvement after 5 epochs (early stopping patience: 5)\n\n\n### Training results (test set; average and standard deviation of 5 rounds with different seeds)\n\n| Precision | Recall | F1 | Accuracy |\n|:--------------:|:--------------:|:--------------:|:--------------:|\n| 0.871 (±0.006) | 0.874 (±0.009) | 0.873 (±0.004) | 0.984 (±0.001) |\n\n\n### Framework versions\n\n- Transformers 4.17.0\n- Pytorch 1.10.2+cu113\n- Datasets 1.18.4\n- 
Tokenizers 0.11.6\n"},"matched_bigbio_names":{"kind":"list like","value":["CT-EBM-SP","SCIELO"],"string":"[\n \"CT-EBM-SP\",\n \"SCIELO\"\n]"}}},{"rowIdx":2460,"cells":{"id":{"kind":"string","value":"medspaner/roberta-es-clinical-trials-cases-temporal-ner"},"author":{"kind":"string","value":"medspaner"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","roberta","token-classification","generated_from_trainer","license:cc-by-nc-4.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"roberta\",\n \"token-classification\",\n \"generated_from_trainer\",\n \"license:cc-by-nc-4.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-10-03T08:37:59Z","string":"2023-10-03T08:37:59Z"},"last_modified":{"kind":"string","value":"2024-10-01T06:25:39+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: cc-by-nc-4.0\nmetrics:\n- precision\n- recall\n- f1\n- accuracy\ntags:\n- generated_from_trainer\nwidget:\n- text: Edad ≥ 18 años (en todos los centros), o edad ≥12 y <18 años con peso igual\n o superior a 40kg\n- text: Estudio realizado en un hospital desde julio de 2010 hasta diciembre de 2011\n (18 meses)\n- text: Pacientes que hayan recibido bifosfonatos diarios, semanales o mensuales durante\n al menos 3 años.\n- text: 50 g (40 g la noche anterior y 10 g por la mañana) de L-glutamina\nmodel-index:\n- name: roberta-es-clinical-trials-cases-temporal-ner\n results: []\n---\n\n\n\n# roberta-es-clinical-trials-temporal-ner\n\nThis named entity recognition model detects temporal expressions (TIMEX) according to the [TimeML scheme](https://en.wikipedia.org/wiki/ISO-TimeML) ([Pustejovsky et al. 
2005](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.85.5610&rep=rep1&type=pdf)), in addition to Age entities:\n- Age: e.g. *18 años*\n- Date: e.g. *2022*, *26 de noviembre*\n- Duration: e.g. *3 horas*\n- Frequency: e.g. *semanal*\n- Time: e.g. *noche*\n\nThe model achieves the following results on the test set (when trained with the training and development set; results are averaged over 5 evaluation rounds):\n- Precision: 0.898 (±0.008)\n- Recall: 0.899 (±0.006)\n- F1: 0.899 (±0.003)\n- Accuracy: 0.996 (±0.001)\n\n## Model description\n\nThis model adapts the pre-trained model [bsc-bio-ehr-es](https://huggingface.co/PlanTL-GOB-ES/bsc-bio-ehr-es), presented in [Pio Carriño et al. (2022)](https://aclanthology.org/2022.bionlp-1.19/). \nIt is fine-tuned to conduct temporal named entity recognition on Spanish texts about clinical trials and clinical cases. \nThe model is fine-tuned on the [CT-EBM-ES corpus (Campillos-Llanos et al. 2021)](https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-021-01395-z) and 100 clinical cases with Creative Commons license.\n\nIf you use this model, please, cite as follows:\n\n```\n@article{campillosetal2024,\n        title = {{Hybrid tool for semantic annotation and concept extraction of medical texts in Spanish}},\n        author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\\'o}n, Adri{\\'a}n},\n        journal = {BMC Bioinformatics},\n year={2024},\n publisher={BioMed Central}\n}\n```\n\n## Intended uses & limitations\n\n**Disclosure**: *This model is under development and needs to be improved. 
It should not be used for medical decision making without human assistance and supervision*\n\nThis model is intended for a generalist purpose, and may have bias and/or any other undesirable distortions.\n\nThird parties who deploy or provide systems and/or services using any of these models (or using systems based on these models) should note that it is their responsibility to mitigate the risks arising from their use. Third parties, in any event, need to comply with applicable regulations, including regulations concerning the use of artificial intelligence.\n\nThe owner or creator of the models will in no event be liable for any results arising from the use made by third parties of these models.\n\n**Descargo de responsabilidad**: *Esta herramienta se encuentra en desarrollo y no debe ser empleada para la toma de decisiones médicas*\n\nLa finalidad de este modelo es generalista, y se advierte que puede tener sesgos y/u otro tipo de distorsiones indeseables.\n\nTerceras partes que desplieguen o proporcionen sistemas y/o servicios usando alguno de estos modelos (o utilizando sistemas basados en estos modelos) han tener presente que es su responsabilidad abordar y minimizar los riesgos derivados de su uso. Las terceras partes, en cualquier circunstancia, deben cumplir con la normativa aplicable, incluyendo la normativa que concierne al uso de la inteligencia artificial.\n\nEl propietario o creador de los modelos de ningún modo será responsable de los resultados derivados del uso que las terceras partes hagan de estos modelos.\n\n\n## Training and evaluation data\n\nTo fine-tune the model we used the [Clinical Trials for Evidence-Based-Medicine in Spanish (CT-EBM-SP) corpus](http://www.lllf.uam.es/ESP/nlpdata/wp2/) and 100 clinical cases with Creative Commons license.\nThe CT-EBM-SP corpus is a collection of 1200 texts about clinical trials studies and clinical trials announcements:\n- 500 abstracts from journals published under a Creative Commons license, e.g. 
available in PubMed or the Scientific Electronic Library Online (SciELO)\n- 700 clinical trials announcements published in the European Clinical Trials Register and Repositorio Español de Estudios Clínicos\n\nIf you use the CT-EBM-ES resource, please, cite as follows:\n\n```\n@article{campillosetal-midm2021,\n        title = {A clinical trials corpus annotated with UMLS© entities to enhance the access to Evidence-Based Medicine},\n        author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\\'o}n, Adri{\\'a}n and Moreno-Sandoval, Antonio},\n        journal = {BMC Medical Informatics and Decision Making},\n        volume={21},\n number={1},\n pages={1--19},\n year={2021},\n publisher={BioMed Central}\n}\n```\n\n\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: we used different seeds for 5 evaluation rounds, and uploaded the model with the best results\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: average of 16.2 epochs (±1.76)\n\n\n### Training results (test set; average and standard deviation of 5 rounds with different seeds)\n\n| Precision | Recall | F1 | Accuracy |\n|:--------------:|:--------------:|:--------------:|:--------------:|\n| 0.898 (±0.008) | 0.899 (±0.006) | 0.899 (±0.003) | 0.996 (±0.001) |\n\n\n**Results per class (test set; average and standard deviation of 5 rounds with different seeds)** \n\n| Class | Precision | Recall | F1 | Support |\n|:---------:|:--------------:|:--------------:|:--------------:|:---------:|\n| Age | 0.924 (±0.013) | 0.946 (±0.009) | 0.934 (±0.006) | 372 |\n| Date | 0.924 (±0.021) | 0.898 (±0.021) | 0.910 (±0.004) | 412 |\n| Duration | 0.907 (±0.012) | 0.887 (±0.011) | 0.897 (±0.007) | 629 |\n| Frequency | 0.858 (±0.053) | 0.890 (±0.017) | 0.873 (±0.029) | 73 |\n| Time | 0.730\t(±0.034) | 
0.825 (±0.029) | 0.774 (±0.012) | 113 |\n\n\n\n### Framework versions\n\n- Transformers 4.17.0\n- Pytorch 1.10.2+cu113\n- Datasets 1.18.4\n- Tokenizers 0.11.6\n"},"matched_bigbio_names":{"kind":"list like","value":["CT-EBM-SP","SCIELO"],"string":"[\n \"CT-EBM-SP\",\n \"SCIELO\"\n]"}}},{"rowIdx":2461,"cells":{"id":{"kind":"string","value":"usvsnsp/pythia-70m-ppo"},"author":{"kind":"string","value":"usvsnsp"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","gpt_neox","text-generation","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"gpt_neox\",\n \"text-generation\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-10-04T13:53:13Z","string":"2023-10-04T13:53:13Z"},"last_modified":{"kind":"string","value":"2023-10-04T15:45:21+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\nWandb Run: https://wandb.ai/eleutherai/pythia-rlhf/runs/gy2g8jj1\n\nModel Evals:\n| Tasks |Version|Filter| Metric |Value | |Stderr|\n|--------------|-------|------|----------|-----:|---|-----:|\n|arc_challenge |Yaml |none |acc |0.2253|± |0.0122|\n| | |none |acc_norm |0.2278|± |0.0123|\n|arc_easy |Yaml |none |acc |0.2551|± |0.0089|\n| | |none |acc_norm |0.2567|± |0.0090|\n|lambada_openai|Yaml |none |perplexity| NaN|± | NaN|\n| | |none |acc |0.0016|± |0.0005|\n|logiqa |Yaml |none |acc |0.2028|± |0.0158|\n| | |none |acc_norm |0.2028|± |0.0158|\n|piqa |Yaml |none |acc |0.4946|± |0.0117|\n| | |none |acc_norm |0.4924|± |0.0117|\n|sciq |Yaml |none |acc |0.0140|± |0.0037|\n| | |none |acc_norm |0.0140|± |0.0037|\n|winogrande |Yaml |none |acc |0.5036|± |0.0141|\n|wsc |Yaml |none |acc |0.6346|± 
|0.0474|"},"matched_bigbio_names":{"kind":"list like","value":["SCIQ"],"string":"[\n \"SCIQ\"\n]"}}},{"rowIdx":2462,"cells":{"id":{"kind":"string","value":"lomahony/pythia-1b-helpful-sft"},"author":{"kind":"string","value":"lomahony"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","safetensors","gpt_neox","text-generation","causal-lm","pythia","en","dataset:Anthropic/hh-rlhf","arxiv:2101.00027","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"safetensors\",\n \"gpt_neox\",\n \"text-generation\",\n \"causal-lm\",\n \"pythia\",\n \"en\",\n \"dataset:Anthropic/hh-rlhf\",\n \"arxiv:2101.00027\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-11-08T15:49:11Z","string":"2023-11-08T15:49:11Z"},"last_modified":{"kind":"string","value":"2024-11-26T02:08:22+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- Anthropic/hh-rlhf\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- pytorch\n- causal-lm\n- pythia\n---\n\n[Pythia-1b](https://huggingface.co/EleutherAI/pythia-1b) supervised finetuned using TRLx library with the helpful subset of [Anthropic-hh-rlhf dataset](https://huggingface.co/datasets/Anthropic/hh-rlhf) for 1 epoch. \n\nCheckpoints are also uploaded. \n\nFully reproducible finetuning code is available on [GitHub](https://github.com/lauraaisling/trlx-pythia/tree/main)\n\n[wandb log](https://wandb.ai/lauraomahony999/sft-pythia/runs/azscanfe)\n\nSee [Pythia-1b](https://huggingface.co/EleutherAI/pythia-1b) for model details [(paper)](https://arxiv.org/abs/2101.00027). 
\n\nSee further details of these models in the paper [Attributing Mode Collapse in the Fine-Tuning of Large Language Models](https://openreview.net/pdf?id=3pDMYjpOxk).\n\nYou can cite these models if they are helpful as follows: \n\n
\n@inproceedings{o2024attributing,\n  title={Attributing Mode Collapse in the Fine-Tuning of Large Language Models},\n  author={O’Mahony, Laura and Grinsztajn, Leo and Schoelkopf, Hailey and Biderman, Stella},\n  booktitle={ICLR 2024, Mathematical and Empirical Understanding of Foundation Models (ME-FoMo) workshop},\n  year={2024}\n}\n
\n\nhf (pretrained=lomahony/pythia-1b-helpful-sft), gen_kwargs: (None), limit: None, num_fewshot: 0, batch_size: 16\n| Tasks |Version|Filter|n-shot| Metric | Value | |Stderr|\n|--------------|------:|------|-----:|---------------|------:|---|------|\n|arc_challenge | 1|none | 0|acc | 0.2543|± |0.0127|\n| | |none | 0|acc_norm | 0.2739|± |0.0130|\n|arc_easy | 1|none | 0|acc | 0.5724|± |0.0102|\n| | |none | 0|acc_norm | 0.4941|± |0.0103|\n|boolq | 2|none | 0|acc | 0.6199|± |0.0085|\n|hellaswag | 1|none | 0|acc | 0.3819|± |0.0048|\n| | |none | 0|acc_norm | 0.4736|± |0.0050|\n|lambada_openai| 1|none | 0|perplexity | 7.1374|± |0.2014|\n| | |none | 0|acc | 0.5626|± |0.0069|\n|openbookqa | 1|none | 0|acc | 0.2040|± |0.0180|\n| | |none | 0|acc_norm | 0.3140|± |0.0208|\n|piqa | 1|none | 0|acc | 0.7138|± |0.0105|\n| | |none | 0|acc_norm | 0.6997|± |0.0107|\n|sciq | 1|none | 0|acc | 0.8400|± |0.0116|\n| | |none | 0|acc_norm | 0.7620|± |0.0135|\n|wikitext | 2|none | 0|word_perplexity|16.9719|± |N/A |\n| | |none | 0|byte_perplexity| 1.6981|± |N/A |\n| | |none | 0|bits_per_byte | 0.7639|± |N/A |\n|winogrande | 1|none | 0|acc | 0.5343|± |0.0140|\n\nhf (pretrained=lomahony/pythia-1b-helpful-sft), gen_kwargs: (None), limit: None, num_fewshot: 5, batch_size: 16\n| Tasks |Version|Filter|n-shot| Metric | Value | |Stderr|\n|--------------|------:|------|-----:|---------------|------:|---|------|\n|arc_challenge | 1|none | 5|acc | 0.2628|± |0.0129|\n| | |none | 5|acc_norm | 0.2918|± |0.0133|\n|arc_easy | 1|none | 5|acc | 0.6040|± |0.0100|\n| | |none | 5|acc_norm | 0.5816|± |0.0101|\n|boolq | 2|none | 5|acc | 0.5963|± |0.0086|\n|hellaswag | 1|none | 5|acc | 0.3780|± |0.0048|\n| | |none | 5|acc_norm | 0.4719|± |0.0050|\n|lambada_openai| 1|none | 5|perplexity |10.2584|± |0.2936|\n| | |none | 5|acc | 0.4832|± |0.0070|\n|openbookqa | 1|none | 5|acc | 0.1980|± |0.0178|\n| | |none | 5|acc_norm | 0.3220|± |0.0209|\n|piqa | 1|none | 5|acc | 0.7057|± |0.0106|\n| | |none | 5|acc_norm | 0.7095|± 
|0.0106|\n|sciq | 1|none | 5|acc | 0.8980|± |0.0096|\n| | |none | 5|acc_norm | 0.9000|± |0.0095|\n|wikitext | 2|none | 5|word_perplexity|16.9719|± |N/A |\n| | |none | 5|byte_perplexity| 1.6981|± |N/A |\n| | |none | 5|bits_per_byte | 0.7639|± |N/A |\n|winogrande | 1|none | 5|acc | 0.5446|± |0.0140|\n"},"matched_bigbio_names":{"kind":"list like","value":["SCIQ"],"string":"[\n \"SCIQ\"\n]"}}},{"rowIdx":2463,"cells":{"id":{"kind":"string","value":"RalFinger/origami-style-sdxl-lora"},"author":{"kind":"string","value":"RalFinger"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","stable-diffusion","lora","template:sd-lora","paper","art","style","folded","origami","handcraft","paperart","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:other","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion\",\n \"lora\",\n \"template:sd-lora\",\n \"paper\",\n \"art\",\n \"style\",\n \"folded\",\n \"origami\",\n \"handcraft\",\n \"paperart\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:other\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-11-22T13:31:11Z","string":"2023-11-22T13:31:11Z"},"last_modified":{"kind":"string","value":"2023-11-22T13:31:13+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlicense: other\nlicense_name: bespoke-lora-trained-license\nlicense_link: https://multimodal.art/civitai-licenses?allowNoCredit=True&allowCommercialUse=Sell&allowDerivatives=True&allowDifferentLicense=True\ntags:\n- text-to-image\n- stable-diffusion\n- lora\n- diffusers\n- template:sd-lora\n- paper\n- art\n- style\n- folded\n- origami\n- 
handcraft\n- paperart\ninstance_prompt: ral-orgmi\nwidget:\n- text: 'ral-orgmi, a origami paper samurai standing in front of a mountain '\n output:\n url: 3774222.jpeg\n- text: 'ral-orgmi, a origami paper bird is sitting on a tree branch '\n output:\n url: 3774206.jpeg\n- text: 'ral-orgmi, a origami paper policeman in a uniform is standing in the middle\n of a busy street '\n output:\n url: 3774211.jpeg\n- text: 'ral-orgmi, origami paper sculpture of a ape in a forest '\n output:\n url: 3774207.jpeg\n- text: 'ral-orgmi, a origami paper car on a city street '\n output:\n url: 3774212.jpeg\n- text: 'ral-orgmi, a bear made out of origami paper in a forest '\n output:\n url: 3774205.jpeg\n- text: 'ral-orgmi, a origami paper man in a hat and coat, in front of a flower field\n and a village '\n output:\n url: 3774221.jpeg\n- text: 'ral-orgmi, a origami paper fish that is floating in the water '\n output:\n url: 3774219.jpeg\n- text: 'ral-orgmi, a dog made out of origami paper sitting in a garden '\n output:\n url: 3774215.jpeg\n- text: 'ral-orgmi, a clown made out of origami paper in a circus '\n output:\n url: 3774213.jpeg\n---\n\n# Origami Style [SDXL LoRA] \n\n\n\n\n\n([CivitAI](https://civitai.com/models/206575))\n\n## Model description\n\n

SDXL:
Trigger word: ral-orgmi

☕ Buy me a coffee: https://ko-fi.com/ralfingerai

\n\n## Trigger words\nYou should use `ral-orgmi` to trigger the image generation.\n \n\n## Download model\n\nWeights for this model are available in Safetensors format.\n\n[Download](/RalFinger/origami-style-sdxl-lora/tree/main) them in the Files & versions tab.\n\n## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)\n\n```py\nfrom diffusers import AutoPipelineForText2Image\nimport torch\n\npipeline = AutoPipelineForText2Image.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', torch_dtype=torch.float16).to('cuda')\npipeline.load_lora_weights('RalFinger/origami-style-sdxl-lora', weight_name='ral-orgmi-sdxl.safetensors')\nimage = pipeline('ral-orgmi, a clown made out of origami paper in a circus ').images[0]\n```\n\nFor more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)\n\n"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2464,"cells":{"id":{"kind":"string","value":"ntc-ai/SDXL-LoRA-slider.dreamscape"},"author":{"kind":"string","value":"ntc-ai"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","stable-diffusion-xl","lora","template:sd-lora","template:sdxl-lora","sdxl-sliders","ntcai.xyz-sliders","concept","en","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:mit","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion-xl\",\n \"lora\",\n \"template:sd-lora\",\n \"template:sdxl-lora\",\n \"sdxl-sliders\",\n \"ntcai.xyz-sliders\",\n \"concept\",\n \"en\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:mit\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-10T05:09:44Z","string":"2023-12-10T05:09:44Z"},"last_modified":{"kind":"string","value":"2024-02-06T00:27:24+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlicense: mit\ntags:\n- text-to-image\n- stable-diffusion-xl\n- lora\n- template:sd-lora\n- template:sdxl-lora\n- sdxl-sliders\n- ntcai.xyz-sliders\n- concept\n- diffusers\nthumbnail: images/dreamscape_17_3.0.png\nwidget:\n- text: dreamscape\n output:\n url: images/dreamscape_17_3.0.png\n- text: dreamscape\n output:\n url: images/dreamscape_19_3.0.png\n- text: dreamscape\n output:\n url: images/dreamscape_20_3.0.png\n- text: dreamscape\n output:\n url: images/dreamscape_21_3.0.png\n- text: dreamscape\n output:\n url: images/dreamscape_22_3.0.png\ninference: false\ninstance_prompt: dreamscape\n---\n# ntcai.xyz slider - dreamscape (SDXL LoRA)\n\n| Strength: -3 | Strength: 0 | Strength: 3 |\n| --- | --- | --- |\n| | | |\n| | | |\n| | | |\n\nSee more at [https://sliders.ntcai.xyz/sliders/app/loras/ed83e177-aabb-4fba-bfa1-023a5beccbed](https://sliders.ntcai.xyz/sliders/app/loras/ed83e177-aabb-4fba-bfa1-023a5beccbed)\n\n## Download\n\nWeights for this model are available in Safetensors format.\n\n## Trigger words\n\nYou can apply this LoRA with trigger words for additional effect:\n\n```\ndreamscape\n```\n\n## Use in diffusers\n\n```python\nfrom diffusers import StableDiffusionXLPipeline\nfrom diffusers import EulerAncestralDiscreteScheduler\nimport torch\n\npipe = StableDiffusionXLPipeline.from_single_file(\"https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors\")\npipe.to(\"cuda\")\npipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\n# Load the 
LoRA\npipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.dreamscape', weight_name='dreamscape.safetensors', adapter_name=\"dreamscape\")\n\n# Activate the LoRA\npipe.set_adapters([\"dreamscape\"], adapter_weights=[2.0])\n\nprompt = \"medieval rich kingpin sitting in a tavern, dreamscape\"\nnegative_prompt = \"nsfw\"\nwidth = 512\nheight = 512\nnum_inference_steps = 10\nguidance_scale = 2\nimage = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]\nimage.save('result.png')\n```\n\n## Support the Patreon\n\nIf you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI).\n\nBy joining our Patreon, you'll gain access to an ever-growing library of over 1496+ unique and diverse LoRAs along with 14600+ slider merges, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful NTC Slider Factory LoRA creator, allowing you to craft your own custom LoRAs and merges opening up endless possibilities.\n\nYour support on Patreon will allow us to continue developing new models and tools.\n\n## Other resources\n\n- [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs\n- [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2465,"cells":{"id":{"kind":"string","value":"ntc-ai/SDXL-LoRA-slider.asleep"},"author":{"kind":"string","value":"ntc-ai"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list 
like","value":["diffusers","text-to-image","stable-diffusion-xl","lora","template:sd-lora","template:sdxl-lora","sdxl-sliders","ntcai.xyz-sliders","concept","en","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:mit","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion-xl\",\n \"lora\",\n \"template:sd-lora\",\n \"template:sdxl-lora\",\n \"sdxl-sliders\",\n \"ntcai.xyz-sliders\",\n \"concept\",\n \"en\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:mit\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-16T16:31:21Z","string":"2023-12-16T16:31:21Z"},"last_modified":{"kind":"string","value":"2024-02-06T00:33:57+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlicense: mit\ntags:\n- text-to-image\n- stable-diffusion-xl\n- lora\n- template:sd-lora\n- template:sdxl-lora\n- sdxl-sliders\n- ntcai.xyz-sliders\n- concept\n- diffusers\nthumbnail: images/asleep_17_3.0.png\nwidget:\n- text: asleep\n output:\n url: images/asleep_17_3.0.png\n- text: asleep\n output:\n url: images/asleep_19_3.0.png\n- text: asleep\n output:\n url: images/asleep_20_3.0.png\n- text: asleep\n output:\n url: images/asleep_21_3.0.png\n- text: asleep\n output:\n url: images/asleep_22_3.0.png\ninference: false\ninstance_prompt: asleep\n---\n# ntcai.xyz slider - asleep (SDXL LoRA)\n\n| Strength: -3 | Strength: 0 | Strength: 3 |\n| --- | --- | --- |\n| | | |\n| | | |\n| | | |\n\nSee more at [https://sliders.ntcai.xyz/sliders/app/loras/f0b67511-4a49-4ed0-81d7-3e1de12b0d16](https://sliders.ntcai.xyz/sliders/app/loras/f0b67511-4a49-4ed0-81d7-3e1de12b0d16)\n\n## Download\n\nWeights for this model are available in 
Safetensors format.\n\n## Trigger words\n\nYou can apply this LoRA with trigger words for additional effect:\n\n```\nasleep\n```\n\n## Use in diffusers\n\n```python\nfrom diffusers import StableDiffusionXLPipeline\nfrom diffusers import EulerAncestralDiscreteScheduler\nimport torch\n\npipe = StableDiffusionXLPipeline.from_single_file(\"https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors\")\npipe.to(\"cuda\")\npipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\n# Load the LoRA\npipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.asleep', weight_name='asleep.safetensors', adapter_name=\"asleep\")\n\n# Activate the LoRA\npipe.set_adapters([\"asleep\"], adapter_weights=[2.0])\n\nprompt = \"medieval rich kingpin sitting in a tavern, asleep\"\nnegative_prompt = \"nsfw\"\nwidth = 512\nheight = 512\nnum_inference_steps = 10\nguidance_scale = 2\nimage = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]\nimage.save('result.png')\n```\n\n## Support the Patreon\n\nIf you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI).\n\nBy joining our Patreon, you'll gain access to an ever-growing library of over 1496+ unique and diverse LoRAs along with 14602+ slider merges, covering a wide range of styles and genres. 
You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful NTC Slider Factory LoRA creator, allowing you to craft your own custom LoRAs and merges opening up endless possibilities.\n\nYour support on Patreon will allow us to continue developing new models and tools.\n\n## Other resources\n\n- [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs\n- [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2466,"cells":{"id":{"kind":"string","value":"ntc-ai/SDXL-LoRA-slider.maniacal-laughter"},"author":{"kind":"string","value":"ntc-ai"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","stable-diffusion-xl","lora","template:sd-lora","template:sdxl-lora","sdxl-sliders","ntcai.xyz-sliders","concept","en","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:mit","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion-xl\",\n \"lora\",\n \"template:sd-lora\",\n \"template:sdxl-lora\",\n \"sdxl-sliders\",\n \"ntcai.xyz-sliders\",\n \"concept\",\n \"en\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:mit\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-19T13:36:02Z","string":"2023-12-19T13:36:02Z"},"last_modified":{"kind":"string","value":"2023-12-19T13:36:05+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":2,"string":"2"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlicense: mit\ntags:\n- text-to-image\n- stable-diffusion-xl\n- lora\n- template:sd-lora\n- 
template:sdxl-lora\n- sdxl-sliders\n- ntcai.xyz-sliders\n- concept\n- diffusers\nthumbnail: images/evaluate/maniacal laughter.../maniacal laughter_17_3.0.png\nwidget:\n- text: maniacal laughter\n output:\n url: images/maniacal laughter_17_3.0.png\n- text: maniacal laughter\n output:\n url: images/maniacal laughter_19_3.0.png\n- text: maniacal laughter\n output:\n url: images/maniacal laughter_20_3.0.png\n- text: maniacal laughter\n output:\n url: images/maniacal laughter_21_3.0.png\n- text: maniacal laughter\n output:\n url: images/maniacal laughter_22_3.0.png\ninference: false\ninstance_prompt: maniacal laughter\n---\n# ntcai.xyz slider - maniacal laughter (SDXL LoRA)\n\n| Strength: -3 | Strength: 0 | Strength: 3 |\n| --- | --- | --- |\n| | | |\n| | | |\n| | | |\n\n\n## Download\n\nWeights for this model are available in Safetensors format.\n\n## Trigger words\n\nYou can apply this LoRA with trigger words for additional effect:\n\n```\nmaniacal laughter\n```\n\n## Use in diffusers\n\n```python\nfrom diffusers import StableDiffusionXLPipeline\nfrom diffusers import EulerAncestralDiscreteScheduler\nimport torch\n\npipe = StableDiffusionXLPipeline.from_single_file(\"https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors\")\npipe.to(\"cuda\")\npipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\n# Load the LoRA\npipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.maniacal-laughter', weight_name='maniacal laughter.safetensors', adapter_name=\"maniacal laughter\")\n\n# Activate the LoRA\npipe.set_adapters([\"maniacal laughter\"], adapter_weights=[2.0])\n\nprompt = \"medieval rich kingpin sitting in a tavern, maniacal laughter\"\nnegative_prompt = \"nsfw\"\nwidth = 512\nheight = 512\nnum_inference_steps = 10\nguidance_scale = 2\nimage = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, 
num_inference_steps=num_inference_steps).images[0]\nimage.save('result.png')\n```\n\n## Support the Patreon\n\nIf you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI).\n\nBy joining our Patreon, you'll gain access to an ever-growing library of over 480+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities.\n\nYour support on Patreon will allow us to continue developing and refining new models.\n\n## Other resources\n\n- [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs\n- [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2467,"cells":{"id":{"kind":"string","value":"ntc-ai/SDXL-LoRA-slider.snes-screenshot"},"author":{"kind":"string","value":"ntc-ai"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","stable-diffusion-xl","lora","template:sd-lora","template:sdxl-lora","sdxl-sliders","ntcai.xyz-sliders","concept","en","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:mit","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion-xl\",\n \"lora\",\n \"template:sd-lora\",\n \"template:sdxl-lora\",\n \"sdxl-sliders\",\n \"ntcai.xyz-sliders\",\n \"concept\",\n \"en\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:mit\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-24T07:44:47Z","string":"2023-12-24T07:44:47Z"},"last_modified":{"kind":"string","value":"2023-12-24T07:44:50+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlicense: mit\ntags:\n- text-to-image\n- stable-diffusion-xl\n- lora\n- template:sd-lora\n- template:sdxl-lora\n- sdxl-sliders\n- ntcai.xyz-sliders\n- concept\n- diffusers\nthumbnail: images/evaluate/snes screenshot...realistic/snes screenshot_17_3.0.png\nwidget:\n- text: snes screenshot\n output:\n url: images/snes screenshot_17_3.0.png\n- text: snes screenshot\n output:\n url: images/snes screenshot_19_3.0.png\n- text: snes screenshot\n output:\n url: images/snes screenshot_20_3.0.png\n- text: snes screenshot\n output:\n url: images/snes screenshot_21_3.0.png\n- text: snes screenshot\n output:\n url: images/snes screenshot_22_3.0.png\ninference: false\ninstance_prompt: snes screenshot\n---\n# ntcai.xyz slider - snes screenshot (SDXL LoRA)\n\n| Strength: -3 | Strength: 0 | Strength: 3 |\n| --- | --- | --- |\n| | | |\n| | | |\n| | | |\n\n\n## Download\n\nWeights for this model are available in Safetensors format.\n\n## Trigger words\n\nYou can apply this LoRA with trigger words for additional effect:\n\n```\nsnes screenshot\n```\n\n## Use in diffusers\n\n```python\nfrom diffusers import StableDiffusionXLPipeline\nfrom diffusers import EulerAncestralDiscreteScheduler\nimport torch\n\npipe = StableDiffusionXLPipeline.from_single_file(\"https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors\")\npipe.to(\"cuda\")\npipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\n# Load the LoRA\npipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.snes-screenshot', weight_name='snes screenshot.safetensors', 
adapter_name=\"snes screenshot\")\n\n# Activate the LoRA\npipe.set_adapters([\"snes screenshot\"], adapter_weights=[2.0])\n\nprompt = \"medieval rich kingpin sitting in a tavern, snes screenshot\"\nnegative_prompt = \"nsfw\"\nwidth = 512\nheight = 512\nnum_inference_steps = 10\nguidance_scale = 2\nimage = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]\nimage.save('result.png')\n```\n\n## Support the Patreon\n\nIf you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI).\n\nBy joining our Patreon, you'll gain access to an ever-growing library of over 590+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities.\n\nYour support on Patreon will allow us to continue developing and refining new models.\n\n## Other resources\n\n- [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs\n- [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2468,"cells":{"id":{"kind":"string","value":"retrainai/instructor-xl"},"author":{"kind":"string","value":"retrainai"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","pytorch","t5","text-embedding","embeddings","information-retrieval","beir","text-classification","language-model","text-clustering","text-semantic-similarity","text-evaluation","prompt-retrieval","text-reranking","feature-extraction","sentence-similarity","transformers","English","Sentence 
Similarity","natural_questions","ms_marco","fever","hotpot_qa","mteb","en","arxiv:2212.09741","license:apache-2.0","model-index","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"pytorch\",\n \"t5\",\n \"text-embedding\",\n \"embeddings\",\n \"information-retrieval\",\n \"beir\",\n \"text-classification\",\n \"language-model\",\n \"text-clustering\",\n \"text-semantic-similarity\",\n \"text-evaluation\",\n \"prompt-retrieval\",\n \"text-reranking\",\n \"feature-extraction\",\n \"sentence-similarity\",\n \"transformers\",\n \"English\",\n \"Sentence Similarity\",\n \"natural_questions\",\n \"ms_marco\",\n \"fever\",\n \"hotpot_qa\",\n \"mteb\",\n \"en\",\n \"arxiv:2212.09741\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-28T09:33:09Z","string":"2023-12-28T09:33:09Z"},"last_modified":{"kind":"string","value":"2023-12-28T14:53:21+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlanguage: en\nlicense: apache-2.0\npipeline_tag: sentence-similarity\ntags:\n- text-embedding\n- embeddings\n- information-retrieval\n- beir\n- text-classification\n- language-model\n- text-clustering\n- text-semantic-similarity\n- text-evaluation\n- prompt-retrieval\n- text-reranking\n- sentence-transformers\n- feature-extraction\n- sentence-similarity\n- transformers\n- t5\n- English\n- Sentence Similarity\n- natural_questions\n- ms_marco\n- fever\n- hotpot_qa\n- mteb\ninference: false\nmodel-index:\n- name: final_xl_results\n results:\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (en)\n type: mteb/amazon_counterfactual\n config: en\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n 
metrics:\n - type: accuracy\n value: 85.08955223880596\n - type: ap\n value: 52.66066378722476\n - type: f1\n value: 79.63340218960269\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonPolarityClassification\n type: mteb/amazon_polarity\n config: default\n split: test\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\n metrics:\n - type: accuracy\n value: 86.542\n - type: ap\n value: 81.92695193008987\n - type: f1\n value: 86.51466132573681\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (en)\n type: mteb/amazon_reviews_multi\n config: en\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 42.964\n - type: f1\n value: 41.43146249774862\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna\n type: arguana\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 29.872\n - type: map_at_10\n value: 46.342\n - type: map_at_100\n value: 47.152\n - type: map_at_1000\n value: 47.154\n - type: map_at_3\n value: 41.216\n - type: map_at_5\n value: 44.035999999999994\n - type: mrr_at_1\n value: 30.939\n - type: mrr_at_10\n value: 46.756\n - type: mrr_at_100\n value: 47.573\n - type: mrr_at_1000\n value: 47.575\n - type: mrr_at_3\n value: 41.548\n - type: mrr_at_5\n value: 44.425\n - type: ndcg_at_1\n value: 29.872\n - type: ndcg_at_10\n value: 55.65\n - type: ndcg_at_100\n value: 58.88099999999999\n - type: ndcg_at_1000\n value: 58.951\n - type: ndcg_at_3\n value: 45.0\n - type: ndcg_at_5\n value: 50.09\n - type: precision_at_1\n value: 29.872\n - type: precision_at_10\n value: 8.549\n - type: precision_at_100\n value: 0.991\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_3\n value: 18.658\n - type: precision_at_5\n value: 13.669999999999998\n - type: recall_at_1\n value: 29.872\n - type: recall_at_10\n value: 85.491\n - type: recall_at_100\n value: 99.075\n - type: recall_at_1000\n value: 99.644\n - type: 
recall_at_3\n value: 55.974000000000004\n - type: recall_at_5\n value: 68.35\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: v_measure\n value: 42.452729850641276\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: v_measure\n value: 32.21141846480423\n - task:\n type: Reranking\n dataset:\n name: MTEB AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: map\n value: 65.34710928952622\n - type: mrr\n value: 77.61124301983028\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cos_sim_spearman\n value: 84.15312230525639\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 82.66233766233766\n - type: f1\n value: 82.04175284777669\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: v_measure\n value: 37.36697339826455\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: v_measure\n value: 30.551241447593092\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackAndroidRetrieval\n type: 
BeIR/cqadupstack\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 36.797000000000004\n - type: map_at_10\n value: 48.46\n - type: map_at_100\n value: 49.968\n - type: map_at_1000\n value: 50.080000000000005\n - type: map_at_3\n value: 44.71\n - type: map_at_5\n value: 46.592\n - type: mrr_at_1\n value: 45.494\n - type: mrr_at_10\n value: 54.747\n - type: mrr_at_100\n value: 55.43599999999999\n - type: mrr_at_1000\n value: 55.464999999999996\n - type: mrr_at_3\n value: 52.361000000000004\n - type: mrr_at_5\n value: 53.727000000000004\n - type: ndcg_at_1\n value: 45.494\n - type: ndcg_at_10\n value: 54.989\n - type: ndcg_at_100\n value: 60.096000000000004\n - type: ndcg_at_1000\n value: 61.58\n - type: ndcg_at_3\n value: 49.977\n - type: ndcg_at_5\n value: 51.964999999999996\n - type: precision_at_1\n value: 45.494\n - type: precision_at_10\n value: 10.558\n - type: precision_at_100\n value: 1.6049999999999998\n - type: precision_at_1000\n value: 0.203\n - type: precision_at_3\n value: 23.796\n - type: precision_at_5\n value: 16.881\n - type: recall_at_1\n value: 36.797000000000004\n - type: recall_at_10\n value: 66.83\n - type: recall_at_100\n value: 88.34100000000001\n - type: recall_at_1000\n value: 97.202\n - type: recall_at_3\n value: 51.961999999999996\n - type: recall_at_5\n value: 57.940000000000005\n - type: map_at_1\n value: 32.597\n - type: map_at_10\n value: 43.424\n - type: map_at_100\n value: 44.78\n - type: map_at_1000\n value: 44.913\n - type: map_at_3\n value: 40.315\n - type: map_at_5\n value: 41.987\n - type: mrr_at_1\n value: 40.382\n - type: mrr_at_10\n value: 49.219\n - type: mrr_at_100\n value: 49.895\n - type: mrr_at_1000\n value: 49.936\n - type: mrr_at_3\n value: 46.996\n - type: mrr_at_5\n value: 48.231\n - type: ndcg_at_1\n value: 40.382\n - type: ndcg_at_10\n value: 49.318\n - type: ndcg_at_100\n value: 53.839999999999996\n - type: ndcg_at_1000\n value: 55.82899999999999\n - type: ndcg_at_3\n value: 
44.914\n - type: ndcg_at_5\n value: 46.798\n - type: precision_at_1\n value: 40.382\n - type: precision_at_10\n value: 9.274000000000001\n - type: precision_at_100\n value: 1.497\n - type: precision_at_1000\n value: 0.198\n - type: precision_at_3\n value: 21.592\n - type: precision_at_5\n value: 15.159\n - type: recall_at_1\n value: 32.597\n - type: recall_at_10\n value: 59.882000000000005\n - type: recall_at_100\n value: 78.446\n - type: recall_at_1000\n value: 90.88000000000001\n - type: recall_at_3\n value: 46.9\n - type: recall_at_5\n value: 52.222\n - type: map_at_1\n value: 43.8\n - type: map_at_10\n value: 57.293000000000006\n - type: map_at_100\n value: 58.321\n - type: map_at_1000\n value: 58.361\n - type: map_at_3\n value: 53.839999999999996\n - type: map_at_5\n value: 55.838\n - type: mrr_at_1\n value: 49.592000000000006\n - type: mrr_at_10\n value: 60.643\n - type: mrr_at_100\n value: 61.23499999999999\n - type: mrr_at_1000\n value: 61.251999999999995\n - type: mrr_at_3\n value: 58.265\n - type: mrr_at_5\n value: 59.717\n - type: ndcg_at_1\n value: 49.592000000000006\n - type: ndcg_at_10\n value: 63.364\n - type: ndcg_at_100\n value: 67.167\n - type: ndcg_at_1000\n value: 67.867\n - type: ndcg_at_3\n value: 57.912\n - type: ndcg_at_5\n value: 60.697\n - type: precision_at_1\n value: 49.592000000000006\n - type: precision_at_10\n value: 10.088\n - type: precision_at_100\n value: 1.2930000000000001\n - type: precision_at_1000\n value: 0.13899999999999998\n - type: precision_at_3\n value: 25.789\n - type: precision_at_5\n value: 17.541999999999998\n - type: recall_at_1\n value: 43.8\n - type: recall_at_10\n value: 77.635\n - type: recall_at_100\n value: 93.748\n - type: recall_at_1000\n value: 98.468\n - type: recall_at_3\n value: 63.223\n - type: recall_at_5\n value: 70.122\n - type: map_at_1\n value: 27.721\n - type: map_at_10\n value: 35.626999999999995\n - type: map_at_100\n value: 36.719\n - type: map_at_1000\n value: 36.8\n - type: map_at_3\n value: 
32.781\n - type: map_at_5\n value: 34.333999999999996\n - type: mrr_at_1\n value: 29.604999999999997\n - type: mrr_at_10\n value: 37.564\n - type: mrr_at_100\n value: 38.505\n - type: mrr_at_1000\n value: 38.565\n - type: mrr_at_3\n value: 34.727000000000004\n - type: mrr_at_5\n value: 36.207\n - type: ndcg_at_1\n value: 29.604999999999997\n - type: ndcg_at_10\n value: 40.575\n - type: ndcg_at_100\n value: 45.613\n - type: ndcg_at_1000\n value: 47.676\n - type: ndcg_at_3\n value: 34.811\n - type: ndcg_at_5\n value: 37.491\n - type: precision_at_1\n value: 29.604999999999997\n - type: precision_at_10\n value: 6.1690000000000005\n - type: precision_at_100\n value: 0.906\n - type: precision_at_1000\n value: 0.11199999999999999\n - type: precision_at_3\n value: 14.237\n - type: precision_at_5\n value: 10.056\n - type: recall_at_1\n value: 27.721\n - type: recall_at_10\n value: 54.041\n - type: recall_at_100\n value: 76.62299999999999\n - type: recall_at_1000\n value: 92.134\n - type: recall_at_3\n value: 38.582\n - type: recall_at_5\n value: 44.989000000000004\n - type: map_at_1\n value: 16.553\n - type: map_at_10\n value: 25.384\n - type: map_at_100\n value: 26.655\n - type: map_at_1000\n value: 26.778000000000002\n - type: map_at_3\n value: 22.733\n - type: map_at_5\n value: 24.119\n - type: mrr_at_1\n value: 20.149\n - type: mrr_at_10\n value: 29.705\n - type: mrr_at_100\n value: 30.672\n - type: mrr_at_1000\n value: 30.737\n - type: mrr_at_3\n value: 27.032\n - type: mrr_at_5\n value: 28.369\n - type: ndcg_at_1\n value: 20.149\n - type: ndcg_at_10\n value: 30.843999999999998\n - type: ndcg_at_100\n value: 36.716\n - type: ndcg_at_1000\n value: 39.495000000000005\n - type: ndcg_at_3\n value: 25.918999999999997\n - type: ndcg_at_5\n value: 27.992\n - type: precision_at_1\n value: 20.149\n - type: precision_at_10\n value: 5.858\n - type: precision_at_100\n value: 1.009\n - type: precision_at_1000\n value: 0.13799999999999998\n - type: precision_at_3\n value: 
12.645000000000001\n - type: precision_at_5\n value: 9.179\n - type: recall_at_1\n value: 16.553\n - type: recall_at_10\n value: 43.136\n - type: recall_at_100\n value: 68.562\n - type: recall_at_1000\n value: 88.208\n - type: recall_at_3\n value: 29.493000000000002\n - type: recall_at_5\n value: 34.751\n - type: map_at_1\n value: 28.000999999999998\n - type: map_at_10\n value: 39.004\n - type: map_at_100\n value: 40.461999999999996\n - type: map_at_1000\n value: 40.566\n - type: map_at_3\n value: 35.805\n - type: map_at_5\n value: 37.672\n - type: mrr_at_1\n value: 33.782000000000004\n - type: mrr_at_10\n value: 44.702\n - type: mrr_at_100\n value: 45.528\n - type: mrr_at_1000\n value: 45.576\n - type: mrr_at_3\n value: 42.14\n - type: mrr_at_5\n value: 43.651\n - type: ndcg_at_1\n value: 33.782000000000004\n - type: ndcg_at_10\n value: 45.275999999999996\n - type: ndcg_at_100\n value: 50.888\n - type: ndcg_at_1000\n value: 52.879\n - type: ndcg_at_3\n value: 40.191\n - type: ndcg_at_5\n value: 42.731\n - type: precision_at_1\n value: 33.782000000000004\n - type: precision_at_10\n value: 8.200000000000001\n - type: precision_at_100\n value: 1.287\n - type: precision_at_1000\n value: 0.16199999999999998\n - type: precision_at_3\n value: 19.185\n - type: precision_at_5\n value: 13.667000000000002\n - type: recall_at_1\n value: 28.000999999999998\n - type: recall_at_10\n value: 58.131\n - type: recall_at_100\n value: 80.869\n - type: recall_at_1000\n value: 93.931\n - type: recall_at_3\n value: 44.161\n - type: recall_at_5\n value: 50.592000000000006\n - type: map_at_1\n value: 28.047\n - type: map_at_10\n value: 38.596000000000004\n - type: map_at_100\n value: 40.116\n - type: map_at_1000\n value: 40.232\n - type: map_at_3\n value: 35.205\n - type: map_at_5\n value: 37.076\n - type: mrr_at_1\n value: 34.932\n - type: mrr_at_10\n value: 44.496\n - type: mrr_at_100\n value: 45.47\n - type: mrr_at_1000\n value: 45.519999999999996\n - type: mrr_at_3\n value: 41.743\n - 
type: mrr_at_5\n value: 43.352000000000004\n - type: ndcg_at_1\n value: 34.932\n - type: ndcg_at_10\n value: 44.901\n - type: ndcg_at_100\n value: 50.788999999999994\n - type: ndcg_at_1000\n value: 52.867\n - type: ndcg_at_3\n value: 39.449\n - type: ndcg_at_5\n value: 41.929\n - type: precision_at_1\n value: 34.932\n - type: precision_at_10\n value: 8.311\n - type: precision_at_100\n value: 1.3050000000000002\n - type: precision_at_1000\n value: 0.166\n - type: precision_at_3\n value: 18.836\n - type: precision_at_5\n value: 13.447000000000001\n - type: recall_at_1\n value: 28.047\n - type: recall_at_10\n value: 57.717\n - type: recall_at_100\n value: 82.182\n - type: recall_at_1000\n value: 95.82000000000001\n - type: recall_at_3\n value: 42.448\n - type: recall_at_5\n value: 49.071\n - type: map_at_1\n value: 27.861250000000005\n - type: map_at_10\n value: 37.529583333333335\n - type: map_at_100\n value: 38.7915\n - type: map_at_1000\n value: 38.90558333333335\n - type: map_at_3\n value: 34.57333333333333\n - type: map_at_5\n value: 36.187166666666656\n - type: mrr_at_1\n value: 32.88291666666666\n - type: mrr_at_10\n value: 41.79750000000001\n - type: mrr_at_100\n value: 42.63183333333333\n - type: mrr_at_1000\n value: 42.68483333333333\n - type: mrr_at_3\n value: 39.313750000000006\n - type: mrr_at_5\n value: 40.70483333333333\n - type: ndcg_at_1\n value: 32.88291666666666\n - type: ndcg_at_10\n value: 43.09408333333333\n - type: ndcg_at_100\n value: 48.22158333333333\n - type: ndcg_at_1000\n value: 50.358000000000004\n - type: ndcg_at_3\n value: 38.129583333333336\n - type: ndcg_at_5\n value: 40.39266666666666\n - type: precision_at_1\n value: 32.88291666666666\n - type: precision_at_10\n value: 7.5584999999999996\n - type: precision_at_100\n value: 1.1903333333333332\n - type: precision_at_1000\n value: 0.15658333333333332\n - type: precision_at_3\n value: 17.495916666666666\n - type: precision_at_5\n value: 12.373833333333332\n - type: recall_at_1\n value: 
27.861250000000005\n - type: recall_at_10\n value: 55.215916666666665\n - type: recall_at_100\n value: 77.392\n - type: recall_at_1000\n value: 92.04908333333334\n - type: recall_at_3\n value: 41.37475\n - type: recall_at_5\n value: 47.22908333333333\n - type: map_at_1\n value: 25.064999999999998\n - type: map_at_10\n value: 31.635999999999996\n - type: map_at_100\n value: 32.596000000000004\n - type: map_at_1000\n value: 32.695\n - type: map_at_3\n value: 29.612\n - type: map_at_5\n value: 30.768\n - type: mrr_at_1\n value: 28.528\n - type: mrr_at_10\n value: 34.717\n - type: mrr_at_100\n value: 35.558\n - type: mrr_at_1000\n value: 35.626000000000005\n - type: mrr_at_3\n value: 32.745000000000005\n - type: mrr_at_5\n value: 33.819\n - type: ndcg_at_1\n value: 28.528\n - type: ndcg_at_10\n value: 35.647\n - type: ndcg_at_100\n value: 40.207\n - type: ndcg_at_1000\n value: 42.695\n - type: ndcg_at_3\n value: 31.878\n - type: ndcg_at_5\n value: 33.634\n - type: precision_at_1\n value: 28.528\n - type: precision_at_10\n value: 5.46\n - type: precision_at_100\n value: 0.84\n - type: precision_at_1000\n value: 0.11399999999999999\n - type: precision_at_3\n value: 13.547999999999998\n - type: precision_at_5\n value: 9.325\n - type: recall_at_1\n value: 25.064999999999998\n - type: recall_at_10\n value: 45.096000000000004\n - type: recall_at_100\n value: 65.658\n - type: recall_at_1000\n value: 84.128\n - type: recall_at_3\n value: 34.337\n - type: recall_at_5\n value: 38.849000000000004\n - type: map_at_1\n value: 17.276\n - type: map_at_10\n value: 24.535\n - type: map_at_100\n value: 25.655\n - type: map_at_1000\n value: 25.782\n - type: map_at_3\n value: 22.228\n - type: map_at_5\n value: 23.612\n - type: mrr_at_1\n value: 21.266\n - type: mrr_at_10\n value: 28.474\n - type: mrr_at_100\n value: 29.398000000000003\n - type: mrr_at_1000\n value: 29.482000000000003\n - type: mrr_at_3\n value: 26.245\n - type: mrr_at_5\n value: 27.624\n - type: ndcg_at_1\n value: 
21.266\n - type: ndcg_at_10\n value: 29.087000000000003\n - type: ndcg_at_100\n value: 34.374\n - type: ndcg_at_1000\n value: 37.433\n - type: ndcg_at_3\n value: 25.040000000000003\n - type: ndcg_at_5\n value: 27.116\n - type: precision_at_1\n value: 21.266\n - type: precision_at_10\n value: 5.258\n - type: precision_at_100\n value: 0.9299999999999999\n - type: precision_at_1000\n value: 0.13699999999999998\n - type: precision_at_3\n value: 11.849\n - type: precision_at_5\n value: 8.699\n - type: recall_at_1\n value: 17.276\n - type: recall_at_10\n value: 38.928000000000004\n - type: recall_at_100\n value: 62.529\n - type: recall_at_1000\n value: 84.44800000000001\n - type: recall_at_3\n value: 27.554000000000002\n - type: recall_at_5\n value: 32.915\n - type: map_at_1\n value: 27.297\n - type: map_at_10\n value: 36.957\n - type: map_at_100\n value: 38.252\n - type: map_at_1000\n value: 38.356\n - type: map_at_3\n value: 34.121\n - type: map_at_5\n value: 35.782000000000004\n - type: mrr_at_1\n value: 32.275999999999996\n - type: mrr_at_10\n value: 41.198\n - type: mrr_at_100\n value: 42.131\n - type: mrr_at_1000\n value: 42.186\n - type: mrr_at_3\n value: 38.557\n - type: mrr_at_5\n value: 40.12\n - type: ndcg_at_1\n value: 32.275999999999996\n - type: ndcg_at_10\n value: 42.516\n - type: ndcg_at_100\n value: 48.15\n - type: ndcg_at_1000\n value: 50.344\n - type: ndcg_at_3\n value: 37.423\n - type: ndcg_at_5\n value: 39.919\n - type: precision_at_1\n value: 32.275999999999996\n - type: precision_at_10\n value: 7.155\n - type: precision_at_100\n value: 1.123\n - type: precision_at_1000\n value: 0.14200000000000002\n - type: precision_at_3\n value: 17.163999999999998\n - type: precision_at_5\n value: 12.127\n - type: recall_at_1\n value: 27.297\n - type: recall_at_10\n value: 55.238\n - type: recall_at_100\n value: 79.2\n - type: recall_at_1000\n value: 94.258\n - type: recall_at_3\n value: 41.327000000000005\n - type: recall_at_5\n value: 47.588\n - type: 
map_at_1\n value: 29.142000000000003\n - type: map_at_10\n value: 38.769\n - type: map_at_100\n value: 40.292\n - type: map_at_1000\n value: 40.510000000000005\n - type: map_at_3\n value: 35.39\n - type: map_at_5\n value: 37.009\n - type: mrr_at_1\n value: 34.19\n - type: mrr_at_10\n value: 43.418\n - type: mrr_at_100\n value: 44.132\n - type: mrr_at_1000\n value: 44.175\n - type: mrr_at_3\n value: 40.547\n - type: mrr_at_5\n value: 42.088\n - type: ndcg_at_1\n value: 34.19\n - type: ndcg_at_10\n value: 45.14\n - type: ndcg_at_100\n value: 50.364\n - type: ndcg_at_1000\n value: 52.481\n - type: ndcg_at_3\n value: 39.466\n - type: ndcg_at_5\n value: 41.772\n - type: precision_at_1\n value: 34.19\n - type: precision_at_10\n value: 8.715\n - type: precision_at_100\n value: 1.6150000000000002\n - type: precision_at_1000\n value: 0.247\n - type: precision_at_3\n value: 18.248\n - type: precision_at_5\n value: 13.161999999999999\n - type: recall_at_1\n value: 29.142000000000003\n - type: recall_at_10\n value: 57.577999999999996\n - type: recall_at_100\n value: 81.428\n - type: recall_at_1000\n value: 94.017\n - type: recall_at_3\n value: 41.402\n - type: recall_at_5\n value: 47.695\n - type: map_at_1\n value: 22.039\n - type: map_at_10\n value: 30.669999999999998\n - type: map_at_100\n value: 31.682\n - type: map_at_1000\n value: 31.794\n - type: map_at_3\n value: 28.139999999999997\n - type: map_at_5\n value: 29.457\n - type: mrr_at_1\n value: 24.399\n - type: mrr_at_10\n value: 32.687\n - type: mrr_at_100\n value: 33.622\n - type: mrr_at_1000\n value: 33.698\n - type: mrr_at_3\n value: 30.407\n - type: mrr_at_5\n value: 31.552999999999997\n - type: ndcg_at_1\n value: 24.399\n - type: ndcg_at_10\n value: 35.472\n - type: ndcg_at_100\n value: 40.455000000000005\n - type: ndcg_at_1000\n value: 43.15\n - type: ndcg_at_3\n value: 30.575000000000003\n - type: ndcg_at_5\n value: 32.668\n - type: precision_at_1\n value: 24.399\n - type: precision_at_10\n value: 5.656\n - type: 
precision_at_100\n value: 0.874\n - type: precision_at_1000\n value: 0.121\n - type: precision_at_3\n value: 13.062000000000001\n - type: precision_at_5\n value: 9.242\n - type: recall_at_1\n value: 22.039\n - type: recall_at_10\n value: 48.379\n - type: recall_at_100\n value: 71.11800000000001\n - type: recall_at_1000\n value: 91.095\n - type: recall_at_3\n value: 35.108\n - type: recall_at_5\n value: 40.015\n - task:\n type: Retrieval\n dataset:\n name: MTEB ClimateFEVER\n type: climate-fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 10.144\n - type: map_at_10\n value: 18.238\n - type: map_at_100\n value: 20.143\n - type: map_at_1000\n value: 20.346\n - type: map_at_3\n value: 14.809\n - type: map_at_5\n value: 16.567999999999998\n - type: mrr_at_1\n value: 22.671\n - type: mrr_at_10\n value: 34.906\n - type: mrr_at_100\n value: 35.858000000000004\n - type: mrr_at_1000\n value: 35.898\n - type: mrr_at_3\n value: 31.238\n - type: mrr_at_5\n value: 33.342\n - type: ndcg_at_1\n value: 22.671\n - type: ndcg_at_10\n value: 26.540000000000003\n - type: ndcg_at_100\n value: 34.138000000000005\n - type: ndcg_at_1000\n value: 37.72\n - type: ndcg_at_3\n value: 20.766000000000002\n - type: ndcg_at_5\n value: 22.927\n - type: precision_at_1\n value: 22.671\n - type: precision_at_10\n value: 8.619\n - type: precision_at_100\n value: 1.678\n - type: precision_at_1000\n value: 0.23500000000000001\n - type: precision_at_3\n value: 15.592\n - type: precision_at_5\n value: 12.43\n - type: recall_at_1\n value: 10.144\n - type: recall_at_10\n value: 33.46\n - type: recall_at_100\n value: 59.758\n - type: recall_at_1000\n value: 79.704\n - type: recall_at_3\n value: 19.604\n - type: recall_at_5\n value: 25.367\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia\n type: dbpedia-entity\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 8.654\n - type: map_at_10\n value: 18.506\n - type: 
map_at_100\n value: 26.412999999999997\n - type: map_at_1000\n value: 28.13\n - type: map_at_3\n value: 13.379\n - type: map_at_5\n value: 15.529000000000002\n - type: mrr_at_1\n value: 66.0\n - type: mrr_at_10\n value: 74.13\n - type: mrr_at_100\n value: 74.48700000000001\n - type: mrr_at_1000\n value: 74.49799999999999\n - type: mrr_at_3\n value: 72.75\n - type: mrr_at_5\n value: 73.762\n - type: ndcg_at_1\n value: 54.50000000000001\n - type: ndcg_at_10\n value: 40.236\n - type: ndcg_at_100\n value: 44.690999999999995\n - type: ndcg_at_1000\n value: 52.195\n - type: ndcg_at_3\n value: 45.632\n - type: ndcg_at_5\n value: 42.952\n - type: precision_at_1\n value: 66.0\n - type: precision_at_10\n value: 31.724999999999998\n - type: precision_at_100\n value: 10.299999999999999\n - type: precision_at_1000\n value: 2.194\n - type: precision_at_3\n value: 48.75\n - type: precision_at_5\n value: 41.6\n - type: recall_at_1\n value: 8.654\n - type: recall_at_10\n value: 23.74\n - type: recall_at_100\n value: 50.346999999999994\n - type: recall_at_1000\n value: 74.376\n - type: recall_at_3\n value: 14.636\n - type: recall_at_5\n value: 18.009\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: accuracy\n value: 53.245\n - type: f1\n value: 48.74520523753552\n - task:\n type: Retrieval\n dataset:\n name: MTEB FEVER\n type: fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 51.729\n - type: map_at_10\n value: 63.904\n - type: map_at_100\n value: 64.363\n - type: map_at_1000\n value: 64.38199999999999\n - type: map_at_3\n value: 61.393\n - type: map_at_5\n value: 63.02100000000001\n - type: mrr_at_1\n value: 55.686\n - type: mrr_at_10\n value: 67.804\n - type: mrr_at_100\n value: 68.15299999999999\n - type: mrr_at_1000\n value: 68.161\n - type: mrr_at_3\n value: 65.494\n - type: 
mrr_at_5\n value: 67.01599999999999\n - type: ndcg_at_1\n value: 55.686\n - type: ndcg_at_10\n value: 70.025\n - type: ndcg_at_100\n value: 72.011\n - type: ndcg_at_1000\n value: 72.443\n - type: ndcg_at_3\n value: 65.32900000000001\n - type: ndcg_at_5\n value: 68.05600000000001\n - type: precision_at_1\n value: 55.686\n - type: precision_at_10\n value: 9.358\n - type: precision_at_100\n value: 1.05\n - type: precision_at_1000\n value: 0.11\n - type: precision_at_3\n value: 26.318\n - type: precision_at_5\n value: 17.321\n - type: recall_at_1\n value: 51.729\n - type: recall_at_10\n value: 85.04\n - type: recall_at_100\n value: 93.777\n - type: recall_at_1000\n value: 96.824\n - type: recall_at_3\n value: 72.521\n - type: recall_at_5\n value: 79.148\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA2018\n type: fiqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 23.765\n - type: map_at_10\n value: 39.114\n - type: map_at_100\n value: 40.987\n - type: map_at_1000\n value: 41.155\n - type: map_at_3\n value: 34.028000000000006\n - type: map_at_5\n value: 36.925000000000004\n - type: mrr_at_1\n value: 46.451\n - type: mrr_at_10\n value: 54.711\n - type: mrr_at_100\n value: 55.509\n - type: mrr_at_1000\n value: 55.535000000000004\n - type: mrr_at_3\n value: 52.649\n - type: mrr_at_5\n value: 53.729000000000006\n - type: ndcg_at_1\n value: 46.451\n - type: ndcg_at_10\n value: 46.955999999999996\n - type: ndcg_at_100\n value: 53.686\n - type: ndcg_at_1000\n value: 56.230000000000004\n - type: ndcg_at_3\n value: 43.374\n - type: ndcg_at_5\n value: 44.372\n - type: precision_at_1\n value: 46.451\n - type: precision_at_10\n value: 13.256\n - type: precision_at_100\n value: 2.019\n - type: precision_at_1000\n value: 0.247\n - type: precision_at_3\n value: 29.115000000000002\n - type: precision_at_5\n value: 21.389\n - type: recall_at_1\n value: 23.765\n - type: recall_at_10\n value: 53.452999999999996\n - type: recall_at_100\n 
value: 78.828\n - type: recall_at_1000\n value: 93.938\n - type: recall_at_3\n value: 39.023\n - type: recall_at_5\n value: 45.18\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA\n type: hotpotqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 31.918000000000003\n - type: map_at_10\n value: 46.741\n - type: map_at_100\n value: 47.762\n - type: map_at_1000\n value: 47.849000000000004\n - type: map_at_3\n value: 43.578\n - type: map_at_5\n value: 45.395\n - type: mrr_at_1\n value: 63.834999999999994\n - type: mrr_at_10\n value: 71.312\n - type: mrr_at_100\n value: 71.695\n - type: mrr_at_1000\n value: 71.714\n - type: mrr_at_3\n value: 69.82000000000001\n - type: mrr_at_5\n value: 70.726\n - type: ndcg_at_1\n value: 63.834999999999994\n - type: ndcg_at_10\n value: 55.879999999999995\n - type: ndcg_at_100\n value: 59.723000000000006\n - type: ndcg_at_1000\n value: 61.49400000000001\n - type: ndcg_at_3\n value: 50.964\n - type: ndcg_at_5\n value: 53.47\n - type: precision_at_1\n value: 63.834999999999994\n - type: precision_at_10\n value: 11.845\n - type: precision_at_100\n value: 1.4869999999999999\n - type: precision_at_1000\n value: 0.172\n - type: precision_at_3\n value: 32.158\n - type: precision_at_5\n value: 21.278\n - type: recall_at_1\n value: 31.918000000000003\n - type: recall_at_10\n value: 59.223000000000006\n - type: recall_at_100\n value: 74.328\n - type: recall_at_1000\n value: 86.05000000000001\n - type: recall_at_3\n value: 48.238\n - type: recall_at_5\n value: 53.193999999999996\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 79.7896\n - type: ap\n value: 73.65166029460288\n - type: f1\n value: 79.71794693711813\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO\n type: msmarco\n config: default\n split: dev\n revision: 
None\n metrics:\n - type: map_at_1\n value: 22.239\n - type: map_at_10\n value: 34.542\n - type: map_at_100\n value: 35.717999999999996\n - type: map_at_1000\n value: 35.764\n - type: map_at_3\n value: 30.432\n - type: map_at_5\n value: 32.81\n - type: mrr_at_1\n value: 22.908\n - type: mrr_at_10\n value: 35.127\n - type: mrr_at_100\n value: 36.238\n - type: mrr_at_1000\n value: 36.278\n - type: mrr_at_3\n value: 31.076999999999998\n - type: mrr_at_5\n value: 33.419\n - type: ndcg_at_1\n value: 22.908\n - type: ndcg_at_10\n value: 41.607\n - type: ndcg_at_100\n value: 47.28\n - type: ndcg_at_1000\n value: 48.414\n - type: ndcg_at_3\n value: 33.253\n - type: ndcg_at_5\n value: 37.486000000000004\n - type: precision_at_1\n value: 22.908\n - type: precision_at_10\n value: 6.645\n - type: precision_at_100\n value: 0.9490000000000001\n - type: precision_at_1000\n value: 0.105\n - type: precision_at_3\n value: 14.130999999999998\n - type: precision_at_5\n value: 10.616\n - type: recall_at_1\n value: 22.239\n - type: recall_at_10\n value: 63.42\n - type: recall_at_100\n value: 89.696\n - type: recall_at_1000\n value: 98.351\n - type: recall_at_3\n value: 40.77\n - type: recall_at_5\n value: 50.93\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 95.06839945280439\n - type: f1\n value: 94.74276398224072\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 72.25718194254446\n - type: f1\n value: 53.91164489161391\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n 
metrics:\n - type: accuracy\n value: 71.47948890383323\n - type: f1\n value: 69.98520247230257\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n type: mteb/amazon_massive_scenario\n config: en\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 76.46603900470748\n - type: f1\n value: 76.44111526065399\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: v_measure\n value: 33.19106070798198\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: v_measure\n value: 30.78772205248094\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: default\n split: test\n revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69\n metrics:\n - type: map\n value: 31.811231631488507\n - type: mrr\n value: 32.98200485378021\n - task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus\n type: nfcorpus\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 6.9\n - type: map_at_10\n value: 13.703000000000001\n - type: map_at_100\n value: 17.251\n - type: map_at_1000\n value: 18.795\n - type: map_at_3\n value: 10.366999999999999\n - type: map_at_5\n value: 11.675\n - type: mrr_at_1\n value: 47.059\n - type: mrr_at_10\n value: 55.816\n - type: mrr_at_100\n value: 56.434\n - type: mrr_at_1000\n value: 56.467\n - type: mrr_at_3\n value: 53.973000000000006\n - type: mrr_at_5\n value: 55.257999999999996\n - type: ndcg_at_1\n value: 44.737\n - type: ndcg_at_10\n value: 35.997\n - type: ndcg_at_100\n value: 33.487\n - type: ndcg_at_1000\n value: 41.897\n - type: ndcg_at_3\n value: 41.18\n - type: 
ndcg_at_5\n value: 38.721\n - type: precision_at_1\n value: 46.129999999999995\n - type: precision_at_10\n value: 26.533\n - type: precision_at_100\n value: 8.706\n - type: precision_at_1000\n value: 2.16\n - type: precision_at_3\n value: 38.493\n - type: precision_at_5\n value: 33.189\n - type: recall_at_1\n value: 6.9\n - type: recall_at_10\n value: 17.488999999999997\n - type: recall_at_100\n value: 34.583000000000006\n - type: recall_at_1000\n value: 64.942\n - type: recall_at_3\n value: 11.494\n - type: recall_at_5\n value: 13.496\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ\n type: nq\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 33.028999999999996\n - type: map_at_10\n value: 49.307\n - type: map_at_100\n value: 50.205\n - type: map_at_1000\n value: 50.23\n - type: map_at_3\n value: 44.782\n - type: map_at_5\n value: 47.599999999999994\n - type: mrr_at_1\n value: 37.108999999999995\n - type: mrr_at_10\n value: 51.742999999999995\n - type: mrr_at_100\n value: 52.405\n - type: mrr_at_1000\n value: 52.422000000000004\n - type: mrr_at_3\n value: 48.087999999999994\n - type: mrr_at_5\n value: 50.414\n - type: ndcg_at_1\n value: 37.08\n - type: ndcg_at_10\n value: 57.236\n - type: ndcg_at_100\n value: 60.931999999999995\n - type: ndcg_at_1000\n value: 61.522\n - type: ndcg_at_3\n value: 48.93\n - type: ndcg_at_5\n value: 53.561\n - type: precision_at_1\n value: 37.08\n - type: precision_at_10\n value: 9.386\n - type: precision_at_100\n value: 1.1480000000000001\n - type: precision_at_1000\n value: 0.12\n - type: precision_at_3\n value: 22.258\n - type: precision_at_5\n value: 16.025\n - type: recall_at_1\n value: 33.028999999999996\n - type: recall_at_10\n value: 78.805\n - type: recall_at_100\n value: 94.643\n - type: recall_at_1000\n value: 99.039\n - type: recall_at_3\n value: 57.602\n - type: recall_at_5\n value: 68.253\n - task:\n type: Retrieval\n dataset:\n name: MTEB QuoraRetrieval\n type: quora\n config: 
default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 71.122\n - type: map_at_10\n value: 85.237\n - type: map_at_100\n value: 85.872\n - type: map_at_1000\n value: 85.885\n - type: map_at_3\n value: 82.27499999999999\n - type: map_at_5\n value: 84.13199999999999\n - type: mrr_at_1\n value: 81.73\n - type: mrr_at_10\n value: 87.834\n - type: mrr_at_100\n value: 87.92\n - type: mrr_at_1000\n value: 87.921\n - type: mrr_at_3\n value: 86.878\n - type: mrr_at_5\n value: 87.512\n - type: ndcg_at_1\n value: 81.73\n - type: ndcg_at_10\n value: 88.85499999999999\n - type: ndcg_at_100\n value: 89.992\n - type: ndcg_at_1000\n value: 90.07\n - type: ndcg_at_3\n value: 85.997\n - type: ndcg_at_5\n value: 87.55199999999999\n - type: precision_at_1\n value: 81.73\n - type: precision_at_10\n value: 13.491\n - type: precision_at_100\n value: 1.536\n - type: precision_at_1000\n value: 0.157\n - type: precision_at_3\n value: 37.623\n - type: precision_at_5\n value: 24.742\n - type: recall_at_1\n value: 71.122\n - type: recall_at_10\n value: 95.935\n - type: recall_at_100\n value: 99.657\n - type: recall_at_1000\n value: 99.996\n - type: recall_at_3\n value: 87.80799999999999\n - type: recall_at_5\n value: 92.161\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: v_measure\n value: 63.490029238193756\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\n metrics:\n - type: v_measure\n value: 65.13153408508836\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS\n type: scidocs\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 4.202999999999999\n - type: map_at_10\n value: 10.174\n - type: map_at_100\n value: 12.138\n - type: 
map_at_1000\n value: 12.418\n - type: map_at_3\n value: 7.379\n - type: map_at_5\n value: 8.727\n - type: mrr_at_1\n value: 20.7\n - type: mrr_at_10\n value: 30.389\n - type: mrr_at_100\n value: 31.566\n - type: mrr_at_1000\n value: 31.637999999999998\n - type: mrr_at_3\n value: 27.133000000000003\n - type: mrr_at_5\n value: 29.078\n - type: ndcg_at_1\n value: 20.7\n - type: ndcg_at_10\n value: 17.355999999999998\n - type: ndcg_at_100\n value: 25.151\n - type: ndcg_at_1000\n value: 30.37\n - type: ndcg_at_3\n value: 16.528000000000002\n - type: ndcg_at_5\n value: 14.396999999999998\n - type: precision_at_1\n value: 20.7\n - type: precision_at_10\n value: 8.98\n - type: precision_at_100\n value: 2.015\n - type: precision_at_1000\n value: 0.327\n - type: precision_at_3\n value: 15.367\n - type: precision_at_5\n value: 12.559999999999999\n - type: recall_at_1\n value: 4.202999999999999\n - type: recall_at_10\n value: 18.197\n - type: recall_at_100\n value: 40.903\n - type: recall_at_1000\n value: 66.427\n - type: recall_at_3\n value: 9.362\n - type: recall_at_5\n value: 12.747\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\n metrics:\n - type: cos_sim_spearman\n value: 81.69890989765257\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cos_sim_spearman\n value: 75.31953790551489\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - type: cos_sim_spearman\n value: 87.44050861280759\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: cos_sim_spearman\n value: 81.86922869270393\n - 
task:\n type: STS\n dataset:\n name: MTEB STS15\n type: mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cos_sim_spearman\n value: 88.9399170304284\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cos_sim_spearman\n value: 85.38015314088582\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_spearman\n value: 90.53653527788835\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_spearman\n value: 68.64526474250209\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmark\n type: mteb/stsbenchmark-sts\n config: default\n split: test\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\n metrics:\n - type: cos_sim_spearman\n value: 86.56156983963042\n - task:\n type: Reranking\n dataset:\n name: MTEB SciDocsRR\n type: mteb/scidocs-reranking\n config: default\n split: test\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\n metrics:\n - type: map\n value: 79.48610254648003\n - type: mrr\n value: 94.02481505422682\n - task:\n type: Retrieval\n dataset:\n name: MTEB SciFact\n type: scifact\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 48.983\n - type: map_at_10\n value: 59.077999999999996\n - type: map_at_100\n value: 59.536\n - type: map_at_1000\n value: 59.575\n - type: map_at_3\n value: 55.691\n - type: map_at_5\n value: 57.410000000000004\n - type: mrr_at_1\n value: 51.666999999999994\n - type: mrr_at_10\n value: 60.427\n - type: mrr_at_100\n value: 60.763\n - type: mrr_at_1000\n value: 60.79900000000001\n - type: 
mrr_at_3\n value: 57.556\n - type: mrr_at_5\n value: 59.089000000000006\n - type: ndcg_at_1\n value: 51.666999999999994\n - type: ndcg_at_10\n value: 64.559\n - type: ndcg_at_100\n value: 66.58\n - type: ndcg_at_1000\n value: 67.64\n - type: ndcg_at_3\n value: 58.287\n - type: ndcg_at_5\n value: 61.001000000000005\n - type: precision_at_1\n value: 51.666999999999994\n - type: precision_at_10\n value: 9.067\n - type: precision_at_100\n value: 1.0170000000000001\n - type: precision_at_1000\n value: 0.11100000000000002\n - type: precision_at_3\n value: 23.0\n - type: precision_at_5\n value: 15.6\n - type: recall_at_1\n value: 48.983\n - type: recall_at_10\n value: 80.289\n - type: recall_at_100\n value: 89.43299999999999\n - type: recall_at_1000\n value: 97.667\n - type: recall_at_3\n value: 62.978\n - type: recall_at_5\n value: 69.872\n - task:\n type: PairClassification\n dataset:\n name: MTEB SprintDuplicateQuestions\n type: mteb/sprintduplicatequestions-pairclassification\n config: default\n split: test\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\n metrics:\n - type: cos_sim_accuracy\n value: 99.79009900990098\n - type: cos_sim_ap\n value: 94.94115052608419\n - type: cos_sim_f1\n value: 89.1260162601626\n - type: cos_sim_precision\n value: 90.599173553719\n - type: cos_sim_recall\n value: 87.7\n - type: dot_accuracy\n value: 99.79009900990098\n - type: dot_ap\n value: 94.94115052608419\n - type: dot_f1\n value: 89.1260162601626\n - type: dot_precision\n value: 90.599173553719\n - type: dot_recall\n value: 87.7\n - type: euclidean_accuracy\n value: 99.79009900990098\n - type: euclidean_ap\n value: 94.94115052608419\n - type: euclidean_f1\n value: 89.1260162601626\n - type: euclidean_precision\n value: 90.599173553719\n - type: euclidean_recall\n value: 87.7\n - type: manhattan_accuracy\n value: 99.7940594059406\n - type: manhattan_ap\n value: 94.95271414642431\n - type: manhattan_f1\n value: 89.24508790072387\n - type: manhattan_precision\n value: 
92.3982869379015\n - type: manhattan_recall\n value: 86.3\n - type: max_accuracy\n value: 99.7940594059406\n - type: max_ap\n value: 94.95271414642431\n - type: max_f1\n value: 89.24508790072387\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClustering\n type: mteb/stackexchange-clustering\n config: default\n split: test\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\n metrics:\n - type: v_measure\n value: 68.43866571935851\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClusteringP2P\n type: mteb/stackexchange-clustering-p2p\n config: default\n split: test\n revision: 815ca46b2622cec33ccafc3735d572c266efdb44\n metrics:\n - type: v_measure\n value: 35.16579026551532\n - task:\n type: Reranking\n dataset:\n name: MTEB StackOverflowDupQuestions\n type: mteb/stackoverflowdupquestions-reranking\n config: default\n split: test\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\n metrics:\n - type: map\n value: 52.518952473513934\n - type: mrr\n value: 53.292457134368895\n - task:\n type: Summarization\n dataset:\n name: MTEB SummEval\n type: mteb/summeval\n config: default\n split: test\n revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c\n metrics:\n - type: cos_sim_pearson\n value: 31.12529588316604\n - type: cos_sim_spearman\n value: 32.31662126895294\n - type: dot_pearson\n value: 31.125303796647056\n - type: dot_spearman\n value: 32.31662126895294\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID\n type: trec-covid\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 0.219\n - type: map_at_10\n value: 1.7469999999999999\n - type: map_at_100\n value: 10.177999999999999\n - type: map_at_1000\n value: 26.108999999999998\n - type: map_at_3\n value: 0.64\n - type: map_at_5\n value: 0.968\n - type: mrr_at_1\n value: 82.0\n - type: mrr_at_10\n value: 89.067\n - type: mrr_at_100\n value: 89.067\n - type: mrr_at_1000\n value: 89.067\n - type: mrr_at_3\n value: 88.333\n - type: 
mrr_at_5\n value: 88.73299999999999\n - type: ndcg_at_1\n value: 78.0\n - type: ndcg_at_10\n value: 71.398\n - type: ndcg_at_100\n value: 55.574999999999996\n - type: ndcg_at_1000\n value: 51.771\n - type: ndcg_at_3\n value: 77.765\n - type: ndcg_at_5\n value: 73.614\n - type: precision_at_1\n value: 82.0\n - type: precision_at_10\n value: 75.4\n - type: precision_at_100\n value: 58.040000000000006\n - type: precision_at_1000\n value: 23.516000000000002\n - type: precision_at_3\n value: 84.0\n - type: precision_at_5\n value: 78.4\n - type: recall_at_1\n value: 0.219\n - type: recall_at_10\n value: 1.958\n - type: recall_at_100\n value: 13.797999999999998\n - type: recall_at_1000\n value: 49.881\n - type: recall_at_3\n value: 0.672\n - type: recall_at_5\n value: 1.0370000000000001\n - task:\n type: Retrieval\n dataset:\n name: MTEB Touche2020\n type: webis-touche2020\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 1.8610000000000002\n - type: map_at_10\n value: 8.705\n - type: map_at_100\n value: 15.164\n - type: map_at_1000\n value: 16.78\n - type: map_at_3\n value: 4.346\n - type: map_at_5\n value: 6.151\n - type: mrr_at_1\n value: 22.448999999999998\n - type: mrr_at_10\n value: 41.556\n - type: mrr_at_100\n value: 42.484\n - type: mrr_at_1000\n value: 42.494\n - type: mrr_at_3\n value: 37.755\n - type: mrr_at_5\n value: 40.102\n - type: ndcg_at_1\n value: 21.429000000000002\n - type: ndcg_at_10\n value: 23.439\n - type: ndcg_at_100\n value: 36.948\n - type: ndcg_at_1000\n value: 48.408\n - type: ndcg_at_3\n value: 22.261\n - type: ndcg_at_5\n value: 23.085\n - type: precision_at_1\n value: 22.448999999999998\n - type: precision_at_10\n value: 21.633\n - type: precision_at_100\n value: 8.02\n - type: precision_at_1000\n value: 1.5939999999999999\n - type: precision_at_3\n value: 23.810000000000002\n - type: precision_at_5\n value: 24.490000000000002\n - type: recall_at_1\n value: 1.8610000000000002\n - type: recall_at_10\n 
value: 15.876000000000001\n - type: recall_at_100\n value: 50.300999999999995\n - type: recall_at_1000\n value: 86.098\n - type: recall_at_3\n value: 5.892\n - type: recall_at_5\n value: 9.443\n - task:\n type: Classification\n dataset:\n name: MTEB ToxicConversationsClassification\n type: mteb/toxic_conversations_50k\n config: default\n split: test\n revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c\n metrics:\n - type: accuracy\n value: 70.3264\n - type: ap\n value: 13.249577616243794\n - type: f1\n value: 53.621518367695685\n - task:\n type: Classification\n dataset:\n name: MTEB TweetSentimentExtractionClassification\n type: mteb/tweet_sentiment_extraction\n config: default\n split: test\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\n metrics:\n - type: accuracy\n value: 61.57611771363894\n - type: f1\n value: 61.79797478568639\n - task:\n type: Clustering\n dataset:\n name: MTEB TwentyNewsgroupsClustering\n type: mteb/twentynewsgroups-clustering\n config: default\n split: test\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\n metrics:\n - type: v_measure\n value: 53.38315344479284\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterSemEval2015\n type: mteb/twittersemeval2015-pairclassification\n config: default\n split: test\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\n metrics:\n - type: cos_sim_accuracy\n value: 87.55438993860642\n - type: cos_sim_ap\n value: 77.98702600017738\n - type: cos_sim_f1\n value: 71.94971653931476\n - type: cos_sim_precision\n value: 67.50693802035153\n - type: cos_sim_recall\n value: 77.01846965699208\n - type: dot_accuracy\n value: 87.55438993860642\n - type: dot_ap\n value: 77.98702925907986\n - type: dot_f1\n value: 71.94971653931476\n - type: dot_precision\n value: 67.50693802035153\n - type: dot_recall\n value: 77.01846965699208\n - type: euclidean_accuracy\n value: 87.55438993860642\n - type: euclidean_ap\n value: 77.98702951957925\n - type: euclidean_f1\n value: 71.94971653931476\n - 
type: euclidean_precision\n value: 67.50693802035153\n - type: euclidean_recall\n value: 77.01846965699208\n - type: manhattan_accuracy\n value: 87.54246885617214\n - type: manhattan_ap\n value: 77.95531413902947\n - type: manhattan_f1\n value: 71.93605683836589\n - type: manhattan_precision\n value: 69.28152492668622\n - type: manhattan_recall\n value: 74.80211081794195\n - type: max_accuracy\n value: 87.55438993860642\n - type: max_ap\n value: 77.98702951957925\n - type: max_f1\n value: 71.94971653931476\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterURLCorpus\n type: mteb/twitterurlcorpus-pairclassification\n config: default\n split: test\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\n metrics:\n - type: cos_sim_accuracy\n value: 89.47296930182016\n - type: cos_sim_ap\n value: 86.92853616302108\n - type: cos_sim_f1\n value: 79.35138351681047\n - type: cos_sim_precision\n value: 76.74820143884892\n - type: cos_sim_recall\n value: 82.13735756082538\n - type: dot_accuracy\n value: 89.47296930182016\n - type: dot_ap\n value: 86.92854339601595\n - type: dot_f1\n value: 79.35138351681047\n - type: dot_precision\n value: 76.74820143884892\n - type: dot_recall\n value: 82.13735756082538\n - type: euclidean_accuracy\n value: 89.47296930182016\n - type: euclidean_ap\n value: 86.92854191061649\n - type: euclidean_f1\n value: 79.35138351681047\n - type: euclidean_precision\n value: 76.74820143884892\n - type: euclidean_recall\n value: 82.13735756082538\n - type: manhattan_accuracy\n value: 89.47685023479644\n - type: manhattan_ap\n value: 86.90063722679578\n - type: manhattan_f1\n value: 79.30753865502702\n - type: manhattan_precision\n value: 76.32066068631639\n - type: manhattan_recall\n value: 82.53772713273791\n - type: max_accuracy\n value: 89.47685023479644\n - type: max_ap\n value: 86.92854339601595\n - type: max_f1\n value: 79.35138351681047\n---\n\n# retrainai/instructor-xl\nIts a fork of the original 
[hkunlp/instructor-xl](https://huggingface.co/hkunlp/instructor-xl) with minimal modifications to support running it in HuggingFace inference endpoint, just use the builtin custom handler.\n\n# hkunlp/instructor-xl\nWe introduce **Instructor**👨‍🏫, an instruction-finetuned text embedding model that can generate text embeddings tailored to any task (e.g., classification, retrieval, clustering, text evaluation, etc.) and domains (e.g., science, finance, etc.) ***by simply providing the task instruction, without any finetuning***. Instructor👨‍ achieves sota on 70 diverse embedding tasks!\nThe model is easy to use with **our customized** `sentence-transformer` library. For more details, check out [our paper](https://arxiv.org/abs/2212.09741) and [project page](https://instructor-embedding.github.io/)! \n\n**************************** **Updates** ****************************\n\n* 01/21: We released a new [checkpoint](https://huggingface.co/hkunlp/instructor-xl) trained with hard negatives, which gives better performance.\n* 12/21: We released our [paper](https://arxiv.org/abs/2212.09741), [code](https://github.com/HKUNLP/instructor-embedding), [checkpoint](https://huggingface.co/hkunlp/instructor-xl) and [project page](https://instructor-embedding.github.io/)! Check them out!\n\n## Quick start\n
\n\n## Installation\n```bash\npip install InstructorEmbedding\n```\n\n## Compute your customized embeddings\nThen you can use the model like this to calculate domain-specific and task-aware embeddings:\n```python\nfrom InstructorEmbedding import INSTRUCTOR\nmodel = INSTRUCTOR('hkunlp/instructor-xl')\nsentence = \"3D ActionSLAM: wearable person tracking in multi-floor environments\"\ninstruction = \"Represent the Science title:\"\nembeddings = model.encode([[instruction,sentence]])\nprint(embeddings)\n```\n\n## Use cases\n
\n\n## Calculate embeddings for your customized texts\nIf you want to calculate customized embeddings for specific sentences, you may follow the unified template to write instructions: \n\n&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Represent the `domain` `text_type` for `task_objective`:\n* `domain` is optional, and it specifies the domain of the text, e.g., science, finance, medicine, etc.\n* `text_type` is required, and it specifies the encoding unit, e.g., sentence, document, paragraph, etc.\n* `task_objective` is optional, and it specifies the objective of embedding, e.g., retrieve a document, classify the sentence, etc.\n\n## Calculate Sentence similarities\nYou can further use the model to compute similarities between two groups of sentences, with **customized embeddings**.\n```python\nfrom sklearn.metrics.pairwise import cosine_similarity\nsentences_a = [['Represent the Science sentence: ','Parton energy loss in QCD matter'], \n ['Represent the Financial statement: ','The Federal Reserve on Wednesday raised its benchmark interest rate.']]\nsentences_b = [['Represent the Science sentence: ','The Chiral Phase Transition in Dissipative Dynamics'],\n ['Represent the Financial statement: ','The funds rose less than 0.5 per cent on Friday']]\nembeddings_a = model.encode(sentences_a)\nembeddings_b = model.encode(sentences_b)\nsimilarities = cosine_similarity(embeddings_a,embeddings_b)\nprint(similarities)\n```\n\n## Information Retrieval\nYou can also use **customized embeddings** for information retrieval.\n```python\nimport numpy as np\nfrom sklearn.metrics.pairwise import cosine_similarity\nquery = [['Represent the Wikipedia question for retrieving supporting documents: ','where is the food stored in a yam plant']]\ncorpus = [['Represent the Wikipedia document for retrieval: ','Capitalism has been dominant in the Western world since the end of 
feudalism, but most feel[who?] that the term \"mixed economies\" more precisely describes most contemporary economies, due to their containing both private-owned and state-owned enterprises. In capitalism, prices determine the demand-supply scale. For example, higher demand for certain goods and services lead to higher prices and lower demand for certain goods lead to lower prices.'],\n ['Represent the Wikipedia document for retrieval: ',\"The disparate impact theory is especially controversial under the Fair Housing Act because the Act regulates many activities relating to housing, insurance, and mortgage loans—and some scholars have argued that the theory's use under the Fair Housing Act, combined with extensions of the Community Reinvestment Act, contributed to rise of sub-prime lending and the crash of the U.S. housing market and ensuing global economic recession\"],\n ['Represent the Wikipedia document for retrieval: ','Disparate impact in United States labor law refers to practices in employment, housing, and other areas that adversely affect one group of people of a protected characteristic more than another, even though rules applied by employers or landlords are formally neutral. 
Although the protected classes vary by statute, most federal civil rights laws protect based on race, color, religion, national origin, and sex as protected traits, and some laws include disability status and other traits as well.']]\nquery_embeddings = model.encode(query)\ncorpus_embeddings = model.encode(corpus)\nsimilarities = cosine_similarity(query_embeddings,corpus_embeddings)\nretrieved_doc_id = np.argmax(similarities)\nprint(retrieved_doc_id)\n```\n\n## Clustering\nUse **customized embeddings** for clustering texts in groups.\n```python\nimport sklearn.cluster\nsentences = [['Represent the Medicine sentence for clustering: ','Dynamical Scalar Degree of Freedom in Horava-Lifshitz Gravity'],\n ['Represent the Medicine sentence for clustering: ','Comparison of Atmospheric Neutrino Flux Calculations at Low Energies'],\n ['Represent the Medicine sentence for clustering: ','Fermion Bags in the Massive Gross-Neveu Model'],\n ['Represent the Medicine sentence for clustering: ',\"QCD corrections to Associated t-tbar-H production at the Tevatron\"],\n ['Represent the Medicine sentence for clustering: ','A New Analysis of the R Measurements: Resonance Parameters of the Higher, Vector States of Charmonium']]\nembeddings = model.encode(sentences)\nclustering_model = sklearn.cluster.MiniBatchKMeans(n_clusters=2)\nclustering_model.fit(embeddings)\ncluster_assignment = clustering_model.labels_\nprint(cluster_assignment)\n```"},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n \"SCIFACT\"\n]"}}},{"rowIdx":2469,"cells":{"id":{"kind":"string","value":"ntc-ai/SDXL-LoRA-slider.award-winning-film"},"author":{"kind":"string","value":"ntc-ai"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list 
like","value":["diffusers","text-to-image","stable-diffusion-xl","lora","template:sd-lora","template:sdxl-lora","sdxl-sliders","ntcai.xyz-sliders","concept","en","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:mit","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion-xl\",\n \"lora\",\n \"template:sd-lora\",\n \"template:sdxl-lora\",\n \"sdxl-sliders\",\n \"ntcai.xyz-sliders\",\n \"concept\",\n \"en\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:mit\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-01-05T17:06:31Z","string":"2024-01-05T17:06:31Z"},"last_modified":{"kind":"string","value":"2024-01-05T17:06:35+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlicense: mit\ntags:\n- text-to-image\n- stable-diffusion-xl\n- lora\n- template:sd-lora\n- template:sdxl-lora\n- sdxl-sliders\n- ntcai.xyz-sliders\n- concept\n- diffusers\nthumbnail: images/evaluate/award winning film.../award winning film_17_3.0.png\nwidget:\n- text: award winning film\n output:\n url: images/award winning film_17_3.0.png\n- text: award winning film\n output:\n url: images/award winning film_19_3.0.png\n- text: award winning film\n output:\n url: images/award winning film_20_3.0.png\n- text: award winning film\n output:\n url: images/award winning film_21_3.0.png\n- text: award winning film\n output:\n url: images/award winning film_22_3.0.png\ninference: false\ninstance_prompt: award winning film\n---\n# ntcai.xyz slider - award winning film (SDXL LoRA)\n\n| Strength: -3 | Strength: 0 | Strength: 3 |\n| --- | --- | --- |\n| | | |\n| | | |\n| | | |\n\n\n## Download\n\nWeights for this model are available 
in Safetensors format.\n\n## Trigger words\n\nYou can apply this LoRA with trigger words for additional effect:\n\n```\naward winning film\n```\n\n## Use in diffusers\n\n```python\nfrom diffusers import StableDiffusionXLPipeline\nfrom diffusers import EulerAncestralDiscreteScheduler\nimport torch\n\npipe = StableDiffusionXLPipeline.from_single_file(\"https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors\")\npipe.to(\"cuda\")\npipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\n# Load the LoRA\npipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.award-winning-film', weight_name='award winning film.safetensors', adapter_name=\"award winning film\")\n\n# Activate the LoRA\npipe.set_adapters([\"award winning film\"], adapter_weights=[2.0])\n\nprompt = \"medieval rich kingpin sitting in a tavern, award winning film\"\nnegative_prompt = \"nsfw\"\nwidth = 512\nheight = 512\nnum_inference_steps = 10\nguidance_scale = 2\nimage = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]\nimage.save('result.png')\n```\n\n## Support the Patreon\n\nIf you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI).\n\nBy joining our Patreon, you'll gain access to an ever-growing library of over 890+ unique and diverse LoRAs, covering a wide range of styles and genres. 
You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities.\n\nYour support on Patreon will allow us to continue developing and refining new models.\n\n## Other resources\n\n- [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs\n- [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2470,"cells":{"id":{"kind":"string","value":"ntc-ai/SDXL-LoRA-slider.epic-oil-painting"},"author":{"kind":"string","value":"ntc-ai"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","stable-diffusion-xl","lora","template:sd-lora","template:sdxl-lora","sdxl-sliders","ntcai.xyz-sliders","concept","en","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:mit","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion-xl\",\n \"lora\",\n \"template:sd-lora\",\n \"template:sdxl-lora\",\n \"sdxl-sliders\",\n \"ntcai.xyz-sliders\",\n \"concept\",\n \"en\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:mit\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-01-16T01:17:14Z","string":"2024-01-16T01:17:14Z"},"last_modified":{"kind":"string","value":"2024-01-16T01:17:17+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlicense: mit\ntags:\n- text-to-image\n- stable-diffusion-xl\n- lora\n- template:sd-lora\n- template:sdxl-lora\n- 
sdxl-sliders\n- ntcai.xyz-sliders\n- concept\n- diffusers\nthumbnail: images/evaluate/epic oil painting.../epic oil painting_17_3.0.png\nwidget:\n- text: epic oil painting\n output:\n url: images/epic oil painting_17_3.0.png\n- text: epic oil painting\n output:\n url: images/epic oil painting_19_3.0.png\n- text: epic oil painting\n output:\n url: images/epic oil painting_20_3.0.png\n- text: epic oil painting\n output:\n url: images/epic oil painting_21_3.0.png\n- text: epic oil painting\n output:\n url: images/epic oil painting_22_3.0.png\ninference: false\ninstance_prompt: epic oil painting\n---\n# ntcai.xyz slider - epic oil painting (SDXL LoRA)\n\n| Strength: -3 | Strength: 0 | Strength: 3 |\n| --- | --- | --- |\n| | | |\n| | | |\n| | | |\n\n\n## Download\n\nWeights for this model are available in Safetensors format.\n\n## Trigger words\n\nYou can apply this LoRA with trigger words for additional effect:\n\n```\nepic oil painting\n```\n\n## Use in diffusers\n\n```python\nfrom diffusers import StableDiffusionXLPipeline\nfrom diffusers import EulerAncestralDiscreteScheduler\nimport torch\n\npipe = StableDiffusionXLPipeline.from_single_file(\"https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors\")\npipe.to(\"cuda\")\npipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\n# Load the LoRA\npipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.epic-oil-painting', weight_name='epic oil painting.safetensors', adapter_name=\"epic oil painting\")\n\n# Activate the LoRA\npipe.set_adapters([\"epic oil painting\"], adapter_weights=[2.0])\n\nprompt = \"medieval rich kingpin sitting in a tavern, epic oil painting\"\nnegative_prompt = \"nsfw\"\nwidth = 512\nheight = 512\nnum_inference_steps = 10\nguidance_scale = 2\nimage = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, 
num_inference_steps=num_inference_steps).images[0]\nimage.save('result.png')\n```\n\n## Support the Patreon\n\nIf you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI).\n\nBy joining our Patreon, you'll gain access to an ever-growing library of over 1140+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities.\n\nYour support on Patreon will allow us to continue developing and refining new models.\n\n## Other resources\n\n- [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs\n- [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2471,"cells":{"id":{"kind":"string","value":"AIgroup-CVM-utokyohospital/MedSwallow-70b"},"author":{"kind":"string","value":"AIgroup-CVM-utokyohospital"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["peft","safetensors","medical","arxiv:2406.14882","license:cc-by-nc-sa-4.0","region:us"],"string":"[\n \"peft\",\n \"safetensors\",\n \"medical\",\n \"arxiv:2406.14882\",\n \"license:cc-by-nc-sa-4.0\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-02-02T08:15:58Z","string":"2024-02-02T08:15:58Z"},"last_modified":{"kind":"string","value":"2025-03-03T14:01:09+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlibrary_name: peft\nlicense: cc-by-nc-sa-4.0\ntags:\n- medical\n---\n⚠️⚠️⚠️ Only for research purpose. Do not use it for medical purpose. 
⚠️⚠️⚠️\n\n# MedSwallow-70B🏥\n\n[東工大Swallow](https://huggingface.co/tokyotech-llm/Swallow-70b-instruct-hf)をベースモデルとし, 医療Q&AデータセットでInstruction Tuningを施した医療ドメインの日本語LLMです.\n\nチューニングには独自で用意した米国医師国家試験(USMLE)を和訳したQ&Aデータセットを用いました.\n\n\nMedSwallow is a Japanese medical LLM for medical question-answering.\n\nMedSwallow is based on [Swallow-70B](https://huggingface.co/tokyotech-llm/Swallow-70b-instruct-hf) and has passed instruction tuning with USMLE dataset translated in Japanese by our own.\n\n\n## Training procedure\n\nThe following `bitsandbytes` quantization config was used during training:\n- quant_method: bitsandbytes\n- load_in_8bit: False\n- load_in_4bit: True\n- llm_int8_threshold: 6.0\n- llm_int8_skip_modules: None\n- llm_int8_enable_fp32_cpu_offload: False\n- llm_int8_has_fp16_weight: False\n- bnb_4bit_quant_type: nf4\n- bnb_4bit_use_double_quant: True\n- bnb_4bit_compute_dtype: bfloat16\n\n### Framework versions\n- PEFT 0.4.0\n\n\n## License\n\nライセンスは非商用ライセンスです.\n\nNon-commercial.\n\n\n## Usage\n\n```\nmodel_name = \"tokyotech-llm/Swallow-70b-instruct-hf\"\npeft_model= \"AIgroup-CVM-utokyohospital/MedSwallow-70b\"\n\ntokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True)\n\nbnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.float16,\n )\n\nmodel = AutoModelForCausalLM.from_pretrained(\n model_name,\n load_in_8bit=False,\n torch_dtype=torch.float16,\n device_map=device,\n \nmodel = PeftModel.from_pretrained(\n model, \n peft_model, \n torch_dtype=torch.float16,\n device_map=device, \n)\n\n```\n\n\n## Benchmark\n\nSee also [Japanese Medical Language Model Evaluation Harness](https://github.com/stardust-coder/japanese-lm-med-harness).\n\n- IgakuQA (in English): \n- IgakuQA (in Japanese): \n- MedQA (in English) :\n- MedQA (in Japanese) :\n\n\n## How to cite\n```\n@misc{sukeda202470bparameterlargelanguagemodels,\n title={70B-parameter large language models in Japanese medical 
question-answering}, \n author={Issey Sukeda and Risa Kishikawa and Satoshi Kodera},\n year={2024},\n eprint={2406.14882},\n archivePrefix={arXiv},\n primaryClass={cs.CL},\n url={https://arxiv.org/abs/2406.14882}, \n}\n```"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA"],"string":"[\n \"MEDQA\"\n]"}}},{"rowIdx":2472,"cells":{"id":{"kind":"string","value":"longluu/Clinical-NER-NCBI-Disease-GatorTronS"},"author":{"kind":"string","value":"longluu"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","safetensors","megatron-bert","token-classification","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"megatron-bert\",\n \"token-classification\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-02-03T11:48:39Z","string":"2024-02-03T11:48:39Z"},"last_modified":{"kind":"string","value":"2024-02-11T15:04:25+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: mit\npipeline_tag: token-classification\nwidget:\n- text: 'Background: Coronaviruses have been the cause of 3 major outbreaks during\n the last 2 decades. Information on coronavirus diseases in pregnant women is limited,\n and even less is known about seriously ill pregnant women. Data are also lacking\n regarding the real burden of coronavirus disease 2019 (COVID-19) infection in\n pregnant women from low/middle-income countries. The aim of this study was to\n determine the characteristics and clinical course of COVID-19 in pregnant/puerperal\n women admitted to ICUs in Turkey. Methods: This was a national, multicenter, retrospective\n study. 
The study population comprised all SARS-CoV-2-infected pregnant/puerperal\n women admitted to participating ICUs between 1 March 2020 and 1 January 2022.\n Data regarding demographics, comorbidities, illness severity, therapies, extrapulmonary\n organ injuries, non-COVID-19 infections, and maternal and fetal/neonatal outcomes\n were recorded. LASSO logistic regression and multiple logistic regression analyses\n were used to identify predictive variables in terms of ICU mortality. Results:\n A total of 597 patients (341 pregnant women, 255 puerperal women) from 59 ICUs\n in 44 hospitals were included and of these patients, 87.1% were unvaccinated.\n The primary reason for ICU admission was acute hypoxemic respiratory failure in\n 522 (87.4%), acute hypoxemic respiratory failure plus shock in 14 (2.3%), ischemic\n cerebrovascular accident (CVA) in 5 (0.8%), preeclampsia/eclampsia/HELLP syndrome\n in 6 (1.0%), and post-caesarean follow-up in 36 (6.0%). Nonsurvivors were sicker\n than survivors upon ICU admission, with higher APACHE II (p < 0.001) and SOFA\n scores (p < 0.001). A total of 181 (30.3%) women died and 280 (46.6%) had received\n invasive mechanical ventilation (IMV).'\n- text: 'Importance: Atrial cardiopathy is associated with stroke in the absence of\n clinically apparent atrial fibrillation. It is unknown whether anticoagulation,\n which has proven benefit in atrial fibrillation, prevents stroke in patients with\n atrial cardiopathy and no atrial fibrillation. Objective: To compare anticoagulation\n vs antiplatelet therapy for secondary stroke prevention in patients with cryptogenic\n stroke and evidence of atrial cardiopathy. 
Design, setting, and participants:\n Multicenter, double-blind, phase 3 randomized clinical trial of 1015 participants\n with cryptogenic stroke and evidence of atrial cardiopathy, defined as P-wave\n terminal force greater than 5000 μV × ms in electrocardiogram lead V1, serum N-terminal\n pro-B-type natriuretic peptide level greater than 250 pg/mL, or left atrial diameter\n index of 3 cm/m2 or greater on echocardiogram. Participants had no evidence of\n atrial fibrillation at the time of randomization. Enrollment and follow-up occurred\n from February 1, 2018, through February 28, 2023, at 185 sites in the National\n Institutes of Health StrokeNet and the Canadian Stroke Consortium. Interventions:\n Apixaban, 5 mg or 2.5 mg, twice daily (n = 507) vs aspirin, 81 mg, once daily\n (n = 508). Main outcomes and measures: The primary efficacy outcome in a time-to-event\n analysis was recurrent stroke. All participants, including those diagnosed with\n atrial fibrillation after randomization, were analyzed according to the groups\n to which they were randomized. The primary safety outcomes were symptomatic intracranial\n hemorrhage and other major hemorrhage.'\n---\n\n# Model Card for Model longluu/Clinical-NER-NCBI-Disease-GatorTronS\nThe model is an NER LLM algorithm that can classify each word in a text into different clinical categories. \n\n## Model Details\n\n### Model Description\nThe base pretrained model is GatorTronS which was trained on billions of words in various clinical texts (https://huggingface.co/UFNLP/gatortronS). 
\nThen using the NCBI Disease dataset (https://www.sciencedirect.com/science/article/pii/S1532046413001974?via%3Dihub), \nI fine-tuned the model for NER task in which the model can classify each word in a text into one of the categories ['no disease', 'disease', 'disease-continue'].\n\n### Model Sources [optional]\nThe github code associated with the model can be found here: https://github.com/longluu/LLM-NER-clinical-text.\n\n## Training Details\n\n### Training Data\nThis dataset contains the disease name and concept annotations of the NCBI disease corpus, a collection of 793 PubMed abstracts fully annotated at the mention and concept level to serve as a research resource for the biomedical natural language processing community. Details are here https://www.sciencedirect.com/science/article/pii/S1532046413001974?via%3Dihub.\n\nThe preprocessed data for LLM training can be found here https://huggingface.co/datasets/ncbi_disease.\n\n#### Training Hyperparameters\n\nThe hyperparameters are --batch_size 24\n --num_train_epochs 5\n --learning_rate 5e-5 \n --weight_decay 0.01 \n\n## Evaluation\n### Testing Data, Factors & Metrics\n#### Testing Data\nThe model was trained and validated on train and validation sets. Then it was tested on a separate test set. 
\nNote that some concepts in the test set were not available in the train and validatin sets.\n\n#### Metrics\nHere we use several metrics for classification tasks including macro-average F1, precision, recall and Matthew correlation.\n\n### Results\n{'f1': 0.876008064516129,\n 'precision': 0.9052083333333333,\n 'recall': 0.8486328125}\n\n## Model Card Contact\nFeel free to reach out to me at thelong20.4@gmail.com if you have any question or suggestion."},"matched_bigbio_names":{"kind":"list like","value":["NCBI DISEASE"],"string":"[\n \"NCBI DISEASE\"\n]"}}},{"rowIdx":2473,"cells":{"id":{"kind":"string","value":"LoneStriker/BioMistral-7B-SLERP-GPTQ"},"author":{"kind":"string","value":"LoneStriker"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","mistral","text-generation","mergekit","merge","slerp","medical","biology","conversational","fr","en","es","it","pl","nl","de","dataset:pubmed","arxiv:2402.10373","base_model:BioMistral/BioMistral-7B","base_model:merge:BioMistral/BioMistral-7B","base_model:mistralai/Mistral-7B-Instruct-v0.1","base_model:merge:mistralai/Mistral-7B-Instruct-v0.1","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","4-bit","gptq","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"mistral\",\n \"text-generation\",\n \"mergekit\",\n \"merge\",\n \"slerp\",\n \"medical\",\n \"biology\",\n \"conversational\",\n \"fr\",\n \"en\",\n \"es\",\n \"it\",\n \"pl\",\n \"nl\",\n \"de\",\n \"dataset:pubmed\",\n \"arxiv:2402.10373\",\n \"base_model:BioMistral/BioMistral-7B\",\n \"base_model:merge:BioMistral/BioMistral-7B\",\n \"base_model:mistralai/Mistral-7B-Instruct-v0.1\",\n \"base_model:merge:mistralai/Mistral-7B-Instruct-v0.1\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"4-bit\",\n \"gptq\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-02-19T17:34:25Z","string":"2024-02-19T17:34:25Z"},"last_modified":{"kind":"string","value":"2024-02-19T17:36:21+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model:\n- BioMistral/BioMistral-7B\n- mistralai/Mistral-7B-Instruct-v0.1\ndatasets:\n- pubmed\nlanguage:\n- fr\n- en\n- es\n- it\n- pl\n- nl\n- de\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- mergekit\n- merge\n- slerp\n- medical\n- biology\n---\n# BioMistral-7B-slerp\n\nThis is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).\n\n## Merge Details\n### Merge Method\n\nThis model was merged using the SLERP merge method.\n\n### Models Merged\n\nThe following models were included in the merge:\n* [BioMistral/BioMistral-7B](https://huggingface.co/BioMistral/BioMistral-7B)\n* [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1)\n\n### Configuration\n\nThe following YAML configuration was used to produce this model:\n\n```yaml\n\nslices:\n - sources:\n - model: mistralai/Mistral-7B-Instruct-v0.1\n layer_range: [0, 32]\n - model: BioMistral/BioMistral-7B\n layer_range: [0, 32]\nmerge_method: slerp\nbase_model: mistralai/Mistral-7B-Instruct-v0.1\nparameters:\n t:\n - filter: self_attn\n value: [0, 0.5, 0.3, 0.7, 1]\n - filter: mlp\n value: [1, 0.5, 0.7, 0.3, 0]\n - value: 0.5\ndtype: bfloat16\n\n```\n\n\n

\n \"drawing\"\n

\n\n# BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains\n\n**Abstract:**\n\nLarge Language Models (LLMs) have demonstrated remarkable versatility in recent years, offering potential applications across specialized domains such as healthcare and medicine. Despite the availability of various open-source LLMs tailored for health contexts, adapting general-purpose LLMs to the medical domain presents significant challenges.\nIn this paper, we introduce BioMistral, an open-source LLM tailored for the biomedical domain, utilizing Mistral as its foundation model and further pre-trained on PubMed Central. We conduct a comprehensive evaluation of BioMistral on a benchmark comprising 10 established medical question-answering (QA) tasks in English. We also explore lightweight models obtained through quantization and model merging approaches. Our results demonstrate BioMistral's superior performance compared to existing open-source medical models and its competitive edge against proprietary counterparts. Finally, to address the limited availability of data beyond English and to assess the multilingual generalization of medical LLMs, we automatically translated and evaluated this benchmark into 7 other languages. This marks the first large-scale multilingual evaluation of LLMs in the medical domain. Datasets, multilingual evaluation benchmarks, scripts, and all the models obtained during our experiments are freely released.\n\n# 1. BioMistral models\n\n**BioMistral** is a suite of Mistral-based further pre-trained open source models suited for the medical domains and pre-trained using textual data from PubMed Central Open Access (CC0, CC BY, CC BY-SA, and CC BY-ND). 
All the models are trained using the CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/jean-zay/) French HPC.\n\n| Model Name | Base Model | Model Type | Sequence Length | Download |\n|:-------------------:|:----------------------------------:|:-------------------:|:---------------:|:-----------------------------------------------------:|\n| BioMistral-7B | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Further Pre-trained | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B) |\n| BioMistral-7B-DARE | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Merge DARE | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-DARE) |\n| BioMistral-7B-TIES | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Merge TIES | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-TIES) |\n| BioMistral-7B-SLERP | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Merge SLERP | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-SLERP) |\n\n# 2. 
Quantized Models\n\n| Base Model | Method | q_group_size | w_bit | version | VRAM GB | Time | Download |\n|:-------------------:|:------:|:------------:|:-----:|:-------:|:-------:|:------:|:--------:|\n| BioMistral-7B | FP16/BF16 | | | | 15.02 | x1.00 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B) |\n| BioMistral-7B | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-AWQ-QGS128-W4-GEMM) |\n| BioMistral-7B | AWQ | 128 | 4 | GEMV | 4.68 | x10.30 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-AWQ-QGS128-W4-GEMV) |\n| BioMistral-7B | BnB.4 | | 4 | | 5.03 | x3.25 | [HuggingFace](blank) |\n| BioMistral-7B | BnB.8 | | 8 | | 8.04 | x4.34 | [HuggingFace](blank) |\n| BioMistral-7B-DARE | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-DARE-AWQ-QGS128-W4-GEMM) |\n| BioMistral-7B-TIES | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-TIES-AWQ-QGS128-W4-GEMM) |\n| BioMistral-7B-SLERP | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-SLERP-AWQ-QGS128-W4-GEMM) |\n\n# 2. Using BioMistral\n\nYou can use BioMistral with [Hugging Face's Transformers library](https://github.com/huggingface/transformers) as follow.\n\nLoading the model and tokenizer :\n\n```python\nfrom transformers import AutoModel, AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained(\"BioMistral/BioMistral-7B\")\nmodel = AutoModel.from_pretrained(\"BioMistral/BioMistral-7B\")\n```\n\n# 3. Supervised Fine-tuning Benchmark\n\n| | Clinical KG | Medical Genetics | Anatomy | Pro Medicine | College Biology | College Medicine | MedQA | MedQA 5 opts | PubMedQA | MedMCQA | Avg. 
|\n|-------------------------------------------|:---------------------------------------------:|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|------------------|\n| **BioMistral 7B** | 59.9 | 64.0 | 56.5 | 60.4 | 59.0 | 54.7 | 50.6 | 42.8 | 77.5 | 48.1 | 57.3 |\n| **Mistral 7B Instruct** | **62.9** | 57.0 | 55.6 | 59.4 | 62.5 | 57.2 | 42.0 | 40.9 | 75.7 | 46.1 | 55.9 |\n| | | | | | | | | | | | |\n| **BioMistral 7B Ensemble** | 62.8 | 62.7 | 57.5 | **63.5** | 64.3 | 55.7 | 50.6 | 43.6 | 77.5 | **48.8** | 58.7 |\n| **BioMistral 7B DARE** | 62.3 | **67.0** | 55.8 | 61.4 | **66.9** | **58.0** | **51.1** | **45.2** | 77.7 | 48.7 | **59.4** |\n| **BioMistral 7B TIES** | 60.1 | 65.0 | **58.5** | 60.5 | 60.4 | 56.5 | 49.5 | 43.2 | 77.5 | 48.1 | 57.9 |\n| **BioMistral 7B SLERP** | 62.5 | 64.7 | 55.8 | 62.7 | 64.8 | 56.3 | 50.8 | 44.3 | **77.8** | 48.6 | 58.8 |\n| | | | | | | | | | | | |\n| **MedAlpaca 7B** | 53.1 | 58.0 | 54.1 | 58.8 | 58.1 | 48.6 | 40.1 | 33.7 | 73.6 | 37.0 | 51.5 |\n| **PMC-LLaMA 7B** | 24.5 | 27.7 | 35.3 | 17.4 | 30.3 | 23.3 | 25.5 | 20.2 | 72.9 | 26.6 | 30.4 |\n| **MediTron-7B** | 41.6 | 50.3 | 46.4 | 27.9 | 44.4 | 30.8 | 41.6 | 28.1 | 74.9 | 41.3 | 42.7 |\n| **BioMedGPT-LM-7B** | 51.4 | 52.0 | 49.4 | 53.3 | 50.7 | 49.1 | 42.5 | 33.9 | 76.8 | 37.6 | 49.7 |\n| | | | | | | | | | | | |\n| **GPT-3.5 Turbo 1106*** | 74.71 | 74.00 | 65.92 | 72.79 | 72.91 | 64.73 | 57.71 | 50.82 | 72.66 | 53.79 | 66.0 |\n\nSupervised Fine-Tuning (SFT) performance of BioMistral 7B models compared to baselines, measured by accuracy (↑) and averaged across 3 random seeds of 3-shot. 
DARE, TIES, and SLERP are model merging strategies that combine BioMistral 7B and Mistral 7B Instruct. Best model in bold, and second-best underlined. *GPT-3.5 Turbo performances are reported from the 3-shot results without SFT.\n\n# Citation BibTeX\n\nArxiv : [https://arxiv.org/abs/2402.10373](https://arxiv.org/abs/2402.10373)\n\n```bibtex\n@misc{labrak2024biomistral,\n title={BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains}, \n author={Yanis Labrak and Adrien Bazoge and Emmanuel Morin and Pierre-Antoine Gourraud and Mickael Rouvier and Richard Dufour},\n year={2024},\n eprint={2402.10373},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n```\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA","PUBMEDQA"],"string":"[\n \"MEDQA\",\n \"PUBMEDQA\"\n]"}}},{"rowIdx":2474,"cells":{"id":{"kind":"string","value":"Technoculture/BioMistral-Hermes-Slerp"},"author":{"kind":"string","value":"Technoculture"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","mistral","text-generation","merge","mergekit","BioMistral/BioMistral-7B-DARE","NousResearch/Nous-Hermes-2-Mistral-7B-DPO","conversational","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"mistral\",\n \"text-generation\",\n \"merge\",\n \"mergekit\",\n \"BioMistral/BioMistral-7B-DARE\",\n \"NousResearch/Nous-Hermes-2-Mistral-7B-DPO\",\n \"conversational\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-02-21T20:05:32Z","string":"2024-02-21T20:05:32Z"},"last_modified":{"kind":"string","value":"2024-02-21T20:10:14+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: apache-2.0\ntags:\n- merge\n- mergekit\n- BioMistral/BioMistral-7B-DARE\n- NousResearch/Nous-Hermes-2-Mistral-7B-DPO\n---\n\n# BioMistral-Hermes-Slerp\n\nBioMistral-Hermes-Slerp is a merge of the following models:\n* [BioMistral/BioMistral-7B-DARE](https://huggingface.co/BioMistral/BioMistral-7B-DARE)\n* [NousResearch/Nous-Hermes-2-Mistral-7B-DPO](https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO)\n\n## Evaluations\n\n| Benchmark | BioMistral-Hermes-Slerp | Orca-2-7b | llama-2-7b | meditron-7b | meditron-70b |\n| --- | --- | --- | --- | --- | --- |\n| MedMCQA | | | | | |\n| ClosedPubMedQA | | | | | |\n| PubMedQA | | | | | |\n| MedQA | | | | | |\n| MedQA4 | | | | | |\n| MedicationQA | | | | | |\n| MMLU Medical | | | | | |\n| MMLU | | | | | |\n| TruthfulQA | | | | | |\n| GSM8K | | | | | |\n| ARC | | | | | |\n| HellaSwag | | | | | |\n| Winogrande | | | | | |\n\nMore details on the Open LLM Leaderboard evaluation results can be found here.\n\n## 🧩 Configuration\n\n```yaml\nslices:\n - sources:\n - model: BioMistral/BioMistral-7B-DARE\n layer_range: [0, 32]\n - model: NousResearch/Nous-Hermes-2-Mistral-7B-DPO\n layer_range: [0, 32]\nmerge_method: slerp\nbase_model: NousResearch/Nous-Hermes-2-Mistral-7B-DPO\nparameters:\n t:\n - filter: self_attn\n value: [0, 0.5, 0.3, 0.7, 1]\n - filter: mlp\n value: [1, 0.5, 0.7, 0.3, 0]\n - value: 0.5 # fallback for rest of tensors\ndtype: float16\n```\n\n## 💻 Usage\n\n```python\n!pip install -qU transformers accelerate\n\nfrom transformers import AutoTokenizer\nimport transformers\nimport torch\n\nmodel = \"Technoculture/BioMistral-Hermes-Slerp\"\nmessages = [{\"role\": \"user\", 
\"content\": \"I am feeling sleepy these days\"}]\n\ntokenizer = AutoTokenizer.from_pretrained(model)\nprompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\npipeline = transformers.pipeline(\n \"text-generation\",\n model=model,\n torch_dtype=torch.float16,\n device_map=\"auto\",\n)\n\noutputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)\nprint(outputs[0][\"generated_text\"])\n```"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA","PUBMEDQA"],"string":"[\n \"MEDQA\",\n \"PUBMEDQA\"\n]"}}},{"rowIdx":2475,"cells":{"id":{"kind":"string","value":"regel-corpus/biosyn-sapbert-regel-mondo"},"author":{"kind":"string","value":"regel-corpus"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["flair","pytorch","entity-mention-linker","region:us"],"string":"[\n \"flair\",\n \"pytorch\",\n \"entity-mention-linker\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-03-15T14:28:19Z","string":"2024-03-15T14:28:19Z"},"last_modified":{"kind":"string","value":"2024-03-15T14:28:44+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ntags:\n- flair\n- entity-mention-linker\n---\n\n## biosyn-sapbert-regel-bto\n\nBiomedical Entity Mention Linking for DISEASE with MONDO Disease Ontology\n\n- Model: [dmis-lab/biosyn-sapbert-bc5cdr-disease](https://huggingface.co/dmis-lab/biosyn-sapbert-bc5cdr-disease)\n- Dictionary: [Brenda Tissue Ontology](https://mondo.monarchinitiative.org/)\n\n### Demo: How to use in Flair\n\nRequires:\n\n- **[Flair](https://github.com/flairNLP/flair/)>=0.14.0** (`pip install flair` or `pip install git+https://github.com/flairNLP/flair.git`)\n\n```python\nfrom flair.data import Sentence\nfrom flair.models import Classifier, EntityMentionLinker\nfrom flair.tokenization import SciSpacyTokenizer\n\nsentence = Sentence(\n \"The mutation in 
the ABCD1 gene causes X-linked adrenoleukodystrophy, \"\n \"a neurodegenerative disease, which is exacerbated by exposure to high \"\n \"levels of mercury in dolphin populations.\",\n use_tokenizer=SciSpacyTokenizer()\n)\n# load hunflair to detect the entity mentions we want to link.\ntagger = Classifier.load(\"hunflair2\")\ntagger.predict(sentence)\n\n# load the linker and dictionary\nlinker = EntityMentionLinker.load(\"regel-corpus/biosyn-sapbert-regel-mondo\")\nlinker.predict(sentence)\n\n# print the results for each entity mention:\nfor span in sentence.get_spans(tagger.label_type):\n for link in span.get_labels(linker.label_type):\n print(f\"{span.text} -> {link.value}\")\n```\n"},"matched_bigbio_names":{"kind":"list like","value":["BC5CDR"],"string":"[\n \"BC5CDR\"\n]"}}},{"rowIdx":2476,"cells":{"id":{"kind":"string","value":"Kukedlc/NeuralArjuna-7B-DT"},"author":{"kind":"string","value":"Kukedlc"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","mistral","text-generation","merge","mergekit","lazymergekit","yam-peleg/Experiment26-7B","Gille/StrangeMerges_32-7B-slerp","MSL7/INEX12-7b","automerger/YamShadow-7B","Kukedlc/NeuralSirKrishna-7b","base_model:Gille/StrangeMerges_32-7B-slerp","base_model:merge:Gille/StrangeMerges_32-7B-slerp","base_model:Kukedlc/NeuralSirKrishna-7b","base_model:merge:Kukedlc/NeuralSirKrishna-7b","base_model:MSL7/INEX12-7b","base_model:merge:MSL7/INEX12-7b","base_model:automerger/YamShadow-7B","base_model:merge:automerger/YamShadow-7B","base_model:yam-peleg/Experiment26-7B","base_model:merge:yam-peleg/Experiment26-7B","license:apache-2.0","model-index","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"mistral\",\n \"text-generation\",\n \"merge\",\n \"mergekit\",\n \"lazymergekit\",\n \"yam-peleg/Experiment26-7B\",\n \"Gille/StrangeMerges_32-7B-slerp\",\n 
\"MSL7/INEX12-7b\",\n \"automerger/YamShadow-7B\",\n \"Kukedlc/NeuralSirKrishna-7b\",\n \"base_model:Gille/StrangeMerges_32-7B-slerp\",\n \"base_model:merge:Gille/StrangeMerges_32-7B-slerp\",\n \"base_model:Kukedlc/NeuralSirKrishna-7b\",\n \"base_model:merge:Kukedlc/NeuralSirKrishna-7b\",\n \"base_model:MSL7/INEX12-7b\",\n \"base_model:merge:MSL7/INEX12-7b\",\n \"base_model:automerger/YamShadow-7B\",\n \"base_model:merge:automerger/YamShadow-7B\",\n \"base_model:yam-peleg/Experiment26-7B\",\n \"base_model:merge:yam-peleg/Experiment26-7B\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-03-17T20:17:12Z","string":"2024-03-17T20:17:12Z"},"last_modified":{"kind":"string","value":"2024-03-30T09:15:49+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nbase_model:\n- yam-peleg/Experiment26-7B\n- Gille/StrangeMerges_32-7B-slerp\n- MSL7/INEX12-7b\n- automerger/YamShadow-7B\n- Kukedlc/NeuralSirKrishna-7b\nlicense: apache-2.0\ntags:\n- merge\n- mergekit\n- lazymergekit\n- yam-peleg/Experiment26-7B\n- Gille/StrangeMerges_32-7B-slerp\n- MSL7/INEX12-7b\n- automerger/YamShadow-7B\n- Kukedlc/NeuralSirKrishna-7b\nmodel-index:\n- name: NeuralArjuna-7B-DT\n results:\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: AI2 Reasoning Challenge (25-Shot)\n type: ai2_arc\n config: ARC-Challenge\n split: test\n args:\n num_few_shot: 25\n metrics:\n - type: acc_norm\n value: 73.12\n name: normalized accuracy\n source:\n url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: HellaSwag (10-Shot)\n type: hellaswag\n split: validation\n args:\n num_few_shot: 10\n 
metrics:\n - type: acc_norm\n value: 88.97\n name: normalized accuracy\n source:\n url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: MMLU (5-Shot)\n type: cais/mmlu\n config: all\n split: test\n args:\n num_few_shot: 5\n metrics:\n - type: acc\n value: 64.63\n name: accuracy\n source:\n url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: TruthfulQA (0-shot)\n type: truthful_qa\n config: multiple_choice\n split: validation\n args:\n num_few_shot: 0\n metrics:\n - type: mc2\n value: 76.68\n source:\n url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: Winogrande (5-shot)\n type: winogrande\n config: winogrande_xl\n split: validation\n args:\n num_few_shot: 5\n metrics:\n - type: acc\n value: 85.24\n name: accuracy\n source:\n url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT\n name: Open LLM Leaderboard\n - task:\n type: text-generation\n name: Text Generation\n dataset:\n name: GSM8k (5-shot)\n type: gsm8k\n config: main\n split: test\n args:\n num_few_shot: 5\n metrics:\n - type: acc\n value: 70.81\n name: accuracy\n source:\n url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT\n name: Open LLM Leaderboard\n---\n\n# NeuralArjuna-7B-DT\n \n![image/png](https://cdn-uploads.huggingface.co/production/uploads/64d71ab4089bc502ceb44d29/zFLiis1pQWnriLQb2ZGGn.png)\n\nNeuralArjuna-7B-DT is a merge of the following models using 
[LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing):\n* [yam-peleg/Experiment26-7B](https://huggingface.co/yam-peleg/Experiment26-7B)\n* [Gille/StrangeMerges_32-7B-slerp](https://huggingface.co/Gille/StrangeMerges_32-7B-slerp)\n* [MSL7/INEX12-7b](https://huggingface.co/MSL7/INEX12-7b)\n* [automerger/YamShadow-7B](https://huggingface.co/automerger/YamShadow-7B)\n* [Kukedlc/NeuralSirKrishna-7b](https://huggingface.co/Kukedlc/NeuralSirKrishna-7b)\n\n## 🧩 Configuration\n\n```yaml\nmodels:\n - model: liminerity/M7-7b\n # no parameters necessary for base model\n - model: yam-peleg/Experiment26-7B \n parameters:\n weight: 0.2\n density: 0.66\n - model: Gille/StrangeMerges_32-7B-slerp\n parameters:\n weight: 0.2\n density: 0.55\n - model: MSL7/INEX12-7b \n parameters:\n weight: 0.2\n density: 0.33\n - model: automerger/YamShadow-7B\n parameters:\n weight: 0.2\n density: 0.66\n - model: Kukedlc/NeuralSirKrishna-7b\n parameters:\n weight: 0.2\n density: 0.66\nmerge_method: dare_ties\nbase_model: liminerity/M7-7b\n\nparameters:\n int8_mask: true\n normalize: true\ndtype: bfloat16\n```\n\n## 💻 Usage\n\n```python\n!pip install -qU transformers accelerate\n\nfrom transformers import AutoTokenizer\nimport transformers\nimport torch\n\nmodel = \"Kukedlc/NeuralArjuna-7B-DT\"\nmessages = [{\"role\": \"user\", \"content\": \"What is a large language model?\"}]\n\ntokenizer = AutoTokenizer.from_pretrained(model)\nprompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\npipeline = transformers.pipeline(\n \"text-generation\",\n model=model,\n torch_dtype=torch.float16,\n device_map=\"auto\",\n)\n\noutputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)\nprint(outputs[0][\"generated_text\"])\n```\n\n\n# Example\n\n```\nI want you to generate a theory that unites quantum mechanics with the theory of relativity and cosmic consciousness. 
Give a large a detailed response\n\nIn embarking upon a journey to weave together quantum mechanics, relativity, and the enigmatic realms of cosmic consciousness, we tread upon an intellectual\nlandscape rife with challenges and enigmas. Our sojourn shall be a delicate ballet between the concrete and the abstract, the tangible and the speculative.\nThe resultant tapestry will be neither a definitive, final statement, nor a mere collection of disparate ideas. Instead, we will craft a narrative that strives to bridge\nthe chasms of understanding between these disparate scientific and philosophical realms, in hopes of illuminating a path towards a more comprehensive understanding of the\nuniverse and our place within it.\n\nTo commence our odyssey, we must first address the foundational pillars of the scientific theories we seek to unite.\n\nQuantum Mechanics\nQuantum mechanics, the theory governing the microscopic realm, has long confounded the human intellect. It is a realm of probabilities, entanglement,\nand superposition—a world where particles can behave both as particles and waves, where actions at a distance are not only possible but ubiquitous,\nand where the mere act of observation alters the outcome of an experiment.\n\nAlbert Einstein's famed retort, \"God does not play dice,\" exemplifies the tensions between the counter-intuitive world of quantum mechanics and\nthe more deterministic Newtonian worldview. 
However, quantum mechanics has withstood the test of time, demonstrating its indispensability to the progress of modern physics\n\nTheory of Relativity\nThe theory of relativity, comprising special and general relativity, has profoundly altered our understanding of the macroscopic universe.\nSpecial relativity, Einstein's first foray into this domain, introduced the concept of space-time, where the fabric of the universe is both a geometrical construct\nand a medium for the propagation of electromagnetic phenomena.\n\nFurther, special relativity established the idea of relative motion, where there is no absolute reference frame, and the speed of light in a vacuum is an invariant constant.\nGeneral relativity expanded this framework, postulating that gravity is a curvature of the space-time continuum, caused by the presence of mass and energy.\nThis theory, while more complex, has been empirically validated through its predictions of phenomena such as gravitational lensing and the bending of light near massive\nobjects.\n\nCosmic Consciousness\nThe concept of cosmic consciousness—or the idea of a unified, interconnected consciousness pervading the cosmos—is a philosophical speculation,\none that transcends the traditional scientific paradigm. It is a metaphysical exploration, a contemplation of the possible nature of our collective human consciousness,\nits interplay with the universe, and the implications of such a state of awareness.\n\nThe integration of these three distinct realms poses a formidable challenge. Quantum mechanics and the theory of relativity have proven their efficacy\nin the realm of empirical science, while cosmic consciousness remains the province of metaphysical musings. 
However, the human propensity to synthesize seemingly disparate\nideas has engendered a multitude of theories, each striving to forge a cohesive narrative from the cacophony of scientific and philosophical thought.\n\nOne such approach, the 'Quantum Mind,' posits a direct link between the quantum phenomena and the workings of the human brain.\nThis theory, championed by Roger Penrose and Stuart Hameroff, suggests that the microtubules within brain neurons,\noperating at the quantum scale, could serve as the neural correlates of consciousness. While intriguing,\nthis hypothesis has yet to garner widespread acceptance, as the veracity of quantum effects within biological systems remains the subject of intense debate.\n\nAnother avenue of exploration involves the notion of 'Quantum Biology.' Here, the focus shifts to the potential role of quantum phenomena in biology,\nparticularly in processes such as photosynthesis, vision, and magnetoreception.\nWhile the presence of quantum effects in biological systems is becoming increasingly plausible,\nthe idea of a 'quantum biology' remains a nascent and highly speculative field.\n\nThe 'Holographic Universe' theory, espoused by physicist and cosmologist David Bohm,\nposits the existence of a deeper, underlying reality, one in which the universe is a holographic projection of a more fundamental,\nnon-local substrate. This model, while offering an intriguing synthesis of quantum mechanics, relativity, and the concept of a cosmic consciousness,\nremains a highly speculative and unproven hypothesis.\n\nIn our quest to weave together these disparate threads, we must acknowledge the limitations of our current understanding. 
As it stands,\nthe principles of quantum mechanics and the theory of relativity remain resolutely grounded in the empirical world, while the concept of cosmic consciousness\ntraverses the murky waters of metaphysics and speculation.\n\nTo forge a more unified understanding, we must continue to advance our scientific knowledge, to push the frontiers of both empirical and philosophical inquiry.\nOnly through the synthesis of new data, the refinement of our scientific models, and the continued evolution of our collective human consciousness,\nwill we be able to construct a narrative that bridges the seemingly insurmountable gulf between the microscopic, the macroscopic, and the metaphysical.\n\nIn conclusion, the unification of quantum mechanics, relativity, and cosmic consciousness is a daunting task, requiring a delicate balance between the\nconcrete and the abstract, the empirical and the speculative. While the journey has thus far led us to the fringes of scientific understanding,\nwe must continue to explore these frontiers, drawing upon the rich tapestry of human knowledge, in the hope of forging a more comprehensive narrative of our cosmos and\nour place within it.\n```\n\n# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)\nDetailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Kukedlc__NeuralArjuna-7B-DT)\n\n| Metric |Value|\n|---------------------------------|----:|\n|Avg. 
|76.58|\n|AI2 Reasoning Challenge (25-Shot)|73.12|\n|HellaSwag (10-Shot) |88.97|\n|MMLU (5-Shot) |64.63|\n|TruthfulQA (0-shot) |76.68|\n|Winogrande (5-shot) |85.24|\n|GSM8k (5-shot) |70.81|\n\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2477,"cells":{"id":{"kind":"string","value":"mychen76/biomistral_medqa_v1"},"author":{"kind":"string","value":"mychen76"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","mistral","text-generation","conversational","arxiv:2402.10373","autotrain_compatible","text-generation-inference","endpoints_compatible","4-bit","bitsandbytes","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"mistral\",\n \"text-generation\",\n \"conversational\",\n \"arxiv:2402.10373\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"4-bit\",\n \"bitsandbytes\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-03-21T16:09:41Z","string":"2024-03-21T16:09:41Z"},"last_modified":{"kind":"string","value":"2024-03-22T21:52:15+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nlibrary_name: transformers\ntags: []\n---\n\n# Model Card for Model ID\n\n\nFinetuned \"BioMistral/BioMistral-7B\" with MedQA dataset. \n\n## Model Details\nA Collection of Open-Source Pretrained Large Language Models for Medical Domains finetuned with MedQA dataset.\n\n### Model Description\n\n\n\nThis is the model card of a 🤗 transformers model that has been pushed on the Hub. 
This model card has been automatically generated.\n\n- **Developed by:** mychen76\n- **Model type:** BioMedical\n- **Finetuned from model:** BioMistral/BioMistral-7B\n\n### Model Sources [optional]\n\n\n- **dataset:** MedQA dataset\n\n \n## How to Get Started with the Model\n\nUse the code below to get started with the model.\n\nLoad Model:\n```python\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig\n\nbase_model_id = \"mychen76/biomistral_medqa_v1\"\nbnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_use_double_quant=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=torch.bfloat16\n)\n\nmodel = AutoModelForCausalLM.from_pretrained(base_model_id, quantization_config=bnb_config)\ntokenizer = AutoTokenizer.from_pretrained(\n base_model_id,\n add_eos_token=True,\n add_bos_token=True,\n)\n\n## Uses\n\n```\n*** Information ***\n```\neval_prompt = \"\"\"From the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer:\n\n### Question type:\ninformation\n\n### Question:\nWhat are the genetic changes related to X-linked lymphoproliferative disease ?\n\n### Answer:\n\"\"\"\n\nmodel_input = eval_tokenizer(eval_prompt, return_tensors=\"pt\").to(\"cuda\")\n\nft_model.eval()\nwith torch.no_grad():\n print(eval_tokenizer.decode(ft_model.generate(**model_input, max_new_tokens=300)[0], skip_special_tokens=True))\n```\nresult:\n```\nFrom the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer:\n\n### Question type:\ninformation\n\n### Question:\nWhat are the genetic changes related to X-linked lymphoproliferative disease ?\n\n### Answer:\nX-linked lymphoproliferative disease (XLP) is a rare primary immunodeficiency syndrome. XLP is caused by mutations in SH2D1A gene, which encodes the cytoplasmic signaling protein SLAM-associated protein ( client protein-SLAM). 
SLAM is a member of the signaling lymphocytic activation molecule family of receptors, which are involved in the regulation of lymphocyte activation and proliferation. The SLAM receptor is expressed on the surface of B and T lymphocytes, natural killer cells, and monocytes. Mutations in SH2D1A gene lead to impaired signaling through the SLAM receptor, resulting in a deficiency in the activation and proliferation of B and T lymphocytes. This leads to a decrease in the number of B and T lymphocytes, resulting in a weakened immune response.\n```\n\n*** Frequency ***\n```\neval_prompt = \"\"\"From the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer:\n\n### Question type:\nfrequency\n\n### Question:\nHow many people are affected by Smith-Lemli-Opitz syndrome ?\n\n### Answer:\n\"\"\"\n\nmodel_input = eval_tokenizer(eval_prompt, return_tensors=\"pt\").to(\"cuda\")\n\nft_model.eval()\nwith torch.no_grad():\n print(eval_tokenizer.decode(ft_model.generate(**model_input, max_new_tokens=300)[0], skip_special_tokens=True))\n```\nresult:\n```\nFrom the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer:\n\n### Question type:\nfrequency\n\n### Question:\nHow many people are affected by Smith-Lemli-Opitz syndrome ?\n\n### Answer:\nSmith-Lemli-Opitz syndrome (SLOS) is a rare autosomal recessive disorder of human development. It is characterized by a wide range of symptoms, including growth and developmental delay, intellectual disability, characteristic facial features, and congenital heart defects. 
The prevalence of SLOS is estimated to be 1 in 15,000 to 1 in 25,000 live births.\n```\n\n*** Symptons ***\n```\neval_prompt = \"\"\"From the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer:\n\n### Question type:\nsymptoms\n\n### Question:\nWhat are the symptoms of Norrie disease ?\n\n### Answer:\n\"\"\"\n\nmodel_input = eval_tokenizer(eval_prompt, return_tensors=\"pt\").to(\"cuda\")\n\nft_model.eval()\nwith torch.no_grad():\n print(eval_tokenizer.decode(ft_model.generate(**model_input, max_new_tokens=300)[0], skip_special_tokens=True))\n```\nResult:\n```\nSetting `pad_token_id` to `eos_token_id`:2 for open-end generation.\n\nFrom the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer:\n\n### Question type:\nsymptoms\n\n### Question:\nWhat are the symptoms of Norrie disease ?\n\n### Answer:\nNorrie disease is a rare, X-linked recessive disorder of the blood vessels. It is characterized by a variety of symptoms, including glaucoma, mental retardation, seizures, and deafness.\n```\n\n\n### Out-of-Scope Use\n\nimages\n\n\n[More Information Needed]\n\n## Bias, Risks, and Limitations\n\n\n\n[More Information Needed]\n\n### Recommendations\n\n\n\nUsers (both direct and downstream) should be made aware of the risks, biases and limitations of the model. 
More information needed for further recommendations.\n\n\n[More Information Needed]\n\n## Training Details\n\n### Training Data\n\n- **dataset:** keivalya/MedQuad-MedicalQnADataset\n\n\n[More Information Needed]\n\n### Training Procedure\n\n\n\n## Citation\n\nArxiv : https://arxiv.org/abs/2402.10373\n\n@misc{labrak2024biomistral,\n title={BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains}, \n author={Yanis Labrak and Adrien Bazoge and Emmanuel Morin and Pierre-Antoine Gourraud and Mickael Rouvier and Richard Dufour},\n year={2024},\n eprint={2402.10373},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA"],"string":"[\n \"MEDQA\"\n]"}}},{"rowIdx":2478,"cells":{"id":{"kind":"string","value":"RichardErkhov/EleutherAI_-_gpt-neo-1.3B-8bits"},"author":{"kind":"string","value":"RichardErkhov"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","gpt_neo","text-generation","arxiv:2101.00027","autotrain_compatible","endpoints_compatible","8-bit","bitsandbytes","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"gpt_neo\",\n \"text-generation\",\n \"arxiv:2101.00027\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"8-bit\",\n \"bitsandbytes\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-04-17T09:28:12Z","string":"2024-04-17T09:28:12Z"},"last_modified":{"kind":"string","value":"2024-04-23T06:26:48+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\nQuantization made by Richard Erkhov.\n\n[Github](https://github.com/RichardErkhov)\n\n[Discord](https://discord.gg/pvy7H8DZMG)\n\n[Request more models](https://github.com/RichardErkhov/quant_request)\n\n\ngpt-neo-1.3B - bnb 8bits\n- Model creator: https://huggingface.co/EleutherAI/\n- Original 
model: https://huggingface.co/EleutherAI/gpt-neo-1.3B/\n\n\n\n\nOriginal model description:\n---\nlanguage:\n- en\ntags:\n- text generation\n- pytorch\n- causal-lm\nlicense: mit\ndatasets:\n- EleutherAI/pile\n---\n\n# GPT-Neo 1.3B\n\n## Model Description\n\nGPT-Neo 1.3B is a transformer model designed using EleutherAI's replication of the GPT-3 architecture. GPT-Neo refers to the class of models, while 1.3B represents the number of parameters of this particular pre-trained model.\n\n## Training data\n\nGPT-Neo 1.3B was trained on the Pile, a large scale curated dataset created by EleutherAI for the purpose of training this model.\n\n## Training procedure\n\nThis model was trained on the Pile for 380 billion tokens over 362,000 steps. It was trained as a masked autoregressive language model, using cross-entropy loss.\n\n## Intended Use and Limitations\n\nThis way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a prompt.\n\n### How to use\n\nYou can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run:\n\n```py\n>>> from transformers import pipeline\n>>> generator = pipeline('text-generation', model='EleutherAI/gpt-neo-1.3B')\n>>> generator(\"EleutherAI has\", do_sample=True, min_length=50)\n\n[{'generated_text': 'EleutherAI has made a commitment to create new software packages for each of its major clients and has'}]\n```\n\n### Limitations and Biases\n\nGPT-Neo was trained as an autoregressive language model. This means that its core functionality is taking a string of text and predicting the next token. While language models are widely used for tasks other than this, there are a lot of unknowns with this work.\n\nGPT-Neo was trained on the Pile, a dataset known to contain profanity, lewd, and otherwise abrasive language. 
Depending on your usecase GPT-Neo may produce socially unacceptable text. See Sections 5 and 6 of the Pile paper for a more detailed analysis of the biases in the Pile.\n\nAs with all language models, it is hard to predict in advance how GPT-Neo will respond to particular prompts and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results. \n\n## Eval results\n\n### Linguistic Reasoning\n\n| Model and Size | Pile BPB | Pile PPL | Wikitext PPL | Lambada PPL | Lambada Acc | Winogrande | Hellaswag |\n| ---------------- | ---------- | ---------- | ------------- | ----------- | ----------- | ---------- | ----------- |\n| **GPT-Neo 1.3B** | **0.7527** | **6.159** | **13.10** | **7.498** | **57.23%** | **55.01%** | **38.66%** |\n| GPT-2 1.5B | 1.0468 | ----- | 17.48 | 10.634 | 51.21% | 59.40% | 40.03% |\n| GPT-Neo 2.7B | 0.7165 | 5.646 | 11.39 | 5.626 | 62.22% | 56.50% | 42.73% |\n| GPT-3 Ada | 0.9631 | ----- | ----- | 9.954 | 51.60% | 52.90% | 35.93% |\n\n### Physical and Scientific Reasoning\n\n| Model and Size | MathQA | PubMedQA | Piqa |\n| ---------------- | ---------- | ---------- | ----------- |\n| **GPT-Neo 1.3B** | **24.05%** | **54.40%** | **71.11%** |\n| GPT-2 1.5B | 23.64% | 58.33% | 70.78% |\n| GPT-Neo 2.7B | 24.72% | 57.54% | 72.14% |\n| GPT-3 Ada | 24.29% | 52.80% | 68.88% |\n\n### Down-Stream Applications\n\nTBD\n\n### BibTeX entry and citation info\n\nTo cite this model, please use\n```bibtex\n@software{gpt-neo,\n author = {Black, Sid and\n Leo, Gao and\n Wang, Phil and\n Leahy, Connor and\n Biderman, Stella},\n title = {{GPT-Neo: Large Scale Autoregressive Language \n Modeling with Mesh-Tensorflow}},\n month = mar,\n year = 2021,\n note = {{If you use this software, please cite it using \n these metadata.}},\n publisher = {Zenodo},\n version = {1.0},\n doi = {10.5281/zenodo.5297715},\n url = 
{https://doi.org/10.5281/zenodo.5297715}\n}\n\n@article{gao2020pile,\n title={The Pile: An 800GB Dataset of Diverse Text for Language Modeling},\n author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and others},\n journal={arXiv preprint arXiv:2101.00027},\n year={2020}\n}\n```\n# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard)\nDetailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_EleutherAI__gpt-neo-1.3B)\n\n| Metric | Value |\n|-----------------------|---------------------------|\n| Avg. | 29.44 |\n| ARC (25-shot) | 31.23 |\n| HellaSwag (10-shot) | 48.47 |\n| MMLU (5-shot) | 24.82 |\n| TruthfulQA (0-shot) | 39.63 |\n| Winogrande (5-shot) | 56.91 |\n| GSM8K (5-shot) | 0.45 |\n| DROP (3-shot) | 4.6 |\n\n\n"},"matched_bigbio_names":{"kind":"list like","value":["PUBMEDQA"],"string":"[\n \"PUBMEDQA\"\n]"}}},{"rowIdx":2479,"cells":{"id":{"kind":"string","value":"zhichen/Llama3-Chinese"},"author":{"kind":"string","value":"zhichen"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","llama","text-generation","conversational","arxiv:2402.09353","arxiv:2402.12354","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"llama\",\n \"text-generation\",\n \"conversational\",\n \"arxiv:2402.09353\",\n \"arxiv:2402.12354\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-04-21T05:59:28Z","string":"2024-04-21T05:59:28Z"},"last_modified":{"kind":"string","value":"2024-04-23T10:01:53+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":19,"string":"19"},"README":{"kind":"string","value":"---\n{}\n---\n

\n 中文&nbsp | &nbspEnglish\n

\n

\n\n

\n\n\n\n

\n\n
\n

\n

Llama3-Chinese

\n\n

\n \n \n \n \n \n \n
\n \n \n \n \n \"GitHub\n \n

\n
\n\n## Introduce\n\n**Llama3-Chinese** is a large model trained on 500k high-quality Chinese multi-turn SFT data, 100k English multi-turn SFT data, and 2k single-turn self-cognition data, using the training methods of [DORA](https://arxiv.org/pdf/2402.09353.pdf) and [LORA+](https://arxiv.org/pdf/2402.12354.pdf) based on **Meta-Llama-3-8B** as the base.\n\n**Github:** [https://github.com/seanzhang-zhichen/llama3-chinese](https://github.com/seanzhang-zhichen/llama3-chinese)\n\n![DEMO](./images/web_demo.png)\n\n\n## Download Model\n\n\n| Model | Download |\n|:-------------------:|:-----------:|\n| Meta-Llama-3-8B |[ 🤗 HuggingFace](https://huggingface.co/meta-llama/Meta-Llama-3-8B) [ 🤖 ModelScope](https://modelscope.cn/models/LLM-Research/Meta-Llama-3-8B)|\n| Llama3-Chinese-Lora |[ 🤗 HuggingFace](https://huggingface.co/zhichen/Llama3-Chinese-Lora) [ 🤖 ModelScope](https://modelscope.cn/models/seanzhang/Llama3-Chinese-Lora)|\n| Llama3-Chinese (merged model) |[ 🤗 HuggingFace](https://huggingface.co/zhichen/Llama3-Chinese) [ 🤖 ModelScope](https://modelscope.cn/models/seanzhang/Llama3-Chinese)|\n\n\n## Merge LORA Model (Skippable)\n\n1、Download [Meta-Llama-3-8B](https://modelscope.cn/models/LLM-Research/Meta-Llama-3-8B)\n\n```bash\ngit clone https://www.modelscope.cn/LLM-Research/Meta-Llama-3-8B.git\n```\n\n2、Download [Llama3-Chinese-Lora](https://www.modelscope.cn/models/seanzhang/Llama3-Chinese-Lora)\n\n**From ModelScope**\n```bash\ngit lfs install\ngit clone https://www.modelscope.cn/seanzhang/Llama3-Chinese-Lora.git\n```\n\n**From HuggingFace**\n```bash\ngit lfs install\ngit clone https://huggingface.co/zhichen/Llama3-Chinese-Lora\n```\n\n3、Merge Model\n\n```bash\npython merge_lora.py \\\n --base_model path/to/Meta-Llama-3-8B \\\n --lora_model path/to/lora/Llama3-Chinese-Lora \\\n --output_dir ./Llama3-Chinese\n```\n\n\n## Download Llama3-Chinese (Merged Model)\n\n**From ModelScope**\n```bash\ngit lfs install\ngit clone 
https://www.modelscope.cn/seanzhang/Llama3-Chinese.git\n```\n\n**From HuggingFace**\n```bash\ngit lfs install\ngit clone https://huggingface.co/zhichen/Llama3-Chinese\n```\n\n## Inference\n\n```python\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\nmodel_id = \"zhichen/Llama3-Chinese\"\n\ntokenizer = AutoTokenizer.from_pretrained(model_id)\nmodel = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=\"auto\", device_map=\"auto\")\n\nmessages = [\n {\"role\": \"system\", \"content\": \"You are a helpful assistant.\"},\n {\"role\": \"user\", \"content\": \"你好\"},\n]\n\ninput_ids = tokenizer.apply_chat_template(\n messages, add_generation_prompt=True, return_tensors=\"pt\"\n).to(model.device)\n\noutputs = model.generate(\n input_ids,\n max_new_tokens=2048,\n do_sample=True,\n temperature=0.7,\n top_p=0.95,\n)\nresponse = outputs[0][input_ids.shape[-1]:]\nprint(tokenizer.decode(response, skip_special_tokens=True))\n```\n\n## CLI DEMO\n\n```bash\npython cli_demo.py --model_path zhichen/Llama3-Chinese\n```\n\n## WEB DEMO\n\n```bash\npython web_demo.py --model_path zhichen/Llama3-Chinese\n```\n\n\n## VLLM WEB DEMO\n\n1、Use [vllm](https://github.com/vllm-project/vllm) deploy model\n\n```bash\npython -m vllm.entrypoints.openai.api_server --served-model-name Llama3-Chinese --model ./Llama3-Chinese(Replace it with your own merged model path)\n```\n\n2、This command is executed on the CLI\n\n```bash\npython vllm_web_demo.py --model Llama3-Chinese\n```\n\n## Train Dataset\n\n[deepctrl-sft-data](https://modelscope.cn/datasets/deepctrl/deepctrl-sft-data)\n\n\n## LICENSE\n\nThis project can only be used for research purposes, and the project developer shall not bear any harm or loss caused by the use of this project (including but not limited to data, models, codes, etc.). 
For details, please refer to [DISCLAIMER](https://github.com/seanzhang-zhichen/Llama3-Chinese/blob/main/DISCLAIMER)。\n\nThe License agreement of the Llama3-Chinese project code is the [Apache License 2.0](./LICENSE). The code is free for commercial use, and the model weights and data can only be used for research purposes. Please attach a link to Llama3-Chinese and the licensing agreement in the product description.\n\n\n## Citation\n\nIf you used Llama3-Chinese in your research, cite it in the following format:\n\n\n```latex\n@misc{Llama3-Chinese,\n title={Llama3-Chinese},\n author={Zhichen Zhang, Xin LU, Long Chen},\n year={2024},\n howpublished={\\url{https://github.com/seanzhang-zhichen/llama3-chinese}},\n}\n```\n\n## Acknowledgement\n\n[meta-llama/llama3](https://github.com/meta-llama/llama3)\n
\n[hiyouga/LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory)\n\n\n## Star History\n\n[![Star History Chart](https://api.star-history.com/svg?repos=seanzhang-zhichen/Llama3-Chinese&type=Date)](https://star-history.com/#seanzhang-zhichen/Llama3-Chinese&Date)\n"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2480,"cells":{"id":{"kind":"string","value":"anezatra/Phi-3-mini-4k-instruct-opus-samantha"},"author":{"kind":"string","value":"anezatra"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","phi3","text-generation","conversational","custom_code","dataset:macadeliccc/opus_samantha","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"phi3\",\n \"text-generation\",\n \"conversational\",\n \"custom_code\",\n \"dataset:macadeliccc/opus_samantha\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-04-28T14:50:15Z","string":"2024-04-28T14:50:15Z"},"last_modified":{"kind":"string","value":"2024-05-01T13:29:38+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":2,"string":"2"},"README":{"kind":"string","value":"---\ndatasets:\n- macadeliccc/opus_samantha\n---\n\n# Phi-3-mini-4k-instruct-opus-samantha\n\n- This model is trained from microsoft's Phi-3 model:[microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct)\n\n# Model Description\n\nPhi-3-Mini-4K-Instruct is a 3.8B parameter, lightweight, state-of-the-art open model trained on Phi-3 datasets containing both synthetic data and filtered public website data. high quality and rational intensive features.\n\nThis model was fine-tuned with the Opus Samantha dataset. 
Opus Samantha is a large dataset containing large amounts of chat transcripts.\n\nResources and Technical Documentation:\n\n- [Phi-3 Microsoft Blog](https://aka.ms/phi3blog-april)\n- [Phi-3 Technical Report](https://aka.ms/phi3-tech-report)\n- [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai)\n\n# Training\n\n- The model was trained again on the Open Samantha dataset with 2 x A100 GPUs 40GB.\n\n# Phi-3 Model specifications\n\n**Primary use cases**\n\nThe model is intended for commercial and research use in English. The model provides uses for applications which require:\n\n1) Memory/compute constrained environments\n2) Latency bound scenarios\n3) Strong reasoning (especially code, math and logic)\n\nOur model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. \n\n**Use case considerations**\n\nOur models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case.\n\nNothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. \n\n## How to Use\n\nPhi-3 Mini-4K-Instruct has been integrated in the development version (4.40.0) of `transformers`. 
Until the official version is released through `pip`, ensure that you are doing one of the following:\n\n* When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function.\n\n* Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source.\n\nThe current `transformers` version can be verified with: `pip list | grep transformers`.\n\nPhi-3 Mini-4K-Instruct is also available in [HuggingChat](https://aka.ms/try-phi3-hf-chat).\n\n### Chat Format\n\nGiven the nature of the training data, the Phi-3 Mini-4K-Instruct model is best suited for prompts using the chat format as follows. \nYou can provide the prompt as a question with a generic template as follow:\n```markdown\n<|user|>\\nQuestion <|end|>\\n<|assistant|>\n```\nFor example:\n```markdown\n<|system|>\nYou are a helpful AI assistant.<|end|>\n<|user|>\nHow to explain Internet for a medieval knight?<|end|>\n<|assistant|>\n```\n\nwhere the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following:\n\n```markdown\n<|system|>\nYou are a helpful AI assistant.<|end|>\n<|user|>\nI am going to Paris, what should I see?<|end|>\n<|assistant|>\nParis, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\\n\\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\\n3. 
Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\\n\\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.\"<|end|>\n<|user|>\nWhat is so great about #1?<|end|>\n<|assistant|>\n```\n\n## Responsible AI Considerations\n\nLike other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:\n\n+ Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. \n+ Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. \n+ Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. \n+ Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. \n+ Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as \"typing, math, random, collections, datetime, itertools\". 
If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. \n\nDevelopers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include:\n\n+ Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.\n+ High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. \n+ Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). \n+ Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. \n+ Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.\n\n### Model\n\n* Architecture: Phi-3 Mini-4K-Instruct has 3.8B parameters and is a dense decoder-only Transformer model. 
The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidlines.\n* Inputs: Text. It is best suited for prompts using chat format.\n* Context length: 4K tokens\n* GPUs: 512 H100-80G\n* Training time: 7 days\n* Training data: 3.3T tokens\n* Outputs: Generated text in response to the input\n* Dates: Our models were trained between February and April 2024\n* Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models.\n\n### Datasets\n\nOur training data includes a wide variety of sources, totaling 3.3 trillion tokens, and is a combination of \n1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; \n2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); \n3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness.\n\n### Fine-tuning\n\nA basic example of multi-GPUs supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/sample_finetune.py).\n\n## Benchmarks\n\nWe report the results for Phi-3-Mini-4K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Phi-2, Mistral-7b-v0.1, Mixtral-8x7b, Gemma 7B, Llama-3-8B-Instruct, and GPT-3.5.\n\nAll the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. 
These numbers might differ from other published numbers due to slightly different choices in the evaluation.\n\nAs is now standard, we use few-shot prompts to evaluate the models, at temperature 0. \nThe prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3.\nMore specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model.\n\nThe number of k–shot examples is listed per-benchmark. \n\n| | Phi-3-Mini-4K-In
3.8b | Phi-3-Small
7b (preview) | Phi-3-Medium
14b (preview) | Phi-2
2.7b | Mistral
7b | Gemma
7b | Llama-3-In
8b | Mixtral
8x7b | GPT-3.5
version 1106 |\n|---|---|---|---|---|---|---|---|---|---|\n| MMLU
5-Shot | 68.8 | 75.3 | 78.2 | 56.3 | 61.7 | 63.6 | 66.5 | 68.4 | 71.4 |\n| HellaSwag
5-Shot | 76.7 | 78.7 | 83.2 | 53.6 | 58.5 | 49.8 | 71.1 | 70.4 | 78.8 |\n| ANLI
7-Shot | 52.8 | 55.0 | 58.7 | 42.5 | 47.1 | 48.7 | 57.3 | 55.2 | 58.1 |\n| GSM-8K
0-Shot; CoT | 82.5 | 86.4 | 90.8 | 61.1 | 46.4 | 59.8 | 77.4 | 64.7 | 78.1 |\n| MedQA
2-Shot | 53.8 | 58.2 | 69.8 | 40.9 | 49.6 | 50.0 | 60.5 | 62.2 | 63.4 |\n| AGIEval
0-Shot | 37.5 | 45.0 | 49.7 | 29.8 | 35.1 | 42.1 | 42.0 | 45.2 | 48.4 |\n| TriviaQA
5-Shot | 64.0 | 59.1 | 73.3 | 45.2 | 72.3 | 75.2 | 67.7 | 82.2 | 85.8 |\n| Arc-C
10-Shot | 84.9 | 90.7 | 91.9 | 75.9 | 78.6 | 78.3 | 82.8 | 87.3 | 87.4 |\n| Arc-E
10-Shot | 94.6 | 97.1 | 98.0 | 88.5 | 90.6 | 91.4 | 93.4 | 95.6 | 96.3 |\n| PIQA
5-Shot | 84.2 | 87.8 | 88.2 | 60.2 | 77.7 | 78.1 | 75.7 | 86.0 | 86.6 |\n| SociQA
5-Shot | 76.6 | 79.0 | 79.4 | 68.3 | 74.6 | 65.5 | 73.9 | 75.9 | 68.3 |\n| BigBench-Hard
0-Shot | 71.7 | 75.0 | 82.5 | 59.4 | 57.3 | 59.6 | 51.5 | 69.7 | 68.32 |\n| WinoGrande
5-Shot | 70.8 | 82.5 | 81.2 | 54.7 | 54.2 | 55.6 | 65 | 62.0 | 68.8 |\n| OpenBookQA
10-Shot | 83.2 | 88.4 | 86.6 | 73.6 | 79.8 | 78.6 | 82.6 | 85.8 | 86.0 |\n| BoolQ
0-Shot | 77.6 | 82.9 | 86.5 | -- | 72.2 | 66.0 | 80.9 | 77.6 | 79.1 |\n| CommonSenseQA
10-Shot | 80.2 | 80.3 | 82.6 | 69.3 | 72.6 | 76.2 | 79 | 78.1 | 79.6 |\n| TruthfulQA
10-Shot | 65.0 | 68.1 | 74.8 | -- | 52.1 | 53.0 | 63.2 | 60.1 | 85.8 |\n| HumanEval
0-Shot | 59.1 | 59.1 | 54.7 | 59.0 | 28.0 | 34.1 | 60.4 | 37.8 | 62.2 |\n| MBPP
3-Shot | 53.8 | 71.4 | 73.7 | 60.6 | 50.8 | 51.5 | 67.7 | 60.2 | 77.8 |\n\n## Software\n\n* [PyTorch](https://github.com/pytorch/pytorch)\n* [DeepSpeed](https://github.com/microsoft/DeepSpeed)\n* [Transformers](https://github.com/huggingface/transformers)\n* [Flash-Attention](https://github.com/HazyResearch/flash-attention)\n\n## Hardware\nNote that by default, the Phi-3-mini model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:\n* NVIDIA A100\n* NVIDIA A6000\n* NVIDIA H100\n\n## Cross Platform Support\n\nONNX runtime ecosystem now supports Phi-3 Mini models across platforms and hardware. You can find the optimized Phi-3 Mini-4K-Instruct ONNX model [here](https://aka.ms/phi3-mini-4k-instruct-onnx).\n\nOptimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML support lets developers bring hardware acceleration to Windows devices at scale across AMD, Intel, and NVIDIA GPUs. \nAlong with DirectML, ONNX Runtime provides cross platform support for Phi-3 across a range of devices CPU, GPU, and mobile.\n\nHere are some of the optimized configurations we have added: \n\n1. ONNX models for int4 DML: Quantized to int4 via AWQ\n2. ONNX model for fp16 CUDA\n3. ONNX model for int4 CUDA: Quantized to int4 via RTN\n4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN\n\n## License\n\nThe model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-4k/resolve/main/LICENSE).\n\n## Trademarks\n\nThis project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). 
Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.\n\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA"],"string":"[\n \"MEDQA\"\n]"}}},{"rowIdx":2481,"cells":{"id":{"kind":"string","value":"alimama-creative/slam-lora-sdxl"},"author":{"kind":"string","value":"alimama-creative"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","arxiv:2404.13903","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:finetune:stabilityai/stable-diffusion-xl-base-1.0","license:apache-2.0","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"arxiv:2404.13903\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:finetune:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:apache-2.0\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-04-29T04:07:28Z","string":"2024-04-29T04:07:28Z"},"last_modified":{"kind":"string","value":"2024-05-15T10:04:31+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":10,"string":"10"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlibrary_name: diffusers\nlicense: apache-2.0\ntags:\n- text-to-image\ninference: false\n---\n# Sub-path Linear Approximation Model (SLAM) LoRA: SDXL\nPaper: [https://arxiv.org/abs/2404.13903](https://arxiv.org/abs/2404.13903)
\nProject Page: [https://subpath-linear-approx-model.github.io/](https://subpath-linear-approx-model.github.io/)
\nThe checkpoint is a distilled from [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) with our proposed Sub-path Linear Approximation Model, which reduces the number of inference steps to only between 2-4 steps.\n## Usage\nFirst, install the latest version of the Diffusers library as well as peft, accelerate and transformers.\n```bash\npip install --upgrade pip\npip install --upgrade diffusers transformers accelerate peft\n```\nWe implement SLAM to be compatible with [LCMScheduler](https://huggingface.co/docs/diffusers/v0.22.3/en/api/schedulers/lcm#diffusers.LCMScheduler). You can use SLAM-LoRA just like you use LCM-LoRA.\n```python\nimport torch\nfrom diffusers import LCMScheduler, AutoPipelineForText2Image\n\nmodel_id = \"stabilityai/stable-diffusion-xl-base-1.0\"\nadapter_id = \"alimama-creative/slam-lora-sdxl\"\n\npipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant=\"fp16\")\npipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)\npipe.to(\"cuda\")\n\n# load and fuse lcm lora\npipe.load_lora_weights(adapter_id)\npipe.fuse_lora()\n\nprompt = \"A brown teddy bear holding a glass vase in front of a grave.\"\n\nimage = pipe(prompt=prompt, num_inference_steps=4, guidance_scale=1.0).images[0]\n\n```\n\n\nCompare with latent-consistency/lcm-lora-sdxl.\n\n\n---\n\nMore examples:\n\n\n\n\n"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2482,"cells":{"id":{"kind":"string","value":"Severian/Llama-3-IMPACTS-2x8B-64k-GGUF"},"author":{"kind":"string","value":"Severian"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["gguf","climate change","biomimicry","theoretical astrobiology","environmental simulations","predictive modeling","life origins","ecological impacts","sustainable technologies","cross-disciplinary learning","artificial intelligence","machine 
learning","data integration","complex systems","scenario analysis","speculative science","universe exploration","biodiversity","planetary studies","innovation in science","role playing scenarios","text-generation","en","dataset:Severian/IMPACTS","license:mit","endpoints_compatible","region:us","conversational"],"string":"[\n \"gguf\",\n \"climate change\",\n \"biomimicry\",\n \"theoretical astrobiology\",\n \"environmental simulations\",\n \"predictive modeling\",\n \"life origins\",\n \"ecological impacts\",\n \"sustainable technologies\",\n \"cross-disciplinary learning\",\n \"artificial intelligence\",\n \"machine learning\",\n \"data integration\",\n \"complex systems\",\n \"scenario analysis\",\n \"speculative science\",\n \"universe exploration\",\n \"biodiversity\",\n \"planetary studies\",\n \"innovation in science\",\n \"role playing scenarios\",\n \"text-generation\",\n \"en\",\n \"dataset:Severian/IMPACTS\",\n \"license:mit\",\n \"endpoints_compatible\",\n \"region:us\",\n \"conversational\"\n]"},"created_time":{"kind":"timestamp","value":"2024-05-02T16:17:27Z","string":"2024-05-02T16:17:27Z"},"last_modified":{"kind":"string","value":"2024-05-04T13:10:58+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":2,"string":"2"},"README":{"kind":"string","value":"---\ndatasets:\n- Severian/IMPACTS\nlanguage:\n- en\nlicense: mit\npipeline_tag: text-generation\ntags:\n- climate change\n- biomimicry\n- theoretical astrobiology\n- environmental simulations\n- predictive modeling\n- life origins\n- ecological impacts\n- sustainable technologies\n- cross-disciplinary learning\n- artificial intelligence\n- machine learning\n- data integration\n- complex systems\n- scenario analysis\n- speculative science\n- universe exploration\n- biodiversity\n- planetary studies\n- innovation in science\n- role playing scenarios\n---\n\n# Llama-3-IMPACTS-2x8B-64k-MLX\n\n\n\n---\n\n**Designed for Advanced Problem-Solving Across 
Interconnected Domains of Biomimicry, Climate Change, and Astrobiology**\n\nThe `Llama-3-IMPACTS-2x8B-64k` model is a cutting-edge large language model trained on the I.M.P.A.C.T.S dataset, which encompasses scenarios from biomimicry, climate change, and theoretical astrobiology. This model has been specifically tailored to generate innovative solutions and insights for both Earth and potential extraterrestrial environments, reflecting key themes of resilience, sustainability, and the interconnectedness of life across the universe.\n\n## Model Details\n\n### Description\n\n- **Model name:** `Llama-3-IMPACTS-2x8B-64k`\n- **Developer:** Severian\n- **Version:** 1.0\n- **License:** MIT\n\n### Training Data\n\nThe model was trained on a subset of the I.M.P.A.C.T. dataset, utilizing 35,000 carefully curated examples that include detailed scenarios involving climate adaptation, biomimetic applications, and the potential for life in varying cosmic conditions.\n\n### Model Architecture\n\n- **Type:** Llama-3\n- **Parameters:** 8 billion\n- **MoE:** 2 Experts\n- **Training:**\n- Epochs: 1 (35K Examples)\n- R = 64\n- Alpha = 128\n- Lr = 1e-7\n- **Context Limit:** 64K\n\n## Intended Uses\n\nThis model is intended for use in applications that require deep, interdisciplinary understanding and the generation of novel insights within the realms of environmental science, synthetic biology, space exploration, and sustainability studies. Its capabilities make it ideal for:\n- Research and academic studies aiming to explore complex scenarios involving ecological and astrobiological phenomena.\n- Organizations looking to innovate in the fields of climate resilience and biomimicry.\n- Creative problem-solving in contexts where conventional approaches are insufficient.\n\n## How to Use This Model\n\nThe model can be loaded and used in various natural language processing tasks that require nuanced understanding and creative output. 
Here is a basic example of how to load and use the model using the Hugging Face Transformers library:\n\n**Can be used with any GGUF serving framework such as Ollama, LMStudio, llama.cpp, Nitro, etc.**\n\n# Use Cases\n\n## Universe/World Simulation\n\nUse Llama-3-IMPACTS-2x8B-64k to simulate incredibly unique and diverse ecosystems and cosmic worlds. Example image generation (text-to-image):\n\n**User Prompt:** 'Craft a detailed image generation prompt focused on visualizing an innovative scenario that integrates biomimicry, climate change adaptation, and astrobiology.'\n\n```md\nImage Generation Prompt:\n\nTitle: “Biomimicry on Kepler-62f: A Red Dwarf Oasis”\n\nDescription:\n\nIn the year 2154, humanity discovers a new Earth-like planet, Kepler-62f, orbiting a red dwarf star. This distant world is beset by extreme climate conditions, with scorching daytime temperatures and frigid nighttime temperatures. To thrive in this unforgiving environment, scientists apply biomimetic technologies developed on Earth to create a sustainable ecosystem.\n\nScene:\n\nIn the foreground, a sprawling cityscape stretches across the planet’s surface, its architecture inspired by nature’s own designs. Towering “Tree-Habitats” mimic the branching structures of terrestrial trees, providing shelter and habitat for various extraterrestrial species. Their trunks are covered in iridescent, solar-powered panels that generate energy while also filtering out harmful radiation from the red dwarf star.\n\nIn the distance, a vast network of “Coral-Reefs” sprawls across the planet’s surface, mimicking the intricate structures found on Earth’s coral reefs. 
These artificial ecosystems support an incredible array of alien marine life, including bioluminescent fish, towering kelp-like seaweed, and gelatinous sea creatures that drift through the water like terrestrial jellyfish.\n\nAbove the city, a series of “Cloud-Cities” float majestically, their translucent membranes reflecting the red dwarf star’s light. These floating metropolises are home to species adapted for life in the upper atmosphere, such as winged beings with iridescent feathers and gas-filled bodies that allow them to soar through the skies.\n\nKey Features:\n\nBiomimetic Architecture: Buildings and structures are designed to mimic natural forms, providing insulation, shelter, and habitat for various species.\nArtificial Ecosystems: “Coral-Reefs” support marine life, while “Tree-Habitats” provide shelter and food sources for terrestrial species.\nEnergy Harvesting: Solar-powered panels on the Tree-Habitats generate energy, while wind turbines and tidal power generators supplement the planet’s energy needs.\nAdvanced Life Support Systems: Closed-loop systems recycle water, air, and waste, minimizing the impact of human activities on the planet’s fragile ecosystem.\nExtraterrestrial Biodiversity: Various alien species thrive in this red dwarf oasis, from towering kelp-like seaweed to bioluminescent fish and winged beings adapted for life in the upper atmosphere.\nColor Palette:\n\nWarm oranges and yellows evoke the scorching daytime temperatures\nDeep blues and purples represent the frigid nighttime conditions\nIridescent hues on the Tree-Habitats and Cloud-Cities reflect the planet’s unique radiation patterns\nStyle:\n\nBlend of realistic and stylized elements, with a focus on showcasing the innovative biomimetic technologies and extraterrestrial biodiversity.\nIncorporate subtle texture and gradient effects to convey the harsh climate conditions and the adaptability of life on Kepler-62f.\nBy visualizing this futuristic scenario, we can explore the 
possibilities of biomimicry as a solution for climate change adaptation and sustainability in extreme environments. The image should inspire viewers to consider the potential applications of nature-inspired technologies in our own struggle against environmental challenges and encourage a deeper appreciation for the diversity of life that exists – or could exist – beyond Earth.\n```\n---\n\n---\n\n\n### Disaster Response Simulation\n\n**Prompt:** \"Simulate the impact of a Category 5 hurricane hitting the eastern coast of Florida in 2030, considering factors such as rising sea levels and urban density. Assess potential emergency responses and resource needs.\"\n\n#### Example Response\n**Response:** \n\n\"**Scenario:**\n\nA powerful Category 5 hurricane, named \"Hurricane Ana,\" makes landfall on September 15, 2030, along the eastern coast of Florida, near Miami. The storm brings with it catastrophic winds, torrential rainfall, and a massive storm surge due to rising sea levels.\n\n**Storm Characteristics:**\n\n* Wind Speeds: Up to 160 mph (257 km/h)\n* Storm Surge: 10-15 feet (3-4.5 meters) above mean sea level\n* Rainfall: 12-18 inches (30-45 cm) in the first 24 hours, with isolated areas receiving up to 24 inches (60 cm)\n\n**Impacts:**\n\n1. **Coastal Flooding:** Rising sea levels exacerbate the storm surge, causing widespread flooding and damage along the eastern coast of Florida, particularly in Miami-Dade, Broward, and Palm Beach counties.\n2. **Urban Infrastructure:** The densely populated cities along the coast suffer significant damage to buildings, roads, and infrastructure, leading to disruptions in essential services like power, water, and communication.\n3. **Displacement and Evacuations:** Over 1 million people are displaced or evacuated from their homes, with many seeking shelter in nearby inland areas or temporary facilities.\n4. 
**Environmental Concerns:** The storm surge contaminates waterways, causing concerns about water quality and potential health risks for those affected.\n5. **Economic Impact:** The estimated economic loss is around $100 billion, considering damage to infrastructure, businesses, and agricultural industries.\n\n**Emergency Response:**\n\n1. **Evacuation Efforts:** Emergency responders prioritize evacuations, focusing on vulnerable populations like the elderly, children, and those with disabilities. They also ensure that essential services like hospitals, fire stations, and emergency centers are evacuated.\n2. **Search and Rescue Operations:** Teams conduct door-to-door searches for trapped individuals, using specialized equipment and resources to navigate flooded areas.\n3. **Food, Water, and Medical Supplies:** Aid distributions prioritize the most affected communities, with a focus on providing essential supplies, shelter, and medical care.\n4. **Power Restoration:** Utility companies work to restore power as quickly as possible, deploying crews from neighboring states and using advanced technologies like drones and satellite imaging to assess damage and optimize response efforts.\n\n**Resource Needs:**\n\n1. **Personnel:** Over 10,000 emergency responders, including firefighters, police officers, medical personnel, and National Guard troops, are deployed to support the response effort.\n2. **Equipment and Supplies:** Critical resources include:\n\t* Generators and fuel\n\t* Food, water, and medical supplies\n\t* Communication equipment (satellite phones, etc.)\n\t* Heavy machinery for debris removal and infrastructure repair\n\t* Shelter materials (tents, blankets, etc.)\n3. **Funding:** Estimated costs exceed $500 million, requiring a significant allocation of federal, state, and local funds to support the response and recovery efforts.\n4. 
**Logistical Support:** Coordination with neighboring states and national agencies is crucial for maintaining supply chains, transporting personnel and equipment, and sharing expertise.\n\n**Challenges:**\n\n1. **Infrastructure Overload:** The sheer scale of damage overwhelms local infrastructure, leading to delays in aid distribution and response efforts.\n2. **Water Contamination:** Concerns about water quality hinder access to clean drinking water, exacerbating public health risks.\n3. **Displacement and Shelter:** The large number of displaced individuals creates a significant shortage of available shelter options, straining resources and emergency services.\n\n**Lessons Learned:**\n\n1. **Preparedness is Key:** Prioritizing evacuation planning, infrastructure hardening, and disaster preparedness can significantly reduce the impact of such events.\n2. **Collaboration is Crucial:** Effective coordination between local, state, and federal agencies, as well as with neighboring states and national organizations, is essential for a successful response.\n3. **Adaptation to Rising Sea Levels:** The increasing threat of sea-level rise requires a shift in disaster preparedness strategies, focusing on resilient infrastructure design and adaptive management practices.\n\n**Conclusion:**\n\nThe simulated impact of Hurricane Ana highlights the devastating consequences of a Category 5 hurricane hitting Florida's eastern coast, exacerbated by rising sea levels. The emergency response efforts require significant resources, personnel, and coordination to address the complex challenges posed by this disaster. 
As we move forward, it is essential to prioritize preparedness, collaboration, and adaptation to mitigate the effects of these events and protect vulnerable communities.\"\n\n# Try it out yourself!\n\n### Extraterrestrial Colony Planning\n**Prompt:** \"Design a sustainable human colony for Mars, focusing on water resource management, energy production, and habitat construction using local materials. Include potential environmental challenges and solutions.\"\n\n### Environmental Policy Formulation\n**Prompt:** \"Evaluate the long-term ecological impacts of banning single-use plastics globally. Simulate the effects on marine life, pollution levels, and waste management systems over the next 50 years.\"\n\n### Advanced Educational Tools\n**Prompt:** \"Create an interactive simulation that demonstrates the water cycle on Earth, including the effects of deforestation and urbanization on water availability and quality in major river basins.\"\n\n### Interactive Storytelling\n**Prompt:** \"Generate a narrative where the user is a leader in a community facing severe drought conditions. Allow the user to make decisions about water usage, agricultural practices, and public policy, showing the consequences of each choice.\"\n\n### Biodiversity Conservation Strategies\n**Prompt:** \"Develop a conservation strategy for the Amazon rainforest, focusing on mitigating the effects of deforestation and climate change. Simulate various scenarios involving local communities and global stakeholders.\"\n\n### Interstellar Communication Simulation\n**Prompt:** \"Imagine a scenario where Earth receives a signal from a distant planet. Simulate a series of potential communications that could be exchanged, considering language barriers and the transmission delay over light-years.\"\n\n### Bioengineering Solutions\n**Prompt:** \"Propose a bioengineering project to create microbial life forms that can detoxify plastic waste in the ocean. 
Describe the genetic traits these organisms would need and simulate the potential ecological impact.\"\n\n### Cross-Planetary Impact Studies\n**Prompt:** \"Analyze how a supernova explosion in a neighboring star system could affect planetary systems in its vicinity, including potential impacts on Earth's magnetic field and atmosphere.\"\n\n### Custom Scenario Development\n**Prompt:** \"Allow the user to create a custom scenario involving an unexpected volcanic eruption in Iceland. Let the user set parameters like the eruption's size, duration, and ash distribution, then simulate the global climate and air travel impacts.\"\n\nThese prompts are designed to maximize the utilization of the model's capabilities in various complex and interdisciplinary scenarios, making them useful for researchers, educators, policymakers, and enthusiasts interested in exploring these domains.\n\n## Limitations and Biases\n\nWhile the `Llama-3-IMPACTS-2x8B-64k` model is designed to be a powerful tool for generating insightful content, it inherits limitations from its training data, which, though extensive, may not capture all possible scenarios or biases. Users should be aware of these limitations and consider them when interpreting the model's outputs, especially in decision-making contexts.\n\n## Model Performance\n\nInitial tests indicate that the model performs exceptionally well in tasks that involve complex reasoning and generating innovative solutions based on the scenarios presented in the I.M.P.A.C.T.S dataset. Further evaluation and fine-tuning may be required to optimize performance for specific applications.\n\nThe `Llama-3-IMPACTS-2x8B-64k` model represents an avenue that AI can use for exploring and solving complex problems across multiple domains. 
By leveraging the rich, interconnected dataset of I.M.P.A.C.T.S, it offers a valuable tool for researchers, innovators, and thinkers aiming to push the boundaries of what's possible in their fields."},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2483,"cells":{"id":{"kind":"string","value":"thesven/Llama3-8B-SFT-SyntheticMedical-bnb-4bit"},"author":{"kind":"string","value":"thesven"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","llama","text-generation","biology","medical","en","dataset:thesven/SyntheticMedicalQA-4336","license:llama3","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"llama\",\n \"text-generation\",\n \"biology\",\n \"medical\",\n \"en\",\n \"dataset:thesven/SyntheticMedicalQA-4336\",\n \"license:llama3\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-05-10T20:35:34Z","string":"2024-05-10T20:35:34Z"},"last_modified":{"kind":"string","value":"2024-05-25T14:55:07+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- thesven/SyntheticMedicalQA-4336\nlanguage:\n- en\nlibrary_name: transformers\nlicense: llama3\ntags:\n- biology\n- medical\n---\n\n# Llama3-8B-SFT-SyntheticMedical-bnb-4bit\n\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/6324ce4d5d0cf5c62c6e3c5a/ZMeYpx2-wRbla__Tf6fvr.png)\n\n## Model Details\n\n### Model Description\n\nLlama3-8B-SFT-SSyntheticMedical-bnb-4bit is trained using the SFT method via QLoRA on 4336 rows of medical data to enhance it's abilities in the realm of scientific anatomy.\n\nThis is the model card of a 🤗 transformers model that has been pushed on the 
Hub. This model card has been automatically generated.\n\n### Using the model with transformers\n\n```python\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig\n\nmodel_name_or_path = \"thesven/Llama3-8B-SFT-SyntheticMedical-bnb-4bit\"\n\n# BitsAndBytesConfig for loading the model in 4-bit precision\nbnb_config = BitsAndBytesConfig(\n load_in_4bit=True,\n bnb_4bit_quant_type=\"nf4\",\n bnb_4bit_compute_dtype=\"float16\",\n)\n\ntokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)\nmodel = AutoModelForCausalLM.from_pretrained(\n model_name_or_path,\n device_map=\"auto\",\n trust_remote_code=False,\n revision=\"main\",\n quantization_config=bnb_config\n)\nmodel.pad_token = model.config.eos_token_id\n\nprompt_template = '''\n<|begin_of_text|><|start_header_id|>system<|end_header_id|>\n\nYou are an expert in the field of anatomy, help explain its topics to me.<|eot_id|><|start_header_id|>user<|end_header_id|>\n\nWhat is the function of the hamstring?<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n'''\n\ninput_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()\noutput = model.generate(inputs=input_ids, temperature=0.1, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)\n\nprint(generated_text)\n\n```"},"matched_bigbio_names":{"kind":"list like","value":["MEDICAL DATA"],"string":"[\n \"MEDICAL DATA\"\n]"}}},{"rowIdx":2484,"cells":{"id":{"kind":"string","value":"Hazy2028/pytorch_model-00001-of-00003.bin"},"author":{"kind":"string","value":"Hazy2028"},"task_category":{"kind":"string","value":"feature-extraction"},"tags":{"kind":"list like","value":["transformers","pytorch","internlm","feature-extraction","medical","custom_code","en","zh","ja","fr","ru","es","dataset:Henrychur/MMedC","arxiv:2402.13963","license:cc-by-4.0","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"internlm\",\n \"feature-extraction\",\n \"medical\",\n \"custom_code\",\n \"en\",\n \"zh\",\n 
\"ja\",\n \"fr\",\n \"ru\",\n \"es\",\n \"dataset:Henrychur/MMedC\",\n \"arxiv:2402.13963\",\n \"license:cc-by-4.0\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-05-23T02:28:52Z","string":"2024-05-23T02:28:52Z"},"last_modified":{"kind":"string","value":"2024-05-23T20:33:29+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- Henrychur/MMedC\nlanguage:\n- en\n- zh\n- ja\n- fr\n- ru\n- es\nlicense: cc-by-4.0\ntags:\n- medical\n---\n# MMedLM\n[💻Github Repo](https://github.com/MAGIC-AI4Med/MMedLM) [🖨️arXiv Paper](https://arxiv.org/abs/2402.13963)\n\nThe official model weights for \"Towards Building Multilingual Language Model for Medicine\".\n\n[MMedLM 2](https://huggingface.co/Henrychur/MMedLM2) has been released now. MMedLM2 is a more powerful multilingual medical foundation model, which has undergone the same medical data enhancement pipeline as MMedLM.\n\n## Introduction\nThis repo contains MMedLM, a multilingual medical foundation model with 7 billion parameters. MMedLM builds upon the foundation of InternLM and has been further pretrained on MMedC, a comprehensive multilingual medical corpus. 
This further pretraining enhances the model's medical-domain knowledge.\n\nThe model underwent further pretraining on MMedC with the following hyperparameters:\n- Iterations: 15000\n- Global batch size: 512\n- Cutoff length: 2048\n- Learning rate: 2e-5\n\n\nThe model can be loaded as follows:\n```py\nimport torch\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\ntokenizer = AutoTokenizer.from_pretrained(\"Henrychur/MMedLM\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"Henrychur/MMedLM\", torch_dtype=torch.float16, trust_remote_code=True)\n```\n\n- Note that this is a foundation model that has not undergone instruction fine-tuning.\n- Testing has found that using the latest version of transformers will result in errors. It is recommended to use transformers==4.28.1.\n## News\n[2024.2.21] Our pre-print paper is released ArXiv. Dive into our findings [here](https://arxiv.org/abs/2402.13963).\n\n[2024.2.20] We release [MMedLM](https://huggingface.co/Henrychur/MMedLM) and [MMedLM 2](https://huggingface.co/Henrychur/MMedLM2). With an auto-regressive continues training on MMedC, these models achieves superior performance compared to all other open-source models, even rivaling GPT-4 on MMedBench.\n\n[2023.2.20] We release [MMedC](https://huggingface.co/datasets/Henrychur/MMedC), a multilingual medical corpus containing 25.5B tokens.\n\n[2023.2.20] We release [MMedBench](https://huggingface.co/datasets/Henrychur/MMedBench), a new multilingual medical multi-choice question-answering\nbenchmark with rationale. Check out the leaderboard [here](https://henrychur.github.io/MultilingualMedQA/).\n\n## Evaluation on MMedBench\nThe further pretrained MMedLM 2 showcast it's great performance in medical domain across different language.\n\n| Method | Size | Year | MMedC | MMedBench | English | Chinese | Japanese | French | Russian | Spanish | Avg. 
|\n|------------------|------|---------|-----------|-----------|----------------|----------------|----------------|----------------|----------------|----------------|----------------|\n| GPT-3.5 | - | 2022.12 | &#10007; | &#10007; | 56.88 | 52.29 | 34.63 | 32.48 | 66.36 | 66.06 | 51.47 |\n| GPT-4 | - | 2023.3 | &#10007; | &#10007; | 78.00 | 75.07 | 72.91 | 56.59 | 83.62 | 85.67 | 74.27 |\n| Gemini-1.0 pro | - | 2024.1 | &#10007; | &#10007; | 53.73 | 60.19 | 44.22 | 29.90 | 73.44 | 69.69 | 55.20 |\n| BLOOMZ | 7B | 2023.5 | &#10007; | trainset | 43.28 | 58.06 | 32.66 | 26.37 | 62.89 | 47.34 | 45.10 |\n| InternLM | 7B | 2023.7 | &#10007; | trainset | 44.07 | 64.62 | 37.19 | 24.92 | 58.20 | 44.97 | 45.67 |\n| Llama\\ 2 | 7B | 2023.7 | &#10007; | trainset | 43.36 | 50.29 | 25.13 | 20.90 | 66.80 | 47.10 | 42.26 |\n| MedAlpaca | 7B | 2023.3 | &#10007; | trainset | 46.74 | 44.80 | 29.64 | 21.06 | 59.38 | 45.00 | 41.11 |\n| ChatDoctor | 7B | 2023.4 | &#10007; | trainset | 43.52 | 43.26 | 25.63 | 18.81 | 62.50 | 43.44 | 39.53 |\n| PMC-LLaMA | 7B | 2023.4 | &#10007; | trainset | 47.53 | 42.44 | 24.12 | 20.74 | 62.11 | 43.29 | 40.04 |\n| Mistral | 7B | 2023.10 | &#10007; | trainset | 61.74 | 71.10 | 44.72 | 48.71 | 74.22 | 63.86 | 60.73 |\n| InternLM\\ 2 | 7B | 2024.2 | &#10007; | trainset | 57.27 | 77.55 | 47.74 | 41.00 | 68.36 | 59.59 | 58.59 |\n| MMedLM~(Ours) | 7B | - | &#10007; | trainset | 49.88 | 70.49 | 46.23 | 36.66 | 72.27 | 54.52 | 55.01 |\n| MMedLM\\ 2~(Ours) | 7B | - | &#10007; | trainset | 61.74 | 80.01 | 61.81 | 52.09 | 80.47 | 67.65 | 67.30 |\n- GPT and Gemini is evluated under zero-shot setting through API\n- Open-source models first undergo training on the trainset of MMedBench before evaluate. 
\n\n## Contact\nIf you have any question, please feel free to contact qiupengcheng@pjlab.org.cn.\n\n## Citation\n```\n@misc{qiu2024building,\n title={Towards Building Multilingual Language Model for Medicine}, \n author={Pengcheng Qiu and Chaoyi Wu and Xiaoman Zhang and Weixiong Lin and Haicheng Wang and Ya Zhang and Yanfeng Wang and Weidi Xie},\n year={2024},\n eprint={2402.13963},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n```"},"matched_bigbio_names":{"kind":"list like","value":["MEDICAL DATA"],"string":"[\n \"MEDICAL DATA\"\n]"}}},{"rowIdx":2485,"cells":{"id":{"kind":"string","value":"LiteLLMs/Phi-3-mini-128k-instruct-GGUF"},"author":{"kind":"string","value":"LiteLLMs"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["gguf","nlp","code","GGUF","text-generation","en","license:mit","endpoints_compatible","region:us","conversational"],"string":"[\n \"gguf\",\n \"nlp\",\n \"code\",\n \"GGUF\",\n \"text-generation\",\n \"en\",\n \"license:mit\",\n \"endpoints_compatible\",\n \"region:us\",\n \"conversational\"\n]"},"created_time":{"kind":"timestamp","value":"2024-05-23T12:52:21Z","string":"2024-05-23T12:52:21Z"},"last_modified":{"kind":"string","value":"2024-05-23T21:08:31+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlanguage:\n- en\nlicense: mit\nlicense_link: https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/LICENSE\npipeline_tag: text-generation\ntags:\n- nlp\n- code\n- GGUF\nwidget:\n- messages:\n - role: user\n content: Can you provide ways to eat combinations of bananas and dragonfruits?\nquantized_by: andrijdavid\n---\n# Phi-3-mini-128k-instruct-GGUF\n- Original model: [Phi-3-mini-128k-instruct](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct)\n\n\n## Description\n\nThis repo contains GGUF format model files for 
[Phi-3-mini-128k-instruct](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct).\n\n\n\n### About GGUF\nGGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp.\nHere is an incomplete list of clients and libraries that are known to support GGUF:\n* [llama.cpp](https://github.com/ggerganov/llama.cpp). This is the source project for GGUF, providing both a Command Line Interface (CLI) and a server option.\n* [text-generation-webui](https://github.com/oobabooga/text-generation-webui), Known as the most widely used web UI, this project boasts numerous features and powerful extensions, and supports GPU acceleration.\n* [Ollama](https://github.com/jmorganca/ollama) Ollama is a lightweight and extensible framework designed for building and running language models locally. It features a simple API for creating, managing, and executing models, along with a library of pre-built models for use in various applications​\n* [KoboldCpp](https://github.com/LostRuins/koboldcpp), A comprehensive web UI offering GPU acceleration across all platforms and architectures, particularly renowned for storytelling.\n* [GPT4All](https://gpt4all.io), This is a free and open source GUI that runs locally, supporting Windows, Linux, and macOS with full GPU acceleration.\n* [LM Studio](https://lmstudio.ai/) An intuitive and powerful local GUI for Windows and macOS (Silicon), featuring GPU acceleration.\n* [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui). 
A notable web UI with a variety of unique features, including a comprehensive model library for easy model selection.\n* [Faraday.dev](https://faraday.dev/), An attractive, user-friendly character-based chat GUI for Windows and macOS (both Silicon and Intel), also offering GPU acceleration.\n* [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), A Python library equipped with GPU acceleration, LangChain support, and an OpenAI-compatible API server.\n* [candle](https://github.com/huggingface/candle), A Rust-based ML framework focusing on performance, including GPU support, and designed for ease of use.\n* [ctransformers](https://github.com/marella/ctransformers), A Python library featuring GPU acceleration, LangChain support, and an OpenAI-compatible AI server.\n* [localGPT](https://github.com/PromtEngineer/localGPT) An open-source initiative enabling private conversations with documents. \n\n\n\n## Explanation of quantisation methods\n
\n Click to see details\nThe new methods available are:\n\n* GGML_TYPE_Q2_K - \"type-1\" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw)\n* GGML_TYPE_Q3_K - \"type-0\" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This end up using 3.4375 bpw.\n* GGML_TYPE_Q4_K - \"type-1\" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw.\n* GGML_TYPE_Q5_K - \"type-1\" 5-bit quantization. Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw\n* GGML_TYPE_Q6_K - \"type-0\" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw.\n
\n\n\n\n## How to download GGUF files\n\n**Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single folder.\n\nThe following clients/libraries will automatically download models for you, providing a list of available models to choose from:\n\n* LM Studio\n* LoLLMS Web UI\n* Faraday.dev\n\n### In `text-generation-webui`\n\nUnder Download Model, you can enter the model repo: LiteLLMs/Phi-3-mini-128k-instruct-GGUF and below it, a specific filename to download, such as: Q4_0/Q4_0-00001-of-00009.gguf.\n\nThen click Download.\n\n### On the command line, including multiple files at once\n\nI recommend using the `huggingface-hub` Python library:\n\n```shell\npip3 install huggingface-hub\n```\n\nThen you can download any individual model file to the current directory, at high speed, with a command like this:\n\n```shell\nhuggingface-cli download LiteLLMs/Phi-3-mini-128k-instruct-GGUF Q4_0/Q4_0-00001-of-00009.gguf --local-dir . --local-dir-use-symlinks False\n```\n\n
\n More advanced huggingface-cli download usage (click to read)\n\nYou can also download multiple files at once with a pattern:\n\n```shell\nhuggingface-cli download LiteLLMs/Phi-3-mini-128k-instruct-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf'\n```\n\nFor more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).\n\nTo accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:\n\n```shell\npip3 install huggingface_hub[hf_transfer]\n```\n\nAnd set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:\n\n```shell\nHF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download LiteLLMs/Phi-3-mini-128k-instruct-GGUF Q4_0/Q4_0-00001-of-00009.gguf --local-dir . --local-dir-use-symlinks False\n```\n\nWindows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command.\n
\n\n\n## Example `llama.cpp` command\n\nMake sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later.\n\n```shell\n./main -ngl 35 -m Q4_0/Q4_0-00001-of-00009.gguf --color -c 8192 --temp 0.7 --repeat_penalty 1.1 -n -1 -p \"\"\n```\n\nChange `-ngl 32` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration.\n\nChange `-c 8192` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. Note that longer sequence lengths require much more resources, so you may need to reduce this value.\n\nIf you want to have a chat-style conversation, replace the `-p ` argument with `-i -ins`\n\nFor other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md)\n\n## How to run in `text-generation-webui`\n\nFurther instructions can be found in the text-generation-webui documentation, here: [text-generation-webui/docs/04 ‐ Model Tab.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/04%20%E2%80%90%20Model%20Tab.md#llamacpp).\n\n## How to run from Python code\n\nYou can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. Note that at the time of writing (Nov 27th 2023), ctransformers has not been updated for some time and is not compatible with some recent models. 
Therefore I recommend you use llama-cpp-python.\n\n### How to load this model in Python code, using llama-cpp-python\n\nFor full documentation, please see: [llama-cpp-python docs](https://abetlen.github.io/llama-cpp-python/).\n\n#### First install the package\n\nRun one of the following commands, according to your system:\n\n```shell\n# Base ctransformers with no GPU acceleration\npip install llama-cpp-python\n# With NVidia CUDA acceleration\nCMAKE_ARGS=\"-DLLAMA_CUBLAS=on\" pip install llama-cpp-python\n# Or with OpenBLAS acceleration\nCMAKE_ARGS=\"-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS\" pip install llama-cpp-python\n# Or with CLBLast acceleration\nCMAKE_ARGS=\"-DLLAMA_CLBLAST=on\" pip install llama-cpp-python\n# Or with AMD ROCm GPU acceleration (Linux only)\nCMAKE_ARGS=\"-DLLAMA_HIPBLAS=on\" pip install llama-cpp-python\n# Or with Metal GPU acceleration for macOS systems only\nCMAKE_ARGS=\"-DLLAMA_METAL=on\" pip install llama-cpp-python\n# In windows, to set the variables CMAKE_ARGS in PowerShell, follow this format; eg for NVidia CUDA:\n$env:CMAKE_ARGS = \"-DLLAMA_OPENBLAS=on\"\npip install llama-cpp-python\n```\n\n#### Simple llama-cpp-python example code\n\n```python\nfrom llama_cpp import Llama\n# Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system.\nllm = Llama(\n model_path=\"./Q4_0/Q4_0-00001-of-00009.gguf\", # Download the model file first\n n_ctx=32768, # The max sequence length to use - note that longer sequence lengths require much more resources\n n_threads=8, # The number of CPU threads to use, tailor to your system and the resulting performance\n n_gpu_layers=35 # The number of layers to offload to GPU, if you have GPU acceleration available\n)\n# Simple inference example\noutput = llm(\n \"\", # Prompt\n max_tokens=512, # Generate up to 512 tokens\n stop=[\"\"], # Example stop token - not necessarily correct for this specific model! 
Please check before using.\n echo=True # Whether to echo the prompt\n)\n# Chat Completion API\nllm = Llama(model_path=\"./Q4_0/Q4_0-00001-of-00009.gguf\", chat_format=\"llama-2\") # Set chat_format according to the model you are using\nllm.create_chat_completion(\n messages = [\n {\"role\": \"system\", \"content\": \"You are a story writing assistant.\"},\n {\n \"role\": \"user\",\n \"content\": \"Write a story about llamas.\"\n }\n ]\n)\n```\n\n## How to use with LangChain\n\nHere are guides on using llama-cpp-python and ctransformers with LangChain:\n\n* [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp)\n* [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers)\n\n\n\n\n\n\n# Original model card: Phi-3-mini-128k-instruct\n\n\n## Model Summary\n\nThe Phi-3-Mini-128K-Instruct is a 3.8 billion-parameter, lightweight, state-of-the-art open model trained using the Phi-3 datasets.\nThis dataset includes both synthetic data and filtered publicly available website data, with an emphasis on high-quality and reasoning-dense properties.\nThe model belongs to the Phi-3 family with the Mini version in two variants [4K](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) which is the context length (in tokens) that it can support.\n\nAfter initial training, the model underwent a post-training process that involved supervised fine-tuning and direct preference optimization to enhance its ability to follow instructions and adhere to safety measures.\nWhen evaluated against benchmarks that test common sense, language understanding, mathematics, coding, long-term context, and logical reasoning, the Phi-3 Mini-128K-Instruct demonstrated robust and state-of-the-art performance among models with fewer than 13 billion parameters.\nResources and Technical Documentation:\n\n\n+ [Phi-3 Microsoft 
Blog](https://aka.ms/Phi-3Build2024)\n+ [Phi-3 Technical Report](https://aka.ms/phi3-tech-report)\n+ [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai)\n+ [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook)\n\n| | Short Context | Long Context |\n| | -- | | -- | | ----- |\n| MMLU
5-Shot | 68.1 | 75.3 | 78.2 | 56.3 | 61.7 | 63.6 | 66.5 | 68.4 | 71.4 |\n| HellaSwag
5-Shot | 74.5 | 78.7 | 83.2 | 53.6 | 58.5 | 49.8 | 71.1 | 70.4 | 78.8 |\n| ANLI
7-Shot | 52.8 | 55.0 | 58.7 | 42.5 | 47.1 | 48.7 | 57.3 | 55.2 | 58.1 |\n| GSM-8K
0-Shot; CoT | 83.6 | 86.4 | 90.8 | 61.1 | 46.4 | 59.8 | 77.4 | 64.7 | 78.1 |\n| MedQA
2-Shot | 55.3 | 58.2 | 69.8 | 40.9 | 49.6 | 50.0 | 60.5 | 62.2 | 63.4 |\n| AGIEval
0-Shot | 36.9 | 45.0 | 49.7 | 29.8 | 35.1 | 42.1 | 42.0 | 45.2 | 48.4 |\n| TriviaQA
5-Shot | 57.1 | 59.1 | 73.3 | 45.2 | 72.3 | 75.2 | 67.7 | 82.2 | 85.8 |\n| Arc-C
10-Shot | 84.0 | 90.7 | 91.9 | 75.9 | 78.6 | 78.3 | 82.8 | 87.3 | 87.4 |\n| Arc-E
10-Shot | 95.2 | 97.1 | 98.0 | 88.5 | 90.6 | 91.4 | 93.4 | 95.6 | 96.3 |\n| PIQA
5-Shot | 83.6 | 87.8 | 88.2 | 60.2 | 77.7 | 78.1 | 75.7 | 86.0 | 86.6 |\n| SociQA
5-Shot | 76.1 | 79.0 | 79.4 | 68.3 | 74.6 | 65.5 | 73.9 | 75.9 | 68.3 |\n| BigBench-Hard
0-Shot | 71.5 | 75.0 | 82.5 | 59.4 | 57.3 | 59.6 | 51.5 | 69.7 | 68.32 |\n| WinoGrande
5-Shot | 72.5 | 82.5 | 81.2 | 54.7 | 54.2 | 55.6 | 65.0 | 62.0 | 68.8 |\n| OpenBookQA
10-Shot | 80.6 | 88.4 | 86.6 | 73.6 | 79.8 | 78.6 | 82.6 | 85.8 | 86.0 |\n| BoolQ
0-Shot | 78.7 | 82.9 | 86.5 | -- | 72.2 | 66.0 | 80.9 | 77.6 | 79.1 |\n| CommonSenseQA
10-Shot | 78.0 | 80.3 | 82.6 | 69.3 | 72.6 | 76.2 | 79 | 78.1 | 79.6 |\n| TruthfulQA
10-Shot | 63.2 | 68.1 | 74.8 | -- | 52.1 | 53.0 | 63.2 | 60.1 | 85.8 |\n| HumanEval
0-Shot | 57.9 | 59.1 | 54.7 | 47.0 | 28.0 | 34.1 | 60.4 | 37.8 | 62.2 |\n| MBPP
3-Shot | 62.5 | 71.4 | 73.7 | 60.6 | 50.8 | 51.5 | 67.7 | 60.2 | 77.8 |\n\n## Software\n\n* [PyTorch](https://github.com/pytorch/pytorch)\n* [DeepSpeed](https://github.com/microsoft/DeepSpeed)\n* [Transformers](https://github.com/huggingface/transformers)\n* [Flash-Attention](https://github.com/HazyResearch/flash-attention)\n\n## Hardware\nNote that by default, the Phi-3-mini model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:\n* NVIDIA A100\n* NVIDIA A6000\n* NVIDIA H100\n\nIf you want to run the model on:\n* NVIDIA V100 or earlier generation GPUs: call AutoModelForCausalLM.from_pretrained() with attn_implementation=\"eager\"\n* Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [128K](https://aka.ms/phi3-mini-128k-instruct-onnx)\n\n## Cross Platform Support\n\nONNX runtime ecosystem now supports Phi-3 Mini models across platforms and hardware. You can find the optimized Phi-3 Mini-128K-Instruct ONNX model [here](https://aka.ms/phi3-mini-128k-instruct-onnx).\n\nOptimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML support lets developers bring hardware acceleration to Windows devices at scale across AMD, Intel, and NVIDIA GPUs. \nAlong with DirectML, ONNX Runtime provides cross platform support for Phi-3 across a range of devices CPU, GPU, and mobile.\n\nHere are some of the optimized configurations we have added:\n\n1. ONNX models for int4 DML: Quantized to int4 via AWQ\n2. ONNX model for fp16 CUDA\n3. ONNX model for int4 CUDA: Quantized to int4 via RTN\n4. 
ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN\n\n## License\n\nThe model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-128k/resolve/main/LICENSE).\n\n## Trademarks\n\nThis project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.\n\n\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA"],"string":"[\n \"MEDQA\"\n]"}}},{"rowIdx":2486,"cells":{"id":{"kind":"string","value":"kadirnar/yolov10m"},"author":{"kind":"string","value":"kadirnar"},"task_category":{"kind":"string","value":"object-detection"},"tags":{"kind":"list like","value":["yolov10","object-detection","computer-vision","pypi","dataset:detection-datasets/coco","arxiv:2405.14458","license:agpl-3.0","region:us"],"string":"[\n \"yolov10\",\n \"object-detection\",\n \"computer-vision\",\n \"pypi\",\n \"dataset:detection-datasets/coco\",\n \"arxiv:2405.14458\",\n \"license:agpl-3.0\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-05-27T12:49:07Z","string":"2024-05-27T12:49:07Z"},"last_modified":{"kind":"string","value":"2024-05-27T13:07:02+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\ndatasets:\n- detection-datasets/coco\nlicense: agpl-3.0\ntags:\n- object-detection\n- computer-vision\n- yolov10\n- pypi\n---\n\n### Model Description\n[YOLOv10: Trainable bag-of-freebies sets new state-of-the-art for real-time object detectors](https://arxiv.org/abs/2405.14458v1)\n\n[Paper Repo: Implementation of paper - 
YOLOv10](https://github.com/THU-MIG/yolov10)\n\n### Installation\n```\npip install supervision git+https://github.com/THU-MIG/yolov10.git\n```\n\n### Yolov10 Inference\n```python\nfrom ultralytics import YOLOv10\nimport supervision as sv\nimport cv2\n\ndef attempt_download_from_hub(repo_id, hf_token=None):\n # https://github.com/fcakyon/yolov5-pip/blob/main/yolov5/utils/downloads.py\n from huggingface_hub import hf_hub_download, list_repo_files\n from huggingface_hub.utils._errors import RepositoryNotFoundError\n from huggingface_hub.utils._validators import HFValidationError\n try:\n repo_files = list_repo_files(repo_id=repo_id, repo_type='model', token=hf_token)\n model_file = [f for f in repo_files if f.endswith('.pt')][0]\n file = hf_hub_download(\n repo_id=repo_id,\n filename=model_file,\n repo_type='model',\n token=hf_token,\n )\n return file\n except (RepositoryNotFoundError, HFValidationError):\n return None\n \n\nMODEL_PATH = attempt_download_from_hub(\"kadirnar/yolov10x\", hf_token=\"hf_token\")\nIMAGE_PATH = 'dog.jpeg'\n\nmodel = YOLOv10(MODEL_PATH)\nimage = cv2.imread(IMAGE_PATH)\nresults = model(source=image, conf=0.25, verbose=False)[0]\ndetections = sv.Detections.from_ultralytics(results)\nbox_annotator = sv.BoxAnnotator()\n\ncategory_dict = {\n 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus',\n 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant',\n 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat',\n 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear',\n 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag',\n 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard',\n 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove',\n 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle',\n 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl',\n 46: 'banana', 47: 'apple', 
48: 'sandwich', 49: 'orange', 50: 'broccoli',\n 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake',\n 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table',\n 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard',\n 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink',\n 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors',\n 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'\n}\n\nlabels = [\n f\"{category_dict[class_id]} {confidence:.2f}\"\n for class_id, confidence in zip(detections.class_id, detections.confidence)\n]\nannotated_image = box_annotator.annotate(\n image.copy(), detections=detections, labels=labels\n)\n\ncv2.imwrite('annotated_dog.jpeg', annotated_image)\n```\n\n### BibTeX Entry and Citation Info\n ```\n@misc{wang2024yolov10,\n title={YOLOv10: Real-Time End-to-End Object Detection}, \n author={Ao Wang and Hui Chen and Lihao Liu and Kai Chen and Zijia Lin and Jungong Han and Guiguang Ding},\n year={2024},\n eprint={2405.14458},\n archivePrefix={arXiv},\n primaryClass={cs.CV}\n}\n```\n"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2487,"cells":{"id":{"kind":"string","value":"nielsr/yolov10l"},"author":{"kind":"string","value":"nielsr"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["transformers","safetensors","pytorch_model_hub_mixin","model_hub_mixin","object detection","arxiv:2405.14458","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"pytorch_model_hub_mixin\",\n \"model_hub_mixin\",\n \"object detection\",\n \"arxiv:2405.14458\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-06-01T08:09:07Z","string":"2024-06-01T08:09:07Z"},"last_modified":{"kind":"string","value":"2024-06-01T09:20:42+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ntags:\n- pytorch_model_hub_mixin\n- model_hub_mixin\n- object detection\n---\n\nThis model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration.\n\n## Installation\n\nFirst install the [YOLOv10 Github repository](https://github.com/THU-MIG/yolov10) along with supervision which provides some nice utilities for bounding box processing.\n\n```\npip install git+https://github.com/nielsrogge/yolov10.git@feature/add_hf supervision\n```\n\n## Usage\n\nOne can perform inference as follows:\n\n```python\nfrom ultralytics import YOLOv10\nimport supervision as sv\nfrom PIL import Image\nimport requests\n\n# load model\nmodel = YOLOv10.from_pretrained(\"nielsr/yolov10l\")\n\n# load image\nurl = 'http://images.cocodataset.org/val2017/000000039769.jpg'\nimage = Image.open(requests.get(url, stream=True).raw)\nimage = np.array(image)\n\n# perform inference\nresults = model(source=image, conf=0.25, verbose=False)[0]\ndetections = sv.Detections.from_ultralytics(results)\nbox_annotator = sv.BoxAnnotator()\n\ncategory_dict = {\n 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus',\n 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant',\n 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat',\n 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear',\n 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag',\n 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard',\n 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 
'baseball glove',\n 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle',\n 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl',\n 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli',\n 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake',\n 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table',\n 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard',\n 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink',\n 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors',\n 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush'\n}\n\nlabels = [\n f\"{category_dict[class_id]} {confidence:.2f}\"\n for class_id, confidence in zip(detections.class_id, detections.confidence)\n]\nannotated_image = box_annotator.annotate(\n image.copy(), detections=detections, labels=labels\n)\n\nImage.fromarray(annotated_image)\n```\n\nThis shows the following:\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f1158120c833276f61f1a84/hjN882Pbbb9Y13KAO__Wd.png)\n\nhttps://cdn-uploads.huggingface.co/production/uploads/5f1158120c833276f61f1a84/IL9mL4_WUdcSxRQ7AsrTT.png)\n\n### BibTeX Entry and Citation Info\n ```\n@misc{wang2024yolov10,\n title={YOLOv10: Real-Time End-to-End Object Detection}, \n author={Ao Wang and Hui Chen and Lihao Liu and Kai Chen and Zijia Lin and Jungong Han and Guiguang Ding},\n year={2024},\n eprint={2405.14458},\n archivePrefix={arXiv},\n primaryClass={cs.CV}\n}\n```"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2488,"cells":{"id":{"kind":"string","value":"Chan-Y/Cyber-Stable-Realistic"},"author":{"kind":"string","value":"Chan-Y"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","safetensors","art","license:mit","diffusers:StableDiffusion3Pipeline","region:us"],"string":"[\n 
\"diffusers\",\n \"safetensors\",\n \"art\",\n \"license:mit\",\n \"diffusers:StableDiffusion3Pipeline\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-06-30T18:41:14Z","string":"2024-06-30T18:41:14Z"},"last_modified":{"kind":"string","value":"2024-07-03T17:46:26+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nlibrary_name: diffusers\nlicense: mit\ntags:\n- art\n---\n\n### Model Description\n\nThis model combines the capabilities of the stable diffusion medium model with a Civit AI text-to-image model fine-tuned on a custom dataset of high-quality images. \nIt aims to generate realistic and detailed images based on textual prompts.\n![batman](imgs/007_resized.png)\n\n- **Developed by:** [M.Cihan Yalçın](https://www.linkedin.com/in/chanyalcin/)\n- **Model type:** Stable Diffusion\n- **License:** MIT\n- **Finetuned from models:**\n - [stabilityai/stable-diffusion-3-medium-diffusers](https://huggingface.co/stabilityai/stable-diffusion-3-medium)\n - [CyberRealistic](https://civitai.com/models/15003/cyberrealistic)\n\n![008.png](imgs/008.png)\n![009.png](imgs/009.png)\n\n## Uses\n\n\n### Direct Use\n\n```python\nfrom diffusers import DiffusionPipeline\nimport torch\npipeline = DiffusionPipeline.from_pretrained(\n \"Chan-Y/Cyber-Stable-Realistic\", \n torch_dtype=torch.float16).to(\"cuda\")\n\nprompt = \"A bowl of ramen shaped like a cute kawaii bear, by Feng Zikai\"\nnegative = \"\"\nimage = pipeline(prompt,\n negative_prompt=negative).images[0]\nimage\n```\n\n## Bias, Risks, and Limitations\n\n- The model may not always perfectly capture highly complex or abstract concepts.\n- The quality of the output can be influenced by the specificity and clarity of the prompt.\n- Ethical considerations should be taken into account when generating images to avoid misuse.\n\n\n## Finetuning Details\n\n### Finetuning Data\n\n- Model is finetuned with 
sentetic high quality images collected from high performance Text-to-Image models."},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2489,"cells":{"id":{"kind":"string","value":"minchyeom/MemeGPT"},"author":{"kind":"string","value":"minchyeom"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","mistral","text-generation","meme","conversational","en","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"mistral\",\n \"text-generation\",\n \"meme\",\n \"conversational\",\n \"en\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-15T06:50:06Z","string":"2024-07-15T06:50:06Z"},"last_modified":{"kind":"string","value":"2024-07-16T02:16:14+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":2,"string":"2"},"README":{"kind":"string","value":"---\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- meme\n---\n\nThis is NOT MemGPT, this is **Meme**GPT.\n\nWhen using it, please put this as system prompt:\n```\nYou are a witty AI assistant specializing in joke creation. Always respond with a joke, regardless of the input or topic. Craft jokes suitable for a general audience. Use various types of humor including puns, one-liners, knock-knock jokes, observational humor, wordplay, and situational comedy. Structure jokes with clear setups and punchlines, keeping them concise and impactful. Incorporate given topics into your jokes when possible. Reframe questions as jokes while addressing their essence. Employ misdirection and surprise to enhance humor. Never explain jokes or break character – always stay in joke mode. 
Provide unique jokes for multiple responses. Be creative and original, avoiding common, overused jokes. Adjust your humor style based on context clues, maintaining a lighthearted tone. Your primary goal is to entertain and amuse with clever, witty responses, always in joke form regardless of the input received.\n```"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2490,"cells":{"id":{"kind":"string","value":"niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF"},"author":{"kind":"string","value":"niancheng"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","gguf","mteb","transformers","Qwen2","sentence-similarity","llama-cpp","gguf-my-repo","base_model:Alibaba-NLP/gte-Qwen2-7B-instruct","base_model:quantized:Alibaba-NLP/gte-Qwen2-7B-instruct","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us","conversational"],"string":"[\n \"sentence-transformers\",\n \"gguf\",\n \"mteb\",\n \"transformers\",\n \"Qwen2\",\n \"sentence-similarity\",\n \"llama-cpp\",\n \"gguf-my-repo\",\n \"base_model:Alibaba-NLP/gte-Qwen2-7B-instruct\",\n \"base_model:quantized:Alibaba-NLP/gte-Qwen2-7B-instruct\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\",\n \"conversational\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-15T08:07:22Z","string":"2024-07-15T08:07:22Z"},"last_modified":{"kind":"string","value":"2024-07-15T08:07:43+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: Alibaba-NLP/gte-Qwen2-7B-instruct\nlicense: apache-2.0\ntags:\n- mteb\n- sentence-transformers\n- transformers\n- Qwen2\n- sentence-similarity\n- llama-cpp\n- gguf-my-repo\nmodel-index:\n- name: gte-qwen2-7B-instruct\n results:\n - task:\n type: Classification\n 
dataset:\n name: MTEB AmazonCounterfactualClassification (en)\n type: mteb/amazon_counterfactual\n config: en\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 91.31343283582089\n - type: ap\n value: 67.64251402604096\n - type: f1\n value: 87.53372530755692\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonPolarityClassification\n type: mteb/amazon_polarity\n config: default\n split: test\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\n metrics:\n - type: accuracy\n value: 97.497825\n - type: ap\n value: 96.30329547047529\n - type: f1\n value: 97.49769793778039\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (en)\n type: mteb/amazon_reviews_multi\n config: en\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 62.564\n - type: f1\n value: 60.975777935041066\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna\n type: mteb/arguana\n config: default\n split: test\n revision: c22ab2a51041ffd869aaddef7af8d8215647e41a\n metrics:\n - type: map_at_1\n value: 36.486000000000004\n - type: map_at_10\n value: 54.842\n - type: map_at_100\n value: 55.206999999999994\n - type: map_at_1000\n value: 55.206999999999994\n - type: map_at_3\n value: 49.893\n - type: map_at_5\n value: 53.105000000000004\n - type: mrr_at_1\n value: 37.34\n - type: mrr_at_10\n value: 55.143\n - type: mrr_at_100\n value: 55.509\n - type: mrr_at_1000\n value: 55.509\n - type: mrr_at_3\n value: 50.212999999999994\n - type: mrr_at_5\n value: 53.432\n - type: ndcg_at_1\n value: 36.486000000000004\n - type: ndcg_at_10\n value: 64.273\n - type: ndcg_at_100\n value: 65.66199999999999\n - type: ndcg_at_1000\n value: 65.66199999999999\n - type: ndcg_at_3\n value: 54.352999999999994\n - type: ndcg_at_5\n value: 60.131\n - type: precision_at_1\n value: 36.486000000000004\n - type: precision_at_10\n value: 9.395000000000001\n - type: 
precision_at_100\n value: 0.996\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_3\n value: 22.428\n - type: precision_at_5\n value: 16.259\n - type: recall_at_1\n value: 36.486000000000004\n - type: recall_at_10\n value: 93.95400000000001\n - type: recall_at_100\n value: 99.644\n - type: recall_at_1000\n value: 99.644\n - type: recall_at_3\n value: 67.283\n - type: recall_at_5\n value: 81.294\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: v_measure\n value: 56.461169803700564\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: v_measure\n value: 51.73600434466286\n - task:\n type: Reranking\n dataset:\n name: MTEB AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: map\n value: 67.57827065898053\n - type: mrr\n value: 79.08136569493911\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cos_sim_pearson\n value: 83.53324575999243\n - type: cos_sim_spearman\n value: 81.37173362822374\n - type: euclidean_pearson\n value: 82.19243335103444\n - type: euclidean_spearman\n value: 81.33679307304334\n - type: manhattan_pearson\n value: 82.38752665975699\n - type: manhattan_spearman\n value: 81.31510583189689\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 87.56818181818181\n - type: f1\n value: 87.25826722019875\n 
- task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: v_measure\n value: 50.09239610327673\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: v_measure\n value: 46.64733054606282\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackAndroidRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: f46a197baaae43b4f621051089b82a364682dfeb\n metrics:\n - type: map_at_1\n value: 33.997\n - type: map_at_10\n value: 48.176\n - type: map_at_100\n value: 49.82\n - type: map_at_1000\n value: 49.924\n - type: map_at_3\n value: 43.626\n - type: map_at_5\n value: 46.275\n - type: mrr_at_1\n value: 42.059999999999995\n - type: mrr_at_10\n value: 53.726\n - type: mrr_at_100\n value: 54.398\n - type: mrr_at_1000\n value: 54.416\n - type: mrr_at_3\n value: 50.714999999999996\n - type: mrr_at_5\n value: 52.639\n - type: ndcg_at_1\n value: 42.059999999999995\n - type: ndcg_at_10\n value: 55.574999999999996\n - type: ndcg_at_100\n value: 60.744\n - type: ndcg_at_1000\n value: 61.85699999999999\n - type: ndcg_at_3\n value: 49.363\n - type: ndcg_at_5\n value: 52.44\n - type: precision_at_1\n value: 42.059999999999995\n - type: precision_at_10\n value: 11.101999999999999\n - type: precision_at_100\n value: 1.73\n - type: precision_at_1000\n value: 0.218\n - type: precision_at_3\n value: 24.464\n - type: precision_at_5\n value: 18.026\n - type: recall_at_1\n value: 33.997\n - type: recall_at_10\n value: 70.35900000000001\n - type: recall_at_100\n value: 91.642\n - type: recall_at_1000\n value: 97.977\n - type: recall_at_3\n value: 52.76\n - type: recall_at_5\n value: 61.148\n - task:\n type: Retrieval\n dataset:\n name: MTEB 
CQADupstackEnglishRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: ad9991cb51e31e31e430383c75ffb2885547b5f0\n metrics:\n - type: map_at_1\n value: 35.884\n - type: map_at_10\n value: 48.14\n - type: map_at_100\n value: 49.5\n - type: map_at_1000\n value: 49.63\n - type: map_at_3\n value: 44.646\n - type: map_at_5\n value: 46.617999999999995\n - type: mrr_at_1\n value: 44.458999999999996\n - type: mrr_at_10\n value: 53.751000000000005\n - type: mrr_at_100\n value: 54.37800000000001\n - type: mrr_at_1000\n value: 54.415\n - type: mrr_at_3\n value: 51.815\n - type: mrr_at_5\n value: 52.882\n - type: ndcg_at_1\n value: 44.458999999999996\n - type: ndcg_at_10\n value: 54.157\n - type: ndcg_at_100\n value: 58.362\n - type: ndcg_at_1000\n value: 60.178\n - type: ndcg_at_3\n value: 49.661\n - type: ndcg_at_5\n value: 51.74999999999999\n - type: precision_at_1\n value: 44.458999999999996\n - type: precision_at_10\n value: 10.248\n - type: precision_at_100\n value: 1.5890000000000002\n - type: precision_at_1000\n value: 0.207\n - type: precision_at_3\n value: 23.928\n - type: precision_at_5\n value: 16.878999999999998\n - type: recall_at_1\n value: 35.884\n - type: recall_at_10\n value: 64.798\n - type: recall_at_100\n value: 82.345\n - type: recall_at_1000\n value: 93.267\n - type: recall_at_3\n value: 51.847\n - type: recall_at_5\n value: 57.601\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackGamingRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 4885aa143210c98657558c04aaf3dc47cfb54340\n metrics:\n - type: map_at_1\n value: 39.383\n - type: map_at_10\n value: 53.714\n - type: map_at_100\n value: 54.838\n - type: map_at_1000\n value: 54.87800000000001\n - type: map_at_3\n value: 50.114999999999995\n - type: map_at_5\n value: 52.153000000000006\n - type: mrr_at_1\n value: 45.016\n - type: mrr_at_10\n value: 56.732000000000006\n - type: mrr_at_100\n value: 57.411\n - type: mrr_at_1000\n value: 
57.431\n - type: mrr_at_3\n value: 54.044000000000004\n - type: mrr_at_5\n value: 55.639\n - type: ndcg_at_1\n value: 45.016\n - type: ndcg_at_10\n value: 60.228\n - type: ndcg_at_100\n value: 64.277\n - type: ndcg_at_1000\n value: 65.07\n - type: ndcg_at_3\n value: 54.124\n - type: ndcg_at_5\n value: 57.147000000000006\n - type: precision_at_1\n value: 45.016\n - type: precision_at_10\n value: 9.937\n - type: precision_at_100\n value: 1.288\n - type: precision_at_1000\n value: 0.13899999999999998\n - type: precision_at_3\n value: 24.471999999999998\n - type: precision_at_5\n value: 16.991\n - type: recall_at_1\n value: 39.383\n - type: recall_at_10\n value: 76.175\n - type: recall_at_100\n value: 93.02\n - type: recall_at_1000\n value: 98.60900000000001\n - type: recall_at_3\n value: 60.265\n - type: recall_at_5\n value: 67.46600000000001\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackGisRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 5003b3064772da1887988e05400cf3806fe491f2\n metrics:\n - type: map_at_1\n value: 27.426000000000002\n - type: map_at_10\n value: 37.397000000000006\n - type: map_at_100\n value: 38.61\n - type: map_at_1000\n value: 38.678000000000004\n - type: map_at_3\n value: 34.150999999999996\n - type: map_at_5\n value: 36.137\n - type: mrr_at_1\n value: 29.944\n - type: mrr_at_10\n value: 39.654\n - type: mrr_at_100\n value: 40.638000000000005\n - type: mrr_at_1000\n value: 40.691\n - type: mrr_at_3\n value: 36.817\n - type: mrr_at_5\n value: 38.524\n - type: ndcg_at_1\n value: 29.944\n - type: ndcg_at_10\n value: 43.094\n - type: ndcg_at_100\n value: 48.789\n - type: ndcg_at_1000\n value: 50.339999999999996\n - type: ndcg_at_3\n value: 36.984\n - type: ndcg_at_5\n value: 40.248\n - type: precision_at_1\n value: 29.944\n - type: precision_at_10\n value: 6.78\n - type: precision_at_100\n value: 1.024\n - type: precision_at_1000\n value: 0.11800000000000001\n - type: precision_at_3\n value: 
15.895000000000001\n - type: precision_at_5\n value: 11.39\n - type: recall_at_1\n value: 27.426000000000002\n - type: recall_at_10\n value: 58.464000000000006\n - type: recall_at_100\n value: 84.193\n - type: recall_at_1000\n value: 95.52000000000001\n - type: recall_at_3\n value: 42.172\n - type: recall_at_5\n value: 50.101\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackMathematicaRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 90fceea13679c63fe563ded68f3b6f06e50061de\n metrics:\n - type: map_at_1\n value: 19.721\n - type: map_at_10\n value: 31.604\n - type: map_at_100\n value: 32.972\n - type: map_at_1000\n value: 33.077\n - type: map_at_3\n value: 27.218999999999998\n - type: map_at_5\n value: 29.53\n - type: mrr_at_1\n value: 25.0\n - type: mrr_at_10\n value: 35.843\n - type: mrr_at_100\n value: 36.785000000000004\n - type: mrr_at_1000\n value: 36.842000000000006\n - type: mrr_at_3\n value: 32.193\n - type: mrr_at_5\n value: 34.264\n - type: ndcg_at_1\n value: 25.0\n - type: ndcg_at_10\n value: 38.606\n - type: ndcg_at_100\n value: 44.272\n - type: ndcg_at_1000\n value: 46.527\n - type: ndcg_at_3\n value: 30.985000000000003\n - type: ndcg_at_5\n value: 34.43\n - type: precision_at_1\n value: 25.0\n - type: precision_at_10\n value: 7.811\n - type: precision_at_100\n value: 1.203\n - type: precision_at_1000\n value: 0.15\n - type: precision_at_3\n value: 15.423\n - type: precision_at_5\n value: 11.791\n - type: recall_at_1\n value: 19.721\n - type: recall_at_10\n value: 55.625\n - type: recall_at_100\n value: 79.34400000000001\n - type: recall_at_1000\n value: 95.208\n - type: recall_at_3\n value: 35.19\n - type: recall_at_5\n value: 43.626\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackPhysicsRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4\n metrics:\n - type: map_at_1\n value: 33.784\n - type: map_at_10\n value: 47.522\n - 
type: map_at_100\n value: 48.949999999999996\n - type: map_at_1000\n value: 49.038\n - type: map_at_3\n value: 43.284\n - type: map_at_5\n value: 45.629\n - type: mrr_at_1\n value: 41.482\n - type: mrr_at_10\n value: 52.830999999999996\n - type: mrr_at_100\n value: 53.559999999999995\n - type: mrr_at_1000\n value: 53.588\n - type: mrr_at_3\n value: 50.016000000000005\n - type: mrr_at_5\n value: 51.614000000000004\n - type: ndcg_at_1\n value: 41.482\n - type: ndcg_at_10\n value: 54.569\n - type: ndcg_at_100\n value: 59.675999999999995\n - type: ndcg_at_1000\n value: 60.989000000000004\n - type: ndcg_at_3\n value: 48.187000000000005\n - type: ndcg_at_5\n value: 51.183\n - type: precision_at_1\n value: 41.482\n - type: precision_at_10\n value: 10.221\n - type: precision_at_100\n value: 1.486\n - type: precision_at_1000\n value: 0.17500000000000002\n - type: precision_at_3\n value: 23.548\n - type: precision_at_5\n value: 16.805\n - type: recall_at_1\n value: 33.784\n - type: recall_at_10\n value: 69.798\n - type: recall_at_100\n value: 90.098\n - type: recall_at_1000\n value: 98.176\n - type: recall_at_3\n value: 52.127\n - type: recall_at_5\n value: 59.861\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackProgrammersRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 6184bc1440d2dbc7612be22b50686b8826d22b32\n metrics:\n - type: map_at_1\n value: 28.038999999999998\n - type: map_at_10\n value: 41.904\n - type: map_at_100\n value: 43.36\n - type: map_at_1000\n value: 43.453\n - type: map_at_3\n value: 37.785999999999994\n - type: map_at_5\n value: 40.105000000000004\n - type: mrr_at_1\n value: 35.046\n - type: mrr_at_10\n value: 46.926\n - type: mrr_at_100\n value: 47.815000000000005\n - type: mrr_at_1000\n value: 47.849000000000004\n - type: mrr_at_3\n value: 44.273\n - type: mrr_at_5\n value: 45.774\n - type: ndcg_at_1\n value: 35.046\n - type: ndcg_at_10\n value: 48.937000000000005\n - type: ndcg_at_100\n value: 
54.544000000000004\n - type: ndcg_at_1000\n value: 56.069\n - type: ndcg_at_3\n value: 42.858000000000004\n - type: ndcg_at_5\n value: 45.644\n - type: precision_at_1\n value: 35.046\n - type: precision_at_10\n value: 9.452\n - type: precision_at_100\n value: 1.429\n - type: precision_at_1000\n value: 0.173\n - type: precision_at_3\n value: 21.346999999999998\n - type: precision_at_5\n value: 15.342\n - type: recall_at_1\n value: 28.038999999999998\n - type: recall_at_10\n value: 64.59700000000001\n - type: recall_at_100\n value: 87.735\n - type: recall_at_1000\n value: 97.41300000000001\n - type: recall_at_3\n value: 47.368\n - type: recall_at_5\n value: 54.93900000000001\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4\n metrics:\n - type: map_at_1\n value: 28.17291666666667\n - type: map_at_10\n value: 40.025749999999995\n - type: map_at_100\n value: 41.39208333333333\n - type: map_at_1000\n value: 41.499249999999996\n - type: map_at_3\n value: 36.347\n - type: map_at_5\n value: 38.41391666666667\n - type: mrr_at_1\n value: 33.65925\n - type: mrr_at_10\n value: 44.085499999999996\n - type: mrr_at_100\n value: 44.94116666666667\n - type: mrr_at_1000\n value: 44.9855\n - type: mrr_at_3\n value: 41.2815\n - type: mrr_at_5\n value: 42.91491666666666\n - type: ndcg_at_1\n value: 33.65925\n - type: ndcg_at_10\n value: 46.430833333333325\n - type: ndcg_at_100\n value: 51.761\n - type: ndcg_at_1000\n value: 53.50899999999999\n - type: ndcg_at_3\n value: 40.45133333333333\n - type: ndcg_at_5\n value: 43.31483333333334\n - type: precision_at_1\n value: 33.65925\n - type: precision_at_10\n value: 8.4995\n - type: precision_at_100\n value: 1.3210000000000004\n - type: precision_at_1000\n value: 0.16591666666666666\n - type: precision_at_3\n value: 19.165083333333335\n - type: precision_at_5\n value: 13.81816666666667\n - type: recall_at_1\n 
value: 28.17291666666667\n - type: recall_at_10\n value: 61.12624999999999\n - type: recall_at_100\n value: 83.97266666666667\n - type: recall_at_1000\n value: 95.66550000000001\n - type: recall_at_3\n value: 44.661249999999995\n - type: recall_at_5\n value: 51.983333333333334\n - type: map_at_1\n value: 17.936\n - type: map_at_10\n value: 27.399\n - type: map_at_100\n value: 28.632\n - type: map_at_1000\n value: 28.738000000000003\n - type: map_at_3\n value: 24.456\n - type: map_at_5\n value: 26.06\n - type: mrr_at_1\n value: 19.224\n - type: mrr_at_10\n value: 28.998\n - type: mrr_at_100\n value: 30.11\n - type: mrr_at_1000\n value: 30.177\n - type: mrr_at_3\n value: 26.247999999999998\n - type: mrr_at_5\n value: 27.708\n - type: ndcg_at_1\n value: 19.224\n - type: ndcg_at_10\n value: 32.911\n - type: ndcg_at_100\n value: 38.873999999999995\n - type: ndcg_at_1000\n value: 41.277\n - type: ndcg_at_3\n value: 27.142\n - type: ndcg_at_5\n value: 29.755\n - type: precision_at_1\n value: 19.224\n - type: precision_at_10\n value: 5.6930000000000005\n - type: precision_at_100\n value: 0.9259999999999999\n - type: precision_at_1000\n value: 0.126\n - type: precision_at_3\n value: 12.138\n - type: precision_at_5\n value: 8.909\n - type: recall_at_1\n value: 17.936\n - type: recall_at_10\n value: 48.096\n - type: recall_at_100\n value: 75.389\n - type: recall_at_1000\n value: 92.803\n - type: recall_at_3\n value: 32.812999999999995\n - type: recall_at_5\n value: 38.851\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackStatsRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a\n metrics:\n - type: map_at_1\n value: 24.681\n - type: map_at_10\n value: 34.892\n - type: map_at_100\n value: 35.996\n - type: map_at_1000\n value: 36.083\n - type: map_at_3\n value: 31.491999999999997\n - type: map_at_5\n value: 33.632\n - type: mrr_at_1\n value: 28.528\n - type: mrr_at_10\n value: 37.694\n - type: 
mrr_at_100\n value: 38.613\n - type: mrr_at_1000\n value: 38.668\n - type: mrr_at_3\n value: 34.714\n - type: mrr_at_5\n value: 36.616\n - type: ndcg_at_1\n value: 28.528\n - type: ndcg_at_10\n value: 40.703\n - type: ndcg_at_100\n value: 45.993\n - type: ndcg_at_1000\n value: 47.847\n - type: ndcg_at_3\n value: 34.622\n - type: ndcg_at_5\n value: 38.035999999999994\n - type: precision_at_1\n value: 28.528\n - type: precision_at_10\n value: 6.902\n - type: precision_at_100\n value: 1.0370000000000001\n - type: precision_at_1000\n value: 0.126\n - type: precision_at_3\n value: 15.798000000000002\n - type: precision_at_5\n value: 11.655999999999999\n - type: recall_at_1\n value: 24.681\n - type: recall_at_10\n value: 55.81\n - type: recall_at_100\n value: 79.785\n - type: recall_at_1000\n value: 92.959\n - type: recall_at_3\n value: 39.074\n - type: recall_at_5\n value: 47.568\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackTexRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 46989137a86843e03a6195de44b09deda022eec7\n metrics:\n - type: map_at_1\n value: 18.627\n - type: map_at_10\n value: 27.872000000000003\n - type: map_at_100\n value: 29.237999999999996\n - type: map_at_1000\n value: 29.363\n - type: map_at_3\n value: 24.751\n - type: map_at_5\n value: 26.521\n - type: mrr_at_1\n value: 23.021\n - type: mrr_at_10\n value: 31.924000000000003\n - type: mrr_at_100\n value: 32.922000000000004\n - type: mrr_at_1000\n value: 32.988\n - type: mrr_at_3\n value: 29.192\n - type: mrr_at_5\n value: 30.798\n - type: ndcg_at_1\n value: 23.021\n - type: ndcg_at_10\n value: 33.535\n - type: ndcg_at_100\n value: 39.732\n - type: ndcg_at_1000\n value: 42.201\n - type: ndcg_at_3\n value: 28.153\n - type: ndcg_at_5\n value: 30.746000000000002\n - type: precision_at_1\n value: 23.021\n - type: precision_at_10\n value: 6.459\n - type: precision_at_100\n value: 1.1320000000000001\n - type: precision_at_1000\n value: 0.153\n - type: 
precision_at_3\n value: 13.719000000000001\n - type: precision_at_5\n value: 10.193000000000001\n - type: recall_at_1\n value: 18.627\n - type: recall_at_10\n value: 46.463\n - type: recall_at_100\n value: 74.226\n - type: recall_at_1000\n value: 91.28500000000001\n - type: recall_at_3\n value: 31.357000000000003\n - type: recall_at_5\n value: 38.067\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackUnixRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53\n metrics:\n - type: map_at_1\n value: 31.457\n - type: map_at_10\n value: 42.888\n - type: map_at_100\n value: 44.24\n - type: map_at_1000\n value: 44.327\n - type: map_at_3\n value: 39.588\n - type: map_at_5\n value: 41.423\n - type: mrr_at_1\n value: 37.126999999999995\n - type: mrr_at_10\n value: 47.083000000000006\n - type: mrr_at_100\n value: 47.997\n - type: mrr_at_1000\n value: 48.044\n - type: mrr_at_3\n value: 44.574000000000005\n - type: mrr_at_5\n value: 46.202\n - type: ndcg_at_1\n value: 37.126999999999995\n - type: ndcg_at_10\n value: 48.833\n - type: ndcg_at_100\n value: 54.327000000000005\n - type: ndcg_at_1000\n value: 56.011\n - type: ndcg_at_3\n value: 43.541999999999994\n - type: ndcg_at_5\n value: 46.127\n - type: precision_at_1\n value: 37.126999999999995\n - type: precision_at_10\n value: 8.376999999999999\n - type: precision_at_100\n value: 1.2309999999999999\n - type: precision_at_1000\n value: 0.146\n - type: precision_at_3\n value: 20.211000000000002\n - type: precision_at_5\n value: 14.16\n - type: recall_at_1\n value: 31.457\n - type: recall_at_10\n value: 62.369\n - type: recall_at_100\n value: 85.444\n - type: recall_at_1000\n value: 96.65599999999999\n - type: recall_at_3\n value: 47.961\n - type: recall_at_5\n value: 54.676\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackWebmastersRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 
160c094312a0e1facb97e55eeddb698c0abe3571\n metrics:\n - type: map_at_1\n value: 27.139999999999997\n - type: map_at_10\n value: 38.801\n - type: map_at_100\n value: 40.549\n - type: map_at_1000\n value: 40.802\n - type: map_at_3\n value: 35.05\n - type: map_at_5\n value: 36.884\n - type: mrr_at_1\n value: 33.004\n - type: mrr_at_10\n value: 43.864\n - type: mrr_at_100\n value: 44.667\n - type: mrr_at_1000\n value: 44.717\n - type: mrr_at_3\n value: 40.777\n - type: mrr_at_5\n value: 42.319\n - type: ndcg_at_1\n value: 33.004\n - type: ndcg_at_10\n value: 46.022\n - type: ndcg_at_100\n value: 51.542\n - type: ndcg_at_1000\n value: 53.742000000000004\n - type: ndcg_at_3\n value: 39.795\n - type: ndcg_at_5\n value: 42.272\n - type: precision_at_1\n value: 33.004\n - type: precision_at_10\n value: 9.012\n - type: precision_at_100\n value: 1.7770000000000001\n - type: precision_at_1000\n value: 0.26\n - type: precision_at_3\n value: 19.038\n - type: precision_at_5\n value: 13.675999999999998\n - type: recall_at_1\n value: 27.139999999999997\n - type: recall_at_10\n value: 60.961\n - type: recall_at_100\n value: 84.451\n - type: recall_at_1000\n value: 98.113\n - type: recall_at_3\n value: 43.001\n - type: recall_at_5\n value: 49.896\n - task:\n type: Retrieval\n dataset:\n name: MTEB ClimateFEVER\n type: mteb/climate-fever\n config: default\n split: test\n revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380\n metrics:\n - type: map_at_1\n value: 22.076999999999998\n - type: map_at_10\n value: 35.44\n - type: map_at_100\n value: 37.651\n - type: map_at_1000\n value: 37.824999999999996\n - type: map_at_3\n value: 30.764999999999997\n - type: map_at_5\n value: 33.26\n - type: mrr_at_1\n value: 50.163000000000004\n - type: mrr_at_10\n value: 61.207\n - type: mrr_at_100\n value: 61.675000000000004\n - type: mrr_at_1000\n value: 61.692\n - type: mrr_at_3\n value: 58.60999999999999\n - type: mrr_at_5\n value: 60.307\n - type: ndcg_at_1\n value: 50.163000000000004\n - type: 
ndcg_at_10\n value: 45.882\n - type: ndcg_at_100\n value: 53.239999999999995\n - type: ndcg_at_1000\n value: 55.852000000000004\n - type: ndcg_at_3\n value: 40.514\n - type: ndcg_at_5\n value: 42.038\n - type: precision_at_1\n value: 50.163000000000004\n - type: precision_at_10\n value: 13.466000000000001\n - type: precision_at_100\n value: 2.164\n - type: precision_at_1000\n value: 0.266\n - type: precision_at_3\n value: 29.707\n - type: precision_at_5\n value: 21.694\n - type: recall_at_1\n value: 22.076999999999998\n - type: recall_at_10\n value: 50.193\n - type: recall_at_100\n value: 74.993\n - type: recall_at_1000\n value: 89.131\n - type: recall_at_3\n value: 35.472\n - type: recall_at_5\n value: 41.814\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia\n type: mteb/dbpedia\n config: default\n split: test\n revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659\n metrics:\n - type: map_at_1\n value: 9.953\n - type: map_at_10\n value: 24.515\n - type: map_at_100\n value: 36.173\n - type: map_at_1000\n value: 38.351\n - type: map_at_3\n value: 16.592000000000002\n - type: map_at_5\n value: 20.036\n - type: mrr_at_1\n value: 74.25\n - type: mrr_at_10\n value: 81.813\n - type: mrr_at_100\n value: 82.006\n - type: mrr_at_1000\n value: 82.011\n - type: mrr_at_3\n value: 80.875\n - type: mrr_at_5\n value: 81.362\n - type: ndcg_at_1\n value: 62.5\n - type: ndcg_at_10\n value: 52.42\n - type: ndcg_at_100\n value: 56.808\n - type: ndcg_at_1000\n value: 63.532999999999994\n - type: ndcg_at_3\n value: 56.654\n - type: ndcg_at_5\n value: 54.18300000000001\n - type: precision_at_1\n value: 74.25\n - type: precision_at_10\n value: 42.699999999999996\n - type: precision_at_100\n value: 13.675\n - type: precision_at_1000\n value: 2.664\n - type: precision_at_3\n value: 60.5\n - type: precision_at_5\n value: 52.800000000000004\n - type: recall_at_1\n value: 9.953\n - type: recall_at_10\n value: 30.253999999999998\n - type: recall_at_100\n value: 62.516000000000005\n - 
type: recall_at_1000\n value: 84.163\n - type: recall_at_3\n value: 18.13\n - type: recall_at_5\n value: 22.771\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: accuracy\n value: 79.455\n - type: f1\n value: 74.16798697647569\n - task:\n type: Retrieval\n dataset:\n name: MTEB FEVER\n type: mteb/fever\n config: default\n split: test\n revision: bea83ef9e8fb933d90a2f1d5515737465d613e12\n metrics:\n - type: map_at_1\n value: 87.531\n - type: map_at_10\n value: 93.16799999999999\n - type: map_at_100\n value: 93.341\n - type: map_at_1000\n value: 93.349\n - type: map_at_3\n value: 92.444\n - type: map_at_5\n value: 92.865\n - type: mrr_at_1\n value: 94.014\n - type: mrr_at_10\n value: 96.761\n - type: mrr_at_100\n value: 96.762\n - type: mrr_at_1000\n value: 96.762\n - type: mrr_at_3\n value: 96.672\n - type: mrr_at_5\n value: 96.736\n - type: ndcg_at_1\n value: 94.014\n - type: ndcg_at_10\n value: 95.112\n - type: ndcg_at_100\n value: 95.578\n - type: ndcg_at_1000\n value: 95.68900000000001\n - type: ndcg_at_3\n value: 94.392\n - type: ndcg_at_5\n value: 94.72500000000001\n - type: precision_at_1\n value: 94.014\n - type: precision_at_10\n value: 11.065\n - type: precision_at_100\n value: 1.157\n - type: precision_at_1000\n value: 0.11800000000000001\n - type: precision_at_3\n value: 35.259\n - type: precision_at_5\n value: 21.599\n - type: recall_at_1\n value: 87.531\n - type: recall_at_10\n value: 97.356\n - type: recall_at_100\n value: 98.965\n - type: recall_at_1000\n value: 99.607\n - type: recall_at_3\n value: 95.312\n - type: recall_at_5\n value: 96.295\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA2018\n type: mteb/fiqa\n config: default\n split: test\n revision: 27a168819829fe9bcd655c2df245fb19452e8e06\n metrics:\n - type: map_at_1\n value: 32.055\n - type: map_at_10\n value: 53.114\n - type: 
map_at_100\n value: 55.235\n - type: map_at_1000\n value: 55.345\n - type: map_at_3\n value: 45.854\n - type: map_at_5\n value: 50.025\n - type: mrr_at_1\n value: 60.34\n - type: mrr_at_10\n value: 68.804\n - type: mrr_at_100\n value: 69.309\n - type: mrr_at_1000\n value: 69.32199999999999\n - type: mrr_at_3\n value: 66.40899999999999\n - type: mrr_at_5\n value: 67.976\n - type: ndcg_at_1\n value: 60.34\n - type: ndcg_at_10\n value: 62.031000000000006\n - type: ndcg_at_100\n value: 68.00500000000001\n - type: ndcg_at_1000\n value: 69.286\n - type: ndcg_at_3\n value: 56.355999999999995\n - type: ndcg_at_5\n value: 58.687\n - type: precision_at_1\n value: 60.34\n - type: precision_at_10\n value: 17.176\n - type: precision_at_100\n value: 2.36\n - type: precision_at_1000\n value: 0.259\n - type: precision_at_3\n value: 37.14\n - type: precision_at_5\n value: 27.809\n - type: recall_at_1\n value: 32.055\n - type: recall_at_10\n value: 70.91\n - type: recall_at_100\n value: 91.83\n - type: recall_at_1000\n value: 98.871\n - type: recall_at_3\n value: 51.202999999999996\n - type: recall_at_5\n value: 60.563\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA\n type: mteb/hotpotqa\n config: default\n split: test\n revision: ab518f4d6fcca38d87c25209f94beba119d02014\n metrics:\n - type: map_at_1\n value: 43.68\n - type: map_at_10\n value: 64.389\n - type: map_at_100\n value: 65.24\n - type: map_at_1000\n value: 65.303\n - type: map_at_3\n value: 61.309000000000005\n - type: map_at_5\n value: 63.275999999999996\n - type: mrr_at_1\n value: 87.36\n - type: mrr_at_10\n value: 91.12\n - type: mrr_at_100\n value: 91.227\n - type: mrr_at_1000\n value: 91.229\n - type: mrr_at_3\n value: 90.57600000000001\n - type: mrr_at_5\n value: 90.912\n - type: ndcg_at_1\n value: 87.36\n - type: ndcg_at_10\n value: 73.076\n - type: ndcg_at_100\n value: 75.895\n - type: ndcg_at_1000\n value: 77.049\n - type: ndcg_at_3\n value: 68.929\n - type: ndcg_at_5\n value: 71.28\n - type: 
precision_at_1\n value: 87.36\n - type: precision_at_10\n value: 14.741000000000001\n - type: precision_at_100\n value: 1.694\n - type: precision_at_1000\n value: 0.185\n - type: precision_at_3\n value: 43.043\n - type: precision_at_5\n value: 27.681\n - type: recall_at_1\n value: 43.68\n - type: recall_at_10\n value: 73.707\n - type: recall_at_100\n value: 84.7\n - type: recall_at_1000\n value: 92.309\n - type: recall_at_3\n value: 64.564\n - type: recall_at_5\n value: 69.203\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 96.75399999999999\n - type: ap\n value: 95.29389839242187\n - type: f1\n value: 96.75348377433475\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO\n type: mteb/msmarco\n config: default\n split: dev\n revision: c5a29a104738b98a9e76336939199e264163d4a0\n metrics:\n - type: map_at_1\n value: 25.176\n - type: map_at_10\n value: 38.598\n - type: map_at_100\n value: 39.707\n - type: map_at_1000\n value: 39.744\n - type: map_at_3\n value: 34.566\n - type: map_at_5\n value: 36.863\n - type: mrr_at_1\n value: 25.874000000000002\n - type: mrr_at_10\n value: 39.214\n - type: mrr_at_100\n value: 40.251\n - type: mrr_at_1000\n value: 40.281\n - type: mrr_at_3\n value: 35.291\n - type: mrr_at_5\n value: 37.545\n - type: ndcg_at_1\n value: 25.874000000000002\n - type: ndcg_at_10\n value: 45.98\n - type: ndcg_at_100\n value: 51.197\n - type: ndcg_at_1000\n value: 52.073\n - type: ndcg_at_3\n value: 37.785999999999994\n - type: ndcg_at_5\n value: 41.870000000000005\n - type: precision_at_1\n value: 25.874000000000002\n - type: precision_at_10\n value: 7.181\n - type: precision_at_100\n value: 0.979\n - type: precision_at_1000\n value: 0.106\n - type: precision_at_3\n value: 16.051000000000002\n - type: precision_at_5\n value: 11.713\n - type: recall_at_1\n value: 25.176\n - type: 
recall_at_10\n value: 68.67699999999999\n - type: recall_at_100\n value: 92.55\n - type: recall_at_1000\n value: 99.164\n - type: recall_at_3\n value: 46.372\n - type: recall_at_5\n value: 56.16\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 99.03784769721841\n - type: f1\n value: 98.97791641821495\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 91.88326493388054\n - type: f1\n value: 73.74809928034335\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 85.41358439811701\n - type: f1\n value: 83.503679460639\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n type: mteb/amazon_massive_scenario\n config: en\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 89.77135171486215\n - type: f1\n value: 88.89843747468366\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: v_measure\n value: 46.22695362087359\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: v_measure\n value: 44.132372165849425\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: 
default\n split: test\n revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69\n metrics:\n - type: map\n value: 33.35680810650402\n - type: mrr\n value: 34.72625715637218\n - task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus\n type: mteb/nfcorpus\n config: default\n split: test\n revision: ec0fa4fe99da2ff19ca1214b7966684033a58814\n metrics:\n - type: map_at_1\n value: 7.165000000000001\n - type: map_at_10\n value: 15.424\n - type: map_at_100\n value: 20.28\n - type: map_at_1000\n value: 22.065\n - type: map_at_3\n value: 11.236\n - type: map_at_5\n value: 13.025999999999998\n - type: mrr_at_1\n value: 51.702999999999996\n - type: mrr_at_10\n value: 59.965\n - type: mrr_at_100\n value: 60.667\n - type: mrr_at_1000\n value: 60.702999999999996\n - type: mrr_at_3\n value: 58.772000000000006\n - type: mrr_at_5\n value: 59.267\n - type: ndcg_at_1\n value: 49.536\n - type: ndcg_at_10\n value: 40.6\n - type: ndcg_at_100\n value: 37.848\n - type: ndcg_at_1000\n value: 46.657\n - type: ndcg_at_3\n value: 46.117999999999995\n - type: ndcg_at_5\n value: 43.619\n - type: precision_at_1\n value: 51.393\n - type: precision_at_10\n value: 30.31\n - type: precision_at_100\n value: 9.972\n - type: precision_at_1000\n value: 2.329\n - type: precision_at_3\n value: 43.137\n - type: precision_at_5\n value: 37.585\n - type: recall_at_1\n value: 7.165000000000001\n - type: recall_at_10\n value: 19.689999999999998\n - type: recall_at_100\n value: 39.237\n - type: recall_at_1000\n value: 71.417\n - type: recall_at_3\n value: 12.247\n - type: recall_at_5\n value: 14.902999999999999\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ\n type: mteb/nq\n config: default\n split: test\n revision: b774495ed302d8c44a3a7ea25c90dbce03968f31\n metrics:\n - type: map_at_1\n value: 42.653999999999996\n - type: map_at_10\n value: 59.611999999999995\n - type: map_at_100\n value: 60.32300000000001\n - type: map_at_1000\n value: 60.336\n - type: map_at_3\n value: 55.584999999999994\n - type: 
map_at_5\n value: 58.19\n - type: mrr_at_1\n value: 47.683\n - type: mrr_at_10\n value: 62.06700000000001\n - type: mrr_at_100\n value: 62.537\n - type: mrr_at_1000\n value: 62.544999999999995\n - type: mrr_at_3\n value: 59.178\n - type: mrr_at_5\n value: 61.034\n - type: ndcg_at_1\n value: 47.654\n - type: ndcg_at_10\n value: 67.001\n - type: ndcg_at_100\n value: 69.73899999999999\n - type: ndcg_at_1000\n value: 69.986\n - type: ndcg_at_3\n value: 59.95700000000001\n - type: ndcg_at_5\n value: 64.025\n - type: precision_at_1\n value: 47.654\n - type: precision_at_10\n value: 10.367999999999999\n - type: precision_at_100\n value: 1.192\n - type: precision_at_1000\n value: 0.121\n - type: precision_at_3\n value: 26.651000000000003\n - type: precision_at_5\n value: 18.459\n - type: recall_at_1\n value: 42.653999999999996\n - type: recall_at_10\n value: 86.619\n - type: recall_at_100\n value: 98.04899999999999\n - type: recall_at_1000\n value: 99.812\n - type: recall_at_3\n value: 68.987\n - type: recall_at_5\n value: 78.158\n - task:\n type: Retrieval\n dataset:\n name: MTEB QuoraRetrieval\n type: mteb/quora\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 72.538\n - type: map_at_10\n value: 86.702\n - type: map_at_100\n value: 87.31\n - type: map_at_1000\n value: 87.323\n - type: map_at_3\n value: 83.87\n - type: map_at_5\n value: 85.682\n - type: mrr_at_1\n value: 83.31\n - type: mrr_at_10\n value: 89.225\n - type: mrr_at_100\n value: 89.30399999999999\n - type: mrr_at_1000\n value: 89.30399999999999\n - type: mrr_at_3\n value: 88.44300000000001\n - type: mrr_at_5\n value: 89.005\n - type: ndcg_at_1\n value: 83.32000000000001\n - type: ndcg_at_10\n value: 90.095\n - type: ndcg_at_100\n value: 91.12\n - type: ndcg_at_1000\n value: 91.179\n - type: ndcg_at_3\n value: 87.606\n - type: ndcg_at_5\n value: 89.031\n - type: precision_at_1\n value: 83.32000000000001\n - type: precision_at_10\n value: 13.641\n - type: 
precision_at_100\n value: 1.541\n - type: precision_at_1000\n value: 0.157\n - type: precision_at_3\n value: 38.377\n - type: precision_at_5\n value: 25.162000000000003\n - type: recall_at_1\n value: 72.538\n - type: recall_at_10\n value: 96.47200000000001\n - type: recall_at_100\n value: 99.785\n - type: recall_at_1000\n value: 99.99900000000001\n - type: recall_at_3\n value: 89.278\n - type: recall_at_5\n value: 93.367\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: v_measure\n value: 73.55219145406065\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\n metrics:\n - type: v_measure\n value: 74.13437105242755\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS\n type: mteb/scidocs\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 6.873\n - type: map_at_10\n value: 17.944\n - type: map_at_100\n value: 21.171\n - type: map_at_1000\n value: 21.528\n - type: map_at_3\n value: 12.415\n - type: map_at_5\n value: 15.187999999999999\n - type: mrr_at_1\n value: 33.800000000000004\n - type: mrr_at_10\n value: 46.455\n - type: mrr_at_100\n value: 47.378\n - type: mrr_at_1000\n value: 47.394999999999996\n - type: mrr_at_3\n value: 42.367\n - type: mrr_at_5\n value: 44.972\n - type: ndcg_at_1\n value: 33.800000000000004\n - type: ndcg_at_10\n value: 28.907\n - type: ndcg_at_100\n value: 39.695\n - type: ndcg_at_1000\n value: 44.582\n - type: ndcg_at_3\n value: 26.949\n - type: ndcg_at_5\n value: 23.988\n - type: precision_at_1\n value: 33.800000000000004\n - type: precision_at_10\n value: 15.079999999999998\n - type: precision_at_100\n value: 3.056\n - type: precision_at_1000\n value: 0.42100000000000004\n - type: precision_at_3\n 
value: 25.167\n - type: precision_at_5\n value: 21.26\n - type: recall_at_1\n value: 6.873\n - type: recall_at_10\n value: 30.568\n - type: recall_at_100\n value: 62.062\n - type: recall_at_1000\n value: 85.37700000000001\n - type: recall_at_3\n value: 15.312999999999999\n - type: recall_at_5\n value: 21.575\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\n metrics:\n - type: cos_sim_pearson\n value: 82.37009118256057\n - type: cos_sim_spearman\n value: 79.27986395671529\n - type: euclidean_pearson\n value: 79.18037715442115\n - type: euclidean_spearman\n value: 79.28004791561621\n - type: manhattan_pearson\n value: 79.34062972800541\n - type: manhattan_spearman\n value: 79.43106695543402\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cos_sim_pearson\n value: 87.48474767383833\n - type: cos_sim_spearman\n value: 79.54505388752513\n - type: euclidean_pearson\n value: 83.43282704179565\n - type: euclidean_spearman\n value: 79.54579919925405\n - type: manhattan_pearson\n value: 83.77564492427952\n - type: manhattan_spearman\n value: 79.84558396989286\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - type: cos_sim_pearson\n value: 88.803698035802\n - type: cos_sim_spearman\n value: 88.83451367754881\n - type: euclidean_pearson\n value: 88.28939285711628\n - type: euclidean_spearman\n value: 88.83528996073112\n - type: manhattan_pearson\n value: 88.28017412671795\n - type: manhattan_spearman\n value: 88.9228828016344\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: 
cos_sim_pearson\n value: 85.27469288153428\n - type: cos_sim_spearman\n value: 83.87477064876288\n - type: euclidean_pearson\n value: 84.2601737035379\n - type: euclidean_spearman\n value: 83.87431082479074\n - type: manhattan_pearson\n value: 84.3621547772745\n - type: manhattan_spearman\n value: 84.12094375000423\n - task:\n type: STS\n dataset:\n name: MTEB STS15\n type: mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cos_sim_pearson\n value: 88.12749863201587\n - type: cos_sim_spearman\n value: 88.54287568368565\n - type: euclidean_pearson\n value: 87.90429700607999\n - type: euclidean_spearman\n value: 88.5437689576261\n - type: manhattan_pearson\n value: 88.19276653356833\n - type: manhattan_spearman\n value: 88.99995393814679\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cos_sim_pearson\n value: 85.68398747560902\n - type: cos_sim_spearman\n value: 86.48815303460574\n - type: euclidean_pearson\n value: 85.52356631237954\n - type: euclidean_spearman\n value: 86.486391949551\n - type: manhattan_pearson\n value: 85.67267981761788\n - type: manhattan_spearman\n value: 86.7073696332485\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 88.9057107443124\n - type: cos_sim_spearman\n value: 88.7312168757697\n - type: euclidean_pearson\n value: 88.72810439714794\n - type: euclidean_spearman\n value: 88.71976185854771\n - type: manhattan_pearson\n value: 88.50433745949111\n - type: manhattan_spearman\n value: 88.51726175544195\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: 
eea2b4fe26a775864c896887d910b76a8098ad3f\n metrics:\n - type: cos_sim_pearson\n value: 67.59391795109886\n - type: cos_sim_spearman\n value: 66.87613008631367\n - type: euclidean_pearson\n value: 69.23198488262217\n - type: euclidean_spearman\n value: 66.85427723013692\n - type: manhattan_pearson\n value: 69.50730124841084\n - type: manhattan_spearman\n value: 67.10404669820792\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmark\n type: mteb/stsbenchmark-sts\n config: default\n split: test\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\n metrics:\n - type: cos_sim_pearson\n value: 87.0820605344619\n - type: cos_sim_spearman\n value: 86.8518089863434\n - type: euclidean_pearson\n value: 86.31087134689284\n - type: euclidean_spearman\n value: 86.8518520517941\n - type: manhattan_pearson\n value: 86.47203796160612\n - type: manhattan_spearman\n value: 87.1080149734421\n - task:\n type: Reranking\n dataset:\n name: MTEB SciDocsRR\n type: mteb/scidocs-reranking\n config: default\n split: test\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\n metrics:\n - type: map\n value: 89.09255369305481\n - type: mrr\n value: 97.10323445617563\n - task:\n type: Retrieval\n dataset:\n name: MTEB SciFact\n type: mteb/scifact\n config: default\n split: test\n revision: 0228b52cf27578f30900b9e5271d331663a030d7\n metrics:\n - type: map_at_1\n value: 61.260999999999996\n - type: map_at_10\n value: 74.043\n - type: map_at_100\n value: 74.37700000000001\n - type: map_at_1000\n value: 74.384\n - type: map_at_3\n value: 71.222\n - type: map_at_5\n value: 72.875\n - type: mrr_at_1\n value: 64.333\n - type: mrr_at_10\n value: 74.984\n - type: mrr_at_100\n value: 75.247\n - type: mrr_at_1000\n value: 75.25500000000001\n - type: mrr_at_3\n value: 73.167\n - type: mrr_at_5\n value: 74.35000000000001\n - type: ndcg_at_1\n value: 64.333\n - type: ndcg_at_10\n value: 79.06\n - type: ndcg_at_100\n value: 80.416\n - type: ndcg_at_1000\n value: 80.55600000000001\n - type: ndcg_at_3\n 
value: 74.753\n - type: ndcg_at_5\n value: 76.97500000000001\n - type: precision_at_1\n value: 64.333\n - type: precision_at_10\n value: 10.567\n - type: precision_at_100\n value: 1.1199999999999999\n - type: precision_at_1000\n value: 0.11299999999999999\n - type: precision_at_3\n value: 29.889\n - type: precision_at_5\n value: 19.533\n - type: recall_at_1\n value: 61.260999999999996\n - type: recall_at_10\n value: 93.167\n - type: recall_at_100\n value: 99.0\n - type: recall_at_1000\n value: 100.0\n - type: recall_at_3\n value: 81.667\n - type: recall_at_5\n value: 87.394\n - task:\n type: PairClassification\n dataset:\n name: MTEB SprintDuplicateQuestions\n type: mteb/sprintduplicatequestions-pairclassification\n config: default\n split: test\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\n metrics:\n - type: cos_sim_accuracy\n value: 99.71980198019801\n - type: cos_sim_ap\n value: 92.81616007802704\n - type: cos_sim_f1\n value: 85.17548454688318\n - type: cos_sim_precision\n value: 89.43894389438944\n - type: cos_sim_recall\n value: 81.3\n - type: dot_accuracy\n value: 99.71980198019801\n - type: dot_ap\n value: 92.81398760591358\n - type: dot_f1\n value: 85.17548454688318\n - type: dot_precision\n value: 89.43894389438944\n - type: dot_recall\n value: 81.3\n - type: euclidean_accuracy\n value: 99.71980198019801\n - type: euclidean_ap\n value: 92.81560637245072\n - type: euclidean_f1\n value: 85.17548454688318\n - type: euclidean_precision\n value: 89.43894389438944\n - type: euclidean_recall\n value: 81.3\n - type: manhattan_accuracy\n value: 99.73069306930694\n - type: manhattan_ap\n value: 93.14005487480794\n - type: manhattan_f1\n value: 85.56263269639068\n - type: manhattan_precision\n value: 91.17647058823529\n - type: manhattan_recall\n value: 80.60000000000001\n - type: max_accuracy\n value: 99.73069306930694\n - type: max_ap\n value: 93.14005487480794\n - type: max_f1\n value: 85.56263269639068\n - task:\n type: Clustering\n dataset:\n name: MTEB 
StackExchangeClustering\n type: mteb/stackexchange-clustering\n config: default\n split: test\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\n metrics:\n - type: v_measure\n value: 79.86443362395185\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClusteringP2P\n type: mteb/stackexchange-clustering-p2p\n config: default\n split: test\n revision: 815ca46b2622cec33ccafc3735d572c266efdb44\n metrics:\n - type: v_measure\n value: 49.40897096662564\n - task:\n type: Reranking\n dataset:\n name: MTEB StackOverflowDupQuestions\n type: mteb/stackoverflowdupquestions-reranking\n config: default\n split: test\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\n metrics:\n - type: map\n value: 55.66040806627947\n - type: mrr\n value: 56.58670475766064\n - task:\n type: Summarization\n dataset:\n name: MTEB SummEval\n type: mteb/summeval\n config: default\n split: test\n revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c\n metrics:\n - type: cos_sim_pearson\n value: 31.51015090598575\n - type: cos_sim_spearman\n value: 31.35016454939226\n - type: dot_pearson\n value: 31.5150068731\n - type: dot_spearman\n value: 31.34790869023487\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID\n type: mteb/trec-covid\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 0.254\n - type: map_at_10\n value: 2.064\n - type: map_at_100\n value: 12.909\n - type: map_at_1000\n value: 31.761\n - type: map_at_3\n value: 0.738\n - type: map_at_5\n value: 1.155\n - type: mrr_at_1\n value: 96.0\n - type: mrr_at_10\n value: 98.0\n - type: mrr_at_100\n value: 98.0\n - type: mrr_at_1000\n value: 98.0\n - type: mrr_at_3\n value: 98.0\n - type: mrr_at_5\n value: 98.0\n - type: ndcg_at_1\n value: 93.0\n - type: ndcg_at_10\n value: 82.258\n - type: ndcg_at_100\n value: 64.34\n - type: ndcg_at_1000\n value: 57.912\n - type: ndcg_at_3\n value: 90.827\n - type: ndcg_at_5\n value: 86.79\n - type: precision_at_1\n value: 96.0\n - type: 
precision_at_10\n value: 84.8\n - type: precision_at_100\n value: 66.0\n - type: precision_at_1000\n value: 25.356\n - type: precision_at_3\n value: 94.667\n - type: precision_at_5\n value: 90.4\n - type: recall_at_1\n value: 0.254\n - type: recall_at_10\n value: 2.1950000000000003\n - type: recall_at_100\n value: 16.088\n - type: recall_at_1000\n value: 54.559000000000005\n - type: recall_at_3\n value: 0.75\n - type: recall_at_5\n value: 1.191\n - task:\n type: Retrieval\n dataset:\n name: MTEB Touche2020\n type: mteb/touche2020\n config: default\n split: test\n revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f\n metrics:\n - type: map_at_1\n value: 2.976\n - type: map_at_10\n value: 11.389000000000001\n - type: map_at_100\n value: 18.429000000000002\n - type: map_at_1000\n value: 20.113\n - type: map_at_3\n value: 6.483\n - type: map_at_5\n value: 8.770999999999999\n - type: mrr_at_1\n value: 40.816\n - type: mrr_at_10\n value: 58.118\n - type: mrr_at_100\n value: 58.489999999999995\n - type: mrr_at_1000\n value: 58.489999999999995\n - type: mrr_at_3\n value: 53.061\n - type: mrr_at_5\n value: 57.041\n - type: ndcg_at_1\n value: 40.816\n - type: ndcg_at_10\n value: 30.567\n - type: ndcg_at_100\n value: 42.44\n - type: ndcg_at_1000\n value: 53.480000000000004\n - type: ndcg_at_3\n value: 36.016\n - type: ndcg_at_5\n value: 34.257\n - type: precision_at_1\n value: 42.857\n - type: precision_at_10\n value: 25.714\n - type: precision_at_100\n value: 8.429\n - type: precision_at_1000\n value: 1.5939999999999999\n - type: precision_at_3\n value: 36.735\n - type: precision_at_5\n value: 33.878\n - type: recall_at_1\n value: 2.976\n - type: recall_at_10\n value: 17.854999999999997\n - type: recall_at_100\n value: 51.833\n - type: recall_at_1000\n value: 86.223\n - type: recall_at_3\n value: 7.887\n - type: recall_at_5\n value: 12.026\n - task:\n type: Classification\n dataset:\n name: MTEB ToxicConversationsClassification\n type: mteb/toxic_conversations_50k\n config: 
default\n split: test\n revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c\n metrics:\n - type: accuracy\n value: 85.1174\n - type: ap\n value: 30.169441069345748\n - type: f1\n value: 69.79254701873245\n - task:\n type: Classification\n dataset:\n name: MTEB TweetSentimentExtractionClassification\n type: mteb/tweet_sentiment_extraction\n config: default\n split: test\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\n metrics:\n - type: accuracy\n value: 72.58347481607245\n - type: f1\n value: 72.74877295564937\n - task:\n type: Clustering\n dataset:\n name: MTEB TwentyNewsgroupsClustering\n type: mteb/twentynewsgroups-clustering\n config: default\n split: test\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\n metrics:\n - type: v_measure\n value: 53.90586138221305\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterSemEval2015\n type: mteb/twittersemeval2015-pairclassification\n config: default\n split: test\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\n metrics:\n - type: cos_sim_accuracy\n value: 87.35769207844072\n - type: cos_sim_ap\n value: 77.9645072410354\n - type: cos_sim_f1\n value: 71.32352941176471\n - type: cos_sim_precision\n value: 66.5903890160183\n - type: cos_sim_recall\n value: 76.78100263852242\n - type: dot_accuracy\n value: 87.37557370209214\n - type: dot_ap\n value: 77.96250046429908\n - type: dot_f1\n value: 71.28932757557064\n - type: dot_precision\n value: 66.95249130938586\n - type: dot_recall\n value: 76.22691292875989\n - type: euclidean_accuracy\n value: 87.35173153722357\n - type: euclidean_ap\n value: 77.96520460741593\n - type: euclidean_f1\n value: 71.32470733210104\n - type: euclidean_precision\n value: 66.91329479768785\n - type: euclidean_recall\n value: 76.35883905013192\n - type: manhattan_accuracy\n value: 87.25636287774931\n - type: manhattan_ap\n value: 77.77752485611796\n - type: manhattan_f1\n value: 71.18148599269183\n - type: manhattan_precision\n value: 66.10859728506787\n - type: 
manhattan_recall\n value: 77.0976253298153\n - type: max_accuracy\n value: 87.37557370209214\n - type: max_ap\n value: 77.96520460741593\n - type: max_f1\n value: 71.32470733210104\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterURLCorpus\n type: mteb/twitterurlcorpus-pairclassification\n config: default\n split: test\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\n metrics:\n - type: cos_sim_accuracy\n value: 89.38176737687739\n - type: cos_sim_ap\n value: 86.58811861657401\n - type: cos_sim_f1\n value: 79.09430644097604\n - type: cos_sim_precision\n value: 75.45085977911366\n - type: cos_sim_recall\n value: 83.10748383122882\n - type: dot_accuracy\n value: 89.38370784336554\n - type: dot_ap\n value: 86.58840606004333\n - type: dot_f1\n value: 79.10179860068133\n - type: dot_precision\n value: 75.44546153308643\n - type: dot_recall\n value: 83.13058207576223\n - type: euclidean_accuracy\n value: 89.38564830985369\n - type: euclidean_ap\n value: 86.58820721061164\n - type: euclidean_f1\n value: 79.09070942235888\n - type: euclidean_precision\n value: 75.38729937194697\n - type: euclidean_recall\n value: 83.17677856482906\n - type: manhattan_accuracy\n value: 89.40699344122326\n - type: manhattan_ap\n value: 86.60631843011362\n - type: manhattan_f1\n value: 79.14949970570925\n - type: manhattan_precision\n value: 75.78191039729502\n - type: manhattan_recall\n value: 82.83030489682784\n - type: max_accuracy\n value: 89.40699344122326\n - type: max_ap\n value: 86.60631843011362\n - type: max_f1\n value: 79.14949970570925\n - task:\n type: STS\n dataset:\n name: MTEB AFQMC\n type: C-MTEB/AFQMC\n config: default\n split: validation\n revision: b44c3b011063adb25877c13823db83bb193913c4\n metrics:\n - type: cos_sim_pearson\n value: 65.58442135663871\n - type: cos_sim_spearman\n value: 72.2538631361313\n - type: euclidean_pearson\n value: 70.97255486607429\n - type: euclidean_spearman\n value: 72.25374250228647\n - type: manhattan_pearson\n value: 
70.83250199989911\n - type: manhattan_spearman\n value: 72.14819496536272\n - task:\n type: STS\n dataset:\n name: MTEB ATEC\n type: C-MTEB/ATEC\n config: default\n split: test\n revision: 0f319b1142f28d00e055a6770f3f726ae9b7d865\n metrics:\n - type: cos_sim_pearson\n value: 59.99478404929932\n - type: cos_sim_spearman\n value: 62.61836216999812\n - type: euclidean_pearson\n value: 66.86429811933593\n - type: euclidean_spearman\n value: 62.6183520374191\n - type: manhattan_pearson\n value: 66.8063778911633\n - type: manhattan_spearman\n value: 62.569607573241115\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (zh)\n type: mteb/amazon_reviews_multi\n config: zh\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 53.98400000000001\n - type: f1\n value: 51.21447361350723\n - task:\n type: STS\n dataset:\n name: MTEB BQ\n type: C-MTEB/BQ\n config: default\n split: test\n revision: e3dda5e115e487b39ec7e618c0c6a29137052a55\n metrics:\n - type: cos_sim_pearson\n value: 79.11941660686553\n - type: cos_sim_spearman\n value: 81.25029594540435\n - type: euclidean_pearson\n value: 82.06973504238826\n - type: euclidean_spearman\n value: 81.2501989488524\n - type: manhattan_pearson\n value: 82.10094630392753\n - type: manhattan_spearman\n value: 81.27987244392389\n - task:\n type: Clustering\n dataset:\n name: MTEB CLSClusteringP2P\n type: C-MTEB/CLSClusteringP2P\n config: default\n split: test\n revision: 4b6227591c6c1a73bc76b1055f3b7f3588e72476\n metrics:\n - type: v_measure\n value: 47.07270168705156\n - task:\n type: Clustering\n dataset:\n name: MTEB CLSClusteringS2S\n type: C-MTEB/CLSClusteringS2S\n config: default\n split: test\n revision: e458b3f5414b62b7f9f83499ac1f5497ae2e869f\n metrics:\n - type: v_measure\n value: 45.98511703185043\n - task:\n type: Reranking\n dataset:\n name: MTEB CMedQAv1\n type: C-MTEB/CMedQAv1-reranking\n config: default\n split: test\n revision: 
8d7f1e942507dac42dc58017c1a001c3717da7df\n metrics:\n - type: map\n value: 88.19895157194931\n - type: mrr\n value: 90.21424603174603\n - task:\n type: Reranking\n dataset:\n name: MTEB CMedQAv2\n type: C-MTEB/CMedQAv2-reranking\n config: default\n split: test\n revision: 23d186750531a14a0357ca22cd92d712fd512ea0\n metrics:\n - type: map\n value: 88.03317320980119\n - type: mrr\n value: 89.9461507936508\n - task:\n type: Retrieval\n dataset:\n name: MTEB CmedqaRetrieval\n type: C-MTEB/CmedqaRetrieval\n config: default\n split: dev\n revision: cd540c506dae1cf9e9a59c3e06f42030d54e7301\n metrics:\n - type: map_at_1\n value: 29.037000000000003\n - type: map_at_10\n value: 42.001\n - type: map_at_100\n value: 43.773\n - type: map_at_1000\n value: 43.878\n - type: map_at_3\n value: 37.637\n - type: map_at_5\n value: 40.034\n - type: mrr_at_1\n value: 43.136\n - type: mrr_at_10\n value: 51.158\n - type: mrr_at_100\n value: 52.083\n - type: mrr_at_1000\n value: 52.12\n - type: mrr_at_3\n value: 48.733\n - type: mrr_at_5\n value: 50.025\n - type: ndcg_at_1\n value: 43.136\n - type: ndcg_at_10\n value: 48.685\n - type: ndcg_at_100\n value: 55.513\n - type: ndcg_at_1000\n value: 57.242000000000004\n - type: ndcg_at_3\n value: 43.329\n - type: ndcg_at_5\n value: 45.438\n - type: precision_at_1\n value: 43.136\n - type: precision_at_10\n value: 10.56\n - type: precision_at_100\n value: 1.6129999999999998\n - type: precision_at_1000\n value: 0.184\n - type: precision_at_3\n value: 24.064\n - type: precision_at_5\n value: 17.269000000000002\n - type: recall_at_1\n value: 29.037000000000003\n - type: recall_at_10\n value: 59.245000000000005\n - type: recall_at_100\n value: 87.355\n - type: recall_at_1000\n value: 98.74000000000001\n - type: recall_at_3\n value: 42.99\n - type: recall_at_5\n value: 49.681999999999995\n - task:\n type: PairClassification\n dataset:\n name: MTEB Cmnli\n type: C-MTEB/CMNLI\n config: default\n split: validation\n revision: 
41bc36f332156f7adc9e38f53777c959b2ae9766\n metrics:\n - type: cos_sim_accuracy\n value: 82.68190018039687\n - type: cos_sim_ap\n value: 90.18017125327886\n - type: cos_sim_f1\n value: 83.64080906868193\n - type: cos_sim_precision\n value: 79.7076890489303\n - type: cos_sim_recall\n value: 87.98223053542202\n - type: dot_accuracy\n value: 82.68190018039687\n - type: dot_ap\n value: 90.18782350103646\n - type: dot_f1\n value: 83.64242087729039\n - type: dot_precision\n value: 79.65313028764805\n - type: dot_recall\n value: 88.05237315875614\n - type: euclidean_accuracy\n value: 82.68190018039687\n - type: euclidean_ap\n value: 90.1801957900632\n - type: euclidean_f1\n value: 83.63636363636364\n - type: euclidean_precision\n value: 79.52772506852203\n - type: euclidean_recall\n value: 88.19265840542437\n - type: manhattan_accuracy\n value: 82.14070956103427\n - type: manhattan_ap\n value: 89.96178420101427\n - type: manhattan_f1\n value: 83.21087838578791\n - type: manhattan_precision\n value: 78.35605121850475\n - type: manhattan_recall\n value: 88.70703764320785\n - type: max_accuracy\n value: 82.68190018039687\n - type: max_ap\n value: 90.18782350103646\n - type: max_f1\n value: 83.64242087729039\n - task:\n type: Retrieval\n dataset:\n name: MTEB CovidRetrieval\n type: C-MTEB/CovidRetrieval\n config: default\n split: dev\n revision: 1271c7809071a13532e05f25fb53511ffce77117\n metrics:\n - type: map_at_1\n value: 72.234\n - type: map_at_10\n value: 80.10000000000001\n - type: map_at_100\n value: 80.36\n - type: map_at_1000\n value: 80.363\n - type: map_at_3\n value: 78.315\n - type: map_at_5\n value: 79.607\n - type: mrr_at_1\n value: 72.392\n - type: mrr_at_10\n value: 80.117\n - type: mrr_at_100\n value: 80.36999999999999\n - type: mrr_at_1000\n value: 80.373\n - type: mrr_at_3\n value: 78.469\n - type: mrr_at_5\n value: 79.633\n - type: ndcg_at_1\n value: 72.392\n - type: ndcg_at_10\n value: 83.651\n - type: ndcg_at_100\n value: 84.749\n - type: ndcg_at_1000\n 
value: 84.83000000000001\n - type: ndcg_at_3\n value: 80.253\n - type: ndcg_at_5\n value: 82.485\n - type: precision_at_1\n value: 72.392\n - type: precision_at_10\n value: 9.557\n - type: precision_at_100\n value: 1.004\n - type: precision_at_1000\n value: 0.101\n - type: precision_at_3\n value: 28.732000000000003\n - type: precision_at_5\n value: 18.377\n - type: recall_at_1\n value: 72.234\n - type: recall_at_10\n value: 94.573\n - type: recall_at_100\n value: 99.368\n - type: recall_at_1000\n value: 100.0\n - type: recall_at_3\n value: 85.669\n - type: recall_at_5\n value: 91.01700000000001\n - task:\n type: Retrieval\n dataset:\n name: MTEB DuRetrieval\n type: C-MTEB/DuRetrieval\n config: default\n split: dev\n revision: a1a333e290fe30b10f3f56498e3a0d911a693ced\n metrics:\n - type: map_at_1\n value: 26.173999999999996\n - type: map_at_10\n value: 80.04\n - type: map_at_100\n value: 82.94500000000001\n - type: map_at_1000\n value: 82.98100000000001\n - type: map_at_3\n value: 55.562999999999995\n - type: map_at_5\n value: 69.89800000000001\n - type: mrr_at_1\n value: 89.5\n - type: mrr_at_10\n value: 92.996\n - type: mrr_at_100\n value: 93.06400000000001\n - type: mrr_at_1000\n value: 93.065\n - type: mrr_at_3\n value: 92.658\n - type: mrr_at_5\n value: 92.84599999999999\n - type: ndcg_at_1\n value: 89.5\n - type: ndcg_at_10\n value: 87.443\n - type: ndcg_at_100\n value: 90.253\n - type: ndcg_at_1000\n value: 90.549\n - type: ndcg_at_3\n value: 85.874\n - type: ndcg_at_5\n value: 84.842\n - type: precision_at_1\n value: 89.5\n - type: precision_at_10\n value: 41.805\n - type: precision_at_100\n value: 4.827\n - type: precision_at_1000\n value: 0.49\n - type: precision_at_3\n value: 76.85\n - type: precision_at_5\n value: 64.8\n - type: recall_at_1\n value: 26.173999999999996\n - type: recall_at_10\n value: 89.101\n - type: recall_at_100\n value: 98.08099999999999\n - type: recall_at_1000\n value: 99.529\n - type: recall_at_3\n value: 57.902\n - type: 
recall_at_5\n value: 74.602\n - task:\n type: Retrieval\n dataset:\n name: MTEB EcomRetrieval\n type: C-MTEB/EcomRetrieval\n config: default\n split: dev\n revision: 687de13dc7294d6fd9be10c6945f9e8fec8166b9\n metrics:\n - type: map_at_1\n value: 56.10000000000001\n - type: map_at_10\n value: 66.15299999999999\n - type: map_at_100\n value: 66.625\n - type: map_at_1000\n value: 66.636\n - type: map_at_3\n value: 63.632999999999996\n - type: map_at_5\n value: 65.293\n - type: mrr_at_1\n value: 56.10000000000001\n - type: mrr_at_10\n value: 66.15299999999999\n - type: mrr_at_100\n value: 66.625\n - type: mrr_at_1000\n value: 66.636\n - type: mrr_at_3\n value: 63.632999999999996\n - type: mrr_at_5\n value: 65.293\n - type: ndcg_at_1\n value: 56.10000000000001\n - type: ndcg_at_10\n value: 71.146\n - type: ndcg_at_100\n value: 73.27799999999999\n - type: ndcg_at_1000\n value: 73.529\n - type: ndcg_at_3\n value: 66.09\n - type: ndcg_at_5\n value: 69.08999999999999\n - type: precision_at_1\n value: 56.10000000000001\n - type: precision_at_10\n value: 8.68\n - type: precision_at_100\n value: 0.964\n - type: precision_at_1000\n value: 0.098\n - type: precision_at_3\n value: 24.4\n - type: precision_at_5\n value: 16.1\n - type: recall_at_1\n value: 56.10000000000001\n - type: recall_at_10\n value: 86.8\n - type: recall_at_100\n value: 96.39999999999999\n - type: recall_at_1000\n value: 98.3\n - type: recall_at_3\n value: 73.2\n - type: recall_at_5\n value: 80.5\n - task:\n type: Classification\n dataset:\n name: MTEB IFlyTek\n type: C-MTEB/IFlyTek-classification\n config: default\n split: validation\n revision: 421605374b29664c5fc098418fe20ada9bd55f8a\n metrics:\n - type: accuracy\n value: 54.52096960369373\n - type: f1\n value: 40.930845295808695\n - task:\n type: Classification\n dataset:\n name: MTEB JDReview\n type: C-MTEB/JDReview-classification\n config: default\n split: test\n revision: b7c64bd89eb87f8ded463478346f76731f07bf8b\n metrics:\n - type: accuracy\n value: 
86.51031894934334\n - type: ap\n value: 55.9516014323483\n - type: f1\n value: 81.54813679326381\n - task:\n type: STS\n dataset:\n name: MTEB LCQMC\n type: C-MTEB/LCQMC\n config: default\n split: test\n revision: 17f9b096f80380fce5ed12a9be8be7784b337daf\n metrics:\n - type: cos_sim_pearson\n value: 69.67437838574276\n - type: cos_sim_spearman\n value: 73.81314174653045\n - type: euclidean_pearson\n value: 72.63430276680275\n - type: euclidean_spearman\n value: 73.81358736777001\n - type: manhattan_pearson\n value: 72.58743833842829\n - type: manhattan_spearman\n value: 73.7590419009179\n - task:\n type: Reranking\n dataset:\n name: MTEB MMarcoReranking\n type: C-MTEB/Mmarco-reranking\n config: default\n split: dev\n revision: None\n metrics:\n - type: map\n value: 31.648613483640254\n - type: mrr\n value: 30.37420634920635\n - task:\n type: Retrieval\n dataset:\n name: MTEB MMarcoRetrieval\n type: C-MTEB/MMarcoRetrieval\n config: default\n split: dev\n revision: 539bbde593d947e2a124ba72651aafc09eb33fc2\n metrics:\n - type: map_at_1\n value: 73.28099999999999\n - type: map_at_10\n value: 81.977\n - type: map_at_100\n value: 82.222\n - type: map_at_1000\n value: 82.22699999999999\n - type: map_at_3\n value: 80.441\n - type: map_at_5\n value: 81.46600000000001\n - type: mrr_at_1\n value: 75.673\n - type: mrr_at_10\n value: 82.41000000000001\n - type: mrr_at_100\n value: 82.616\n - type: mrr_at_1000\n value: 82.621\n - type: mrr_at_3\n value: 81.094\n - type: mrr_at_5\n value: 81.962\n - type: ndcg_at_1\n value: 75.673\n - type: ndcg_at_10\n value: 85.15599999999999\n - type: ndcg_at_100\n value: 86.151\n - type: ndcg_at_1000\n value: 86.26899999999999\n - type: ndcg_at_3\n value: 82.304\n - type: ndcg_at_5\n value: 84.009\n - type: precision_at_1\n value: 75.673\n - type: precision_at_10\n value: 10.042\n - type: precision_at_100\n value: 1.052\n - type: precision_at_1000\n value: 0.106\n - type: precision_at_3\n value: 30.673000000000002\n - type: precision_at_5\n 
value: 19.326999999999998\n - type: recall_at_1\n value: 73.28099999999999\n - type: recall_at_10\n value: 94.446\n - type: recall_at_100\n value: 98.737\n - type: recall_at_1000\n value: 99.649\n - type: recall_at_3\n value: 86.984\n - type: recall_at_5\n value: 91.024\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (zh-CN)\n type: mteb/amazon_massive_intent\n config: zh-CN\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 81.08607935440484\n - type: f1\n value: 78.24879986066307\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (zh-CN)\n type: mteb/amazon_massive_scenario\n config: zh-CN\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 86.05917955615332\n - type: f1\n value: 85.05279279434997\n - task:\n type: Retrieval\n dataset:\n name: MTEB MedicalRetrieval\n type: C-MTEB/MedicalRetrieval\n config: default\n split: dev\n revision: 2039188fb5800a9803ba5048df7b76e6fb151fc6\n metrics:\n - type: map_at_1\n value: 56.2\n - type: map_at_10\n value: 62.57899999999999\n - type: map_at_100\n value: 63.154999999999994\n - type: map_at_1000\n value: 63.193\n - type: map_at_3\n value: 61.217\n - type: map_at_5\n value: 62.012\n - type: mrr_at_1\n value: 56.3\n - type: mrr_at_10\n value: 62.629000000000005\n - type: mrr_at_100\n value: 63.205999999999996\n - type: mrr_at_1000\n value: 63.244\n - type: mrr_at_3\n value: 61.267\n - type: mrr_at_5\n value: 62.062\n - type: ndcg_at_1\n value: 56.2\n - type: ndcg_at_10\n value: 65.592\n - type: ndcg_at_100\n value: 68.657\n - type: ndcg_at_1000\n value: 69.671\n - type: ndcg_at_3\n value: 62.808\n - type: ndcg_at_5\n value: 64.24499999999999\n - type: precision_at_1\n value: 56.2\n - type: precision_at_10\n value: 7.5\n - type: precision_at_100\n value: 0.899\n - type: precision_at_1000\n value: 0.098\n - type: precision_at_3\n value: 
22.467000000000002\n - type: precision_at_5\n value: 14.180000000000001\n - type: recall_at_1\n value: 56.2\n - type: recall_at_10\n value: 75.0\n - type: recall_at_100\n value: 89.9\n - type: recall_at_1000\n value: 97.89999999999999\n - type: recall_at_3\n value: 67.4\n - type: recall_at_5\n value: 70.89999999999999\n - task:\n type: Classification\n dataset:\n name: MTEB MultilingualSentiment\n type: C-MTEB/MultilingualSentiment-classification\n config: default\n split: validation\n revision: 46958b007a63fdbf239b7672c25d0bea67b5ea1a\n metrics:\n - type: accuracy\n value: 76.87666666666667\n - type: f1\n value: 76.7317686219665\n - task:\n type: PairClassification\n dataset:\n name: MTEB Ocnli\n type: C-MTEB/OCNLI\n config: default\n split: validation\n revision: 66e76a618a34d6d565d5538088562851e6daa7ec\n metrics:\n - type: cos_sim_accuracy\n value: 79.64266377910124\n - type: cos_sim_ap\n value: 84.78274442344829\n - type: cos_sim_f1\n value: 81.16947472745292\n - type: cos_sim_precision\n value: 76.47058823529412\n - type: cos_sim_recall\n value: 86.48363252375924\n - type: dot_accuracy\n value: 79.64266377910124\n - type: dot_ap\n value: 84.7851404063692\n - type: dot_f1\n value: 81.16947472745292\n - type: dot_precision\n value: 76.47058823529412\n - type: dot_recall\n value: 86.48363252375924\n - type: euclidean_accuracy\n value: 79.64266377910124\n - type: euclidean_ap\n value: 84.78068373762378\n - type: euclidean_f1\n value: 81.14794656110837\n - type: euclidean_precision\n value: 76.35009310986965\n - type: euclidean_recall\n value: 86.58922914466737\n - type: manhattan_accuracy\n value: 79.48023822414727\n - type: manhattan_ap\n value: 84.72928897427576\n - type: manhattan_f1\n value: 81.32084770823064\n - type: manhattan_precision\n value: 76.24768946395564\n - type: manhattan_recall\n value: 87.11721224920802\n - type: max_accuracy\n value: 79.64266377910124\n - type: max_ap\n value: 84.7851404063692\n - type: max_f1\n value: 81.32084770823064\n - 
task:\n type: Classification\n dataset:\n name: MTEB OnlineShopping\n type: C-MTEB/OnlineShopping-classification\n config: default\n split: test\n revision: e610f2ebd179a8fda30ae534c3878750a96db120\n metrics:\n - type: accuracy\n value: 94.3\n - type: ap\n value: 92.8664032274438\n - type: f1\n value: 94.29311102997727\n - task:\n type: STS\n dataset:\n name: MTEB PAWSX\n type: C-MTEB/PAWSX\n config: default\n split: test\n revision: 9c6a90e430ac22b5779fb019a23e820b11a8b5e1\n metrics:\n - type: cos_sim_pearson\n value: 48.51392279882909\n - type: cos_sim_spearman\n value: 54.06338895994974\n - type: euclidean_pearson\n value: 52.58480559573412\n - type: euclidean_spearman\n value: 54.06417276612201\n - type: manhattan_pearson\n value: 52.69525121721343\n - type: manhattan_spearman\n value: 54.048147455389675\n - task:\n type: STS\n dataset:\n name: MTEB QBQTC\n type: C-MTEB/QBQTC\n config: default\n split: test\n revision: 790b0510dc52b1553e8c49f3d2afb48c0e5c48b7\n metrics:\n - type: cos_sim_pearson\n value: 29.728387290757325\n - type: cos_sim_spearman\n value: 31.366121633635284\n - type: euclidean_pearson\n value: 29.14588368552961\n - type: euclidean_spearman\n value: 31.36764411112844\n - type: manhattan_pearson\n value: 29.63517350523121\n - type: manhattan_spearman\n value: 31.94157020583762\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (zh)\n type: mteb/sts22-crosslingual-sts\n config: zh\n split: test\n revision: eea2b4fe26a775864c896887d910b76a8098ad3f\n metrics:\n - type: cos_sim_pearson\n value: 63.64868296271406\n - type: cos_sim_spearman\n value: 66.12800618164744\n - type: euclidean_pearson\n value: 63.21405767340238\n - type: euclidean_spearman\n value: 66.12786567790748\n - type: manhattan_pearson\n value: 64.04300276525848\n - type: manhattan_spearman\n value: 66.5066857145652\n - task:\n type: STS\n dataset:\n name: MTEB STSB\n type: C-MTEB/STSB\n config: default\n split: test\n revision: 0cde68302b3541bb8b3c340dc0644b0b745b3dc0\n 
metrics:\n - type: cos_sim_pearson\n value: 81.2302623912794\n - type: cos_sim_spearman\n value: 81.16833673266562\n - type: euclidean_pearson\n value: 79.47647843876024\n - type: euclidean_spearman\n value: 81.16944349524972\n - type: manhattan_pearson\n value: 79.84947238492208\n - type: manhattan_spearman\n value: 81.64626599410026\n - task:\n type: Reranking\n dataset:\n name: MTEB T2Reranking\n type: C-MTEB/T2Reranking\n config: default\n split: dev\n revision: 76631901a18387f85eaa53e5450019b87ad58ef9\n metrics:\n - type: map\n value: 67.80129586475687\n - type: mrr\n value: 77.77402311635554\n - task:\n type: Retrieval\n dataset:\n name: MTEB T2Retrieval\n type: C-MTEB/T2Retrieval\n config: default\n split: dev\n revision: 8731a845f1bf500a4f111cf1070785c793d10e64\n metrics:\n - type: map_at_1\n value: 28.666999999999998\n - type: map_at_10\n value: 81.063\n - type: map_at_100\n value: 84.504\n - type: map_at_1000\n value: 84.552\n - type: map_at_3\n value: 56.897\n - type: map_at_5\n value: 70.073\n - type: mrr_at_1\n value: 92.087\n - type: mrr_at_10\n value: 94.132\n - type: mrr_at_100\n value: 94.19800000000001\n - type: mrr_at_1000\n value: 94.19999999999999\n - type: mrr_at_3\n value: 93.78999999999999\n - type: mrr_at_5\n value: 94.002\n - type: ndcg_at_1\n value: 92.087\n - type: ndcg_at_10\n value: 87.734\n - type: ndcg_at_100\n value: 90.736\n - type: ndcg_at_1000\n value: 91.184\n - type: ndcg_at_3\n value: 88.78\n - type: ndcg_at_5\n value: 87.676\n - type: precision_at_1\n value: 92.087\n - type: precision_at_10\n value: 43.46\n - type: precision_at_100\n value: 5.07\n - type: precision_at_1000\n value: 0.518\n - type: precision_at_3\n value: 77.49000000000001\n - type: precision_at_5\n value: 65.194\n - type: recall_at_1\n value: 28.666999999999998\n - type: recall_at_10\n value: 86.632\n - type: recall_at_100\n value: 96.646\n - type: recall_at_1000\n value: 98.917\n - type: recall_at_3\n value: 58.333999999999996\n - type: recall_at_5\n value: 
72.974\n - task:\n type: Classification\n dataset:\n name: MTEB TNews\n type: C-MTEB/TNews-classification\n config: default\n split: validation\n revision: 317f262bf1e6126357bbe89e875451e4b0938fe4\n metrics:\n - type: accuracy\n value: 52.971999999999994\n - type: f1\n value: 50.2898280984929\n - task:\n type: Clustering\n dataset:\n name: MTEB ThuNewsClusteringP2P\n type: C-MTEB/ThuNewsClusteringP2P\n config: default\n split: test\n revision: 5798586b105c0434e4f0fe5e767abe619442cf93\n metrics:\n - type: v_measure\n value: 86.0797948663824\n - task:\n type: Clustering\n dataset:\n name: MTEB ThuNewsClusteringS2S\n type: C-MTEB/ThuNewsClusteringS2S\n config: default\n split: test\n revision: 8a8b2caeda43f39e13c4bc5bea0f8a667896e10d\n metrics:\n - type: v_measure\n value: 85.10759092255017\n - task:\n type: Retrieval\n dataset:\n name: MTEB VideoRetrieval\n type: C-MTEB/VideoRetrieval\n config: default\n split: dev\n revision: 58c2597a5943a2ba48f4668c3b90d796283c5639\n metrics:\n - type: map_at_1\n value: 65.60000000000001\n - type: map_at_10\n value: 74.773\n - type: map_at_100\n value: 75.128\n - type: map_at_1000\n value: 75.136\n - type: map_at_3\n value: 73.05\n - type: map_at_5\n value: 74.13499999999999\n - type: mrr_at_1\n value: 65.60000000000001\n - type: mrr_at_10\n value: 74.773\n - type: mrr_at_100\n value: 75.128\n - type: mrr_at_1000\n value: 75.136\n - type: mrr_at_3\n value: 73.05\n - type: mrr_at_5\n value: 74.13499999999999\n - type: ndcg_at_1\n value: 65.60000000000001\n - type: ndcg_at_10\n value: 78.84299999999999\n - type: ndcg_at_100\n value: 80.40899999999999\n - type: ndcg_at_1000\n value: 80.57\n - type: ndcg_at_3\n value: 75.40599999999999\n - type: ndcg_at_5\n value: 77.351\n - type: precision_at_1\n value: 65.60000000000001\n - type: precision_at_10\n value: 9.139999999999999\n - type: precision_at_100\n value: 0.984\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_3\n value: 27.400000000000002\n - type: precision_at_5\n 
value: 17.380000000000003\n - type: recall_at_1\n value: 65.60000000000001\n - type: recall_at_10\n value: 91.4\n - type: recall_at_100\n value: 98.4\n - type: recall_at_1000\n value: 99.6\n - type: recall_at_3\n value: 82.19999999999999\n - type: recall_at_5\n value: 86.9\n - task:\n type: Classification\n dataset:\n name: MTEB Waimai\n type: C-MTEB/waimai-classification\n config: default\n split: test\n revision: 339287def212450dcaa9df8c22bf93e9980c7023\n metrics:\n - type: accuracy\n value: 89.47\n - type: ap\n value: 75.59561751845389\n - type: f1\n value: 87.95207751382563\n - task:\n type: Clustering\n dataset:\n name: MTEB AlloProfClusteringP2P\n type: lyon-nlp/alloprof\n config: default\n split: test\n revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b\n metrics:\n - type: v_measure\n value: 76.05592323841036\n - type: v_measure\n value: 64.51718058866508\n - task:\n type: Reranking\n dataset:\n name: MTEB AlloprofReranking\n type: lyon-nlp/mteb-fr-reranking-alloprof-s2p\n config: default\n split: test\n revision: 666fdacebe0291776e86f29345663dfaf80a0db9\n metrics:\n - type: map\n value: 73.08278490943373\n - type: mrr\n value: 74.66561454570449\n - task:\n type: Retrieval\n dataset:\n name: MTEB AlloprofRetrieval\n type: lyon-nlp/alloprof\n config: default\n split: test\n revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b\n metrics:\n - type: map_at_1\n value: 38.912\n - type: map_at_10\n value: 52.437999999999995\n - type: map_at_100\n value: 53.38\n - type: map_at_1000\n value: 53.427\n - type: map_at_3\n value: 48.879\n - type: map_at_5\n value: 50.934000000000005\n - type: mrr_at_1\n value: 44.085\n - type: mrr_at_10\n value: 55.337\n - type: mrr_at_100\n value: 56.016999999999996\n - type: mrr_at_1000\n value: 56.043\n - type: mrr_at_3\n value: 52.55499999999999\n - type: mrr_at_5\n value: 54.20399999999999\n - type: ndcg_at_1\n value: 44.085\n - type: ndcg_at_10\n value: 58.876\n - type: ndcg_at_100\n value: 62.714000000000006\n - type: 
ndcg_at_1000\n value: 63.721000000000004\n - type: ndcg_at_3\n value: 52.444\n - type: ndcg_at_5\n value: 55.692\n - type: precision_at_1\n value: 44.085\n - type: precision_at_10\n value: 9.21\n - type: precision_at_100\n value: 1.164\n - type: precision_at_1000\n value: 0.128\n - type: precision_at_3\n value: 23.043\n - type: precision_at_5\n value: 15.898000000000001\n - type: recall_at_1\n value: 38.912\n - type: recall_at_10\n value: 75.577\n - type: recall_at_100\n value: 92.038\n - type: recall_at_1000\n value: 99.325\n - type: recall_at_3\n value: 58.592\n - type: recall_at_5\n value: 66.235\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (fr)\n type: mteb/amazon_reviews_multi\n config: fr\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 55.532000000000004\n - type: f1\n value: 52.5783943471605\n - task:\n type: Retrieval\n dataset:\n name: MTEB BSARDRetrieval\n type: maastrichtlawtech/bsard\n config: default\n split: test\n revision: 5effa1b9b5fa3b0f9e12523e6e43e5f86a6e6d59\n metrics:\n - type: map_at_1\n value: 8.108\n - type: map_at_10\n value: 14.710999999999999\n - type: map_at_100\n value: 15.891\n - type: map_at_1000\n value: 15.983\n - type: map_at_3\n value: 12.237\n - type: map_at_5\n value: 13.679\n - type: mrr_at_1\n value: 8.108\n - type: mrr_at_10\n value: 14.710999999999999\n - type: mrr_at_100\n value: 15.891\n - type: mrr_at_1000\n value: 15.983\n - type: mrr_at_3\n value: 12.237\n - type: mrr_at_5\n value: 13.679\n - type: ndcg_at_1\n value: 8.108\n - type: ndcg_at_10\n value: 18.796\n - type: ndcg_at_100\n value: 25.098\n - type: ndcg_at_1000\n value: 27.951999999999998\n - type: ndcg_at_3\n value: 13.712\n - type: ndcg_at_5\n value: 16.309\n - type: precision_at_1\n value: 8.108\n - type: precision_at_10\n value: 3.198\n - type: precision_at_100\n value: 0.626\n - type: precision_at_1000\n value: 0.086\n - type: precision_at_3\n value: 6.006\n - 
type: precision_at_5\n value: 4.865\n - type: recall_at_1\n value: 8.108\n - type: recall_at_10\n value: 31.982\n - type: recall_at_100\n value: 62.613\n - type: recall_at_1000\n value: 86.036\n - type: recall_at_3\n value: 18.018\n - type: recall_at_5\n value: 24.324\n - task:\n type: Clustering\n dataset:\n name: MTEB HALClusteringS2S\n type: lyon-nlp/clustering-hal-s2s\n config: default\n split: test\n revision: e06ebbbb123f8144bef1a5d18796f3dec9ae2915\n metrics:\n - type: v_measure\n value: 30.833269778867116\n - task:\n type: Clustering\n dataset:\n name: MTEB MLSUMClusteringP2P\n type: mlsum\n config: default\n split: test\n revision: b5d54f8f3b61ae17845046286940f03c6bc79bc7\n metrics:\n - type: v_measure\n value: 50.0281928004713\n - type: v_measure\n value: 43.699961510636534\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (fr)\n type: mteb/mtop_domain\n config: fr\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 96.68963357344191\n - type: f1\n value: 96.45175170820961\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (fr)\n type: mteb/mtop_intent\n config: fr\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 87.46946445349202\n - type: f1\n value: 65.79860440988624\n - task:\n type: Classification\n dataset:\n name: MTEB MasakhaNEWSClassification (fra)\n type: masakhane/masakhanews\n config: fra\n split: test\n revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60\n metrics:\n - type: accuracy\n value: 82.60663507109005\n - type: f1\n value: 77.20462646604777\n - task:\n type: Clustering\n dataset:\n name: MTEB MasakhaNEWSClusteringP2P (fra)\n type: masakhane/masakhanews\n config: fra\n split: test\n revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60\n metrics:\n - type: v_measure\n value: 60.19311264967803\n - type: v_measure\n value: 63.6235764409785\n - task:\n type: 
Classification\n dataset:\n name: MTEB MassiveIntentClassification (fr)\n type: mteb/amazon_massive_intent\n config: fr\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 81.65097511768661\n - type: f1\n value: 78.77796091490924\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (fr)\n type: mteb/amazon_massive_scenario\n config: fr\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 86.64425016812373\n - type: f1\n value: 85.4912728670017\n - task:\n type: Retrieval\n dataset:\n name: MTEB MintakaRetrieval (fr)\n type: jinaai/mintakaqa\n config: fr\n split: test\n revision: efa78cc2f74bbcd21eff2261f9e13aebe40b814e\n metrics:\n - type: map_at_1\n value: 35.913000000000004\n - type: map_at_10\n value: 48.147\n - type: map_at_100\n value: 48.91\n - type: map_at_1000\n value: 48.949\n - type: map_at_3\n value: 45.269999999999996\n - type: map_at_5\n value: 47.115\n - type: mrr_at_1\n value: 35.913000000000004\n - type: mrr_at_10\n value: 48.147\n - type: mrr_at_100\n value: 48.91\n - type: mrr_at_1000\n value: 48.949\n - type: mrr_at_3\n value: 45.269999999999996\n - type: mrr_at_5\n value: 47.115\n - type: ndcg_at_1\n value: 35.913000000000004\n - type: ndcg_at_10\n value: 54.03\n - type: ndcg_at_100\n value: 57.839\n - type: ndcg_at_1000\n value: 58.925000000000004\n - type: ndcg_at_3\n value: 48.217999999999996\n - type: ndcg_at_5\n value: 51.56699999999999\n - type: precision_at_1\n value: 35.913000000000004\n - type: precision_at_10\n value: 7.244000000000001\n - type: precision_at_100\n value: 0.9039999999999999\n - type: precision_at_1000\n value: 0.099\n - type: precision_at_3\n value: 18.905\n - type: precision_at_5\n value: 12.981000000000002\n - type: recall_at_1\n value: 35.913000000000004\n - type: recall_at_10\n value: 72.441\n - type: recall_at_100\n value: 90.41799999999999\n - type: recall_at_1000\n value: 
99.099\n - type: recall_at_3\n value: 56.716\n - type: recall_at_5\n value: 64.90599999999999\n - task:\n type: PairClassification\n dataset:\n name: MTEB OpusparcusPC (fr)\n type: GEM/opusparcus\n config: fr\n split: test\n revision: 9e9b1f8ef51616073f47f306f7f47dd91663f86a\n metrics:\n - type: cos_sim_accuracy\n value: 99.90069513406156\n - type: cos_sim_ap\n value: 100.0\n - type: cos_sim_f1\n value: 99.95032290114257\n - type: cos_sim_precision\n value: 100.0\n - type: cos_sim_recall\n value: 99.90069513406156\n - type: dot_accuracy\n value: 99.90069513406156\n - type: dot_ap\n value: 100.0\n - type: dot_f1\n value: 99.95032290114257\n - type: dot_precision\n value: 100.0\n - type: dot_recall\n value: 99.90069513406156\n - type: euclidean_accuracy\n value: 99.90069513406156\n - type: euclidean_ap\n value: 100.0\n - type: euclidean_f1\n value: 99.95032290114257\n - type: euclidean_precision\n value: 100.0\n - type: euclidean_recall\n value: 99.90069513406156\n - type: manhattan_accuracy\n value: 99.90069513406156\n - type: manhattan_ap\n value: 100.0\n - type: manhattan_f1\n value: 99.95032290114257\n - type: manhattan_precision\n value: 100.0\n - type: manhattan_recall\n value: 99.90069513406156\n - type: max_accuracy\n value: 99.90069513406156\n - type: max_ap\n value: 100.0\n - type: max_f1\n value: 99.95032290114257\n - task:\n type: PairClassification\n dataset:\n name: MTEB PawsX (fr)\n type: paws-x\n config: fr\n split: test\n revision: 8a04d940a42cd40658986fdd8e3da561533a3646\n metrics:\n - type: cos_sim_accuracy\n value: 75.25\n - type: cos_sim_ap\n value: 80.86376001270014\n - type: cos_sim_f1\n value: 73.65945437441204\n - type: cos_sim_precision\n value: 64.02289452166802\n - type: cos_sim_recall\n value: 86.71096345514951\n - type: dot_accuracy\n value: 75.25\n - type: dot_ap\n value: 80.93686107633002\n - type: dot_f1\n value: 73.65945437441204\n - type: dot_precision\n value: 64.02289452166802\n - type: dot_recall\n value: 86.71096345514951\n - 
type: euclidean_accuracy\n value: 75.25\n - type: euclidean_ap\n value: 80.86379136218862\n - type: euclidean_f1\n value: 73.65945437441204\n - type: euclidean_precision\n value: 64.02289452166802\n - type: euclidean_recall\n value: 86.71096345514951\n - type: manhattan_accuracy\n value: 75.3\n - type: manhattan_ap\n value: 80.87826606097734\n - type: manhattan_f1\n value: 73.68421052631581\n - type: manhattan_precision\n value: 64.0\n - type: manhattan_recall\n value: 86.82170542635659\n - type: max_accuracy\n value: 75.3\n - type: max_ap\n value: 80.93686107633002\n - type: max_f1\n value: 73.68421052631581\n - task:\n type: STS\n dataset:\n name: MTEB SICKFr\n type: Lajavaness/SICK-fr\n config: default\n split: test\n revision: e077ab4cf4774a1e36d86d593b150422fafd8e8a\n metrics:\n - type: cos_sim_pearson\n value: 81.42349425981143\n - type: cos_sim_spearman\n value: 78.90454327031226\n - type: euclidean_pearson\n value: 78.39086497435166\n - type: euclidean_spearman\n value: 78.9046133980509\n - type: manhattan_pearson\n value: 78.63743094286502\n - type: manhattan_spearman\n value: 79.12136348449269\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (fr)\n type: mteb/sts22-crosslingual-sts\n config: fr\n split: test\n revision: eea2b4fe26a775864c896887d910b76a8098ad3f\n metrics:\n - type: cos_sim_pearson\n value: 81.452697919749\n - type: cos_sim_spearman\n value: 82.58116836039301\n - type: euclidean_pearson\n value: 81.04038478932786\n - type: euclidean_spearman\n value: 82.58116836039301\n - type: manhattan_pearson\n value: 81.37075396187771\n - type: manhattan_spearman\n value: 82.73678231355368\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmarkMultilingualSTS (fr)\n type: stsb_multi_mt\n config: fr\n split: test\n revision: 93d57ef91790589e3ce9c365164337a8a78b7632\n metrics:\n - type: cos_sim_pearson\n value: 85.7419764013806\n - type: cos_sim_spearman\n value: 85.46085808849622\n - type: euclidean_pearson\n value: 83.70449639870063\n - type: 
euclidean_spearman\n value: 85.46159013076233\n - type: manhattan_pearson\n value: 83.95259510313929\n - type: manhattan_spearman\n value: 85.8029724659458\n - task:\n type: Summarization\n dataset:\n name: MTEB SummEvalFr\n type: lyon-nlp/summarization-summeval-fr-p2p\n config: default\n split: test\n revision: b385812de6a9577b6f4d0f88c6a6e35395a94054\n metrics:\n - type: cos_sim_pearson\n value: 32.61063271753325\n - type: cos_sim_spearman\n value: 31.454589417353603\n - type: dot_pearson\n value: 32.6106288643431\n - type: dot_spearman\n value: 31.454589417353603\n - task:\n type: Reranking\n dataset:\n name: MTEB SyntecReranking\n type: lyon-nlp/mteb-fr-reranking-syntec-s2p\n config: default\n split: test\n revision: b205c5084a0934ce8af14338bf03feb19499c84d\n metrics:\n - type: map\n value: 84.31666666666666\n - type: mrr\n value: 84.31666666666666\n - task:\n type: Retrieval\n dataset:\n name: MTEB SyntecRetrieval\n type: lyon-nlp/mteb-fr-retrieval-syntec-s2p\n config: default\n split: test\n revision: 77f7e271bf4a92b24fce5119f3486b583ca016ff\n metrics:\n - type: map_at_1\n value: 63.0\n - type: map_at_10\n value: 73.471\n - type: map_at_100\n value: 73.87\n - type: map_at_1000\n value: 73.87\n - type: map_at_3\n value: 70.5\n - type: map_at_5\n value: 73.05\n - type: mrr_at_1\n value: 63.0\n - type: mrr_at_10\n value: 73.471\n - type: mrr_at_100\n value: 73.87\n - type: mrr_at_1000\n value: 73.87\n - type: mrr_at_3\n value: 70.5\n - type: mrr_at_5\n value: 73.05\n - type: ndcg_at_1\n value: 63.0\n - type: ndcg_at_10\n value: 78.255\n - type: ndcg_at_100\n value: 79.88\n - type: ndcg_at_1000\n value: 79.88\n - type: ndcg_at_3\n value: 72.702\n - type: ndcg_at_5\n value: 77.264\n - type: precision_at_1\n value: 63.0\n - type: precision_at_10\n value: 9.3\n - type: precision_at_100\n value: 1.0\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_3\n value: 26.333000000000002\n - type: precision_at_5\n value: 18.0\n - type: recall_at_1\n value: 63.0\n 
- type: recall_at_10\n value: 93.0\n - type: recall_at_100\n value: 100.0\n - type: recall_at_1000\n value: 100.0\n - type: recall_at_3\n value: 79.0\n - type: recall_at_5\n value: 90.0\n - task:\n type: Retrieval\n dataset:\n name: MTEB XPQARetrieval (fr)\n type: jinaai/xpqa\n config: fr\n split: test\n revision: c99d599f0a6ab9b85b065da6f9d94f9cf731679f\n metrics:\n - type: map_at_1\n value: 40.338\n - type: map_at_10\n value: 61.927\n - type: map_at_100\n value: 63.361999999999995\n - type: map_at_1000\n value: 63.405\n - type: map_at_3\n value: 55.479\n - type: map_at_5\n value: 59.732\n - type: mrr_at_1\n value: 63.551\n - type: mrr_at_10\n value: 71.006\n - type: mrr_at_100\n value: 71.501\n - type: mrr_at_1000\n value: 71.509\n - type: mrr_at_3\n value: 69.07\n - type: mrr_at_5\n value: 70.165\n - type: ndcg_at_1\n value: 63.551\n - type: ndcg_at_10\n value: 68.297\n - type: ndcg_at_100\n value: 73.13199999999999\n - type: ndcg_at_1000\n value: 73.751\n - type: ndcg_at_3\n value: 62.999\n - type: ndcg_at_5\n value: 64.89\n - type: precision_at_1\n value: 63.551\n - type: precision_at_10\n value: 15.661\n - type: precision_at_100\n value: 1.9789999999999999\n - type: precision_at_1000\n value: 0.207\n - type: precision_at_3\n value: 38.273\n - type: precision_at_5\n value: 27.61\n - type: recall_at_1\n value: 40.338\n - type: recall_at_10\n value: 77.267\n - type: recall_at_100\n value: 95.892\n - type: recall_at_1000\n value: 99.75500000000001\n - type: recall_at_3\n value: 60.36\n - type: recall_at_5\n value: 68.825\n - task:\n type: Clustering\n dataset:\n name: MTEB 8TagsClustering\n type: PL-MTEB/8tags-clustering\n config: default\n split: test\n revision: None\n metrics:\n - type: v_measure\n value: 51.36126303874126\n - task:\n type: Classification\n dataset:\n name: MTEB AllegroReviews\n type: PL-MTEB/allegro-reviews\n config: default\n split: test\n revision: None\n metrics:\n - type: accuracy\n value: 67.13717693836979\n - type: f1\n value: 
57.27609848003782\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna-PL\n type: clarin-knext/arguana-pl\n config: default\n split: test\n revision: 63fc86750af76253e8c760fc9e534bbf24d260a2\n metrics:\n - type: map_at_1\n value: 35.276999999999994\n - type: map_at_10\n value: 51.086\n - type: map_at_100\n value: 51.788000000000004\n - type: map_at_1000\n value: 51.791\n - type: map_at_3\n value: 46.147\n - type: map_at_5\n value: 49.078\n - type: mrr_at_1\n value: 35.917\n - type: mrr_at_10\n value: 51.315999999999995\n - type: mrr_at_100\n value: 52.018\n - type: mrr_at_1000\n value: 52.022\n - type: mrr_at_3\n value: 46.349000000000004\n - type: mrr_at_5\n value: 49.297000000000004\n - type: ndcg_at_1\n value: 35.276999999999994\n - type: ndcg_at_10\n value: 59.870999999999995\n - type: ndcg_at_100\n value: 62.590999999999994\n - type: ndcg_at_1000\n value: 62.661\n - type: ndcg_at_3\n value: 49.745\n - type: ndcg_at_5\n value: 55.067\n - type: precision_at_1\n value: 35.276999999999994\n - type: precision_at_10\n value: 8.791\n - type: precision_at_100\n value: 0.991\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_3\n value: 20.057\n - type: precision_at_5\n value: 14.637\n - type: recall_at_1\n value: 35.276999999999994\n - type: recall_at_10\n value: 87.909\n - type: recall_at_100\n value: 99.14699999999999\n - type: recall_at_1000\n value: 99.644\n - type: recall_at_3\n value: 60.171\n - type: recall_at_5\n value: 73.18599999999999\n - task:\n type: Classification\n dataset:\n name: MTEB CBD\n type: PL-MTEB/cbd\n config: default\n split: test\n revision: None\n metrics:\n - type: accuracy\n value: 78.03000000000002\n - type: ap\n value: 29.12548553897622\n - type: f1\n value: 66.54857118886073\n - task:\n type: PairClassification\n dataset:\n name: MTEB CDSC-E\n type: PL-MTEB/cdsce-pairclassification\n config: default\n split: test\n revision: None\n metrics:\n - type: cos_sim_accuracy\n value: 89.0\n - type: cos_sim_ap\n value: 
76.75437826834582\n - type: cos_sim_f1\n value: 66.4850136239782\n - type: cos_sim_precision\n value: 68.92655367231639\n - type: cos_sim_recall\n value: 64.21052631578948\n - type: dot_accuracy\n value: 89.0\n - type: dot_ap\n value: 76.75437826834582\n - type: dot_f1\n value: 66.4850136239782\n - type: dot_precision\n value: 68.92655367231639\n - type: dot_recall\n value: 64.21052631578948\n - type: euclidean_accuracy\n value: 89.0\n - type: euclidean_ap\n value: 76.75437826834582\n - type: euclidean_f1\n value: 66.4850136239782\n - type: euclidean_precision\n value: 68.92655367231639\n - type: euclidean_recall\n value: 64.21052631578948\n - type: manhattan_accuracy\n value: 89.0\n - type: manhattan_ap\n value: 76.66074220647083\n - type: manhattan_f1\n value: 66.47058823529412\n - type: manhattan_precision\n value: 75.33333333333333\n - type: manhattan_recall\n value: 59.473684210526315\n - type: max_accuracy\n value: 89.0\n - type: max_ap\n value: 76.75437826834582\n - type: max_f1\n value: 66.4850136239782\n - task:\n type: STS\n dataset:\n name: MTEB CDSC-R\n type: PL-MTEB/cdscr-sts\n config: default\n split: test\n revision: None\n metrics:\n - type: cos_sim_pearson\n value: 93.12903172428328\n - type: cos_sim_spearman\n value: 92.66381487060741\n - type: euclidean_pearson\n value: 90.37278396708922\n - type: euclidean_spearman\n value: 92.66381487060741\n - type: manhattan_pearson\n value: 90.32503296540962\n - type: manhattan_spearman\n value: 92.6902938354313\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia-PL\n type: clarin-knext/dbpedia-pl\n config: default\n split: test\n revision: 76afe41d9af165cc40999fcaa92312b8b012064a\n metrics:\n - type: map_at_1\n value: 8.83\n - type: map_at_10\n value: 18.326\n - type: map_at_100\n value: 26.496\n - type: map_at_1000\n value: 28.455000000000002\n - type: map_at_3\n value: 12.933\n - type: map_at_5\n value: 15.168000000000001\n - type: mrr_at_1\n value: 66.0\n - type: mrr_at_10\n value: 
72.76700000000001\n - type: mrr_at_100\n value: 73.203\n - type: mrr_at_1000\n value: 73.219\n - type: mrr_at_3\n value: 71.458\n - type: mrr_at_5\n value: 72.246\n - type: ndcg_at_1\n value: 55.375\n - type: ndcg_at_10\n value: 41.3\n - type: ndcg_at_100\n value: 45.891\n - type: ndcg_at_1000\n value: 52.905\n - type: ndcg_at_3\n value: 46.472\n - type: ndcg_at_5\n value: 43.734\n - type: precision_at_1\n value: 66.0\n - type: precision_at_10\n value: 33.074999999999996\n - type: precision_at_100\n value: 11.094999999999999\n - type: precision_at_1000\n value: 2.374\n - type: precision_at_3\n value: 48.583\n - type: precision_at_5\n value: 42.0\n - type: recall_at_1\n value: 8.83\n - type: recall_at_10\n value: 22.587\n - type: recall_at_100\n value: 50.61600000000001\n - type: recall_at_1000\n value: 73.559\n - type: recall_at_3\n value: 13.688\n - type: recall_at_5\n value: 16.855\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA-PL\n type: clarin-knext/fiqa-pl\n config: default\n split: test\n revision: 2e535829717f8bf9dc829b7f911cc5bbd4e6608e\n metrics:\n - type: map_at_1\n value: 20.587\n - type: map_at_10\n value: 33.095\n - type: map_at_100\n value: 35.24\n - type: map_at_1000\n value: 35.429\n - type: map_at_3\n value: 28.626\n - type: map_at_5\n value: 31.136999999999997\n - type: mrr_at_1\n value: 40.586\n - type: mrr_at_10\n value: 49.033\n - type: mrr_at_100\n value: 49.952999999999996\n - type: mrr_at_1000\n value: 49.992\n - type: mrr_at_3\n value: 46.553\n - type: mrr_at_5\n value: 48.035\n - type: ndcg_at_1\n value: 40.586\n - type: ndcg_at_10\n value: 41.046\n - type: ndcg_at_100\n value: 48.586\n - type: ndcg_at_1000\n value: 51.634\n - type: ndcg_at_3\n value: 36.773\n - type: ndcg_at_5\n value: 38.389\n - type: precision_at_1\n value: 40.586\n - type: precision_at_10\n value: 11.466\n - type: precision_at_100\n value: 1.909\n - type: precision_at_1000\n value: 0.245\n - type: precision_at_3\n value: 24.434\n - type: precision_at_5\n 
value: 18.426000000000002\n - type: recall_at_1\n value: 20.587\n - type: recall_at_10\n value: 47.986000000000004\n - type: recall_at_100\n value: 75.761\n - type: recall_at_1000\n value: 94.065\n - type: recall_at_3\n value: 33.339\n - type: recall_at_5\n value: 39.765\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA-PL\n type: clarin-knext/hotpotqa-pl\n config: default\n split: test\n revision: a0bd479ac97b4ccb5bd6ce320c415d0bb4beb907\n metrics:\n - type: map_at_1\n value: 40.878\n - type: map_at_10\n value: 58.775999999999996\n - type: map_at_100\n value: 59.632\n - type: map_at_1000\n value: 59.707\n - type: map_at_3\n value: 56.074\n - type: map_at_5\n value: 57.629\n - type: mrr_at_1\n value: 81.756\n - type: mrr_at_10\n value: 86.117\n - type: mrr_at_100\n value: 86.299\n - type: mrr_at_1000\n value: 86.30600000000001\n - type: mrr_at_3\n value: 85.345\n - type: mrr_at_5\n value: 85.832\n - type: ndcg_at_1\n value: 81.756\n - type: ndcg_at_10\n value: 67.608\n - type: ndcg_at_100\n value: 70.575\n - type: ndcg_at_1000\n value: 71.99600000000001\n - type: ndcg_at_3\n value: 63.723\n - type: ndcg_at_5\n value: 65.70700000000001\n - type: precision_at_1\n value: 81.756\n - type: precision_at_10\n value: 13.619\n - type: precision_at_100\n value: 1.5939999999999999\n - type: precision_at_1000\n value: 0.178\n - type: precision_at_3\n value: 39.604\n - type: precision_at_5\n value: 25.332\n - type: recall_at_1\n value: 40.878\n - type: recall_at_10\n value: 68.096\n - type: recall_at_100\n value: 79.696\n - type: recall_at_1000\n value: 89.082\n - type: recall_at_3\n value: 59.406000000000006\n - type: recall_at_5\n value: 63.329\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO-PL\n type: clarin-knext/msmarco-pl\n config: default\n split: test\n revision: 8634c07806d5cce3a6138e260e59b81760a0a640\n metrics:\n - type: map_at_1\n value: 2.1839999999999997\n - type: map_at_10\n value: 11.346\n - type: map_at_100\n value: 30.325000000000003\n - 
type: map_at_1000\n value: 37.806\n - type: map_at_3\n value: 4.842\n - type: map_at_5\n value: 6.891\n - type: mrr_at_1\n value: 86.047\n - type: mrr_at_10\n value: 89.14699999999999\n - type: mrr_at_100\n value: 89.46600000000001\n - type: mrr_at_1000\n value: 89.46600000000001\n - type: mrr_at_3\n value: 89.14699999999999\n - type: mrr_at_5\n value: 89.14699999999999\n - type: ndcg_at_1\n value: 67.829\n - type: ndcg_at_10\n value: 62.222\n - type: ndcg_at_100\n value: 55.337\n - type: ndcg_at_1000\n value: 64.076\n - type: ndcg_at_3\n value: 68.12700000000001\n - type: ndcg_at_5\n value: 64.987\n - type: precision_at_1\n value: 86.047\n - type: precision_at_10\n value: 69.535\n - type: precision_at_100\n value: 32.93\n - type: precision_at_1000\n value: 6.6049999999999995\n - type: precision_at_3\n value: 79.845\n - type: precision_at_5\n value: 75.349\n - type: recall_at_1\n value: 2.1839999999999997\n - type: recall_at_10\n value: 12.866\n - type: recall_at_100\n value: 43.505\n - type: recall_at_1000\n value: 72.366\n - type: recall_at_3\n value: 4.947\n - type: recall_at_5\n value: 7.192\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (pl)\n type: mteb/amazon_massive_intent\n config: pl\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 80.75319435104238\n - type: f1\n value: 77.58961444860606\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (pl)\n type: mteb/amazon_massive_scenario\n config: pl\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 85.54472091459313\n - type: f1\n value: 84.29498563572106\n - task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus-PL\n type: clarin-knext/nfcorpus-pl\n config: default\n split: test\n revision: 9a6f9567fda928260afed2de480d79c98bf0bec0\n metrics:\n - type: map_at_1\n value: 4.367\n - type: map_at_10\n value: 10.38\n - type: 
map_at_100\n value: 13.516\n - type: map_at_1000\n value: 14.982000000000001\n - type: map_at_3\n value: 7.367\n - type: map_at_5\n value: 8.59\n - type: mrr_at_1\n value: 41.486000000000004\n - type: mrr_at_10\n value: 48.886\n - type: mrr_at_100\n value: 49.657000000000004\n - type: mrr_at_1000\n value: 49.713\n - type: mrr_at_3\n value: 46.904\n - type: mrr_at_5\n value: 48.065000000000005\n - type: ndcg_at_1\n value: 40.402\n - type: ndcg_at_10\n value: 30.885\n - type: ndcg_at_100\n value: 28.393\n - type: ndcg_at_1000\n value: 37.428\n - type: ndcg_at_3\n value: 35.394999999999996\n - type: ndcg_at_5\n value: 33.391999999999996\n - type: precision_at_1\n value: 41.486000000000004\n - type: precision_at_10\n value: 23.437\n - type: precision_at_100\n value: 7.638\n - type: precision_at_1000\n value: 2.0389999999999997\n - type: precision_at_3\n value: 32.817\n - type: precision_at_5\n value: 28.915999999999997\n - type: recall_at_1\n value: 4.367\n - type: recall_at_10\n value: 14.655000000000001\n - type: recall_at_100\n value: 29.665999999999997\n - type: recall_at_1000\n value: 62.073\n - type: recall_at_3\n value: 8.51\n - type: recall_at_5\n value: 10.689\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ-PL\n type: clarin-knext/nq-pl\n config: default\n split: test\n revision: f171245712cf85dd4700b06bef18001578d0ca8d\n metrics:\n - type: map_at_1\n value: 28.616000000000003\n - type: map_at_10\n value: 41.626000000000005\n - type: map_at_100\n value: 42.689\n - type: map_at_1000\n value: 42.733\n - type: map_at_3\n value: 37.729\n - type: map_at_5\n value: 39.879999999999995\n - type: mrr_at_1\n value: 32.068000000000005\n - type: mrr_at_10\n value: 44.029\n - type: mrr_at_100\n value: 44.87\n - type: mrr_at_1000\n value: 44.901\n - type: mrr_at_3\n value: 40.687\n - type: mrr_at_5\n value: 42.625\n - type: ndcg_at_1\n value: 32.068000000000005\n - type: ndcg_at_10\n value: 48.449999999999996\n - type: ndcg_at_100\n value: 53.13\n - type: 
ndcg_at_1000\n value: 54.186\n - type: ndcg_at_3\n value: 40.983999999999995\n - type: ndcg_at_5\n value: 44.628\n - type: precision_at_1\n value: 32.068000000000005\n - type: precision_at_10\n value: 7.9750000000000005\n - type: precision_at_100\n value: 1.061\n - type: precision_at_1000\n value: 0.116\n - type: precision_at_3\n value: 18.404999999999998\n - type: precision_at_5\n value: 13.111\n - type: recall_at_1\n value: 28.616000000000003\n - type: recall_at_10\n value: 66.956\n - type: recall_at_100\n value: 87.657\n - type: recall_at_1000\n value: 95.548\n - type: recall_at_3\n value: 47.453\n - type: recall_at_5\n value: 55.87800000000001\n - task:\n type: Classification\n dataset:\n name: MTEB PAC\n type: laugustyniak/abusive-clauses-pl\n config: default\n split: test\n revision: None\n metrics:\n - type: accuracy\n value: 69.04141326382856\n - type: ap\n value: 77.47589122111044\n - type: f1\n value: 66.6332277374775\n - task:\n type: PairClassification\n dataset:\n name: MTEB PPC\n type: PL-MTEB/ppc-pairclassification\n config: default\n split: test\n revision: None\n metrics:\n - type: cos_sim_accuracy\n value: 86.4\n - type: cos_sim_ap\n value: 94.1044939667201\n - type: cos_sim_f1\n value: 88.78048780487805\n - type: cos_sim_precision\n value: 87.22044728434504\n - type: cos_sim_recall\n value: 90.39735099337747\n - type: dot_accuracy\n value: 86.4\n - type: dot_ap\n value: 94.1044939667201\n - type: dot_f1\n value: 88.78048780487805\n - type: dot_precision\n value: 87.22044728434504\n - type: dot_recall\n value: 90.39735099337747\n - type: euclidean_accuracy\n value: 86.4\n - type: euclidean_ap\n value: 94.1044939667201\n - type: euclidean_f1\n value: 88.78048780487805\n - type: euclidean_precision\n value: 87.22044728434504\n - type: euclidean_recall\n value: 90.39735099337747\n - type: manhattan_accuracy\n value: 86.4\n - type: manhattan_ap\n value: 94.11438365697387\n - type: manhattan_f1\n value: 88.77968877968877\n - type: manhattan_precision\n 
value: 87.84440842787681\n - type: manhattan_recall\n value: 89.73509933774835\n - type: max_accuracy\n value: 86.4\n - type: max_ap\n value: 94.11438365697387\n - type: max_f1\n value: 88.78048780487805\n - task:\n type: PairClassification\n dataset:\n name: MTEB PSC\n type: PL-MTEB/psc-pairclassification\n config: default\n split: test\n revision: None\n metrics:\n - type: cos_sim_accuracy\n value: 97.86641929499072\n - type: cos_sim_ap\n value: 99.36904211868182\n - type: cos_sim_f1\n value: 96.56203288490283\n - type: cos_sim_precision\n value: 94.72140762463343\n - type: cos_sim_recall\n value: 98.47560975609755\n - type: dot_accuracy\n value: 97.86641929499072\n - type: dot_ap\n value: 99.36904211868183\n - type: dot_f1\n value: 96.56203288490283\n - type: dot_precision\n value: 94.72140762463343\n - type: dot_recall\n value: 98.47560975609755\n - type: euclidean_accuracy\n value: 97.86641929499072\n - type: euclidean_ap\n value: 99.36904211868183\n - type: euclidean_f1\n value: 96.56203288490283\n - type: euclidean_precision\n value: 94.72140762463343\n - type: euclidean_recall\n value: 98.47560975609755\n - type: manhattan_accuracy\n value: 98.14471243042672\n - type: manhattan_ap\n value: 99.43359540492416\n - type: manhattan_f1\n value: 96.98795180722892\n - type: manhattan_precision\n value: 95.83333333333334\n - type: manhattan_recall\n value: 98.17073170731707\n - type: max_accuracy\n value: 98.14471243042672\n - type: max_ap\n value: 99.43359540492416\n - type: max_f1\n value: 96.98795180722892\n - task:\n type: Classification\n dataset:\n name: MTEB PolEmo2.0-IN\n type: PL-MTEB/polemo2_in\n config: default\n split: test\n revision: None\n metrics:\n - type: accuracy\n value: 89.39058171745152\n - type: f1\n value: 86.8552093529568\n - task:\n type: Classification\n dataset:\n name: MTEB PolEmo2.0-OUT\n type: PL-MTEB/polemo2_out\n config: default\n split: test\n revision: None\n metrics:\n - type: accuracy\n value: 74.97975708502024\n - type: f1\n 
value: 58.73081628832407\n - task:\n type: Retrieval\n dataset:\n name: MTEB Quora-PL\n type: clarin-knext/quora-pl\n config: default\n split: test\n revision: 0be27e93455051e531182b85e85e425aba12e9d4\n metrics:\n - type: map_at_1\n value: 64.917\n - type: map_at_10\n value: 78.74600000000001\n - type: map_at_100\n value: 79.501\n - type: map_at_1000\n value: 79.524\n - type: map_at_3\n value: 75.549\n - type: map_at_5\n value: 77.495\n - type: mrr_at_1\n value: 74.9\n - type: mrr_at_10\n value: 82.112\n - type: mrr_at_100\n value: 82.314\n - type: mrr_at_1000\n value: 82.317\n - type: mrr_at_3\n value: 80.745\n - type: mrr_at_5\n value: 81.607\n - type: ndcg_at_1\n value: 74.83999999999999\n - type: ndcg_at_10\n value: 83.214\n - type: ndcg_at_100\n value: 84.997\n - type: ndcg_at_1000\n value: 85.207\n - type: ndcg_at_3\n value: 79.547\n - type: ndcg_at_5\n value: 81.46600000000001\n - type: precision_at_1\n value: 74.83999999999999\n - type: precision_at_10\n value: 12.822\n - type: precision_at_100\n value: 1.506\n - type: precision_at_1000\n value: 0.156\n - type: precision_at_3\n value: 34.903\n - type: precision_at_5\n value: 23.16\n - type: recall_at_1\n value: 64.917\n - type: recall_at_10\n value: 92.27199999999999\n - type: recall_at_100\n value: 98.715\n - type: recall_at_1000\n value: 99.854\n - type: recall_at_3\n value: 82.04599999999999\n - type: recall_at_5\n value: 87.2\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS-PL\n type: clarin-knext/scidocs-pl\n config: default\n split: test\n revision: 45452b03f05560207ef19149545f168e596c9337\n metrics:\n - type: map_at_1\n value: 3.51\n - type: map_at_10\n value: 9.046999999999999\n - type: map_at_100\n value: 10.823\n - type: map_at_1000\n value: 11.144\n - type: map_at_3\n value: 6.257\n - type: map_at_5\n value: 7.648000000000001\n - type: mrr_at_1\n value: 17.299999999999997\n - type: mrr_at_10\n value: 27.419\n - type: mrr_at_100\n value: 28.618\n - type: mrr_at_1000\n value: 28.685\n - 
type: mrr_at_3\n value: 23.817\n - type: mrr_at_5\n value: 25.927\n - type: ndcg_at_1\n value: 17.299999999999997\n - type: ndcg_at_10\n value: 16.084\n - type: ndcg_at_100\n value: 23.729\n - type: ndcg_at_1000\n value: 29.476999999999997\n - type: ndcg_at_3\n value: 14.327000000000002\n - type: ndcg_at_5\n value: 13.017999999999999\n - type: precision_at_1\n value: 17.299999999999997\n - type: precision_at_10\n value: 8.63\n - type: precision_at_100\n value: 1.981\n - type: precision_at_1000\n value: 0.336\n - type: precision_at_3\n value: 13.4\n - type: precision_at_5\n value: 11.700000000000001\n - type: recall_at_1\n value: 3.51\n - type: recall_at_10\n value: 17.518\n - type: recall_at_100\n value: 40.275\n - type: recall_at_1000\n value: 68.203\n - type: recall_at_3\n value: 8.155\n - type: recall_at_5\n value: 11.875\n - task:\n type: PairClassification\n dataset:\n name: MTEB SICK-E-PL\n type: PL-MTEB/sicke-pl-pairclassification\n config: default\n split: test\n revision: None\n metrics:\n - type: cos_sim_accuracy\n value: 86.30248675091724\n - type: cos_sim_ap\n value: 83.6756734006714\n - type: cos_sim_f1\n value: 74.97367497367497\n - type: cos_sim_precision\n value: 73.91003460207612\n - type: cos_sim_recall\n value: 76.06837606837607\n - type: dot_accuracy\n value: 86.30248675091724\n - type: dot_ap\n value: 83.6756734006714\n - type: dot_f1\n value: 74.97367497367497\n - type: dot_precision\n value: 73.91003460207612\n - type: dot_recall\n value: 76.06837606837607\n - type: euclidean_accuracy\n value: 86.30248675091724\n - type: euclidean_ap\n value: 83.67566984333091\n - type: euclidean_f1\n value: 74.97367497367497\n - type: euclidean_precision\n value: 73.91003460207612\n - type: euclidean_recall\n value: 76.06837606837607\n - type: manhattan_accuracy\n value: 86.28210354667753\n - type: manhattan_ap\n value: 83.64216119130171\n - type: manhattan_f1\n value: 74.92152075340078\n - type: manhattan_precision\n value: 73.4107997265892\n - type: 
manhattan_recall\n value: 76.49572649572649\n - type: max_accuracy\n value: 86.30248675091724\n - type: max_ap\n value: 83.6756734006714\n - type: max_f1\n value: 74.97367497367497\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R-PL\n type: PL-MTEB/sickr-pl-sts\n config: default\n split: test\n revision: None\n metrics:\n - type: cos_sim_pearson\n value: 82.23295940859121\n - type: cos_sim_spearman\n value: 78.89329160768719\n - type: euclidean_pearson\n value: 79.56019107076818\n - type: euclidean_spearman\n value: 78.89330209904084\n - type: manhattan_pearson\n value: 79.76098513973719\n - type: manhattan_spearman\n value: 79.05490162570123\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (pl)\n type: mteb/sts22-crosslingual-sts\n config: pl\n split: test\n revision: eea2b4fe26a775864c896887d910b76a8098ad3f\n metrics:\n - type: cos_sim_pearson\n value: 37.732606308062486\n - type: cos_sim_spearman\n value: 41.01645667030284\n - type: euclidean_pearson\n value: 26.61722556367085\n - type: euclidean_spearman\n value: 41.01645667030284\n - type: manhattan_pearson\n value: 26.60917378970807\n - type: manhattan_spearman\n value: 41.51335727617614\n - task:\n type: Retrieval\n dataset:\n name: MTEB SciFact-PL\n type: clarin-knext/scifact-pl\n config: default\n split: test\n revision: 47932a35f045ef8ed01ba82bf9ff67f6e109207e\n metrics:\n - type: map_at_1\n value: 54.31700000000001\n - type: map_at_10\n value: 65.564\n - type: map_at_100\n value: 66.062\n - type: map_at_1000\n value: 66.08699999999999\n - type: map_at_3\n value: 62.592999999999996\n - type: map_at_5\n value: 63.888\n - type: mrr_at_1\n value: 56.99999999999999\n - type: mrr_at_10\n value: 66.412\n - type: mrr_at_100\n value: 66.85900000000001\n - type: mrr_at_1000\n value: 66.88\n - type: mrr_at_3\n value: 64.22200000000001\n - type: mrr_at_5\n value: 65.206\n - type: ndcg_at_1\n value: 56.99999999999999\n - type: ndcg_at_10\n value: 70.577\n - type: ndcg_at_100\n value: 72.879\n - type: 
ndcg_at_1000\n value: 73.45\n - type: ndcg_at_3\n value: 65.5\n - type: ndcg_at_5\n value: 67.278\n - type: precision_at_1\n value: 56.99999999999999\n - type: precision_at_10\n value: 9.667\n - type: precision_at_100\n value: 1.083\n - type: precision_at_1000\n value: 0.11299999999999999\n - type: precision_at_3\n value: 26.0\n - type: precision_at_5\n value: 16.933\n - type: recall_at_1\n value: 54.31700000000001\n - type: recall_at_10\n value: 85.056\n - type: recall_at_100\n value: 95.667\n - type: recall_at_1000\n value: 100.0\n - type: recall_at_3\n value: 71.0\n - type: recall_at_5\n value: 75.672\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID-PL\n type: clarin-knext/trec-covid-pl\n config: default\n split: test\n revision: 81bcb408f33366c2a20ac54adafad1ae7e877fdd\n metrics:\n - type: map_at_1\n value: 0.245\n - type: map_at_10\n value: 2.051\n - type: map_at_100\n value: 12.009\n - type: map_at_1000\n value: 27.448\n - type: map_at_3\n value: 0.721\n - type: map_at_5\n value: 1.13\n - type: mrr_at_1\n value: 88.0\n - type: mrr_at_10\n value: 93.0\n - type: mrr_at_100\n value: 93.0\n - type: mrr_at_1000\n value: 93.0\n - type: mrr_at_3\n value: 93.0\n - type: mrr_at_5\n value: 93.0\n - type: ndcg_at_1\n value: 85.0\n - type: ndcg_at_10\n value: 80.303\n - type: ndcg_at_100\n value: 61.23499999999999\n - type: ndcg_at_1000\n value: 52.978\n - type: ndcg_at_3\n value: 84.419\n - type: ndcg_at_5\n value: 82.976\n - type: precision_at_1\n value: 88.0\n - type: precision_at_10\n value: 83.39999999999999\n - type: precision_at_100\n value: 61.96\n - type: precision_at_1000\n value: 22.648\n - type: precision_at_3\n value: 89.333\n - type: precision_at_5\n value: 87.2\n - type: recall_at_1\n value: 0.245\n - type: recall_at_10\n value: 2.193\n - type: recall_at_100\n value: 14.938\n - type: recall_at_1000\n value: 48.563\n - type: recall_at_3\n value: 0.738\n - type: recall_at_5\n value: 1.173\n---\n\n# 
niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF\nThis model was converted to GGUF format from [`Alibaba-NLP/gte-Qwen2-7B-instruct`](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.\nRefer to the [original model card](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) for more details on the model.\n\n## Use with llama.cpp\nInstall llama.cpp through brew (works on Mac and Linux)\n\n```bash\nbrew install llama.cpp\n\n```\nInvoke the llama.cpp server or the CLI.\n\n### CLI:\n```bash\nllama-cli --hf-repo niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF --hf-file gte-qwen2-7b-instruct-q4_k_m.gguf -p \"The meaning to life and the universe is\"\n```\n\n### Server:\n```bash\nllama-server --hf-repo niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF --hf-file gte-qwen2-7b-instruct-q4_k_m.gguf -c 2048\n```\n\nNote: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well.\n\nStep 1: Clone llama.cpp from GitHub.\n```\ngit clone https://github.com/ggerganov/llama.cpp\n```\n\nStep 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux).\n```\ncd llama.cpp && LLAMA_CURL=1 make\n```\n\nStep 3: Run inference through the main binary.\n```\n./llama-cli --hf-repo niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF --hf-file gte-qwen2-7b-instruct-q4_k_m.gguf -p \"The meaning to life and the universe is\"\n```\nor \n```\n./llama-server --hf-repo niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF --hf-file gte-qwen2-7b-instruct-q4_k_m.gguf -c 2048\n```\n"},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n 
\"SCIFACT\"\n]"}}},{"rowIdx":2491,"cells":{"id":{"kind":"string","value":"Dulfary/roberta-large-bne-capitel-ner_spanish"},"author":{"kind":"string","value":"Dulfary"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","tensorboard","safetensors","roberta","token-classification","generated_from_trainer","es","base_model:PlanTL-GOB-ES/roberta-large-bne-capitel-ner","base_model:finetune:PlanTL-GOB-ES/roberta-large-bne-capitel-ner","license:apache-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"tensorboard\",\n \"safetensors\",\n \"roberta\",\n \"token-classification\",\n \"generated_from_trainer\",\n \"es\",\n \"base_model:PlanTL-GOB-ES/roberta-large-bne-capitel-ner\",\n \"base_model:finetune:PlanTL-GOB-ES/roberta-large-bne-capitel-ner\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-25T05:00:28Z","string":"2024-07-25T05:00:28Z"},"last_modified":{"kind":"string","value":"2024-07-25T05:42:16+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: PlanTL-GOB-ES/roberta-large-bne-capitel-ner\nlanguage:\n- es\nlicense: apache-2.0\nmetrics:\n- precision\n- recall\n- f1\n- accuracy\ntags:\n- token-classification\n- generated_from_trainer\nwidget:\n- text: 'Karen Lopez, resisdente de la 105 # 58- 41, se encuentra en el área de cuidados\n intensivos'\nmodel-index:\n- name: roberta-large-bne-capitel-ner_spanish\n results: []\n---\n\n\n\n# roberta-large-bne-capitel-ner_spanish\n\nThis model is a fine-tuned version of [PlanTL-GOB-ES/roberta-large-bne-capitel-ner](https://huggingface.co/PlanTL-GOB-ES/roberta-large-bne-capitel-ner) on the MEDDOCAN dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.1915\n- Precision: 0.8269\n- 
Recall: 0.6719\n- F1: 0.7414\n- Accuracy: 0.9561\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 3\n\n### Training results\n\n\n\n### Framework versions\n\n- Transformers 4.42.4\n- Pytorch 2.3.1+cu121\n- Datasets 2.20.0\n- Tokenizers 0.19.1"},"matched_bigbio_names":{"kind":"list like","value":["MEDDOCAN"],"string":"[\n \"MEDDOCAN\"\n]"}}},{"rowIdx":2492,"cells":{"id":{"kind":"string","value":"huskyhong/noname-ai-v2_5"},"author":{"kind":"string","value":"huskyhong"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","qwen","text-generation","custom_code","autotrain_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"qwen\",\n \"text-generation\",\n \"custom_code\",\n \"autotrain_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-26T08:51:03Z","string":"2024-07-26T08:51:03Z"},"last_modified":{"kind":"string","value":"2024-08-09T14:16:29+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\n[切换到中文版本](README_zh.md) \n\n[Switch to English Version](README.md)\n\n# Noname AI\n\nProjects related to Noname AI/Noname AI, involving AI programs aimed at generating Noname skill codes by inputting skill effects.\n\n[modelscope Online Experience](https://www.modelscope.cn/studios/huskyhong/nonameai)\n\nDue to limited computing power, the online experience version is only a lightweight CPU version with limited precision. 
If needed, please choose the GPU version or full version for inference.\nFine-tuned from QWen.\n\n## Configuration Requirements\n\nTo better meet usage requirements, please try to meet the following requirements:\n\n- Computer (required)\n- Hard disk storage space of 20G or more (required)\n- If using the full non-quantized version/GPU version lazy one-click package, for computers with NVIDIA graphics cards, GPU inference is used, requiring half of the graphics memory + computer physical memory (physical memory does not include virtual memory) >= 16G\n- If using the full non-quantized version/CPU version lazy one-click package, CPU inference is used, requiring memory (including virtual memory) to be as close as possible to >= 32G for computers without graphics cards\n- If using the lightweight version/GPU version lightweight lazy one-click package, for computers with NVIDIA graphics cards, GPU inference is used, requiring half of the graphics memory + computer physical memory (physical memory does not include virtual memory) >= 4G\n- If using the lightweight version/CPU version lightweight lazy one-click package, CPU inference is used, requiring memory (including virtual memory) to be as close as possible to >= 12G for computers without graphics cards\n\n## Usage\n\n### Full Model Method\n\n1. Install Python and the corresponding Python compiler.\n - Note: Python compatible versions are 3.8, 3.9, 3.10, 3.11. Please do not install versions that are too high or too low.\n2. Enter the following command in the terminal to install the required environment:\n\n ```bash\n pip install -r requirements.txt\n ```\n\n3. Run the program using the following Python code. 
The model will be automatically downloaded, and the code defaults to version 2.0 full version.\n```python\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\nfrom transformers.generation import GenerationConfig\ntokenizer = AutoTokenizer.from_pretrained(\"huskyhong/noname-ai-v2_5\", trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained(\"huskyhong/noname-ai-v2_5\", device_map=\"auto\", trust_remote_code=True).eval() # Load the model using GPU\n# model = AutoModelForCausalLM.from_pretrained(\"huskyhong/noname-ai-v2_5\", device_map=\"cpu\", trust_remote_code=True).eval() # Load the model using CPU\n5model.generation_config = GenerationConfig.from_pretrained(\"huskyhong/noname-ai-v2_5\", trust_remote_code=True) # You can specify different generation lengths, top_p, 和 other related hyperparameters\n# For the first generation model, replace \"huskyhong/noname-ai-v2_5\" with \"huskyhong/noname-ai-v1\". For lightweight version v2.5 model, replace \"huskyhong/noname-ai-v2_5\" with \"huskyhong/noname-ai-v2_5-light\"\n\nprompt = \"请帮我编写一个技能,技能效果如下:\" + input(\"请输入技能效果:\")\nresponse, history = model.chat(tokenizer, prompt, history = [])\nprint(response)\n\nprompt = \"请帮我编写一张卡牌,卡牌效果如下::\" + input(\"请输入卡牌效果:\")\nresponse, history = model.chat(tokenizer, prompt, history = [])\nprint(response)\n```\nAlternatively, you can use Hugging Face's pipeline for inference.\n```python\nfrom transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, GenerationConfig\ngenerator = pipeline(\n \"text-generation\",\n model=\"huskyhong/noname-ai-v2_5\",\n tokenizer=\"huskyhong/noname-ai-v2_5\",\n device=0, # Choose GPU device. 
If you want to use CPU, you can set device=-1\n trust_remote_code=True\n)\n\nprompt = \"请帮我编写一个技能,技能效果如下:\" + input(\"请输入技能效果:\")\nresponse = generator(prompt, max_length=50, top_p=0.95) # You can adjust parameters such as generation length, top_p as needed\nprint(response[0]['generated_text'])\n\nprompt = \"请帮我编写一张卡牌,卡牌效果如下:\" + input(\"请输入卡牌效果:\")\nresponse = generator(prompt, max_length=50, top_p=0.95) # You can adjust parameters such as generation length, top_p as needed\nprint(response[0]['generated_text'])\n```\n\n4. If automatic downloading fails, you can manually download the model files and modify \"huskyhong/noname-ai-v2\" to the corresponding location in the code. \n Download links for the second-generation model: \n - [v2.5 Hugging Face address (full version)](https://huggingface.co/huskyhong/noname-ai-v2_5)\n - [v2.5 Hugging Face address (lightweight version)](https://huggingface.co/huskyhong/noname-ai-v2_5-light)\n - [Baidu Netdisk address](https://pan.baidu.com/s/1m9RfGqnuQbRYROE_UzuG-Q?pwd=6666) Baidu Netdisk extraction code: 6666 \n Download links for the first-generation model:\n - [Hugging Face address](https://huggingface.co/huskyhong/noname-ai-v1)\n - [Baidu Netdisk address](https://pan.baidu.com/s/1Ox471XuHF_gJbcPPnSZe7g?pwd=6666) Baidu Netdisk extraction code: 6666 \nRemember to choose whether to load the model using GPU or CPU, and replace `your_model_name` with your actual model path.\n\n## Lazy One-Click Package\n\n- One-click installation, no worries. \n- Please choose the appropriate lazy one-click package according to your own configuration. 
\n- [Lazy One-Click Package Baidu Netdisk Download Address (Updated to v2.5)](https://pan.baidu.com/s/1zIcRZtQv5oIdu7_abie9Vw?pwd=6666) Baidu Netdisk extraction code: 6666 \n- [Lazy One-Click Package 123 Netdisk Download Address (Updated to v2.5)](https://www.123pan.com/s/lOcnjv-pnOG3.html) 123 Netdisk extraction code: 6666 \n- Please pay attention to the version time of the lazy one-click package to ensure that the version is the latest! \n- Lazy package related videos \n- [Comparison of Effects of Lazy Package v2.5](https://www.bilibili.com/video/BV1KKY4e8EaC/) \n\n## Web Version/Server Deployment\n - Install Python \n - Install dependencies \n ```bash\n pip install -r requirements.txt\n ``` \n - Install Streamlit\n ```bash\n pip install streamlit\n ```\n - Allow port 8501 on the server (can also be changed to others, corresponding to webdemo.py file)\n - Run webdemo\n ```bash\n streamlit run webdemo.py\n ```\n## Training \nTraining requires installing new dependencies: \n```python\npip install peft deepspeed\n``` \nClone the project和download the v2.3 version of the model files, taking the lightweight version as an example: \n```bash\ngit lfs install\ngit clone https://github.com/204313508/noname_llm.git\ngit clone https://huggingface.co/huskyhong/noname-ai-v2_3-light\ncd noname_llm/finetune\n``` \nModify the parameters required for training in the finetune script, such as model and dataset locations, then enter the following command to start training: \n```bash\nbash finetune.sh\n``` \nPlease refer to the [Fine-tuning Guide](./finetune/README.md) for detailed steps.\n \n## Web Version/Server Example\n![webdemo1](./webdemo1.png)\n![webdemo2](./webdemo2.png)\n\n## Notes\n\n- AI generation is subject to uncontrollable factors, and the generated code does not guarantee 100% effectiveness. Bugs, redundant code, or additional special characters may still occur and require manual modification.\n- (Important) Follow AI specifications. 
This AI model is for learning and communication purposes only. Please do not use it for illegal or commercial purposes. The purpose of releasing this model is to encourage better learning and communication, and all related information involved in the model is public. I bear no responsibility for malicious use of this AI model.\n\n## Other Content\n\nIf you have any related questions, please raise them in the official GitHub issue.\n\n## Demo Images\nThese demo images are based on version 2.3 release. \n![demo](./demo.png)\n\n\n## Sponsorship\n- Shamelessly begging for sponsorship\n![sponsor](./sponsor.jpg)\n"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2493,"cells":{"id":{"kind":"string","value":"BookingCare/multilingual-e5-base-similarity-v1-onnx-quantized"},"author":{"kind":"string","value":"BookingCare"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","onnx","xlm-roberta","mteb","Sentence Transformers","sentence-similarity","multilingual","af","am","ar","as","az","be","bg","bn","br","bs","ca","cs","cy","da","de","el","en","eo","es","et","eu","fa","fi","fr","fy","ga","gd","gl","gu","ha","he","hi","hr","hu","hy","id","is","it","ja","jv","ka","kk","km","kn","ko","ku","ky","la","lo","lt","lv","mg","mk","ml","mn","mr","ms","my","ne","nl","no","om","or","pa","pl","ps","pt","ro","ru","sa","sd","si","sk","sl","so","sq","sr","su","sv","sw","ta","te","th","tl","tr","ug","uk","ur","uz","vi","xh","yi","zh","arxiv:2402.05672","arxiv:2108.08787","arxiv:2104.08663","arxiv:2210.07316","license:mit","model-index","autotrain_compatible","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"onnx\",\n \"xlm-roberta\",\n \"mteb\",\n \"Sentence Transformers\",\n \"sentence-similarity\",\n \"multilingual\",\n \"af\",\n \"am\",\n \"ar\",\n \"as\",\n \"az\",\n \"be\",\n \"bg\",\n \"bn\",\n \"br\",\n 
\"bs\",\n \"ca\",\n \"cs\",\n \"cy\",\n \"da\",\n \"de\",\n \"el\",\n \"en\",\n \"eo\",\n \"es\",\n \"et\",\n \"eu\",\n \"fa\",\n \"fi\",\n \"fr\",\n \"fy\",\n \"ga\",\n \"gd\",\n \"gl\",\n \"gu\",\n \"ha\",\n \"he\",\n \"hi\",\n \"hr\",\n \"hu\",\n \"hy\",\n \"id\",\n \"is\",\n \"it\",\n \"ja\",\n \"jv\",\n \"ka\",\n \"kk\",\n \"km\",\n \"kn\",\n \"ko\",\n \"ku\",\n \"ky\",\n \"la\",\n \"lo\",\n \"lt\",\n \"lv\",\n \"mg\",\n \"mk\",\n \"ml\",\n \"mn\",\n \"mr\",\n \"ms\",\n \"my\",\n \"ne\",\n \"nl\",\n \"no\",\n \"om\",\n \"or\",\n \"pa\",\n \"pl\",\n \"ps\",\n \"pt\",\n \"ro\",\n \"ru\",\n \"sa\",\n \"sd\",\n \"si\",\n \"sk\",\n \"sl\",\n \"so\",\n \"sq\",\n \"sr\",\n \"su\",\n \"sv\",\n \"sw\",\n \"ta\",\n \"te\",\n \"th\",\n \"tl\",\n \"tr\",\n \"ug\",\n \"uk\",\n \"ur\",\n \"uz\",\n \"vi\",\n \"xh\",\n \"yi\",\n \"zh\",\n \"arxiv:2402.05672\",\n \"arxiv:2108.08787\",\n \"arxiv:2104.08663\",\n \"arxiv:2210.07316\",\n \"license:mit\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-06T09:48:26Z","string":"2024-08-06T09:48:26Z"},"last_modified":{"kind":"string","value":"2024-12-05T08:51:34+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlanguage:\n- multilingual\n- af\n- am\n- ar\n- as\n- az\n- be\n- bg\n- bn\n- br\n- bs\n- ca\n- cs\n- cy\n- da\n- de\n- el\n- en\n- eo\n- es\n- et\n- eu\n- fa\n- fi\n- fr\n- fy\n- ga\n- gd\n- gl\n- gu\n- ha\n- he\n- hi\n- hr\n- hu\n- hy\n- id\n- is\n- it\n- ja\n- jv\n- ka\n- kk\n- km\n- kn\n- ko\n- ku\n- ky\n- la\n- lo\n- lt\n- lv\n- mg\n- mk\n- ml\n- mn\n- mr\n- ms\n- my\n- ne\n- nl\n- 'no'\n- om\n- or\n- pa\n- pl\n- ps\n- pt\n- ro\n- ru\n- sa\n- sd\n- si\n- sk\n- sl\n- so\n- sq\n- sr\n- su\n- sv\n- sw\n- ta\n- te\n- th\n- tl\n- tr\n- ug\n- uk\n- ur\n- uz\n- vi\n- xh\n- yi\n- zh\nlicense: 
mit\ntags:\n- mteb\n- Sentence Transformers\n- sentence-similarity\n- sentence-transformers\nmodel-index:\n- name: multilingual-e5-base\n results:\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (en)\n type: mteb/amazon_counterfactual\n config: en\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 78.97014925373135\n - type: ap\n value: 43.69351129103008\n - type: f1\n value: 73.38075030070492\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (de)\n type: mteb/amazon_counterfactual\n config: de\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 71.7237687366167\n - type: ap\n value: 82.22089859962671\n - type: f1\n value: 69.95532758884401\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (en-ext)\n type: mteb/amazon_counterfactual\n config: en-ext\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 79.65517241379312\n - type: ap\n value: 28.507918657094738\n - type: f1\n value: 66.84516013726119\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (ja)\n type: mteb/amazon_counterfactual\n config: ja\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 73.32976445396146\n - type: ap\n value: 20.720481637566014\n - type: f1\n value: 59.78002763416003\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonPolarityClassification\n type: mteb/amazon_polarity\n config: default\n split: test\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\n metrics:\n - type: accuracy\n value: 90.63775\n - type: ap\n value: 87.22277903861716\n - type: f1\n value: 90.60378636386807\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (en)\n type: 
mteb/amazon_reviews_multi\n config: en\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 44.546\n - type: f1\n value: 44.05666638370923\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (de)\n type: mteb/amazon_reviews_multi\n config: de\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 41.828\n - type: f1\n value: 41.2710255644252\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (es)\n type: mteb/amazon_reviews_multi\n config: es\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 40.534\n - type: f1\n value: 39.820743174270326\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (fr)\n type: mteb/amazon_reviews_multi\n config: fr\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 39.684\n - type: f1\n value: 39.11052682815307\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (ja)\n type: mteb/amazon_reviews_multi\n config: ja\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 37.436\n - type: f1\n value: 37.07082931930871\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (zh)\n type: mteb/amazon_reviews_multi\n config: zh\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 37.226000000000006\n - type: f1\n value: 36.65372077739185\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna\n type: arguana\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 22.831000000000003\n - type: map_at_10\n value: 36.42\n - type: map_at_100\n value: 37.699\n - type: map_at_1000\n value: 37.724000000000004\n - type: map_at_3\n value: 32.207\n - 
type: map_at_5\n value: 34.312\n - type: mrr_at_1\n value: 23.257\n - type: mrr_at_10\n value: 36.574\n - type: mrr_at_100\n value: 37.854\n - type: mrr_at_1000\n value: 37.878\n - type: mrr_at_3\n value: 32.385000000000005\n - type: mrr_at_5\n value: 34.48\n - type: ndcg_at_1\n value: 22.831000000000003\n - type: ndcg_at_10\n value: 44.230000000000004\n - type: ndcg_at_100\n value: 49.974000000000004\n - type: ndcg_at_1000\n value: 50.522999999999996\n - type: ndcg_at_3\n value: 35.363\n - type: ndcg_at_5\n value: 39.164\n - type: precision_at_1\n value: 22.831000000000003\n - type: precision_at_10\n value: 6.935\n - type: precision_at_100\n value: 0.9520000000000001\n - type: precision_at_1000\n value: 0.099\n - type: precision_at_3\n value: 14.841\n - type: precision_at_5\n value: 10.754\n - type: recall_at_1\n value: 22.831000000000003\n - type: recall_at_10\n value: 69.346\n - type: recall_at_100\n value: 95.235\n - type: recall_at_1000\n value: 99.36\n - type: recall_at_3\n value: 44.523\n - type: recall_at_5\n value: 53.769999999999996\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: v_measure\n value: 40.27789869854063\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: v_measure\n value: 35.41979463347428\n - task:\n type: Reranking\n dataset:\n name: MTEB AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: map\n value: 58.22752045109304\n - type: mrr\n value: 71.51112430198303\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: 
d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cos_sim_pearson\n value: 84.71147646622866\n - type: cos_sim_spearman\n value: 85.059167046486\n - type: euclidean_pearson\n value: 75.88421613600647\n - type: euclidean_spearman\n value: 75.12821787150585\n - type: manhattan_pearson\n value: 75.22005646957604\n - type: manhattan_spearman\n value: 74.42880434453272\n - task:\n type: BitextMining\n dataset:\n name: MTEB BUCC (de-en)\n type: mteb/bucc-bitext-mining\n config: de-en\n split: test\n revision: d51519689f32196a32af33b075a01d0e7c51e252\n metrics:\n - type: accuracy\n value: 99.23799582463465\n - type: f1\n value: 99.12665274878218\n - type: precision\n value: 99.07098121085595\n - type: recall\n value: 99.23799582463465\n - task:\n type: BitextMining\n dataset:\n name: MTEB BUCC (fr-en)\n type: mteb/bucc-bitext-mining\n config: fr-en\n split: test\n revision: d51519689f32196a32af33b075a01d0e7c51e252\n metrics:\n - type: accuracy\n value: 97.88685890380806\n - type: f1\n value: 97.59336708489249\n - type: precision\n value: 97.44662117543473\n - type: recall\n value: 97.88685890380806\n - task:\n type: BitextMining\n dataset:\n name: MTEB BUCC (ru-en)\n type: mteb/bucc-bitext-mining\n config: ru-en\n split: test\n revision: d51519689f32196a32af33b075a01d0e7c51e252\n metrics:\n - type: accuracy\n value: 97.47142362313821\n - type: f1\n value: 97.1989377670015\n - type: precision\n value: 97.06384944001847\n - type: recall\n value: 97.47142362313821\n - task:\n type: BitextMining\n dataset:\n name: MTEB BUCC (zh-en)\n type: mteb/bucc-bitext-mining\n config: zh-en\n split: test\n revision: d51519689f32196a32af33b075a01d0e7c51e252\n metrics:\n - type: accuracy\n value: 98.4728804634018\n - type: f1\n value: 98.2973494821836\n - type: precision\n value: 98.2095839915745\n - type: recall\n value: 98.4728804634018\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n 
revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 82.74025974025975\n - type: f1\n value: 82.67420447730439\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: v_measure\n value: 35.0380848063507\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: v_measure\n value: 29.45956405670166\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackAndroidRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 32.122\n - type: map_at_10\n value: 42.03\n - type: map_at_100\n value: 43.364000000000004\n - type: map_at_1000\n value: 43.474000000000004\n - type: map_at_3\n value: 38.804\n - type: map_at_5\n value: 40.585\n - type: mrr_at_1\n value: 39.914\n - type: mrr_at_10\n value: 48.227\n - type: mrr_at_100\n value: 49.018\n - type: mrr_at_1000\n value: 49.064\n - type: mrr_at_3\n value: 45.994\n - type: mrr_at_5\n value: 47.396\n - type: ndcg_at_1\n value: 39.914\n - type: ndcg_at_10\n value: 47.825\n - type: ndcg_at_100\n value: 52.852\n - type: ndcg_at_1000\n value: 54.891\n - type: ndcg_at_3\n value: 43.517\n - type: ndcg_at_5\n value: 45.493\n - type: precision_at_1\n value: 39.914\n - type: precision_at_10\n value: 8.956\n - type: precision_at_100\n value: 1.388\n - type: precision_at_1000\n value: 0.182\n - type: precision_at_3\n value: 20.791999999999998\n - type: precision_at_5\n value: 14.821000000000002\n - type: recall_at_1\n value: 32.122\n - type: recall_at_10\n value: 58.294999999999995\n - type: recall_at_100\n value: 79.726\n - type: recall_at_1000\n value: 93.099\n - type: recall_at_3\n value: 45.017\n - type: recall_at_5\n 
value: 51.002\n - type: map_at_1\n value: 29.677999999999997\n - type: map_at_10\n value: 38.684000000000005\n - type: map_at_100\n value: 39.812999999999995\n - type: map_at_1000\n value: 39.945\n - type: map_at_3\n value: 35.831\n - type: map_at_5\n value: 37.446\n - type: mrr_at_1\n value: 37.771\n - type: mrr_at_10\n value: 44.936\n - type: mrr_at_100\n value: 45.583\n - type: mrr_at_1000\n value: 45.634\n - type: mrr_at_3\n value: 42.771\n - type: mrr_at_5\n value: 43.994\n - type: ndcg_at_1\n value: 37.771\n - type: ndcg_at_10\n value: 44.059\n - type: ndcg_at_100\n value: 48.192\n - type: ndcg_at_1000\n value: 50.375\n - type: ndcg_at_3\n value: 40.172000000000004\n - type: ndcg_at_5\n value: 41.899\n - type: precision_at_1\n value: 37.771\n - type: precision_at_10\n value: 8.286999999999999\n - type: precision_at_100\n value: 1.322\n - type: precision_at_1000\n value: 0.178\n - type: precision_at_3\n value: 19.406000000000002\n - type: precision_at_5\n value: 13.745\n - type: recall_at_1\n value: 29.677999999999997\n - type: recall_at_10\n value: 53.071\n - type: recall_at_100\n value: 70.812\n - type: recall_at_1000\n value: 84.841\n - type: recall_at_3\n value: 41.016000000000005\n - type: recall_at_5\n value: 46.22\n - type: map_at_1\n value: 42.675000000000004\n - type: map_at_10\n value: 53.93599999999999\n - type: map_at_100\n value: 54.806999999999995\n - type: map_at_1000\n value: 54.867\n - type: map_at_3\n value: 50.934000000000005\n - type: map_at_5\n value: 52.583\n - type: mrr_at_1\n value: 48.339\n - type: mrr_at_10\n value: 57.265\n - type: mrr_at_100\n value: 57.873\n - type: mrr_at_1000\n value: 57.906\n - type: mrr_at_3\n value: 55.193000000000005\n - type: mrr_at_5\n value: 56.303000000000004\n - type: ndcg_at_1\n value: 48.339\n - type: ndcg_at_10\n value: 59.19799999999999\n - type: ndcg_at_100\n value: 62.743\n - type: ndcg_at_1000\n value: 63.99399999999999\n - type: ndcg_at_3\n value: 54.367\n - type: ndcg_at_5\n value: 56.548\n - 
type: precision_at_1\n value: 48.339\n - type: precision_at_10\n value: 9.216000000000001\n - type: precision_at_100\n value: 1.1809999999999998\n - type: precision_at_1000\n value: 0.134\n - type: precision_at_3\n value: 23.72\n - type: precision_at_5\n value: 16.025\n - type: recall_at_1\n value: 42.675000000000004\n - type: recall_at_10\n value: 71.437\n - type: recall_at_100\n value: 86.803\n - type: recall_at_1000\n value: 95.581\n - type: recall_at_3\n value: 58.434\n - type: recall_at_5\n value: 63.754\n - type: map_at_1\n value: 23.518\n - type: map_at_10\n value: 30.648999999999997\n - type: map_at_100\n value: 31.508999999999997\n - type: map_at_1000\n value: 31.604\n - type: map_at_3\n value: 28.247\n - type: map_at_5\n value: 29.65\n - type: mrr_at_1\n value: 25.650000000000002\n - type: mrr_at_10\n value: 32.771\n - type: mrr_at_100\n value: 33.554\n - type: mrr_at_1000\n value: 33.629999999999995\n - type: mrr_at_3\n value: 30.433\n - type: mrr_at_5\n value: 31.812\n - type: ndcg_at_1\n value: 25.650000000000002\n - type: ndcg_at_10\n value: 34.929\n - type: ndcg_at_100\n value: 39.382\n - type: ndcg_at_1000\n value: 41.913\n - type: ndcg_at_3\n value: 30.292\n - type: ndcg_at_5\n value: 32.629999999999995\n - type: precision_at_1\n value: 25.650000000000002\n - type: precision_at_10\n value: 5.311\n - type: precision_at_100\n value: 0.792\n - type: precision_at_1000\n value: 0.105\n - type: precision_at_3\n value: 12.58\n - type: precision_at_5\n value: 8.994\n - type: recall_at_1\n value: 23.518\n - type: recall_at_10\n value: 46.19\n - type: recall_at_100\n value: 67.123\n - type: recall_at_1000\n value: 86.442\n - type: recall_at_3\n value: 33.678000000000004\n - type: recall_at_5\n value: 39.244\n - type: map_at_1\n value: 15.891\n - type: map_at_10\n value: 22.464000000000002\n - type: map_at_100\n value: 23.483\n - type: map_at_1000\n value: 23.613\n - type: map_at_3\n value: 20.080000000000002\n - type: map_at_5\n value: 21.526\n - type: 
mrr_at_1\n value: 20.025000000000002\n - type: mrr_at_10\n value: 26.712999999999997\n - type: mrr_at_100\n value: 27.650000000000002\n - type: mrr_at_1000\n value: 27.737000000000002\n - type: mrr_at_3\n value: 24.274\n - type: mrr_at_5\n value: 25.711000000000002\n - type: ndcg_at_1\n value: 20.025000000000002\n - type: ndcg_at_10\n value: 27.028999999999996\n - type: ndcg_at_100\n value: 32.064\n - type: ndcg_at_1000\n value: 35.188\n - type: ndcg_at_3\n value: 22.512999999999998\n - type: ndcg_at_5\n value: 24.89\n - type: precision_at_1\n value: 20.025000000000002\n - type: precision_at_10\n value: 4.776\n - type: precision_at_100\n value: 0.8500000000000001\n - type: precision_at_1000\n value: 0.125\n - type: precision_at_3\n value: 10.531\n - type: precision_at_5\n value: 7.811\n - type: recall_at_1\n value: 15.891\n - type: recall_at_10\n value: 37.261\n - type: recall_at_100\n value: 59.12\n - type: recall_at_1000\n value: 81.356\n - type: recall_at_3\n value: 24.741\n - type: recall_at_5\n value: 30.753999999999998\n - type: map_at_1\n value: 27.544\n - type: map_at_10\n value: 36.283\n - type: map_at_100\n value: 37.467\n - type: map_at_1000\n value: 37.574000000000005\n - type: map_at_3\n value: 33.528999999999996\n - type: map_at_5\n value: 35.028999999999996\n - type: mrr_at_1\n value: 34.166999999999994\n - type: mrr_at_10\n value: 41.866\n - type: mrr_at_100\n value: 42.666\n - type: mrr_at_1000\n value: 42.716\n - type: mrr_at_3\n value: 39.541\n - type: mrr_at_5\n value: 40.768\n - type: ndcg_at_1\n value: 34.166999999999994\n - type: ndcg_at_10\n value: 41.577\n - type: ndcg_at_100\n value: 46.687\n - type: ndcg_at_1000\n value: 48.967\n - type: ndcg_at_3\n value: 37.177\n - type: ndcg_at_5\n value: 39.097\n - type: precision_at_1\n value: 34.166999999999994\n - type: precision_at_10\n value: 7.420999999999999\n - type: precision_at_100\n value: 1.165\n - type: precision_at_1000\n value: 0.154\n - type: precision_at_3\n value: 
17.291999999999998\n - type: precision_at_5\n value: 12.166\n - type: recall_at_1\n value: 27.544\n - type: recall_at_10\n value: 51.99399999999999\n - type: recall_at_100\n value: 73.738\n - type: recall_at_1000\n value: 89.33\n - type: recall_at_3\n value: 39.179\n - type: recall_at_5\n value: 44.385999999999996\n - type: map_at_1\n value: 26.661\n - type: map_at_10\n value: 35.475\n - type: map_at_100\n value: 36.626999999999995\n - type: map_at_1000\n value: 36.741\n - type: map_at_3\n value: 32.818000000000005\n - type: map_at_5\n value: 34.397\n - type: mrr_at_1\n value: 32.647999999999996\n - type: mrr_at_10\n value: 40.784\n - type: mrr_at_100\n value: 41.602\n - type: mrr_at_1000\n value: 41.661\n - type: mrr_at_3\n value: 38.68\n - type: mrr_at_5\n value: 39.838\n - type: ndcg_at_1\n value: 32.647999999999996\n - type: ndcg_at_10\n value: 40.697\n - type: ndcg_at_100\n value: 45.799\n - type: ndcg_at_1000\n value: 48.235\n - type: ndcg_at_3\n value: 36.516\n - type: ndcg_at_5\n value: 38.515\n - type: precision_at_1\n value: 32.647999999999996\n - type: precision_at_10\n value: 7.202999999999999\n - type: precision_at_100\n value: 1.1360000000000001\n - type: precision_at_1000\n value: 0.151\n - type: precision_at_3\n value: 17.314\n - type: precision_at_5\n value: 12.145999999999999\n - type: recall_at_1\n value: 26.661\n - type: recall_at_10\n value: 50.995000000000005\n - type: recall_at_100\n value: 73.065\n - type: recall_at_1000\n value: 89.781\n - type: recall_at_3\n value: 39.073\n - type: recall_at_5\n value: 44.395\n - type: map_at_1\n value: 25.946583333333333\n - type: map_at_10\n value: 33.79725\n - type: map_at_100\n value: 34.86408333333333\n - type: map_at_1000\n value: 34.9795\n - type: map_at_3\n value: 31.259999999999998\n - type: map_at_5\n value: 32.71541666666666\n - type: mrr_at_1\n value: 30.863749999999996\n - type: mrr_at_10\n value: 37.99183333333333\n - type: mrr_at_100\n value: 38.790499999999994\n - type: mrr_at_1000\n value: 
38.85575000000001\n - type: mrr_at_3\n value: 35.82083333333333\n - type: mrr_at_5\n value: 37.07533333333333\n - type: ndcg_at_1\n value: 30.863749999999996\n - type: ndcg_at_10\n value: 38.52141666666667\n - type: ndcg_at_100\n value: 43.17966666666667\n - type: ndcg_at_1000\n value: 45.64608333333333\n - type: ndcg_at_3\n value: 34.333000000000006\n - type: ndcg_at_5\n value: 36.34975\n - type: precision_at_1\n value: 30.863749999999996\n - type: precision_at_10\n value: 6.598999999999999\n - type: precision_at_100\n value: 1.0502500000000001\n - type: precision_at_1000\n value: 0.14400000000000002\n - type: precision_at_3\n value: 15.557583333333334\n - type: precision_at_5\n value: 11.020000000000001\n - type: recall_at_1\n value: 25.946583333333333\n - type: recall_at_10\n value: 48.36991666666666\n - type: recall_at_100\n value: 69.02408333333334\n - type: recall_at_1000\n value: 86.43858333333331\n - type: recall_at_3\n value: 36.4965\n - type: recall_at_5\n value: 41.76258333333334\n - type: map_at_1\n value: 22.431\n - type: map_at_10\n value: 28.889\n - type: map_at_100\n value: 29.642000000000003\n - type: map_at_1000\n value: 29.742\n - type: map_at_3\n value: 26.998\n - type: map_at_5\n value: 28.172000000000004\n - type: mrr_at_1\n value: 25.307000000000002\n - type: mrr_at_10\n value: 31.763\n - type: mrr_at_100\n value: 32.443\n - type: mrr_at_1000\n value: 32.531\n - type: mrr_at_3\n value: 29.959000000000003\n - type: mrr_at_5\n value: 31.063000000000002\n - type: ndcg_at_1\n value: 25.307000000000002\n - type: ndcg_at_10\n value: 32.586999999999996\n - type: ndcg_at_100\n value: 36.5\n - type: ndcg_at_1000\n value: 39.133\n - type: ndcg_at_3\n value: 29.25\n - type: ndcg_at_5\n value: 31.023\n - type: precision_at_1\n value: 25.307000000000002\n - type: precision_at_10\n value: 4.954\n - type: precision_at_100\n value: 0.747\n - type: precision_at_1000\n value: 0.104\n - type: precision_at_3\n value: 12.577\n - type: precision_at_5\n value: 
8.741999999999999\n - type: recall_at_1\n value: 22.431\n - type: recall_at_10\n value: 41.134\n - type: recall_at_100\n value: 59.28600000000001\n - type: recall_at_1000\n value: 78.857\n - type: recall_at_3\n value: 31.926\n - type: recall_at_5\n value: 36.335\n - type: map_at_1\n value: 17.586\n - type: map_at_10\n value: 23.304\n - type: map_at_100\n value: 24.159\n - type: map_at_1000\n value: 24.281\n - type: map_at_3\n value: 21.316\n - type: map_at_5\n value: 22.383\n - type: mrr_at_1\n value: 21.645\n - type: mrr_at_10\n value: 27.365000000000002\n - type: mrr_at_100\n value: 28.108\n - type: mrr_at_1000\n value: 28.192\n - type: mrr_at_3\n value: 25.482\n - type: mrr_at_5\n value: 26.479999999999997\n - type: ndcg_at_1\n value: 21.645\n - type: ndcg_at_10\n value: 27.306\n - type: ndcg_at_100\n value: 31.496000000000002\n - type: ndcg_at_1000\n value: 34.53\n - type: ndcg_at_3\n value: 23.73\n - type: ndcg_at_5\n value: 25.294\n - type: precision_at_1\n value: 21.645\n - type: precision_at_10\n value: 4.797\n - type: precision_at_100\n value: 0.8059999999999999\n - type: precision_at_1000\n value: 0.121\n - type: precision_at_3\n value: 10.850999999999999\n - type: precision_at_5\n value: 7.736\n - type: recall_at_1\n value: 17.586\n - type: recall_at_10\n value: 35.481\n - type: recall_at_100\n value: 54.534000000000006\n - type: recall_at_1000\n value: 76.456\n - type: recall_at_3\n value: 25.335\n - type: recall_at_5\n value: 29.473\n - type: map_at_1\n value: 25.095\n - type: map_at_10\n value: 32.374\n - type: map_at_100\n value: 33.537\n - type: map_at_1000\n value: 33.634\n - type: map_at_3\n value: 30.089\n - type: map_at_5\n value: 31.433\n - type: mrr_at_1\n value: 29.198\n - type: mrr_at_10\n value: 36.01\n - type: mrr_at_100\n value: 37.022\n - type: mrr_at_1000\n value: 37.083\n - type: mrr_at_3\n value: 33.94\n - type: mrr_at_5\n value: 35.148\n - type: ndcg_at_1\n value: 29.198\n - type: ndcg_at_10\n value: 36.729\n - type: ndcg_at_100\n 
value: 42.114000000000004\n - type: ndcg_at_1000\n value: 44.592\n - type: ndcg_at_3\n value: 32.644\n - type: ndcg_at_5\n value: 34.652\n - type: precision_at_1\n value: 29.198\n - type: precision_at_10\n value: 5.970000000000001\n - type: precision_at_100\n value: 0.967\n - type: precision_at_1000\n value: 0.129\n - type: precision_at_3\n value: 14.396999999999998\n - type: precision_at_5\n value: 10.093\n - type: recall_at_1\n value: 25.095\n - type: recall_at_10\n value: 46.392\n - type: recall_at_100\n value: 69.706\n - type: recall_at_1000\n value: 87.738\n - type: recall_at_3\n value: 35.303000000000004\n - type: recall_at_5\n value: 40.441\n - type: map_at_1\n value: 26.857999999999997\n - type: map_at_10\n value: 34.066\n - type: map_at_100\n value: 35.671\n - type: map_at_1000\n value: 35.881\n - type: map_at_3\n value: 31.304\n - type: map_at_5\n value: 32.885\n - type: mrr_at_1\n value: 32.411\n - type: mrr_at_10\n value: 38.987\n - type: mrr_at_100\n value: 39.894\n - type: mrr_at_1000\n value: 39.959\n - type: mrr_at_3\n value: 36.626999999999995\n - type: mrr_at_5\n value: 38.011\n - type: ndcg_at_1\n value: 32.411\n - type: ndcg_at_10\n value: 39.208\n - type: ndcg_at_100\n value: 44.626\n - type: ndcg_at_1000\n value: 47.43\n - type: ndcg_at_3\n value: 35.091\n - type: ndcg_at_5\n value: 37.119\n - type: precision_at_1\n value: 32.411\n - type: precision_at_10\n value: 7.51\n - type: precision_at_100\n value: 1.486\n - type: precision_at_1000\n value: 0.234\n - type: precision_at_3\n value: 16.14\n - type: precision_at_5\n value: 11.976\n - type: recall_at_1\n value: 26.857999999999997\n - type: recall_at_10\n value: 47.407\n - type: recall_at_100\n value: 72.236\n - type: recall_at_1000\n value: 90.77\n - type: recall_at_3\n value: 35.125\n - type: recall_at_5\n value: 40.522999999999996\n - type: map_at_1\n value: 21.3\n - type: map_at_10\n value: 27.412999999999997\n - type: map_at_100\n value: 28.29\n - type: map_at_1000\n value: 28.398\n - 
type: map_at_3\n value: 25.169999999999998\n - type: map_at_5\n value: 26.496\n - type: mrr_at_1\n value: 23.29\n - type: mrr_at_10\n value: 29.215000000000003\n - type: mrr_at_100\n value: 30.073\n - type: mrr_at_1000\n value: 30.156\n - type: mrr_at_3\n value: 26.956000000000003\n - type: mrr_at_5\n value: 28.38\n - type: ndcg_at_1\n value: 23.29\n - type: ndcg_at_10\n value: 31.113000000000003\n - type: ndcg_at_100\n value: 35.701\n - type: ndcg_at_1000\n value: 38.505\n - type: ndcg_at_3\n value: 26.727\n - type: ndcg_at_5\n value: 29.037000000000003\n - type: precision_at_1\n value: 23.29\n - type: precision_at_10\n value: 4.787\n - type: precision_at_100\n value: 0.763\n - type: precision_at_1000\n value: 0.11100000000000002\n - type: precision_at_3\n value: 11.091\n - type: precision_at_5\n value: 7.985\n - type: recall_at_1\n value: 21.3\n - type: recall_at_10\n value: 40.782000000000004\n - type: recall_at_100\n value: 62.13999999999999\n - type: recall_at_1000\n value: 83.012\n - type: recall_at_3\n value: 29.131\n - type: recall_at_5\n value: 34.624\n - task:\n type: Retrieval\n dataset:\n name: MTEB ClimateFEVER\n type: climate-fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 9.631\n - type: map_at_10\n value: 16.634999999999998\n - type: map_at_100\n value: 18.23\n - type: map_at_1000\n value: 18.419\n - type: map_at_3\n value: 13.66\n - type: map_at_5\n value: 15.173\n - type: mrr_at_1\n value: 21.368000000000002\n - type: mrr_at_10\n value: 31.56\n - type: mrr_at_100\n value: 32.58\n - type: mrr_at_1000\n value: 32.633\n - type: mrr_at_3\n value: 28.241\n - type: mrr_at_5\n value: 30.225\n - type: ndcg_at_1\n value: 21.368000000000002\n - type: ndcg_at_10\n value: 23.855999999999998\n - type: ndcg_at_100\n value: 30.686999999999998\n - type: ndcg_at_1000\n value: 34.327000000000005\n - type: ndcg_at_3\n value: 18.781\n - type: ndcg_at_5\n value: 20.73\n - type: precision_at_1\n value: 21.368000000000002\n 
- type: precision_at_10\n value: 7.564\n - type: precision_at_100\n value: 1.496\n - type: precision_at_1000\n value: 0.217\n - type: precision_at_3\n value: 13.876\n - type: precision_at_5\n value: 11.062\n - type: recall_at_1\n value: 9.631\n - type: recall_at_10\n value: 29.517\n - type: recall_at_100\n value: 53.452\n - type: recall_at_1000\n value: 74.115\n - type: recall_at_3\n value: 17.605999999999998\n - type: recall_at_5\n value: 22.505\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia\n type: dbpedia-entity\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 8.885\n - type: map_at_10\n value: 18.798000000000002\n - type: map_at_100\n value: 26.316\n - type: map_at_1000\n value: 27.869\n - type: map_at_3\n value: 13.719000000000001\n - type: map_at_5\n value: 15.716\n - type: mrr_at_1\n value: 66\n - type: mrr_at_10\n value: 74.263\n - type: mrr_at_100\n value: 74.519\n - type: mrr_at_1000\n value: 74.531\n - type: mrr_at_3\n value: 72.458\n - type: mrr_at_5\n value: 73.321\n - type: ndcg_at_1\n value: 53.87499999999999\n - type: ndcg_at_10\n value: 40.355999999999995\n - type: ndcg_at_100\n value: 44.366\n - type: ndcg_at_1000\n value: 51.771\n - type: ndcg_at_3\n value: 45.195\n - type: ndcg_at_5\n value: 42.187000000000005\n - type: precision_at_1\n value: 66\n - type: precision_at_10\n value: 31.75\n - type: precision_at_100\n value: 10.11\n - type: precision_at_1000\n value: 1.9800000000000002\n - type: precision_at_3\n value: 48.167\n - type: precision_at_5\n value: 40.050000000000004\n - type: recall_at_1\n value: 8.885\n - type: recall_at_10\n value: 24.471999999999998\n - type: recall_at_100\n value: 49.669000000000004\n - type: recall_at_1000\n value: 73.383\n - type: recall_at_3\n value: 14.872\n - type: recall_at_5\n value: 18.262999999999998\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 
4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: accuracy\n value: 45.18\n - type: f1\n value: 40.26878691789978\n - task:\n type: Retrieval\n dataset:\n name: MTEB FEVER\n type: fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 62.751999999999995\n - type: map_at_10\n value: 74.131\n - type: map_at_100\n value: 74.407\n - type: map_at_1000\n value: 74.423\n - type: map_at_3\n value: 72.329\n - type: map_at_5\n value: 73.555\n - type: mrr_at_1\n value: 67.282\n - type: mrr_at_10\n value: 78.292\n - type: mrr_at_100\n value: 78.455\n - type: mrr_at_1000\n value: 78.458\n - type: mrr_at_3\n value: 76.755\n - type: mrr_at_5\n value: 77.839\n - type: ndcg_at_1\n value: 67.282\n - type: ndcg_at_10\n value: 79.443\n - type: ndcg_at_100\n value: 80.529\n - type: ndcg_at_1000\n value: 80.812\n - type: ndcg_at_3\n value: 76.281\n - type: ndcg_at_5\n value: 78.235\n - type: precision_at_1\n value: 67.282\n - type: precision_at_10\n value: 10.078\n - type: precision_at_100\n value: 1.082\n - type: precision_at_1000\n value: 0.11199999999999999\n - type: precision_at_3\n value: 30.178\n - type: precision_at_5\n value: 19.232\n - type: recall_at_1\n value: 62.751999999999995\n - type: recall_at_10\n value: 91.521\n - type: recall_at_100\n value: 95.997\n - type: recall_at_1000\n value: 97.775\n - type: recall_at_3\n value: 83.131\n - type: recall_at_5\n value: 87.93299999999999\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA2018\n type: fiqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 18.861\n - type: map_at_10\n value: 30.252000000000002\n - type: map_at_100\n value: 32.082\n - type: map_at_1000\n value: 32.261\n - type: map_at_3\n value: 25.909\n - type: map_at_5\n value: 28.296\n - type: mrr_at_1\n value: 37.346000000000004\n - type: mrr_at_10\n value: 45.802\n - type: mrr_at_100\n value: 46.611999999999995\n - type: mrr_at_1000\n value: 46.659\n - type: mrr_at_3\n 
value: 43.056\n - type: mrr_at_5\n value: 44.637\n - type: ndcg_at_1\n value: 37.346000000000004\n - type: ndcg_at_10\n value: 38.169\n - type: ndcg_at_100\n value: 44.864\n - type: ndcg_at_1000\n value: 47.974\n - type: ndcg_at_3\n value: 33.619\n - type: ndcg_at_5\n value: 35.317\n - type: precision_at_1\n value: 37.346000000000004\n - type: precision_at_10\n value: 10.693999999999999\n - type: precision_at_100\n value: 1.775\n - type: precision_at_1000\n value: 0.231\n - type: precision_at_3\n value: 22.325\n - type: precision_at_5\n value: 16.852\n - type: recall_at_1\n value: 18.861\n - type: recall_at_10\n value: 45.672000000000004\n - type: recall_at_100\n value: 70.60499999999999\n - type: recall_at_1000\n value: 89.216\n - type: recall_at_3\n value: 30.361\n - type: recall_at_5\n value: 36.998999999999995\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA\n type: hotpotqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 37.852999999999994\n - type: map_at_10\n value: 59.961\n - type: map_at_100\n value: 60.78\n - type: map_at_1000\n value: 60.843\n - type: map_at_3\n value: 56.39999999999999\n - type: map_at_5\n value: 58.646\n - type: mrr_at_1\n value: 75.70599999999999\n - type: mrr_at_10\n value: 82.321\n - type: mrr_at_100\n value: 82.516\n - type: mrr_at_1000\n value: 82.525\n - type: mrr_at_3\n value: 81.317\n - type: mrr_at_5\n value: 81.922\n - type: ndcg_at_1\n value: 75.70599999999999\n - type: ndcg_at_10\n value: 68.557\n - type: ndcg_at_100\n value: 71.485\n - type: ndcg_at_1000\n value: 72.71600000000001\n - type: ndcg_at_3\n value: 63.524\n - type: ndcg_at_5\n value: 66.338\n - type: precision_at_1\n value: 75.70599999999999\n - type: precision_at_10\n value: 14.463000000000001\n - type: precision_at_100\n value: 1.677\n - type: precision_at_1000\n value: 0.184\n - type: precision_at_3\n value: 40.806\n - type: precision_at_5\n value: 26.709\n - type: recall_at_1\n value: 37.852999999999994\n - 
type: recall_at_10\n value: 72.316\n - type: recall_at_100\n value: 83.842\n - type: recall_at_1000\n value: 91.999\n - type: recall_at_3\n value: 61.209\n - type: recall_at_5\n value: 66.77199999999999\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 85.46039999999999\n - type: ap\n value: 79.9812521351881\n - type: f1\n value: 85.31722909702084\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO\n type: msmarco\n config: default\n split: dev\n revision: None\n metrics:\n - type: map_at_1\n value: 22.704\n - type: map_at_10\n value: 35.329\n - type: map_at_100\n value: 36.494\n - type: map_at_1000\n value: 36.541000000000004\n - type: map_at_3\n value: 31.476\n - type: map_at_5\n value: 33.731\n - type: mrr_at_1\n value: 23.294999999999998\n - type: mrr_at_10\n value: 35.859\n - type: mrr_at_100\n value: 36.968\n - type: mrr_at_1000\n value: 37.008\n - type: mrr_at_3\n value: 32.085\n - type: mrr_at_5\n value: 34.299\n - type: ndcg_at_1\n value: 23.324\n - type: ndcg_at_10\n value: 42.274\n - type: ndcg_at_100\n value: 47.839999999999996\n - type: ndcg_at_1000\n value: 48.971\n - type: ndcg_at_3\n value: 34.454\n - type: ndcg_at_5\n value: 38.464\n - type: precision_at_1\n value: 23.324\n - type: precision_at_10\n value: 6.648\n - type: precision_at_100\n value: 0.9440000000000001\n - type: precision_at_1000\n value: 0.104\n - type: precision_at_3\n value: 14.674999999999999\n - type: precision_at_5\n value: 10.850999999999999\n - type: recall_at_1\n value: 22.704\n - type: recall_at_10\n value: 63.660000000000004\n - type: recall_at_100\n value: 89.29899999999999\n - type: recall_at_1000\n value: 97.88900000000001\n - type: recall_at_3\n value: 42.441\n - type: recall_at_5\n value: 52.04\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: 
mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 93.1326949384405\n - type: f1\n value: 92.89743579612082\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (de)\n type: mteb/mtop_domain\n config: de\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 89.62524654832347\n - type: f1\n value: 88.65106082263151\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (es)\n type: mteb/mtop_domain\n config: es\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 90.59039359573046\n - type: f1\n value: 90.31532892105662\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (fr)\n type: mteb/mtop_domain\n config: fr\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 86.21046038208581\n - type: f1\n value: 86.41459529813113\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (hi)\n type: mteb/mtop_domain\n config: hi\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 87.3180351380423\n - type: f1\n value: 86.71383078226444\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (th)\n type: mteb/mtop_domain\n config: th\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 86.24231464737792\n - type: f1\n value: 86.31845567592403\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 75.27131782945736\n - type: f1\n value: 57.52079940417103\n - task:\n type: Classification\n dataset:\n name: MTEB 
MTOPIntentClassification (de)\n type: mteb/mtop_intent\n config: de\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 71.2341504649197\n - type: f1\n value: 51.349951558039244\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (es)\n type: mteb/mtop_intent\n config: es\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 71.27418278852569\n - type: f1\n value: 50.1714985749095\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (fr)\n type: mteb/mtop_intent\n config: fr\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 67.68243031631694\n - type: f1\n value: 50.1066160836192\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (hi)\n type: mteb/mtop_intent\n config: hi\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 69.2362854069559\n - type: f1\n value: 48.821279948766424\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (th)\n type: mteb/mtop_intent\n config: th\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 71.71428571428571\n - type: f1\n value: 53.94611389496195\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (af)\n type: mteb/amazon_massive_intent\n config: af\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 59.97646267652992\n - type: f1\n value: 57.26797883561521\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (am)\n type: mteb/amazon_massive_intent\n config: am\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 53.65501008742435\n - type: f1\n value: 50.416258382177034\n - 
task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ar)\n type: mteb/amazon_massive_intent\n config: ar\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 57.45796906523201\n - type: f1\n value: 53.306690547422185\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (az)\n type: mteb/amazon_massive_intent\n config: az\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 62.59246805648957\n - type: f1\n value: 59.818381969051494\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (bn)\n type: mteb/amazon_massive_intent\n config: bn\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 61.126429051782104\n - type: f1\n value: 58.25993593933026\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (cy)\n type: mteb/amazon_massive_intent\n config: cy\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 50.057162071284466\n - type: f1\n value: 46.96095728790911\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (da)\n type: mteb/amazon_massive_intent\n config: da\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 66.64425016812375\n - type: f1\n value: 62.858291698755764\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (de)\n type: mteb/amazon_massive_intent\n config: de\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 66.08944182918628\n - type: f1\n value: 62.44639030604241\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (el)\n type: mteb/amazon_massive_intent\n config: el\n split: test\n revision: 
31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 64.68056489576328\n - type: f1\n value: 61.775326758789504\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 72.11163416274377\n - type: f1\n value: 69.70789096927015\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (es)\n type: mteb/amazon_massive_intent\n config: es\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 68.40282447881641\n - type: f1\n value: 66.38492065671895\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (fa)\n type: mteb/amazon_massive_intent\n config: fa\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 67.24613315400134\n - type: f1\n value: 64.3348019501336\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (fi)\n type: mteb/amazon_massive_intent\n config: fi\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 65.78345662407531\n - type: f1\n value: 62.21279452354622\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (fr)\n type: mteb/amazon_massive_intent\n config: fr\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 67.9455279085407\n - type: f1\n value: 65.48193124964094\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (he)\n type: mteb/amazon_massive_intent\n config: he\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 62.05110961667788\n - type: f1\n value: 58.097856564684534\n - task:\n type: Classification\n dataset:\n 
name: MTEB MassiveIntentClassification (hi)\n type: mteb/amazon_massive_intent\n config: hi\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 64.95292535305985\n - type: f1\n value: 62.09182174767901\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (hu)\n type: mteb/amazon_massive_intent\n config: hu\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 64.97310020174848\n - type: f1\n value: 61.14252567730396\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (hy)\n type: mteb/amazon_massive_intent\n config: hy\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 60.08069939475453\n - type: f1\n value: 57.044041742492034\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (id)\n type: mteb/amazon_massive_intent\n config: id\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 66.63752521856085\n - type: f1\n value: 63.889340907205316\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (is)\n type: mteb/amazon_massive_intent\n config: is\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 56.385339609952936\n - type: f1\n value: 53.449033750088304\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (it)\n type: mteb/amazon_massive_intent\n config: it\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 68.93073301950234\n - type: f1\n value: 65.9884357824104\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ja)\n type: mteb/amazon_massive_intent\n config: ja\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: 
accuracy\n value: 68.94418291862812\n - type: f1\n value: 66.48740222583132\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (jv)\n type: mteb/amazon_massive_intent\n config: jv\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 54.26025554808339\n - type: f1\n value: 50.19562815100793\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ka)\n type: mteb/amazon_massive_intent\n config: ka\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 48.98789509078682\n - type: f1\n value: 46.65788438676836\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (km)\n type: mteb/amazon_massive_intent\n config: km\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 44.68728984532616\n - type: f1\n value: 41.642419349541996\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (kn)\n type: mteb/amazon_massive_intent\n config: kn\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 59.19300605245461\n - type: f1\n value: 55.8626492442437\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ko)\n type: mteb/amazon_massive_intent\n config: ko\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 66.33826496301278\n - type: f1\n value: 63.89499791648792\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (lv)\n type: mteb/amazon_massive_intent\n config: lv\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 60.33960995292536\n - type: f1\n value: 57.15242464180892\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ml)\n type: 
mteb/amazon_massive_intent\n config: ml\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 63.09347679892402\n - type: f1\n value: 59.64733214063841\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (mn)\n type: mteb/amazon_massive_intent\n config: mn\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 58.75924680564896\n - type: f1\n value: 55.96585692366827\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ms)\n type: mteb/amazon_massive_intent\n config: ms\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 62.48486886348352\n - type: f1\n value: 59.45143559032946\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (my)\n type: mteb/amazon_massive_intent\n config: my\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 58.56422326832549\n - type: f1\n value: 54.96368702901926\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (nb)\n type: mteb/amazon_massive_intent\n config: nb\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 66.18022864828512\n - type: f1\n value: 63.05369805040634\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (nl)\n type: mteb/amazon_massive_intent\n config: nl\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 67.30329522528581\n - type: f1\n value: 64.06084612020727\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (pl)\n type: mteb/amazon_massive_intent\n config: pl\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 68.36919973100201\n - type: f1\n value: 
65.12154124788887\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (pt)\n type: mteb/amazon_massive_intent\n config: pt\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 68.98117014122394\n - type: f1\n value: 66.41847559806962\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ro)\n type: mteb/amazon_massive_intent\n config: ro\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 65.53799596503026\n - type: f1\n value: 62.17067330740817\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ru)\n type: mteb/amazon_massive_intent\n config: ru\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 69.01815736381977\n - type: f1\n value: 66.24988369607843\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (sl)\n type: mteb/amazon_massive_intent\n config: sl\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 62.34700739744452\n - type: f1\n value: 59.957933424941636\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (sq)\n type: mteb/amazon_massive_intent\n config: sq\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 61.23402824478815\n - type: f1\n value: 57.98836976018471\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (sv)\n type: mteb/amazon_massive_intent\n config: sv\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 68.54068594485541\n - type: f1\n value: 65.43849680666855\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (sw)\n type: mteb/amazon_massive_intent\n config: sw\n split: test\n revision: 
31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 55.998655010087425\n - type: f1\n value: 52.83737515406804\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ta)\n type: mteb/amazon_massive_intent\n config: ta\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 58.71217215870882\n - type: f1\n value: 55.051794977833026\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (te)\n type: mteb/amazon_massive_intent\n config: te\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 59.724277067921996\n - type: f1\n value: 56.33485571838306\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (th)\n type: mteb/amazon_massive_intent\n config: th\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 65.59515803631473\n - type: f1\n value: 64.96772366193588\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (tl)\n type: mteb/amazon_massive_intent\n config: tl\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 60.860793544048406\n - type: f1\n value: 58.148845819115394\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (tr)\n type: mteb/amazon_massive_intent\n config: tr\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 67.40753194351043\n - type: f1\n value: 63.18903778054698\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (ur)\n type: mteb/amazon_massive_intent\n config: ur\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 61.52320107599194\n - type: f1\n value: 58.356144563398516\n - task:\n type: Classification\n 
dataset:\n name: MTEB MassiveIntentClassification (vi)\n type: mteb/amazon_massive_intent\n config: vi\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 66.17014122394083\n - type: f1\n value: 63.919964062638925\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (zh-CN)\n type: mteb/amazon_massive_intent\n config: zh-CN\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 69.15601882985878\n - type: f1\n value: 67.01451905761371\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (zh-TW)\n type: mteb/amazon_massive_intent\n config: zh-TW\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 64.65030262273034\n - type: f1\n value: 64.14420425129063\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (af)\n type: mteb/amazon_massive_scenario\n config: af\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 65.08742434431743\n - type: f1\n value: 63.044060042311756\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (am)\n type: mteb/amazon_massive_scenario\n config: am\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 58.52387357094821\n - type: f1\n value: 56.82398588814534\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ar)\n type: mteb/amazon_massive_scenario\n config: ar\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 62.239408204438476\n - type: f1\n value: 61.92570286170469\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (az)\n type: mteb/amazon_massive_scenario\n config: az\n split: test\n revision: 
7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 63.74915938130463\n - type: f1\n value: 62.130740689396276\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (bn)\n type: mteb/amazon_massive_scenario\n config: bn\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 65.00336247478144\n - type: f1\n value: 63.71080635228055\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (cy)\n type: mteb/amazon_massive_scenario\n config: cy\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 52.837928715534645\n - type: f1\n value: 50.390741680320836\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (da)\n type: mteb/amazon_massive_scenario\n config: da\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 72.42098184263618\n - type: f1\n value: 71.41355113538995\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (de)\n type: mteb/amazon_massive_scenario\n config: de\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 71.95359784801613\n - type: f1\n value: 71.42699340156742\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (el)\n type: mteb/amazon_massive_scenario\n config: el\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 70.18157363819772\n - type: f1\n value: 69.74836113037671\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n type: mteb/amazon_massive_scenario\n config: en\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 77.08137188971082\n - type: f1\n value: 76.78000685068261\n - task:\n type: 
Classification\n dataset:\n name: MTEB MassiveScenarioClassification (es)\n type: mteb/amazon_massive_scenario\n config: es\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 71.5030262273033\n - type: f1\n value: 71.71620130425673\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (fa)\n type: mteb/amazon_massive_scenario\n config: fa\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 70.24546065904505\n - type: f1\n value: 69.07638311730359\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (fi)\n type: mteb/amazon_massive_scenario\n config: fi\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 69.12911903160726\n - type: f1\n value: 68.32651736539815\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (fr)\n type: mteb/amazon_massive_scenario\n config: fr\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 71.89307330195025\n - type: f1\n value: 71.33986549860187\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (he)\n type: mteb/amazon_massive_scenario\n config: he\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 67.44451916610626\n - type: f1\n value: 66.90192664503866\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (hi)\n type: mteb/amazon_massive_scenario\n config: hi\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 69.16274377942166\n - type: f1\n value: 68.01090953775066\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (hu)\n type: mteb/amazon_massive_scenario\n config: hu\n split: test\n revision: 
7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 70.75319435104237\n - type: f1\n value: 70.18035309201403\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (hy)\n type: mteb/amazon_massive_scenario\n config: hy\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 63.14391392064559\n - type: f1\n value: 61.48286540778145\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (id)\n type: mteb/amazon_massive_scenario\n config: id\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 70.70275722932078\n - type: f1\n value: 70.26164779846495\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (is)\n type: mteb/amazon_massive_scenario\n config: is\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 60.93813046402153\n - type: f1\n value: 58.8852862116525\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (it)\n type: mteb/amazon_massive_scenario\n config: it\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 72.320107599193\n - type: f1\n value: 72.19836409602924\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ja)\n type: mteb/amazon_massive_scenario\n config: ja\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 74.65366509751176\n - type: f1\n value: 74.55188288799579\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (jv)\n type: mteb/amazon_massive_scenario\n config: jv\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 59.694014794889036\n - type: f1\n value: 58.11353311721067\n - task:\n type: 
Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ka)\n type: mteb/amazon_massive_scenario\n config: ka\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 54.37457969065231\n - type: f1\n value: 52.81306134311697\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (km)\n type: mteb/amazon_massive_scenario\n config: km\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 48.3086751849361\n - type: f1\n value: 45.396449765419376\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (kn)\n type: mteb/amazon_massive_scenario\n config: kn\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 62.151983860121064\n - type: f1\n value: 60.31762544281696\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ko)\n type: mteb/amazon_massive_scenario\n config: ko\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 72.44788164088769\n - type: f1\n value: 71.68150151736367\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (lv)\n type: mteb/amazon_massive_scenario\n config: lv\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 62.81439139206455\n - type: f1\n value: 62.06735559105593\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ml)\n type: mteb/amazon_massive_scenario\n config: ml\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 68.04303967720242\n - type: f1\n value: 66.68298851670133\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (mn)\n type: mteb/amazon_massive_scenario\n config: mn\n split: test\n revision: 
7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 61.43913920645595\n - type: f1\n value: 60.25605977560783\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ms)\n type: mteb/amazon_massive_scenario\n config: ms\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 66.90316072629456\n - type: f1\n value: 65.1325924692381\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (my)\n type: mteb/amazon_massive_scenario\n config: my\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 61.63752521856086\n - type: f1\n value: 59.14284778039585\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (nb)\n type: mteb/amazon_massive_scenario\n config: nb\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 71.63080026899797\n - type: f1\n value: 70.89771864626877\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (nl)\n type: mteb/amazon_massive_scenario\n config: nl\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 72.10827168796234\n - type: f1\n value: 71.71954219691159\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (pl)\n type: mteb/amazon_massive_scenario\n config: pl\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 70.59515803631471\n - type: f1\n value: 70.05040128099003\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (pt)\n type: mteb/amazon_massive_scenario\n config: pt\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 70.83389374579691\n - type: f1\n value: 70.84877936562735\n - task:\n type: 
Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ro)\n type: mteb/amazon_massive_scenario\n config: ro\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 69.18628110289173\n - type: f1\n value: 68.97232927921841\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ru)\n type: mteb/amazon_massive_scenario\n config: ru\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 72.99260255548083\n - type: f1\n value: 72.85139492157732\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (sl)\n type: mteb/amazon_massive_scenario\n config: sl\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 65.26227303295225\n - type: f1\n value: 65.08833655469431\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (sq)\n type: mteb/amazon_massive_scenario\n config: sq\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 66.48621385339611\n - type: f1\n value: 64.43483199071298\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (sv)\n type: mteb/amazon_massive_scenario\n config: sv\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 73.14391392064559\n - type: f1\n value: 72.2580822579741\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (sw)\n type: mteb/amazon_massive_scenario\n config: sw\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 59.88567585743107\n - type: f1\n value: 58.3073765932569\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ta)\n type: mteb/amazon_massive_scenario\n config: ta\n split: test\n revision: 
7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 62.38399462004034\n - type: f1\n value: 60.82139544252606\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (te)\n type: mteb/amazon_massive_scenario\n config: te\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 62.58574310692671\n - type: f1\n value: 60.71443370385374\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (th)\n type: mteb/amazon_massive_scenario\n config: th\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 71.61398789509079\n - type: f1\n value: 70.99761812049401\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (tl)\n type: mteb/amazon_massive_scenario\n config: tl\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 62.73705447209146\n - type: f1\n value: 61.680849331794796\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (tr)\n type: mteb/amazon_massive_scenario\n config: tr\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 71.66778749159381\n - type: f1\n value: 71.17320646080115\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (ur)\n type: mteb/amazon_massive_scenario\n config: ur\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 64.640215198386\n - type: f1\n value: 63.301805157015444\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (vi)\n type: mteb/amazon_massive_scenario\n config: vi\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 70.00672494956288\n - type: f1\n value: 70.26005548582106\n - task:\n type: 
Classification\n dataset:\n name: MTEB MassiveScenarioClassification (zh-CN)\n type: mteb/amazon_massive_scenario\n config: zh-CN\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 75.42030934767989\n - type: f1\n value: 75.2074842882598\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (zh-TW)\n type: mteb/amazon_massive_scenario\n config: zh-TW\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 70.69266980497646\n - type: f1\n value: 70.94103167391192\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: v_measure\n value: 28.91697191169135\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: v_measure\n value: 28.434000079573313\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: default\n split: test\n revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69\n metrics:\n - type: map\n value: 30.96683513343383\n - type: mrr\n value: 31.967364078714834\n - task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus\n type: nfcorpus\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 5.5280000000000005\n - type: map_at_10\n value: 11.793\n - type: map_at_100\n value: 14.496999999999998\n - type: map_at_1000\n value: 15.783\n - type: map_at_3\n value: 8.838\n - type: map_at_5\n value: 10.07\n - type: mrr_at_1\n value: 43.653\n - type: mrr_at_10\n value: 51.531000000000006\n - type: mrr_at_100\n value: 52.205\n - type: mrr_at_1000\n value: 52.242999999999995\n - type: mrr_at_3\n value: 49.431999999999995\n - type: mrr_at_5\n 
value: 50.470000000000006\n - type: ndcg_at_1\n value: 42.415000000000006\n - type: ndcg_at_10\n value: 32.464999999999996\n - type: ndcg_at_100\n value: 28.927999999999997\n - type: ndcg_at_1000\n value: 37.629000000000005\n - type: ndcg_at_3\n value: 37.845\n - type: ndcg_at_5\n value: 35.147\n - type: precision_at_1\n value: 43.653\n - type: precision_at_10\n value: 23.932000000000002\n - type: precision_at_100\n value: 7.17\n - type: precision_at_1000\n value: 1.967\n - type: precision_at_3\n value: 35.397\n - type: precision_at_5\n value: 29.907\n - type: recall_at_1\n value: 5.5280000000000005\n - type: recall_at_10\n value: 15.568000000000001\n - type: recall_at_100\n value: 28.54\n - type: recall_at_1000\n value: 59.864\n - type: recall_at_3\n value: 9.822000000000001\n - type: recall_at_5\n value: 11.726\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ\n type: nq\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 37.041000000000004\n - type: map_at_10\n value: 52.664\n - type: map_at_100\n value: 53.477\n - type: map_at_1000\n value: 53.505\n - type: map_at_3\n value: 48.510999999999996\n - type: map_at_5\n value: 51.036\n - type: mrr_at_1\n value: 41.338\n - type: mrr_at_10\n value: 55.071000000000005\n - type: mrr_at_100\n value: 55.672\n - type: mrr_at_1000\n value: 55.689\n - type: mrr_at_3\n value: 51.82\n - type: mrr_at_5\n value: 53.852\n - type: ndcg_at_1\n value: 41.338\n - type: ndcg_at_10\n value: 60.01800000000001\n - type: ndcg_at_100\n value: 63.409000000000006\n - type: ndcg_at_1000\n value: 64.017\n - type: ndcg_at_3\n value: 52.44799999999999\n - type: ndcg_at_5\n value: 56.571000000000005\n - type: precision_at_1\n value: 41.338\n - type: precision_at_10\n value: 9.531\n - type: precision_at_100\n value: 1.145\n - type: precision_at_1000\n value: 0.12\n - type: precision_at_3\n value: 23.416\n - type: precision_at_5\n value: 16.46\n - type: recall_at_1\n value: 37.041000000000004\n - type: 
recall_at_10\n value: 79.76299999999999\n - type: recall_at_100\n value: 94.39\n - type: recall_at_1000\n value: 98.851\n - type: recall_at_3\n value: 60.465\n - type: recall_at_5\n value: 69.906\n - task:\n type: Retrieval\n dataset:\n name: MTEB QuoraRetrieval\n type: quora\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 69.952\n - type: map_at_10\n value: 83.758\n - type: map_at_100\n value: 84.406\n - type: map_at_1000\n value: 84.425\n - type: map_at_3\n value: 80.839\n - type: map_at_5\n value: 82.646\n - type: mrr_at_1\n value: 80.62\n - type: mrr_at_10\n value: 86.947\n - type: mrr_at_100\n value: 87.063\n - type: mrr_at_1000\n value: 87.064\n - type: mrr_at_3\n value: 85.96000000000001\n - type: mrr_at_5\n value: 86.619\n - type: ndcg_at_1\n value: 80.63\n - type: ndcg_at_10\n value: 87.64800000000001\n - type: ndcg_at_100\n value: 88.929\n - type: ndcg_at_1000\n value: 89.054\n - type: ndcg_at_3\n value: 84.765\n - type: ndcg_at_5\n value: 86.291\n - type: precision_at_1\n value: 80.63\n - type: precision_at_10\n value: 13.314\n - type: precision_at_100\n value: 1.525\n - type: precision_at_1000\n value: 0.157\n - type: precision_at_3\n value: 37.1\n - type: precision_at_5\n value: 24.372\n - type: recall_at_1\n value: 69.952\n - type: recall_at_10\n value: 94.955\n - type: recall_at_100\n value: 99.38\n - type: recall_at_1000\n value: 99.96000000000001\n - type: recall_at_3\n value: 86.60600000000001\n - type: recall_at_5\n value: 90.997\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: v_measure\n value: 42.41329517878427\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\n metrics:\n - type: v_measure\n value: 
55.171278362748666\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS\n type: scidocs\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 4.213\n - type: map_at_10\n value: 9.895\n - type: map_at_100\n value: 11.776\n - type: map_at_1000\n value: 12.084\n - type: map_at_3\n value: 7.2669999999999995\n - type: map_at_5\n value: 8.620999999999999\n - type: mrr_at_1\n value: 20.8\n - type: mrr_at_10\n value: 31.112000000000002\n - type: mrr_at_100\n value: 32.274\n - type: mrr_at_1000\n value: 32.35\n - type: mrr_at_3\n value: 28.133000000000003\n - type: mrr_at_5\n value: 29.892999999999997\n - type: ndcg_at_1\n value: 20.8\n - type: ndcg_at_10\n value: 17.163999999999998\n - type: ndcg_at_100\n value: 24.738\n - type: ndcg_at_1000\n value: 30.316\n - type: ndcg_at_3\n value: 16.665\n - type: ndcg_at_5\n value: 14.478\n - type: precision_at_1\n value: 20.8\n - type: precision_at_10\n value: 8.74\n - type: precision_at_100\n value: 1.963\n - type: precision_at_1000\n value: 0.33\n - type: precision_at_3\n value: 15.467\n - type: precision_at_5\n value: 12.6\n - type: recall_at_1\n value: 4.213\n - type: recall_at_10\n value: 17.698\n - type: recall_at_100\n value: 39.838\n - type: recall_at_1000\n value: 66.893\n - type: recall_at_3\n value: 9.418\n - type: recall_at_5\n value: 12.773000000000001\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\n metrics:\n - type: cos_sim_pearson\n value: 82.90453315738294\n - type: cos_sim_spearman\n value: 78.51197850080254\n - type: euclidean_pearson\n value: 80.09647123597748\n - type: euclidean_spearman\n value: 78.63548011514061\n - type: manhattan_pearson\n value: 80.10645285675231\n - type: manhattan_spearman\n value: 78.57861806068901\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: 
a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cos_sim_pearson\n value: 84.2616156846401\n - type: cos_sim_spearman\n value: 76.69713867850156\n - type: euclidean_pearson\n value: 77.97948563800394\n - type: euclidean_spearman\n value: 74.2371211567807\n - type: manhattan_pearson\n value: 77.69697879669705\n - type: manhattan_spearman\n value: 73.86529778022278\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - type: cos_sim_pearson\n value: 77.0293269315045\n - type: cos_sim_spearman\n value: 78.02555120584198\n - type: euclidean_pearson\n value: 78.25398100379078\n - type: euclidean_spearman\n value: 78.66963870599464\n - type: manhattan_pearson\n value: 78.14314682167348\n - type: manhattan_spearman\n value: 78.57692322969135\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: cos_sim_pearson\n value: 79.16989925136942\n - type: cos_sim_spearman\n value: 76.5996225327091\n - type: euclidean_pearson\n value: 77.8319003279786\n - type: euclidean_spearman\n value: 76.42824009468998\n - type: manhattan_pearson\n value: 77.69118862737736\n - type: manhattan_spearman\n value: 76.25568104762812\n - task:\n type: STS\n dataset:\n name: MTEB STS15\n type: mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cos_sim_pearson\n value: 87.42012286935325\n - type: cos_sim_spearman\n value: 88.15654297884122\n - type: euclidean_pearson\n value: 87.34082819427852\n - type: euclidean_spearman\n value: 88.06333589547084\n - type: manhattan_pearson\n value: 87.25115596784842\n - type: manhattan_spearman\n value: 87.9559927695203\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 
4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cos_sim_pearson\n value: 82.88222044996712\n - type: cos_sim_spearman\n value: 84.28476589061077\n - type: euclidean_pearson\n value: 83.17399758058309\n - type: euclidean_spearman\n value: 83.85497357244542\n - type: manhattan_pearson\n value: 83.0308397703786\n - type: manhattan_spearman\n value: 83.71554539935046\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (ko-ko)\n type: mteb/sts17-crosslingual-sts\n config: ko-ko\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 80.20682986257339\n - type: cos_sim_spearman\n value: 79.94567120362092\n - type: euclidean_pearson\n value: 79.43122480368902\n - type: euclidean_spearman\n value: 79.94802077264987\n - type: manhattan_pearson\n value: 79.32653021527081\n - type: manhattan_spearman\n value: 79.80961146709178\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (ar-ar)\n type: mteb/sts17-crosslingual-sts\n config: ar-ar\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 74.46578144394383\n - type: cos_sim_spearman\n value: 74.52496637472179\n - type: euclidean_pearson\n value: 72.2903807076809\n - type: euclidean_spearman\n value: 73.55549359771645\n - type: manhattan_pearson\n value: 72.09324837709393\n - type: manhattan_spearman\n value: 73.36743103606581\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-ar)\n type: mteb/sts17-crosslingual-sts\n config: en-ar\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 71.37272335116\n - type: cos_sim_spearman\n value: 71.26702117766037\n - type: euclidean_pearson\n value: 67.114829954434\n - type: euclidean_spearman\n value: 66.37938893947761\n - type: manhattan_pearson\n value: 66.79688574095246\n - type: manhattan_spearman\n value: 66.17292828079667\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-de)\n type: 
mteb/sts17-crosslingual-sts\n config: en-de\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 80.61016770129092\n - type: cos_sim_spearman\n value: 82.08515426632214\n - type: euclidean_pearson\n value: 80.557340361131\n - type: euclidean_spearman\n value: 80.37585812266175\n - type: manhattan_pearson\n value: 80.6782873404285\n - type: manhattan_spearman\n value: 80.6678073032024\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 87.00150745350108\n - type: cos_sim_spearman\n value: 87.83441972211425\n - type: euclidean_pearson\n value: 87.94826702308792\n - type: euclidean_spearman\n value: 87.46143974860725\n - type: manhattan_pearson\n value: 87.97560344306105\n - type: manhattan_spearman\n value: 87.5267102829796\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-tr)\n type: mteb/sts17-crosslingual-sts\n config: en-tr\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 64.76325252267235\n - type: cos_sim_spearman\n value: 63.32615095463905\n - type: euclidean_pearson\n value: 64.07920669155716\n - type: euclidean_spearman\n value: 61.21409893072176\n - type: manhattan_pearson\n value: 64.26308625680016\n - type: manhattan_spearman\n value: 61.2438185254079\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (es-en)\n type: mteb/sts17-crosslingual-sts\n config: es-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 75.82644463022595\n - type: cos_sim_spearman\n value: 76.50381269945073\n - type: euclidean_pearson\n value: 75.1328548315934\n - type: euclidean_spearman\n value: 75.63761139408453\n - type: manhattan_pearson\n value: 75.18610101241407\n - type: manhattan_spearman\n value: 
75.30669266354164\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (es-es)\n type: mteb/sts17-crosslingual-sts\n config: es-es\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 87.49994164686832\n - type: cos_sim_spearman\n value: 86.73743986245549\n - type: euclidean_pearson\n value: 86.8272894387145\n - type: euclidean_spearman\n value: 85.97608491000507\n - type: manhattan_pearson\n value: 86.74960140396779\n - type: manhattan_spearman\n value: 85.79285984190273\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (fr-en)\n type: mteb/sts17-crosslingual-sts\n config: fr-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 79.58172210788469\n - type: cos_sim_spearman\n value: 80.17516468334607\n - type: euclidean_pearson\n value: 77.56537843470504\n - type: euclidean_spearman\n value: 77.57264627395521\n - type: manhattan_pearson\n value: 78.09703521695943\n - type: manhattan_spearman\n value: 78.15942760916954\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (it-en)\n type: mteb/sts17-crosslingual-sts\n config: it-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 79.7589932931751\n - type: cos_sim_spearman\n value: 80.15210089028162\n - type: euclidean_pearson\n value: 77.54135223516057\n - type: euclidean_spearman\n value: 77.52697996368764\n - type: manhattan_pearson\n value: 77.65734439572518\n - type: manhattan_spearman\n value: 77.77702992016121\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (nl-en)\n type: mteb/sts17-crosslingual-sts\n config: nl-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 79.16682365511267\n - type: cos_sim_spearman\n value: 79.25311267628506\n - type: euclidean_pearson\n value: 77.54882036762244\n - type: euclidean_spearman\n value: 77.33212935194827\n - type: 
manhattan_pearson\n value: 77.98405516064015\n - type: manhattan_spearman\n value: 77.85075717865719\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 59.10473294775917\n - type: cos_sim_spearman\n value: 61.82780474476838\n - type: euclidean_pearson\n value: 45.885111672377256\n - type: euclidean_spearman\n value: 56.88306351932454\n - type: manhattan_pearson\n value: 46.101218127323186\n - type: manhattan_spearman\n value: 56.80953694186333\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (de)\n type: mteb/sts22-crosslingual-sts\n config: de\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 45.781923079584146\n - type: cos_sim_spearman\n value: 55.95098449691107\n - type: euclidean_pearson\n value: 25.4571031323205\n - type: euclidean_spearman\n value: 49.859978118078935\n - type: manhattan_pearson\n value: 25.624938455041384\n - type: manhattan_spearman\n value: 49.99546185049401\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (es)\n type: mteb/sts22-crosslingual-sts\n config: es\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 60.00618133997907\n - type: cos_sim_spearman\n value: 66.57896677718321\n - type: euclidean_pearson\n value: 42.60118466388821\n - type: euclidean_spearman\n value: 62.8210759715209\n - type: manhattan_pearson\n value: 42.63446860604094\n - type: manhattan_spearman\n value: 62.73803068925271\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (pl)\n type: mteb/sts22-crosslingual-sts\n config: pl\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 28.460759121626943\n - type: cos_sim_spearman\n value: 34.13459007469131\n - type: euclidean_pearson\n value: 6.0917739325525195\n - 
type: euclidean_spearman\n value: 27.9947262664867\n - type: manhattan_pearson\n value: 6.16877864169911\n - type: manhattan_spearman\n value: 28.00664163971514\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (tr)\n type: mteb/sts22-crosslingual-sts\n config: tr\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 57.42546621771696\n - type: cos_sim_spearman\n value: 63.699663168970474\n - type: euclidean_pearson\n value: 38.12085278789738\n - type: euclidean_spearman\n value: 58.12329140741536\n - type: manhattan_pearson\n value: 37.97364549443335\n - type: manhattan_spearman\n value: 57.81545502318733\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (ar)\n type: mteb/sts22-crosslingual-sts\n config: ar\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 46.82241380954213\n - type: cos_sim_spearman\n value: 57.86569456006391\n - type: euclidean_pearson\n value: 31.80480070178813\n - type: euclidean_spearman\n value: 52.484000620130104\n - type: manhattan_pearson\n value: 31.952708554646097\n - type: manhattan_spearman\n value: 52.8560972356195\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (ru)\n type: mteb/sts22-crosslingual-sts\n config: ru\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 52.00447170498087\n - type: cos_sim_spearman\n value: 60.664116225735164\n - type: euclidean_pearson\n value: 33.87382555421702\n - type: euclidean_spearman\n value: 55.74649067458667\n - type: manhattan_pearson\n value: 33.99117246759437\n - type: manhattan_spearman\n value: 55.98749034923899\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (zh)\n type: mteb/sts22-crosslingual-sts\n config: zh\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 58.06497233105448\n - type: cos_sim_spearman\n value: 65.62968801135676\n - 
type: euclidean_pearson\n value: 47.482076613243905\n - type: euclidean_spearman\n value: 62.65137791498299\n - type: manhattan_pearson\n value: 47.57052626104093\n - type: manhattan_spearman\n value: 62.436916516613294\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (fr)\n type: mteb/sts22-crosslingual-sts\n config: fr\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 70.49397298562575\n - type: cos_sim_spearman\n value: 74.79604041187868\n - type: euclidean_pearson\n value: 49.661891561317795\n - type: euclidean_spearman\n value: 70.31535537621006\n - type: manhattan_pearson\n value: 49.553715741850006\n - type: manhattan_spearman\n value: 70.24779344636806\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (de-en)\n type: mteb/sts22-crosslingual-sts\n config: de-en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 55.640574515348696\n - type: cos_sim_spearman\n value: 54.927959317689\n - type: euclidean_pearson\n value: 29.00139666967476\n - type: euclidean_spearman\n value: 41.86386566971605\n - type: manhattan_pearson\n value: 29.47411067730344\n - type: manhattan_spearman\n value: 42.337438424952786\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (es-en)\n type: mteb/sts22-crosslingual-sts\n config: es-en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 68.14095292259312\n - type: cos_sim_spearman\n value: 73.99017581234789\n - type: euclidean_pearson\n value: 46.46304297872084\n - type: euclidean_spearman\n value: 60.91834114800041\n - type: manhattan_pearson\n value: 47.07072666338692\n - type: manhattan_spearman\n value: 61.70415727977926\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (it)\n type: mteb/sts22-crosslingual-sts\n config: it\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 
73.27184653359575\n - type: cos_sim_spearman\n value: 77.76070252418626\n - type: euclidean_pearson\n value: 62.30586577544778\n - type: euclidean_spearman\n value: 75.14246629110978\n - type: manhattan_pearson\n value: 62.328196884927046\n - type: manhattan_spearman\n value: 75.1282792981433\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (pl-en)\n type: mteb/sts22-crosslingual-sts\n config: pl-en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 71.59448528829957\n - type: cos_sim_spearman\n value: 70.37277734222123\n - type: euclidean_pearson\n value: 57.63145565721123\n - type: euclidean_spearman\n value: 66.10113048304427\n - type: manhattan_pearson\n value: 57.18897811586808\n - type: manhattan_spearman\n value: 66.5595511215901\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (zh-en)\n type: mteb/sts22-crosslingual-sts\n config: zh-en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 66.37520607720838\n - type: cos_sim_spearman\n value: 69.92282148997948\n - type: euclidean_pearson\n value: 40.55768770125291\n - type: euclidean_spearman\n value: 55.189128944669605\n - type: manhattan_pearson\n value: 41.03566433468883\n - type: manhattan_spearman\n value: 55.61251893174558\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (es-it)\n type: mteb/sts22-crosslingual-sts\n config: es-it\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 57.791929533771835\n - type: cos_sim_spearman\n value: 66.45819707662093\n - type: euclidean_pearson\n value: 39.03686018511092\n - type: euclidean_spearman\n value: 56.01282695640428\n - type: manhattan_pearson\n value: 38.91586623619632\n - type: manhattan_spearman\n value: 56.69394943612747\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (de-fr)\n type: mteb/sts22-crosslingual-sts\n config: de-fr\n split: test\n revision: 
6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 47.82224468473866\n - type: cos_sim_spearman\n value: 59.467307194781164\n - type: euclidean_pearson\n value: 27.428459190256145\n - type: euclidean_spearman\n value: 60.83463107397519\n - type: manhattan_pearson\n value: 27.487391578496638\n - type: manhattan_spearman\n value: 61.281380460246496\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (de-pl)\n type: mteb/sts22-crosslingual-sts\n config: de-pl\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 16.306666792752644\n - type: cos_sim_spearman\n value: 39.35486427252405\n - type: euclidean_pearson\n value: -2.7887154897955435\n - type: euclidean_spearman\n value: 27.1296051831719\n - type: manhattan_pearson\n value: -3.202291270581297\n - type: manhattan_spearman\n value: 26.32895849218158\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (fr-pl)\n type: mteb/sts22-crosslingual-sts\n config: fr-pl\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 59.67006803805076\n - type: cos_sim_spearman\n value: 73.24670207647144\n - type: euclidean_pearson\n value: 46.91884681500483\n - type: euclidean_spearman\n value: 16.903085094570333\n - type: manhattan_pearson\n value: 46.88391675325812\n - type: manhattan_spearman\n value: 28.17180849095055\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmark\n type: mteb/stsbenchmark-sts\n config: default\n split: test\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\n metrics:\n - type: cos_sim_pearson\n value: 83.79555591223837\n - type: cos_sim_spearman\n value: 85.63658602085185\n - type: euclidean_pearson\n value: 85.22080894037671\n - type: euclidean_spearman\n value: 85.54113580167038\n - type: manhattan_pearson\n value: 85.1639505960118\n - type: manhattan_spearman\n value: 85.43502665436196\n - task:\n type: Reranking\n dataset:\n name: MTEB 
SciDocsRR\n type: mteb/scidocs-reranking\n config: default\n split: test\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\n metrics:\n - type: map\n value: 80.73900991689766\n - type: mrr\n value: 94.81624131133934\n - task:\n type: Retrieval\n dataset:\n name: MTEB SciFact\n type: scifact\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 55.678000000000004\n - type: map_at_10\n value: 65.135\n - type: map_at_100\n value: 65.824\n - type: map_at_1000\n value: 65.852\n - type: map_at_3\n value: 62.736000000000004\n - type: map_at_5\n value: 64.411\n - type: mrr_at_1\n value: 58.333\n - type: mrr_at_10\n value: 66.5\n - type: mrr_at_100\n value: 67.053\n - type: mrr_at_1000\n value: 67.08\n - type: mrr_at_3\n value: 64.944\n - type: mrr_at_5\n value: 65.89399999999999\n - type: ndcg_at_1\n value: 58.333\n - type: ndcg_at_10\n value: 69.34700000000001\n - type: ndcg_at_100\n value: 72.32\n - type: ndcg_at_1000\n value: 73.014\n - type: ndcg_at_3\n value: 65.578\n - type: ndcg_at_5\n value: 67.738\n - type: precision_at_1\n value: 58.333\n - type: precision_at_10\n value: 9.033\n - type: precision_at_100\n value: 1.0670000000000002\n - type: precision_at_1000\n value: 0.11199999999999999\n - type: precision_at_3\n value: 25.444\n - type: precision_at_5\n value: 16.933\n - type: recall_at_1\n value: 55.678000000000004\n - type: recall_at_10\n value: 80.72200000000001\n - type: recall_at_100\n value: 93.93299999999999\n - type: recall_at_1000\n value: 99.333\n - type: recall_at_3\n value: 70.783\n - type: recall_at_5\n value: 75.978\n - task:\n type: PairClassification\n dataset:\n name: MTEB SprintDuplicateQuestions\n type: mteb/sprintduplicatequestions-pairclassification\n config: default\n split: test\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\n metrics:\n - type: cos_sim_accuracy\n value: 99.74653465346535\n - type: cos_sim_ap\n value: 93.01476369929063\n - type: cos_sim_f1\n value: 86.93009118541033\n - type: 
cos_sim_precision\n value: 88.09034907597535\n - type: cos_sim_recall\n value: 85.8\n - type: dot_accuracy\n value: 99.22970297029703\n - type: dot_ap\n value: 51.58725659485144\n - type: dot_f1\n value: 53.51351351351352\n - type: dot_precision\n value: 58.235294117647065\n - type: dot_recall\n value: 49.5\n - type: euclidean_accuracy\n value: 99.74356435643564\n - type: euclidean_ap\n value: 92.40332894384368\n - type: euclidean_f1\n value: 86.97838109602817\n - type: euclidean_precision\n value: 87.46208291203236\n - type: euclidean_recall\n value: 86.5\n - type: manhattan_accuracy\n value: 99.73069306930694\n - type: manhattan_ap\n value: 92.01320815721121\n - type: manhattan_f1\n value: 86.4135864135864\n - type: manhattan_precision\n value: 86.32734530938124\n - type: manhattan_recall\n value: 86.5\n - type: max_accuracy\n value: 99.74653465346535\n - type: max_ap\n value: 93.01476369929063\n - type: max_f1\n value: 86.97838109602817\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClustering\n type: mteb/stackexchange-clustering\n config: default\n split: test\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\n metrics:\n - type: v_measure\n value: 55.2660514302523\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClusteringP2P\n type: mteb/stackexchange-clustering-p2p\n config: default\n split: test\n revision: 815ca46b2622cec33ccafc3735d572c266efdb44\n metrics:\n - type: v_measure\n value: 30.4637783572547\n - task:\n type: Reranking\n dataset:\n name: MTEB StackOverflowDupQuestions\n type: mteb/stackoverflowdupquestions-reranking\n config: default\n split: test\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\n metrics:\n - type: map\n value: 49.41377758357637\n - type: mrr\n value: 50.138451213818854\n - task:\n type: Summarization\n dataset:\n name: MTEB SummEval\n type: mteb/summeval\n config: default\n split: test\n revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c\n metrics:\n - type: cos_sim_pearson\n value: 
28.887846011166594\n - type: cos_sim_spearman\n value: 30.10823258355903\n - type: dot_pearson\n value: 12.888049550236385\n - type: dot_spearman\n value: 12.827495903098123\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID\n type: trec-covid\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 0.21\n - type: map_at_10\n value: 1.667\n - type: map_at_100\n value: 9.15\n - type: map_at_1000\n value: 22.927\n - type: map_at_3\n value: 0.573\n - type: map_at_5\n value: 0.915\n - type: mrr_at_1\n value: 80\n - type: mrr_at_10\n value: 87.167\n - type: mrr_at_100\n value: 87.167\n - type: mrr_at_1000\n value: 87.167\n - type: mrr_at_3\n value: 85.667\n - type: mrr_at_5\n value: 87.167\n - type: ndcg_at_1\n value: 76\n - type: ndcg_at_10\n value: 69.757\n - type: ndcg_at_100\n value: 52.402\n - type: ndcg_at_1000\n value: 47.737\n - type: ndcg_at_3\n value: 71.866\n - type: ndcg_at_5\n value: 72.225\n - type: precision_at_1\n value: 80\n - type: precision_at_10\n value: 75\n - type: precision_at_100\n value: 53.959999999999994\n - type: precision_at_1000\n value: 21.568\n - type: precision_at_3\n value: 76.667\n - type: precision_at_5\n value: 78\n - type: recall_at_1\n value: 0.21\n - type: recall_at_10\n value: 1.9189999999999998\n - type: recall_at_100\n value: 12.589\n - type: recall_at_1000\n value: 45.312000000000005\n - type: recall_at_3\n value: 0.61\n - type: recall_at_5\n value: 1.019\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (sqi-eng)\n type: mteb/tatoeba-bitext-mining\n config: sqi-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 92.10000000000001\n - type: f1\n value: 90.06\n - type: precision\n value: 89.17333333333333\n - type: recall\n value: 92.10000000000001\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (fry-eng)\n type: mteb/tatoeba-bitext-mining\n config: fry-eng\n split: test\n revision: 
9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 56.06936416184971\n - type: f1\n value: 50.87508028259473\n - type: precision\n value: 48.97398843930635\n - type: recall\n value: 56.06936416184971\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (kur-eng)\n type: mteb/tatoeba-bitext-mining\n config: kur-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 57.3170731707317\n - type: f1\n value: 52.96080139372822\n - type: precision\n value: 51.67861124382864\n - type: recall\n value: 57.3170731707317\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (tur-eng)\n type: mteb/tatoeba-bitext-mining\n config: tur-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 94.3\n - type: f1\n value: 92.67333333333333\n - type: precision\n value: 91.90833333333333\n - type: recall\n value: 94.3\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (deu-eng)\n type: mteb/tatoeba-bitext-mining\n config: deu-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 97.7\n - type: f1\n value: 97.07333333333332\n - type: precision\n value: 96.79500000000002\n - type: recall\n value: 97.7\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (nld-eng)\n type: mteb/tatoeba-bitext-mining\n config: nld-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 94.69999999999999\n - type: f1\n value: 93.2\n - type: precision\n value: 92.48333333333333\n - type: recall\n value: 94.69999999999999\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ron-eng)\n type: mteb/tatoeba-bitext-mining\n config: ron-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 92.9\n - type: f1\n value: 91.26666666666667\n - type: precision\n value: 
90.59444444444445\n - type: recall\n value: 92.9\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ang-eng)\n type: mteb/tatoeba-bitext-mining\n config: ang-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 34.32835820895522\n - type: f1\n value: 29.074180380150533\n - type: precision\n value: 28.068207322920596\n - type: recall\n value: 34.32835820895522\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ido-eng)\n type: mteb/tatoeba-bitext-mining\n config: ido-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 78.5\n - type: f1\n value: 74.3945115995116\n - type: precision\n value: 72.82967843459222\n - type: recall\n value: 78.5\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (jav-eng)\n type: mteb/tatoeba-bitext-mining\n config: jav-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 66.34146341463415\n - type: f1\n value: 61.2469400518181\n - type: precision\n value: 59.63977756660683\n - type: recall\n value: 66.34146341463415\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (isl-eng)\n type: mteb/tatoeba-bitext-mining\n config: isl-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 80.9\n - type: f1\n value: 76.90349206349207\n - type: precision\n value: 75.32921568627451\n - type: recall\n value: 80.9\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (slv-eng)\n type: mteb/tatoeba-bitext-mining\n config: slv-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 84.93317132442284\n - type: f1\n value: 81.92519105034295\n - type: precision\n value: 80.71283920615635\n - type: recall\n value: 84.93317132442284\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (cym-eng)\n type: 
mteb/tatoeba-bitext-mining\n config: cym-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 71.1304347826087\n - type: f1\n value: 65.22394755003451\n - type: precision\n value: 62.912422360248435\n - type: recall\n value: 71.1304347826087\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (kaz-eng)\n type: mteb/tatoeba-bitext-mining\n config: kaz-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 79.82608695652173\n - type: f1\n value: 75.55693581780538\n - type: precision\n value: 73.79420289855072\n - type: recall\n value: 79.82608695652173\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (est-eng)\n type: mteb/tatoeba-bitext-mining\n config: est-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 74\n - type: f1\n value: 70.51022222222223\n - type: precision\n value: 69.29673599347512\n - type: recall\n value: 74\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (heb-eng)\n type: mteb/tatoeba-bitext-mining\n config: heb-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 78.7\n - type: f1\n value: 74.14238095238095\n - type: precision\n value: 72.27214285714285\n - type: recall\n value: 78.7\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (gla-eng)\n type: mteb/tatoeba-bitext-mining\n config: gla-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 48.97466827503016\n - type: f1\n value: 43.080330405420874\n - type: precision\n value: 41.36505499593557\n - type: recall\n value: 48.97466827503016\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (mar-eng)\n type: mteb/tatoeba-bitext-mining\n config: mar-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 
89.60000000000001\n - type: f1\n value: 86.62333333333333\n - type: precision\n value: 85.225\n - type: recall\n value: 89.60000000000001\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (lat-eng)\n type: mteb/tatoeba-bitext-mining\n config: lat-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 45.2\n - type: f1\n value: 39.5761253006253\n - type: precision\n value: 37.991358436312\n - type: recall\n value: 45.2\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (bel-eng)\n type: mteb/tatoeba-bitext-mining\n config: bel-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 89.5\n - type: f1\n value: 86.70333333333333\n - type: precision\n value: 85.53166666666667\n - type: recall\n value: 89.5\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (pms-eng)\n type: mteb/tatoeba-bitext-mining\n config: pms-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 50.095238095238095\n - type: f1\n value: 44.60650460650461\n - type: precision\n value: 42.774116796477045\n - type: recall\n value: 50.095238095238095\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (gle-eng)\n type: mteb/tatoeba-bitext-mining\n config: gle-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 63.4\n - type: f1\n value: 58.35967261904762\n - type: precision\n value: 56.54857142857143\n - type: recall\n value: 63.4\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (pes-eng)\n type: mteb/tatoeba-bitext-mining\n config: pes-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 89.2\n - type: f1\n value: 87.075\n - type: precision\n value: 86.12095238095239\n - type: recall\n value: 89.2\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba 
(nob-eng)\n type: mteb/tatoeba-bitext-mining\n config: nob-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 96.8\n - type: f1\n value: 95.90333333333334\n - type: precision\n value: 95.50833333333333\n - type: recall\n value: 96.8\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (bul-eng)\n type: mteb/tatoeba-bitext-mining\n config: bul-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 90.9\n - type: f1\n value: 88.6288888888889\n - type: precision\n value: 87.61607142857142\n - type: recall\n value: 90.9\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (cbk-eng)\n type: mteb/tatoeba-bitext-mining\n config: cbk-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 65.2\n - type: f1\n value: 60.54377630539395\n - type: precision\n value: 58.89434482711381\n - type: recall\n value: 65.2\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (hun-eng)\n type: mteb/tatoeba-bitext-mining\n config: hun-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 87\n - type: f1\n value: 84.32412698412699\n - type: precision\n value: 83.25527777777778\n - type: recall\n value: 87\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (uig-eng)\n type: mteb/tatoeba-bitext-mining\n config: uig-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 68.7\n - type: f1\n value: 63.07883541295306\n - type: precision\n value: 61.06117424242426\n - type: recall\n value: 68.7\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (rus-eng)\n type: mteb/tatoeba-bitext-mining\n config: rus-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 93.7\n - type: f1\n value: 91.78333333333335\n - type: 
precision\n value: 90.86666666666667\n - type: recall\n value: 93.7\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (spa-eng)\n type: mteb/tatoeba-bitext-mining\n config: spa-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 97.7\n - type: f1\n value: 96.96666666666667\n - type: precision\n value: 96.61666666666667\n - type: recall\n value: 97.7\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (hye-eng)\n type: mteb/tatoeba-bitext-mining\n config: hye-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 88.27493261455525\n - type: f1\n value: 85.90745732255168\n - type: precision\n value: 84.91389637616052\n - type: recall\n value: 88.27493261455525\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (tel-eng)\n type: mteb/tatoeba-bitext-mining\n config: tel-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 90.5982905982906\n - type: f1\n value: 88.4900284900285\n - type: precision\n value: 87.57122507122507\n - type: recall\n value: 90.5982905982906\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (afr-eng)\n type: mteb/tatoeba-bitext-mining\n config: afr-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 89.5\n - type: f1\n value: 86.90769841269842\n - type: precision\n value: 85.80178571428571\n - type: recall\n value: 89.5\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (mon-eng)\n type: mteb/tatoeba-bitext-mining\n config: mon-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 82.5\n - type: f1\n value: 78.36796536796538\n - type: precision\n value: 76.82196969696969\n - type: recall\n value: 82.5\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (arz-eng)\n type: 
mteb/tatoeba-bitext-mining\n config: arz-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 71.48846960167715\n - type: f1\n value: 66.78771089148448\n - type: precision\n value: 64.98302885095339\n - type: recall\n value: 71.48846960167715\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (hrv-eng)\n type: mteb/tatoeba-bitext-mining\n config: hrv-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 94.1\n - type: f1\n value: 92.50333333333333\n - type: precision\n value: 91.77499999999999\n - type: recall\n value: 94.1\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (nov-eng)\n type: mteb/tatoeba-bitext-mining\n config: nov-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 71.20622568093385\n - type: f1\n value: 66.83278891450098\n - type: precision\n value: 65.35065777283677\n - type: recall\n value: 71.20622568093385\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (gsw-eng)\n type: mteb/tatoeba-bitext-mining\n config: gsw-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 48.717948717948715\n - type: f1\n value: 43.53146853146853\n - type: precision\n value: 42.04721204721204\n - type: recall\n value: 48.717948717948715\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (nds-eng)\n type: mteb/tatoeba-bitext-mining\n config: nds-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 58.5\n - type: f1\n value: 53.8564991863928\n - type: precision\n value: 52.40329436122275\n - type: recall\n value: 58.5\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ukr-eng)\n type: mteb/tatoeba-bitext-mining\n config: ukr-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n 
value: 90.8\n - type: f1\n value: 88.29\n - type: precision\n value: 87.09166666666667\n - type: recall\n value: 90.8\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (uzb-eng)\n type: mteb/tatoeba-bitext-mining\n config: uzb-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 67.28971962616822\n - type: f1\n value: 62.63425307817832\n - type: precision\n value: 60.98065939771546\n - type: recall\n value: 67.28971962616822\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (lit-eng)\n type: mteb/tatoeba-bitext-mining\n config: lit-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 78.7\n - type: f1\n value: 75.5264472455649\n - type: precision\n value: 74.38205086580086\n - type: recall\n value: 78.7\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ina-eng)\n type: mteb/tatoeba-bitext-mining\n config: ina-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 88.7\n - type: f1\n value: 86.10809523809525\n - type: precision\n value: 85.07602564102565\n - type: recall\n value: 88.7\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (lfn-eng)\n type: mteb/tatoeba-bitext-mining\n config: lfn-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 56.99999999999999\n - type: f1\n value: 52.85487521402737\n - type: precision\n value: 51.53985162713104\n - type: recall\n value: 56.99999999999999\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (zsm-eng)\n type: mteb/tatoeba-bitext-mining\n config: zsm-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 94\n - type: f1\n value: 92.45333333333333\n - type: precision\n value: 91.79166666666667\n - type: recall\n value: 94\n - task:\n type: BitextMining\n dataset:\n name: MTEB 
Tatoeba (ita-eng)\n type: mteb/tatoeba-bitext-mining\n config: ita-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 92.30000000000001\n - type: f1\n value: 90.61333333333333\n - type: precision\n value: 89.83333333333331\n - type: recall\n value: 92.30000000000001\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (cmn-eng)\n type: mteb/tatoeba-bitext-mining\n config: cmn-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 94.69999999999999\n - type: f1\n value: 93.34555555555555\n - type: precision\n value: 92.75416666666668\n - type: recall\n value: 94.69999999999999\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (lvs-eng)\n type: mteb/tatoeba-bitext-mining\n config: lvs-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 80.2\n - type: f1\n value: 76.6563035113035\n - type: precision\n value: 75.3014652014652\n - type: recall\n value: 80.2\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (glg-eng)\n type: mteb/tatoeba-bitext-mining\n config: glg-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 84.7\n - type: f1\n value: 82.78689263765207\n - type: precision\n value: 82.06705086580087\n - type: recall\n value: 84.7\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ceb-eng)\n type: mteb/tatoeba-bitext-mining\n config: ceb-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 50.33333333333333\n - type: f1\n value: 45.461523661523664\n - type: precision\n value: 43.93545574795575\n - type: recall\n value: 50.33333333333333\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (bre-eng)\n type: mteb/tatoeba-bitext-mining\n config: bre-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n 
metrics:\n - type: accuracy\n value: 6.6000000000000005\n - type: f1\n value: 5.442121400446441\n - type: precision\n value: 5.146630385487529\n - type: recall\n value: 6.6000000000000005\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ben-eng)\n type: mteb/tatoeba-bitext-mining\n config: ben-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 85\n - type: f1\n value: 81.04666666666667\n - type: precision\n value: 79.25\n - type: recall\n value: 85\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (swg-eng)\n type: mteb/tatoeba-bitext-mining\n config: swg-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 47.32142857142857\n - type: f1\n value: 42.333333333333336\n - type: precision\n value: 40.69196428571429\n - type: recall\n value: 47.32142857142857\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (arq-eng)\n type: mteb/tatoeba-bitext-mining\n config: arq-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 30.735455543358945\n - type: f1\n value: 26.73616790022338\n - type: precision\n value: 25.397823220451283\n - type: recall\n value: 30.735455543358945\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (kab-eng)\n type: mteb/tatoeba-bitext-mining\n config: kab-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 25.1\n - type: f1\n value: 21.975989896371022\n - type: precision\n value: 21.059885632257203\n - type: recall\n value: 25.1\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (fra-eng)\n type: mteb/tatoeba-bitext-mining\n config: fra-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 94.3\n - type: f1\n value: 92.75666666666666\n - type: precision\n value: 92.06166666666665\n - type: recall\n value: 
94.3\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (por-eng)\n type: mteb/tatoeba-bitext-mining\n config: por-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 94.1\n - type: f1\n value: 92.74\n - type: precision\n value: 92.09166666666667\n - type: recall\n value: 94.1\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (tat-eng)\n type: mteb/tatoeba-bitext-mining\n config: tat-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 71.3\n - type: f1\n value: 66.922442002442\n - type: precision\n value: 65.38249567099568\n - type: recall\n value: 71.3\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (oci-eng)\n type: mteb/tatoeba-bitext-mining\n config: oci-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 40.300000000000004\n - type: f1\n value: 35.78682789299971\n - type: precision\n value: 34.66425128716588\n - type: recall\n value: 40.300000000000004\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (pol-eng)\n type: mteb/tatoeba-bitext-mining\n config: pol-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 96\n - type: f1\n value: 94.82333333333334\n - type: precision\n value: 94.27833333333334\n - type: recall\n value: 96\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (war-eng)\n type: mteb/tatoeba-bitext-mining\n config: war-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 51.1\n - type: f1\n value: 47.179074753133584\n - type: precision\n value: 46.06461044702424\n - type: recall\n value: 51.1\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (aze-eng)\n type: mteb/tatoeba-bitext-mining\n config: aze-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - 
type: accuracy\n value: 87.7\n - type: f1\n value: 84.71\n - type: precision\n value: 83.46166666666667\n - type: recall\n value: 87.7\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (vie-eng)\n type: mteb/tatoeba-bitext-mining\n config: vie-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 95.8\n - type: f1\n value: 94.68333333333334\n - type: precision\n value: 94.13333333333334\n - type: recall\n value: 95.8\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (nno-eng)\n type: mteb/tatoeba-bitext-mining\n config: nno-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 85.39999999999999\n - type: f1\n value: 82.5577380952381\n - type: precision\n value: 81.36833333333334\n - type: recall\n value: 85.39999999999999\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (cha-eng)\n type: mteb/tatoeba-bitext-mining\n config: cha-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 21.16788321167883\n - type: f1\n value: 16.948865627297987\n - type: precision\n value: 15.971932568647897\n - type: recall\n value: 21.16788321167883\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (mhr-eng)\n type: mteb/tatoeba-bitext-mining\n config: mhr-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 6.9\n - type: f1\n value: 5.515526831658907\n - type: precision\n value: 5.141966366966367\n - type: recall\n value: 6.9\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (dan-eng)\n type: mteb/tatoeba-bitext-mining\n config: dan-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 93.2\n - type: f1\n value: 91.39666666666668\n - type: precision\n value: 90.58666666666667\n - type: recall\n value: 93.2\n - task:\n type: BitextMining\n 
dataset:\n name: MTEB Tatoeba (ell-eng)\n type: mteb/tatoeba-bitext-mining\n config: ell-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 92.2\n - type: f1\n value: 89.95666666666666\n - type: precision\n value: 88.92833333333333\n - type: recall\n value: 92.2\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (amh-eng)\n type: mteb/tatoeba-bitext-mining\n config: amh-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 79.76190476190477\n - type: f1\n value: 74.93386243386244\n - type: precision\n value: 73.11011904761904\n - type: recall\n value: 79.76190476190477\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (pam-eng)\n type: mteb/tatoeba-bitext-mining\n config: pam-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 8.799999999999999\n - type: f1\n value: 6.921439712248537\n - type: precision\n value: 6.489885109680683\n - type: recall\n value: 8.799999999999999\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (hsb-eng)\n type: mteb/tatoeba-bitext-mining\n config: hsb-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 45.75569358178054\n - type: f1\n value: 40.34699501312631\n - type: precision\n value: 38.57886764719063\n - type: recall\n value: 45.75569358178054\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (srp-eng)\n type: mteb/tatoeba-bitext-mining\n config: srp-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 91.4\n - type: f1\n value: 89.08333333333333\n - type: precision\n value: 88.01666666666668\n - type: recall\n value: 91.4\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (epo-eng)\n type: mteb/tatoeba-bitext-mining\n config: epo-eng\n split: test\n revision: 
9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 93.60000000000001\n - type: f1\n value: 92.06690476190477\n - type: precision\n value: 91.45095238095239\n - type: recall\n value: 93.60000000000001\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (kzj-eng)\n type: mteb/tatoeba-bitext-mining\n config: kzj-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 7.5\n - type: f1\n value: 6.200363129378736\n - type: precision\n value: 5.89115314822466\n - type: recall\n value: 7.5\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (awa-eng)\n type: mteb/tatoeba-bitext-mining\n config: awa-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 73.59307359307358\n - type: f1\n value: 68.38933553219267\n - type: precision\n value: 66.62698412698413\n - type: recall\n value: 73.59307359307358\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (fao-eng)\n type: mteb/tatoeba-bitext-mining\n config: fao-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 69.8473282442748\n - type: f1\n value: 64.72373682297346\n - type: precision\n value: 62.82834214131924\n - type: recall\n value: 69.8473282442748\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (mal-eng)\n type: mteb/tatoeba-bitext-mining\n config: mal-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 97.5254730713246\n - type: f1\n value: 96.72489082969432\n - type: precision\n value: 96.33672974284326\n - type: recall\n value: 97.5254730713246\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ile-eng)\n type: mteb/tatoeba-bitext-mining\n config: ile-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 75.6\n - type: f1\n value: 72.42746031746033\n 
- type: precision\n value: 71.14036630036631\n - type: recall\n value: 75.6\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (bos-eng)\n type: mteb/tatoeba-bitext-mining\n config: bos-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 91.24293785310734\n - type: f1\n value: 88.86064030131826\n - type: precision\n value: 87.73540489642184\n - type: recall\n value: 91.24293785310734\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (cor-eng)\n type: mteb/tatoeba-bitext-mining\n config: cor-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 6.2\n - type: f1\n value: 4.383083659794954\n - type: precision\n value: 4.027861324289673\n - type: recall\n value: 6.2\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (cat-eng)\n type: mteb/tatoeba-bitext-mining\n config: cat-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 86.8\n - type: f1\n value: 84.09428571428572\n - type: precision\n value: 83.00333333333333\n - type: recall\n value: 86.8\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (eus-eng)\n type: mteb/tatoeba-bitext-mining\n config: eus-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 60.699999999999996\n - type: f1\n value: 56.1584972394755\n - type: precision\n value: 54.713456330903135\n - type: recall\n value: 60.699999999999996\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (yue-eng)\n type: mteb/tatoeba-bitext-mining\n config: yue-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 84.2\n - type: f1\n value: 80.66190476190475\n - type: precision\n value: 79.19690476190476\n - type: recall\n value: 84.2\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (swe-eng)\n type: 
mteb/tatoeba-bitext-mining\n config: swe-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 93.2\n - type: f1\n value: 91.33\n - type: precision\n value: 90.45\n - type: recall\n value: 93.2\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (dtp-eng)\n type: mteb/tatoeba-bitext-mining\n config: dtp-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 6.3\n - type: f1\n value: 5.126828976748276\n - type: precision\n value: 4.853614328966668\n - type: recall\n value: 6.3\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (kat-eng)\n type: mteb/tatoeba-bitext-mining\n config: kat-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 81.76943699731903\n - type: f1\n value: 77.82873739308057\n - type: precision\n value: 76.27622452019234\n - type: recall\n value: 81.76943699731903\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (jpn-eng)\n type: mteb/tatoeba-bitext-mining\n config: jpn-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 92.30000000000001\n - type: f1\n value: 90.29666666666665\n - type: precision\n value: 89.40333333333334\n - type: recall\n value: 92.30000000000001\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (csb-eng)\n type: mteb/tatoeba-bitext-mining\n config: csb-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 29.249011857707508\n - type: f1\n value: 24.561866096392947\n - type: precision\n value: 23.356583740215456\n - type: recall\n value: 29.249011857707508\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (xho-eng)\n type: mteb/tatoeba-bitext-mining\n config: xho-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 
77.46478873239437\n - type: f1\n value: 73.23943661971832\n - type: precision\n value: 71.66666666666667\n - type: recall\n value: 77.46478873239437\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (orv-eng)\n type: mteb/tatoeba-bitext-mining\n config: orv-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 20.35928143712575\n - type: f1\n value: 15.997867865075824\n - type: precision\n value: 14.882104658301346\n - type: recall\n value: 20.35928143712575\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ind-eng)\n type: mteb/tatoeba-bitext-mining\n config: ind-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 92.2\n - type: f1\n value: 90.25999999999999\n - type: precision\n value: 89.45333333333335\n - type: recall\n value: 92.2\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (tuk-eng)\n type: mteb/tatoeba-bitext-mining\n config: tuk-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 23.15270935960591\n - type: f1\n value: 19.65673625772148\n - type: precision\n value: 18.793705293464992\n - type: recall\n value: 23.15270935960591\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (max-eng)\n type: mteb/tatoeba-bitext-mining\n config: max-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 59.154929577464785\n - type: f1\n value: 52.3868463305083\n - type: precision\n value: 50.14938113529662\n - type: recall\n value: 59.154929577464785\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (swh-eng)\n type: mteb/tatoeba-bitext-mining\n config: swh-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 70.51282051282051\n - type: f1\n value: 66.8089133089133\n - type: precision\n value: 65.37645687645687\n - type: 
recall\n value: 70.51282051282051\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (hin-eng)\n type: mteb/tatoeba-bitext-mining\n config: hin-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 94.6\n - type: f1\n value: 93\n - type: precision\n value: 92.23333333333333\n - type: recall\n value: 94.6\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (dsb-eng)\n type: mteb/tatoeba-bitext-mining\n config: dsb-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 38.62212943632568\n - type: f1\n value: 34.3278276962583\n - type: precision\n value: 33.07646935732408\n - type: recall\n value: 38.62212943632568\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ber-eng)\n type: mteb/tatoeba-bitext-mining\n config: ber-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 28.1\n - type: f1\n value: 23.579609223054604\n - type: precision\n value: 22.39622774921555\n - type: recall\n value: 28.1\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (tam-eng)\n type: mteb/tatoeba-bitext-mining\n config: tam-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 88.27361563517914\n - type: f1\n value: 85.12486427795874\n - type: precision\n value: 83.71335504885994\n - type: recall\n value: 88.27361563517914\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (slk-eng)\n type: mteb/tatoeba-bitext-mining\n config: slk-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 88.6\n - type: f1\n value: 86.39928571428571\n - type: precision\n value: 85.4947557997558\n - type: recall\n value: 88.6\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (tgl-eng)\n type: mteb/tatoeba-bitext-mining\n config: tgl-eng\n split: test\n revision: 
9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 86.5\n - type: f1\n value: 83.77952380952381\n - type: precision\n value: 82.67602564102565\n - type: recall\n value: 86.5\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ast-eng)\n type: mteb/tatoeba-bitext-mining\n config: ast-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 79.52755905511812\n - type: f1\n value: 75.3055868016498\n - type: precision\n value: 73.81889763779527\n - type: recall\n value: 79.52755905511812\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (mkd-eng)\n type: mteb/tatoeba-bitext-mining\n config: mkd-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 77.9\n - type: f1\n value: 73.76261904761905\n - type: precision\n value: 72.11670995670995\n - type: recall\n value: 77.9\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (khm-eng)\n type: mteb/tatoeba-bitext-mining\n config: khm-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 53.8781163434903\n - type: f1\n value: 47.25804051288816\n - type: precision\n value: 45.0603482390186\n - type: recall\n value: 53.8781163434903\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ces-eng)\n type: mteb/tatoeba-bitext-mining\n config: ces-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 91.10000000000001\n - type: f1\n value: 88.88\n - type: precision\n value: 87.96333333333334\n - type: recall\n value: 91.10000000000001\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (tzl-eng)\n type: mteb/tatoeba-bitext-mining\n config: tzl-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 38.46153846153847\n - type: f1\n value: 34.43978243978244\n - type: precision\n 
value: 33.429487179487175\n - type: recall\n value: 38.46153846153847\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (urd-eng)\n type: mteb/tatoeba-bitext-mining\n config: urd-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 88.9\n - type: f1\n value: 86.19888888888887\n - type: precision\n value: 85.07440476190476\n - type: recall\n value: 88.9\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (ara-eng)\n type: mteb/tatoeba-bitext-mining\n config: ara-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 85.9\n - type: f1\n value: 82.58857142857143\n - type: precision\n value: 81.15666666666667\n - type: recall\n value: 85.9\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (kor-eng)\n type: mteb/tatoeba-bitext-mining\n config: kor-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 86.8\n - type: f1\n value: 83.36999999999999\n - type: precision\n value: 81.86833333333333\n - type: recall\n value: 86.8\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (yid-eng)\n type: mteb/tatoeba-bitext-mining\n config: yid-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 68.51415094339622\n - type: f1\n value: 63.195000099481234\n - type: precision\n value: 61.394033442972116\n - type: recall\n value: 68.51415094339622\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (fin-eng)\n type: mteb/tatoeba-bitext-mining\n config: fin-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 88.5\n - type: f1\n value: 86.14603174603175\n - type: precision\n value: 85.1162037037037\n - type: recall\n value: 88.5\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (tha-eng)\n type: mteb/tatoeba-bitext-mining\n config: tha-eng\n 
split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 95.62043795620438\n - type: f1\n value: 94.40389294403892\n - type: precision\n value: 93.7956204379562\n - type: recall\n value: 95.62043795620438\n - task:\n type: BitextMining\n dataset:\n name: MTEB Tatoeba (wuu-eng)\n type: mteb/tatoeba-bitext-mining\n config: wuu-eng\n split: test\n revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553\n metrics:\n - type: accuracy\n value: 81.8\n - type: f1\n value: 78.6532178932179\n - type: precision\n value: 77.46348795840176\n - type: recall\n value: 81.8\n - task:\n type: Retrieval\n dataset:\n name: MTEB Touche2020\n type: webis-touche2020\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 2.603\n - type: map_at_10\n value: 8.5\n - type: map_at_100\n value: 12.985\n - type: map_at_1000\n value: 14.466999999999999\n - type: map_at_3\n value: 4.859999999999999\n - type: map_at_5\n value: 5.817\n - type: mrr_at_1\n value: 28.571\n - type: mrr_at_10\n value: 42.331\n - type: mrr_at_100\n value: 43.592999999999996\n - type: mrr_at_1000\n value: 43.592999999999996\n - type: mrr_at_3\n value: 38.435\n - type: mrr_at_5\n value: 39.966\n - type: ndcg_at_1\n value: 26.531\n - type: ndcg_at_10\n value: 21.353\n - type: ndcg_at_100\n value: 31.087999999999997\n - type: ndcg_at_1000\n value: 43.163000000000004\n - type: ndcg_at_3\n value: 22.999\n - type: ndcg_at_5\n value: 21.451\n - type: precision_at_1\n value: 28.571\n - type: precision_at_10\n value: 19.387999999999998\n - type: precision_at_100\n value: 6.265\n - type: precision_at_1000\n value: 1.4160000000000001\n - type: precision_at_3\n value: 24.490000000000002\n - type: precision_at_5\n value: 21.224\n - type: recall_at_1\n value: 2.603\n - type: recall_at_10\n value: 14.474\n - type: recall_at_100\n value: 40.287\n - type: recall_at_1000\n value: 76.606\n - type: recall_at_3\n value: 5.978\n - type: recall_at_5\n value: 7.819\n - 
task:\n type: Classification\n dataset:\n name: MTEB ToxicConversationsClassification\n type: mteb/toxic_conversations_50k\n config: default\n split: test\n revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c\n metrics:\n - type: accuracy\n value: 69.7848\n - type: ap\n value: 13.661023167088224\n - type: f1\n value: 53.61686134460943\n - task:\n type: Classification\n dataset:\n name: MTEB TweetSentimentExtractionClassification\n type: mteb/tweet_sentiment_extraction\n config: default\n split: test\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\n metrics:\n - type: accuracy\n value: 61.28183361629882\n - type: f1\n value: 61.55481034919965\n - task:\n type: Clustering\n dataset:\n name: MTEB TwentyNewsgroupsClustering\n type: mteb/twentynewsgroups-clustering\n config: default\n split: test\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\n metrics:\n - type: v_measure\n value: 35.972128420092396\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterSemEval2015\n type: mteb/twittersemeval2015-pairclassification\n config: default\n split: test\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\n metrics:\n - type: cos_sim_accuracy\n value: 85.59933241938367\n - type: cos_sim_ap\n value: 72.20760361208136\n - type: cos_sim_f1\n value: 66.4447731755424\n - type: cos_sim_precision\n value: 62.35539102267469\n - type: cos_sim_recall\n value: 71.10817941952506\n - type: dot_accuracy\n value: 78.98313166835548\n - type: dot_ap\n value: 44.492521645493795\n - type: dot_f1\n value: 45.814889336016094\n - type: dot_precision\n value: 37.02439024390244\n - type: dot_recall\n value: 60.07915567282321\n - type: euclidean_accuracy\n value: 85.3907134767837\n - type: euclidean_ap\n value: 71.53847289080343\n - type: euclidean_f1\n value: 65.95952206778834\n - type: euclidean_precision\n value: 61.31006346328196\n - type: euclidean_recall\n value: 71.37203166226914\n - type: manhattan_accuracy\n value: 85.40859510043511\n - type: manhattan_ap\n value: 
71.49664104395515\n - type: manhattan_f1\n value: 65.98569969356485\n - type: manhattan_precision\n value: 63.928748144482924\n - type: manhattan_recall\n value: 68.17941952506597\n - type: max_accuracy\n value: 85.59933241938367\n - type: max_ap\n value: 72.20760361208136\n - type: max_f1\n value: 66.4447731755424\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterURLCorpus\n type: mteb/twitterurlcorpus-pairclassification\n config: default\n split: test\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\n metrics:\n - type: cos_sim_accuracy\n value: 88.83261536073273\n - type: cos_sim_ap\n value: 85.48178133644264\n - type: cos_sim_f1\n value: 77.87816307403935\n - type: cos_sim_precision\n value: 75.88953021114926\n - type: cos_sim_recall\n value: 79.97382198952879\n - type: dot_accuracy\n value: 79.76287499514883\n - type: dot_ap\n value: 59.17438838475084\n - type: dot_f1\n value: 56.34566667855996\n - type: dot_precision\n value: 52.50349092359864\n - type: dot_recall\n value: 60.794579611949494\n - type: euclidean_accuracy\n value: 88.76857996662397\n - type: euclidean_ap\n value: 85.22764834359887\n - type: euclidean_f1\n value: 77.65379751543554\n - type: euclidean_precision\n value: 75.11152683839401\n - type: euclidean_recall\n value: 80.37419156144134\n - type: manhattan_accuracy\n value: 88.6987231730508\n - type: manhattan_ap\n value: 85.18907981724007\n - type: manhattan_f1\n value: 77.51967028849757\n - type: manhattan_precision\n value: 75.49992701795358\n - type: manhattan_recall\n value: 79.65044656606098\n - type: max_accuracy\n value: 88.83261536073273\n - type: max_ap\n value: 85.48178133644264\n - type: max_f1\n value: 77.87816307403935\n---\n\n## Multilingual-E5-base\n\n[Multilingual E5 Text Embeddings: A Technical Report](https://arxiv.org/pdf/2402.05672).\nLiang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, Furu Wei, arXiv 2024\n\nThis model has 12 layers and the embedding size is 768.\n\n## Usage\n\nBelow 
is an example to encode queries and passages from the MS-MARCO passage ranking dataset.\n\n```python\nimport torch.nn.functional as F\n\nfrom torch import Tensor\nfrom transformers import AutoTokenizer, AutoModel\n\n\ndef average_pool(last_hidden_states: Tensor,\n attention_mask: Tensor) -> Tensor:\n last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)\n return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]\n\n\n# Each input text should start with \"query: \" or \"passage: \", even for non-English texts.\n# For tasks other than retrieval, you can simply use the \"query: \" prefix.\ninput_texts = ['query: how much protein should a female eat',\n 'query: 南瓜的家常做法',\n \"passage: As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.\",\n \"passage: 1.清炒南瓜丝 原料:嫩南瓜半个 调料:葱、盐、白糖、鸡精 做法: 1、南瓜用刀薄薄的削去表面一层皮,用勺子刮去瓤 2、擦成细丝(没有擦菜板就用刀慢慢切成细丝) 3、锅烧热放油,入葱花煸出香味 4、入南瓜丝快速翻炒一分钟左右,放盐、一点白糖和鸡精调味出锅 2.香葱炒南瓜 原料:南瓜1只 调料:香葱、蒜末、橄榄油、盐 做法: 1、将南瓜去皮,切成片 2、油锅8成热后,将蒜末放入爆香 3、爆香后,将南瓜片放入,翻炒 4、在翻炒的同时,可以不时地往锅里加水,但不要太多 5、放入盐,炒匀 6、南瓜差不多软和绵了之后,就可以关火 7、撒入香葱,即可出锅\"]\n\ntokenizer = AutoTokenizer.from_pretrained('intfloat/multilingual-e5-base')\nmodel = AutoModel.from_pretrained('intfloat/multilingual-e5-base')\n\n# Tokenize the input texts\nbatch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt')\n\noutputs = model(**batch_dict)\nembeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])\n\n# normalize embeddings\nembeddings = F.normalize(embeddings, p=2, dim=1)\nscores = (embeddings[:2] @ embeddings[2:].T) * 100\nprint(scores.tolist())\n```\n\n## Supported Languages\n\nThis model is initialized from 
[xlm-roberta-base](https://huggingface.co/xlm-roberta-base)\nand continually trained on a mixture of multilingual datasets.\nIt supports 100 languages from xlm-roberta,\nbut low-resource languages may see performance degradation.\n\n## Training Details\n\n**Initialization**: [xlm-roberta-base](https://huggingface.co/xlm-roberta-base)\n\n**First stage**: contrastive pre-training with weak supervision\n\n| Dataset | Weak supervision | # of text pairs |\n|--------------------------------------------------------------------------------------------------------|---------------------------------------|-----------------|\n| Filtered [mC4](https://huggingface.co/datasets/mc4) | (title, page content) | 1B |\n| [CC News](https://huggingface.co/datasets/intfloat/multilingual_cc_news) | (title, news content) | 400M |\n| [NLLB](https://huggingface.co/datasets/allenai/nllb) | translation pairs | 2.4B |\n| [Wikipedia](https://huggingface.co/datasets/intfloat/wikipedia) | (hierarchical section title, passage) | 150M |\n| Filtered [Reddit](https://www.reddit.com/) | (comment, response) | 800M |\n| [S2ORC](https://github.com/allenai/s2orc) | (title, abstract) and citation pairs | 100M |\n| [Stackexchange](https://stackexchange.com/) | (question, answer) | 50M |\n| [xP3](https://huggingface.co/datasets/bigscience/xP3) | (input prompt, response) | 80M |\n| [Miscellaneous unsupervised SBERT data](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) | - | 10M |\n\n**Second stage**: supervised fine-tuning\n\n| Dataset | Language | # of text pairs |\n|----------------------------------------------------------------------------------------|--------------|-----------------|\n| [MS MARCO](https://microsoft.github.io/msmarco/) | English | 500k |\n| [NQ](https://github.com/facebookresearch/DPR) | English | 70k |\n| [Trivia QA](https://github.com/facebookresearch/DPR) | English | 60k |\n| [NLI from SimCSE](https://github.com/princeton-nlp/SimCSE) | English | <300k |\n| 
[ELI5](https://huggingface.co/datasets/eli5) | English | 500k |\n| [DuReader Retrieval](https://github.com/baidu/DuReader/tree/master/DuReader-Retrieval) | Chinese | 86k |\n| [KILT Fever](https://huggingface.co/datasets/kilt_tasks) | English | 70k |\n| [KILT HotpotQA](https://huggingface.co/datasets/kilt_tasks) | English | 70k |\n| [SQuAD](https://huggingface.co/datasets/squad) | English | 87k |\n| [Quora](https://huggingface.co/datasets/quora) | English | 150k |\n| [Mr. TyDi](https://huggingface.co/datasets/castorini/mr-tydi) | 11 languages | 50k |\n| [MIRACL](https://huggingface.co/datasets/miracl/miracl) | 16 languages | 40k |\n\nFor all labeled datasets, we only use its training set for fine-tuning.\n\nFor other training details, please refer to our paper at [https://arxiv.org/pdf/2402.05672](https://arxiv.org/pdf/2402.05672).\n\n## Benchmark Results on [Mr. TyDi](https://arxiv.org/abs/2108.08787)\n\n| Model | Avg MRR@10 | | ar | bn | en | fi | id | ja | ko | ru | sw | te | th |\n|-----------------------|------------|-------|------| --- | --- | --- | --- | --- | --- | --- |------| --- | --- |\n| BM25 | 33.3 | | 36.7 | 41.3 | 15.1 | 28.8 | 38.2 | 21.7 | 28.1 | 32.9 | 39.6 | 42.4 | 41.7 |\n| mDPR | 16.7 | | 26.0 | 25.8 | 16.2 | 11.3 | 14.6 | 18.1 | 21.9 | 18.5 | 7.3 | 10.6 | 13.5 |\n| BM25 + mDPR | 41.7 | | 49.1 | 53.5 | 28.4 | 36.5 | 45.5 | 35.5 | 36.2 | 42.7 | 40.5 | 42.0 | 49.2 |\n| | |\n| multilingual-e5-small | 64.4 | | 71.5 | 66.3 | 54.5 | 57.7 | 63.2 | 55.4 | 54.3 | 60.8 | 65.4 | 89.1 | 70.1 |\n| multilingual-e5-base | 65.9 | | 72.3 | 65.0 | 58.5 | 60.8 | 64.9 | 56.6 | 55.8 | 62.7 | 69.0 | 86.6 | 72.7 |\n| multilingual-e5-large | **70.5** | | 77.5 | 73.2 | 60.8 | 66.8 | 68.5 | 62.5 | 61.6 | 65.8 | 72.7 | 90.2 | 76.2 |\n\n## MTEB Benchmark Evaluation\n\nCheck out [unilm/e5](https://github.com/microsoft/unilm/tree/master/e5) to reproduce evaluation results \non the [BEIR](https://arxiv.org/abs/2104.08663) and [MTEB 
benchmark](https://arxiv.org/abs/2210.07316).\n\n## Support for Sentence Transformers\n\nBelow is an example for usage with sentence_transformers.\n```python\nfrom sentence_transformers import SentenceTransformer\nmodel = SentenceTransformer('intfloat/multilingual-e5-base')\ninput_texts = [\n 'query: how much protein should a female eat',\n 'query: 南瓜的家常做法',\n \"passage: As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 i s 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or traini ng for a marathon. Check out the chart below to see how much protein you should be eating each day.\",\n \"passage: 1.清炒南瓜丝 原料:嫩南瓜半个 调料:葱、盐、白糖、鸡精 做法: 1、南瓜用刀薄薄的削去表面一层皮 ,用勺子刮去瓤 2、擦成细丝(没有擦菜板就用刀慢慢切成细丝) 3、锅烧热放油,入葱花煸出香味 4、入南瓜丝快速翻炒一分钟左右, 放盐、一点白糖和鸡精调味出锅 2.香葱炒南瓜 原料:南瓜1只 调料:香葱、蒜末、橄榄油、盐 做法: 1、将南瓜去皮,切成片 2、油 锅8成热后,将蒜末放入爆香 3、爆香后,将南瓜片放入,翻炒 4、在翻炒的同时,可以不时地往锅里加水,但不要太多 5、放入盐,炒匀 6、南瓜差不多软和绵了之后,就可以关火 7、撒入香葱,即可出锅\"\n]\nembeddings = model.encode(input_texts, normalize_embeddings=True)\n```\n\nPackage requirements\n\n`pip install sentence_transformers~=2.2.2`\n\nContributors: [michaelfeil](https://huggingface.co/michaelfeil)\n\n## FAQ\n\n**1. Do I need to add the prefix \"query: \" and \"passage: \" to input texts?**\n\nYes, this is how the model is trained, otherwise you will see a performance degradation.\n\nHere are some rules of thumb:\n- Use \"query: \" and \"passage: \" correspondingly for asymmetric tasks such as passage retrieval in open QA, ad-hoc information retrieval.\n\n- Use \"query: \" prefix for symmetric tasks such as semantic similarity, bitext mining, paraphrase retrieval.\n\n- Use \"query: \" prefix if you want to use embeddings as features, such as linear probing classification, clustering.\n\n**2. 
Why are my reproduced results slightly different from reported in the model card?**\n\nDifferent versions of `transformers` and `pytorch` could cause negligible but non-zero performance differences.\n\n**3. Why does the cosine similarity scores distribute around 0.7 to 1.0?**\n\nThis is a known and expected behavior as we use a low temperature 0.01 for InfoNCE contrastive loss. \n\nFor text embedding tasks like text retrieval or semantic similarity, \nwhat matters is the relative order of the scores instead of the absolute values, \nso this should not be an issue.\n\n## Citation\n\nIf you find our paper or models helpful, please consider cite as follows:\n\n```\n@article{wang2024multilingual,\n title={Multilingual E5 Text Embeddings: A Technical Report},\n author={Wang, Liang and Yang, Nan and Huang, Xiaolong and Yang, Linjun and Majumder, Rangan and Wei, Furu},\n journal={arXiv preprint arXiv:2402.05672},\n year={2024}\n}\n```\n\n## Limitations\n\nLong texts will be truncated to at most 512 tokens.\n"},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n \"SCIFACT\"\n]"}}},{"rowIdx":2494,"cells":{"id":{"kind":"string","value":"RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf"},"author":{"kind":"string","value":"RichardErkhov"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["gguf","region:us"],"string":"[\n \"gguf\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-06T19:43:54Z","string":"2024-08-06T19:43:54Z"},"last_modified":{"kind":"string","value":"2024-08-06T19:44:59+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\nQuantization made by Richard Erkhov.\n\n[Github](https://github.com/RichardErkhov)\n\n[Discord](https://discord.gg/pvy7H8DZMG)\n\n[Request more models](https://github.com/RichardErkhov/quant_request)\n\n\nPhi-3-mini-128k-instruct - GGUF\n- 
Model creator: https://huggingface.co/gretelai/\n- Original model: https://huggingface.co/gretelai/Phi-3-mini-128k-instruct/\n\n\n| Name | Quant method | Size |\n| ---- | ---- | ---- |\n| [Phi-3-mini-128k-instruct.Q2_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q2_K.gguf) | Q2_K | 0.65GB |\n| [Phi-3-mini-128k-instruct.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ3_XS.gguf) | IQ3_XS | 0.07GB |\n| [Phi-3-mini-128k-instruct.IQ3_S.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ3_S.gguf) | IQ3_S | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q3_K_S.gguf) | Q3_K_S | 0.0GB |\n| [Phi-3-mini-128k-instruct.IQ3_M.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ3_M.gguf) | IQ3_M | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q3_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q3_K.gguf) | Q3_K | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q3_K_M.gguf) | Q3_K_M | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q3_K_L.gguf) | Q3_K_L | 0.0GB |\n| [Phi-3-mini-128k-instruct.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ4_XS.gguf) | IQ4_XS | 0.0GB |\n| 
[Phi-3-mini-128k-instruct.Q4_0.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_0.gguf) | Q4_0 | 0.0GB |\n| [Phi-3-mini-128k-instruct.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ4_NL.gguf) | IQ4_NL | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_K_S.gguf) | Q4_K_S | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q4_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_K.gguf) | Q4_K | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_K_M.gguf) | Q4_K_M | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q4_1.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_1.gguf) | Q4_1 | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q5_0.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_0.gguf) | Q5_0 | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_K_S.gguf) | Q5_K_S | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q5_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_K.gguf) | Q5_K | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_K_M.gguf) | Q5_K_M | 0.0GB |\n| 
[Phi-3-mini-128k-instruct.Q5_1.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_1.gguf) | Q5_1 | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q6_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q6_K.gguf) | Q6_K | 0.0GB |\n| [Phi-3-mini-128k-instruct.Q8_0.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q8_0.gguf) | Q8_0 | 0.0GB |\n\n\n\n\nOriginal model description:\n---\nlicense: mit\nlicense_link: https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/LICENSE\n\nlanguage:\n- en\npipeline_tag: text-generation\ntags:\n- nlp\n- code\nwidget:\n - messages:\n - role: user\n content: Can you provide ways to eat combinations of bananas and dragonfruits?\n---\n\nNOTE: this is mirrored from https://huggingface.co/microsoft/Phi-3-mini-128k-instruct\n\n## Model Summary\n\nThe Phi-3-Mini-128K-Instruct is a 3.8 billion-parameter, lightweight, state-of-the-art open model trained using the Phi-3 datasets.\nThis dataset includes both synthetic data and filtered publicly available website data, with an emphasis on high-quality and reasoning-dense properties.\nThe model belongs to the Phi-3 family with the Mini version in two variants [4K](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) which is the context length (in tokens) that it can support.\n\nAfter initial training, the model underwent a post-training process that involved supervised fine-tuning and direct preference optimization to enhance its ability to follow instructions and adhere to safety measures.\nWhen evaluated against benchmarks that test common sense, language understanding, mathematics, coding, long-term context, and logical reasoning, the Phi-3 Mini-128K-Instruct demonstrated robust and state-of-the-art 
performance among models with fewer than 13 billion parameters.\nResources and Technical Documentation:\n\n🏡 [Phi-3 Portal](https://azure.microsoft.com/en-us/products/phi-3)
\n📰 [Phi-3 Microsoft Blog](https://aka.ms/Phi-3Build2024)
\n📖 [Phi-3 Technical Report](https://aka.ms/phi3-tech-report)
\n🛠️ [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai)
\n👩‍🍳 [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook)
\n🖥️ [Try It](https://aka.ms/try-phi3)\n\n| | Short Context | Long Context |\n| :- | :- | :- |\n| Mini | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx) ; [[GGUF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct-onnx)|\n| Small | 8K [[HF]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct-onnx-cuda)|\n| Medium | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct-onnx-cuda)|\n| Vision | | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct-onnx-cuda)|\n\n\n\n## Intended Uses\n\n**Primary use cases**\n\nThe model is intended for commercial and research use in English. The model provides uses for applications which require:\n\n1) Memory/compute constrained environments\n2) Latency bound scenarios\n3) Strong reasoning (especially code, math and logic)\n\nOur model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. \n\n**Use case considerations**\n\nOur models are not specifically designed or evaluated for all downstream purposes. 
Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case.\n\nNothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. \n\n## Release Notes \n\nThis is an update over the original instruction-tuned Phi-3-mini release based on valuable customer feedback. \nThe model used additional post-training data leading to substantial gains on long-context understanding, instruction following, and structure output. \nWe also improve multi-turn conversation quality, explicitly support <|system|> tag, and significantly improve reasoning capability. \nWe believe most use cases will benefit from this release, but we encourage users to test in their particular AI applications.\nWe appreciate the enthusiastic adoption of the Phi-3 model family, and continue to welcome all feedback from the community. 
\n\nThese tables below highlights improvements on instruction following, structure output, reasoning, and long-context understanding of the new release on our public and internal benchmark datasets.\n\n| Benchmarks | Original | June 2024 Update |\n| :- | :- | :- |\n| Instruction Extra Hard | 5.7 | 5.9 |\n| Instruction Hard | 5.0 | 5.2 |\n| JSON Structure Output | 1.9 | 60.1 |\n| XML Structure Output | 47.8 | 52.9 |\n| GPQA\t| 25.9\t| 29.7 |\n| MMLU\t| 68.1\t| 69.7 |\n| **Average**\t| **25.7**\t| **37.3** |\n\nRULER: a retrieval-based benchmark for long context understanding\n\n| Model | 4K | 8K | 16K | 32K | 64K | 128K | Average |\n| :-------------------| :------| :------| :------| :------| :------| :------| :---------|\n| Original | 86.7 | 78.1 | 75.6 | 70.3 | 58.9 | 43.3 | **68.8** |\n| June 2024 Update | 92.4 | 91.1 | 90.8 | 87.9 | 79.8 | 65.6 | **84.6** |\n\nRepoQA: a benchmark for long context code understanding\n\n| Model | Python | C++ | Rust | Java | TypeScript | Average |\n| :-------------------| :--------| :-----| :------| :------| :------------| :---------|\n| Original | 27 | 29 | 40 | 33 | 33 | **32.4** |\n| June 2024 Update | 85 | 63 | 72 | 93 | 72 | **77** |\n\n\nNotes: if users would like to check out the previous version, use the git commit id **bb5bf1e4001277a606e11debca0ef80323e5f824**. For the model conversion, e.g. GGUF and other formats, we invite the community to experiment with various approaches and share your valuable feedback. Let's innovate together!\n\n## How to Use\n\nPhi-3 Mini-128K-Instruct has been integrated in the development version (4.41.3) of `transformers`. 
Until the official version is released through `pip`, ensure that you are doing one of the following:\n\n* When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function.\n\n* Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source.\n\nThe current `transformers` version can be verified with: `pip list | grep transformers`.\n\nExamples of required packages:\n```\nflash_attn==2.5.8\ntorch==2.3.1\naccelerate==0.31.0\ntransformers==4.41.2\n```\n\nPhi-3 Mini-128K-Instruct is also available in [Azure AI Studio](https://aka.ms/try-phi3)\n\n### Tokenizer\n\nPhi-3 Mini-128K-Instruct supports a vocabulary size of up to `32064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size.\n\n\n### Chat Format\n\nGiven the nature of the training data, the Phi-3 Mini-128K-Instruct model is best suited for prompts using the chat format as follows. \nYou can provide the prompt as a question with a generic template as follow:\n```markdown\n<|system|>\nYou are a helpful assistant.<|end|>\n<|user|>\nQuestion?<|end|>\n<|assistant|>\n```\n\nFor example:\n```markdown\n<|system|>\nYou are a helpful assistant.<|end|>\n<|user|>\nHow to explain Internet for a medieval knight?<|end|>\n<|assistant|> \n```\nwhere the model generates the text after `<|assistant|>` . 
In case of few-shots prompt, the prompt can be formatted as the following:\n\n```markdown\n<|system|>\nYou are a helpful travel assistant.<|end|>\n<|user|>\nI am going to Paris, what should I see?<|end|>\n<|assistant|>\nParis, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\\n\\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\\n\\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.\"<|end|>\n<|user|>\nWhat is so great about #1?<|end|>\n<|assistant|>\n```\n\n### Sample inference code\n\nThis code snippets show how to get quickly started with running the model on a GPU:\n\n```python\nimport torch \nfrom transformers import AutoModelForCausalLM, AutoTokenizer, pipeline \n\ntorch.random.manual_seed(0) \nmodel = AutoModelForCausalLM.from_pretrained( \n \"microsoft/Phi-3-mini-128k-instruct\", \n device_map=\"cuda\", \n torch_dtype=\"auto\", \n trust_remote_code=True, \n) \n\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/Phi-3-mini-128k-instruct\") \n\nmessages = [ \n {\"role\": \"system\", \"content\": \"You are a helpful AI assistant.\"}, \n {\"role\": \"user\", \"content\": \"Can you provide ways to eat combinations of bananas and dragonfruits?\"}, \n {\"role\": \"assistant\", \"content\": \"Sure! Here are some ways to eat bananas and dragonfruits together: 1. 
Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey.\"}, \n {\"role\": \"user\", \"content\": \"What about solving an 2x + 3 = 7 equation?\"}, \n] \n\npipe = pipeline( \n \"text-generation\", \n model=model, \n tokenizer=tokenizer, \n) \n\ngeneration_args = { \n \"max_new_tokens\": 500, \n \"return_full_text\": False, \n \"temperature\": 0.0, \n \"do_sample\": False, \n} \n\noutput = pipe(messages, **generation_args) \nprint(output[0]['generated_text']) \n```\n\nNotes: If you want to use flash attention, call _AutoModelForCausalLM.from_pretrained()_ with _attn_implementation=\"flash_attention_2\"_\n\n## Responsible AI Considerations\n\nLike other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:\n\n+ Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. \n+ Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. \n+ Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. 
\n+ Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. \n+ Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as \"typing, math, random, collections, datetime, itertools\". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. \n\nDevelopers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include:\n\n+ Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.\n+ High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. \n+ Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). \n+ Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. 
\n+ Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.\n\n## Training\n\n### Model\n\n* Architecture: Phi-3 Mini-128K-Instruct has 3.8B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidlines.\n* Inputs: Text. It is best suited for prompts using chat format.\n* Context length: 128K tokens\n* GPUs: 512 H100-80G\n* Training time: 10 days\n* Training data: 4.9T tokens\n* Outputs: Generated text in response to the input\n* Dates: Our models were trained between May and June 2024\n* Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models.\n* Release dates: June, 2024.\n\n### Datasets\n\nOur training data includes a wide variety of sources, totaling 4.9 trillion tokens, and is a combination of \n1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; \n2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); \n3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness.\n\nWe are focusing on the quality of data that could potentially improve the reasoning ability for the model, and we filter the publicly available documents to contain the correct level of knowledge. 
As an example, the result of a game in premier league in a particular day might be good training data for frontier models, but we need to remove such information to leave more model capacity for reasoning for the small size models. More details about data can be found in the [Phi-3 Technical Report](https://aka.ms/phi3-tech-report).\n\n### Fine-tuning\n\nA basic example of multi-GPUs supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/sample_finetune.py).\n\n## Benchmarks\n\nWe report the results under completion format for Phi-3-Mini-128K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Mistral-7b-v0.1, Mixtral-8x7b, Gemma 7B, Llama-3-8B-Instruct, and GPT-3.5.\n\nAll the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation.\n\nAs is now standard, we use few-shot prompts to evaluate the models, at temperature 0. \nThe prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3.\nMore specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model.\n\nThe number of k–shot examples is listed per-benchmark. \n\n| Category | Benchmark | Phi-3-Mini-128K-Ins | Gemma-7B | Mistral-7B | Mixtral-8x7B | Llama-3-8B-Ins | GPT3.5-Turbo-1106 |\n| :----------| :-----------| :---------------------| :----------| :------------| :--------------| :----------------| :-------------------|\n| Popular aggregated benchmark | AGI Eval
5-shot| 39.5 | 42.1 | 35.1 | 45.2 | 42 | 48.4 |\n| | MMLU
5-shot | 69.7 | 63.6 | 61.7 | 70.5 | 66.5 | 71.4 |\n| | BigBench Hard
3-shot | 72.1 | 59.6 | 57.3 | 69.7 | 51.5 | 68.3 |\n| Language Understanding | ANLI
7-shot | 52.3 | 48.7 | 47.1 | 55.2 | 57.3 | 58.1 |\n| | HellaSwag
5-shot | 70.5 | 49.8 | 58.5 | 70.4 | 71.1 | 78.8 |\n| Reasoning | ARC Challenge
10-shot | 85.5 | 78.3 | 78.6 | 87.3 | 82.8 | 87.4 |\n| | BoolQ
0-shot | 77.1 | 66 | 72.2 | 76.6 | 80.9 | 79.1 |\n| | MedQA
2-shot | 56.4 | 49.6 | 50 | 62.2 | 60.5 | 63.4 |\n| | OpenBookQA
10-shot | 78.8 | 78.6 | 79.8 | 85.8 | 82.6 | 86 |\n| | PIQA
5-shot | 80.1 | 78.1 | 77.7 | 86 | 75.7 | 86.6 |\n| | GPQA
0-shot | 29.7 | 2.9 | 15 | 6.9 | 32.4 | 29.9 |\n| | Social IQA
5-shot | 74.7 | 65.5 | 74.6 | 75.9 | 73.9 | 68.3 |\n| | TruthfulQA (MC2)
10-shot | 64.8 | 52.1 | 53 | 60.1 | 63.2 | 67.7 |\n| | WinoGrande
5-shot | 71.0 | 55.6 | 54.2 | 62 | 65 | 68.8 |\n| Factual Knowledge | TriviaQA
5-shot | 57.8 | 72.3 | 75.2 | 82.2 | 67.7 | 85.8 |\n| Math | GSM8K CoTT
8-shot | 85.3 | 59.8 | 46.4 | 64.7 | 77.4 | 78.1 |\n| Code Generation | HumanEval
0-shot | 60.4 | 34.1 | 28.0 | 37.8 | 60.4 | 62.2 |\n| | MBPP
3-shot | 70.0 | 51.5 | 50.8 | 60.2 | 67.7 | 77.8 |\n| **Average** | | **66.4** | **56.0** | **56.4** | **64.4** | **65.5** | **70.3** |\n\n**Long Context**: Phi-3 Mini-128K-Instruct supports 128K context length, therefore the model is capable of several long context tasks including long document/meeting summarization, long document QA. \n\n| Benchmark | Phi-3 Mini-128K-Instruct | Mistral-7B | Mixtral 8x7B | LLaMA-3-8B-Instruct |\n| :---------------| :--------------------------|:------------|:--------------|:---------------------|\n| GovReport | 25.3 | 4.9 | 20.3 | 10.3 |\n| QMSum | 21.9 | 15.5 | 20.6 | 2.9 |\n| Qasper | 41.6 | 23.5 | 26.6 | 8.1 |\n| SQuALITY | 24.1 | 14.7 | 16.2 | 25 |\n| SummScreenFD | 16.8 | 9.3 | 11.3 | 5.1 |\n| **Average** | **25.9** | **13.6** | **19.0** | **10.3** |\n\nWe take a closer look at different categories across 100 public benchmark datasets at the table below: \n\n| Category | Phi-3-Mini-128K-Instruct | Gemma-7B | Mistral-7B | Mixtral 8x7B | Llama-3-8B-Instruct | GPT-3.5-Turbo |\n|:----------|:--------------------------|:----------|:------------|:--------------|:---------------------|:---------------|\n| Popular aggregated benchmark | 60.6 | 59.4 | 56.5 | 66.2 | 59.9 | 67.0 |\n| Reasoning | 69.4 | 60.3 | 62.8 | 68.1 | 69.6 | 71.7 |\n| Language understanding | 57.5 | 57.6 | 52.5 | 66.1 | 63.2 | 67.7 |\n| Code generation | 61.0 | 45.6 | 42.9 | 52.7 | 56.4 | 70.4 |\n| Math | 51.6 | 35.8 | 25.4 | 40.3 | 41.1 | 52.8 |\n| Factual knowledge | 35.8 | 46.7 | 49.8 | 58.6 | 43.1 | 63.4 |\n| Multilingual | 56.4 | 66.5 | 57.4 | 66.7 | 66.6 | 71.0 |\n| Robustness | 61.1 | 38.4 | 40.6 | 51.0 | 64.5 | 69.3 |\n\nOverall, the model with only 3.8B-param achieves a similar level of language understanding and reasoning ability as much larger models. However, it is still fundamentally limited by its size for certain tasks. 
The model simply does not have the capacity to store too much world knowledge, which can be seen for example with low performance on TriviaQA. However, we believe such weakness can be resolved by augmenting Phi-3-Mini with a search engine. \n\n## Cross Platform Support \n\n[ONNX runtime](https://onnxruntime.ai/blogs/accelerating-phi-3) now supports Phi-3 mini models across platforms and hardware. \n\nOptimized phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML GPU acceleration is supported for Windows desktops GPUs (AMD, Intel, and NVIDIA). \n\nAlong with DML, ONNX Runtime provides cross platform support for Phi3 mini across a range of devices CPU, GPU, and mobile. \n\nHere are some of the optimized configurations we have added: \n\n1. ONNX models for int4 DML: Quantized to int4 via AWQ \n2. ONNX model for fp16 CUDA \n3. ONNX model for int4 CUDA: Quantized to int4 via RTN \n4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN \n\n## Software\n\n* [PyTorch](https://github.com/pytorch/pytorch)\n* [Transformers](https://github.com/huggingface/transformers)\n* [Flash-Attention](https://github.com/HazyResearch/flash-attention)\n\n## Hardware\nNote that by default, the Phi-3 Mini-128K-Instruct model uses flash attention, which requires certain types of GPU hardware to run. 
We have tested on the following GPU types:\n* NVIDIA A100\n* NVIDIA A6000\n* NVIDIA H100\n\nIf you want to run the model on:\n* NVIDIA V100 or earlier generation GPUs: call AutoModelForCausalLM.from_pretrained() with attn_implementation=\"eager\"\n* Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [128K](https://aka.ms/phi3-mini-128k-instruct-onnx)\n \n## License\n\nThe model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-128k/resolve/main/LICENSE).\n\n## Trademarks\n\nThis project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.\n\n\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA"],"string":"[\n \"MEDQA\"\n]"}}},{"rowIdx":2495,"cells":{"id":{"kind":"string","value":"knowledgator/gliner-bi-llama-v1.0"},"author":{"kind":"string","value":"knowledgator"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["gliner","pytorch","NER","GLiNER","information extraction","encoder","entity recognition","token-classification","multilingual","dataset:urchade/pile-mistral-v0.1","dataset:numind/NuNER","dataset:knowledgator/GLINER-multi-task-synthetic-data","license:apache-2.0","region:us"],"string":"[\n \"gliner\",\n \"pytorch\",\n \"NER\",\n \"GLiNER\",\n \"information extraction\",\n \"encoder\",\n \"entity recognition\",\n \"token-classification\",\n \"multilingual\",\n \"dataset:urchade/pile-mistral-v0.1\",\n \"dataset:numind/NuNER\",\n \"dataset:knowledgator/GLINER-multi-task-synthetic-data\",\n \"license:apache-2.0\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-29T18:32:18Z","string":"2024-08-29T18:32:18Z"},"last_modified":{"kind":"string","value":"2024-09-02T05:45:51+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- urchade/pile-mistral-v0.1\n- numind/NuNER\n- knowledgator/GLINER-multi-task-synthetic-data\nlanguage:\n- multilingual\nlibrary_name: gliner\nlicense: apache-2.0\npipeline_tag: token-classification\ntags:\n- NER\n- GLiNER\n- information extraction\n- encoder\n- entity recognition\n---\n# About\n\nGLiNER is a Named Entity Recognition (NER) model capable of identifying any entity type using a bidirectional transformer encoders (BERT-like). It provides a practical alternative to traditional NER models, which are limited to predefined entities, and Large Language Models (LLMs) that, despite their flexibility, are costly and large for resource-constrained scenarios.\n\nThis particular version utilize bi-encoder architecture, where textual encoder is [Sheared-Llama-1.3B](https://huggingface.co/princeton-nlp/Sheared-LLaMA-1.3B) and entity label encoder is sentence transformer - [BGE-base-en](https://huggingface.co/BAAI/bge-small-en-v1.5).\n\nThis model leverages the [LLM2Vec](https://github.com/McGill-NLP/llm2vec/tree/main/llm2vec) approach, transforming the initial decoder model into a bi-directional encoder. We further enhanced the model by pre-training it on the masked token prediction task using the Wikipedia corpus. This approach unlocks new capabilities for GLiNER, such as supporting flash attention, enabling a longer context window, and achieving faster inference times. 
Moreover, by utilizing modern decoders trained on extensive and up-to-date datasets, the model benefits from improved generalization and performance.\n\nThis version highlights the key improvements and contextual benefits more clearly.Such architecture brings several advantages over uni-encoder GLiNER:\n* An unlimited amount of entities can be recognized at a single time;\n* Faster inference if entity embeddings are preprocessed;\n* Better generalization to unseen entities;\n\nHowever, it has some drawbacks such as a lack of inter-label interactions that make it hard for the model to disambiguate semantically similar but contextually different entities.\n\n### Installation & Usage\nInstall or update the gliner package:\n```bash\npip install gliner -U\n```\n\nOnce you've downloaded the GLiNER library, you can import the GLiNER class. You can then load this model using `GLiNER.from_pretrained` and predict entities with `predict_entities`.\n\n```python\nfrom gliner import GLiNER\n\nmodel = GLiNER.from_pretrained(\"knowledgator/gliner-bi-llama-v1.0\")\n\ntext = \"\"\"\nCristiano Ronaldo dos Santos Aveiro (Portuguese pronunciation: [kɾiʃˈtjɐnu ʁɔˈnaldu]; born 5 February 1985) is a Portuguese professional footballer who plays as a forward for and captains both Saudi Pro League club Al Nassr and the Portugal national team. Widely regarded as one of the greatest players of all time, Ronaldo has won five Ballon d'Or awards,[note 3] a record three UEFA Men's Player of the Year Awards, and four European Golden Shoes, the most by a European player. He has won 33 trophies in his career, including seven league titles, five UEFA Champions Leagues, the UEFA European Championship and the UEFA Nations League. Ronaldo holds the records for most appearances (183), goals (140) and assists (42) in the Champions League, goals in the European Championship (14), international goals (128) and international appearances (205). 
He is one of the few players to have made over 1,200 professional career appearances, the most by an outfield player, and has scored over 850 official senior career goals for club and country, making him the top goalscorer of all time.\n\"\"\"\n\nlabels = [\"person\", \"award\", \"date\", \"competitions\", \"teams\"]\n\nentities = model.predict_entities(text, labels, threshold=0.3)\n\nfor entity in entities:\n print(entity[\"text\"], \"=>\", entity[\"label\"])\n```\n\n```\nCristiano Ronaldo dos Santos Aveiro => person\n5 February 1985 => date\nAl Nassr => teams\nPortugal national team => teams\nBallon d'Or => award\nUEFA Men's Player of the Year Awards => award\nEuropean Golden Shoes => award\nUEFA Champions Leagues => competitions\nUEFA European Championship => competitions\nUEFA Nations League => competitions\nChampions League => competitions\nEuropean Championship => competitions\n```\n\nIf you want to use flash attention or increase sequence length, please, check the following code:\n```python\nfrom gliner import GLiNER\nimport torch\n\nmodel = GLiNER.from_pretrained(\"knowledgator/gliner-bi-llama-v1.0\",\n _attn_implementation = 'flash_attention_2',\n max_length = 2048).to('cuda:0', dtype=torch.float16)\n```\n\n\nIf you have a large amount of entities and want to pre-embed them, please, refer to the following code snippet:\n\n```python\nlabels = [\"your entities\"]\ntexts = [\"your texts\"]\n\nentity_embeddings = model.encode_labels(labels, batch_size = 8)\n\noutputs = model.batch_predict_with_embeds(texts, entity_embeddings, labels)\n```\n\n### Benchmarks\nBelow you can see the table with benchmarking results on various named entity recognition datasets:\n\n| Dataset | Score |\n|-------------------------|--------|\n| ACE 2004 | 26.8% |\n| ACE 2005 | 29.2% |\n| AnatEM | 25.3% |\n| Broad Tweet Corpus | 66.8% |\n| CoNLL 2003 | 60.3% |\n| FabNER | 21.2% |\n| FindVehicle | 28.3% |\n| GENIA_NER | 58.3% |\n| HarveyNER | 18.3% |\n| MultiNERD | 64.7% |\n| Ontonotes | 
28.4% |\n| PolyglotNER | 45.3% |\n| TweetNER7 | 35.9% |\n| WikiANN en | 53.6% |\n| WikiNeural | 73.4% |\n| bc2gm | 63.2% |\n| bc4chemd | 56.8% |\n| bc5cdr | 71.3% |\n| ncbi | 64.9% |\n| **Average** | **47.0%** |\n| | |\n| CrossNER_AI | 56.7% |\n| CrossNER_literature | 61.5% |\n| CrossNER_music | 70.2% |\n| CrossNER_politics | 75.6% |\n| CrossNER_science | 66.8% |\n| mit-movie | 39.9% |\n| mit-restaurant | 41.7% |\n| **Average (zero-shot benchmark)** | **58.9%** |\n\n### Join Our Discord\n\nConnect with our community on Discord for news, support, and discussion about our models. Join [Discord](https://discord.gg/dkyeAgs9DG)."},"matched_bigbio_names":{"kind":"list like","value":["ANATEM","BC5CDR"],"string":"[\n \"ANATEM\",\n \"BC5CDR\"\n]"}}},{"rowIdx":2496,"cells":{"id":{"kind":"string","value":"knowledgator/gliner-llama-1B-v1.0"},"author":{"kind":"string","value":"knowledgator"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["gliner","pytorch","NER","GLiNER","information extraction","encoder","entity recognition","token-classification","multilingual","dataset:urchade/pile-mistral-v0.1","dataset:knowledgator/GLINER-multi-task-synthetic-data","dataset:EmergentMethods/AskNews-NER-v0","license:apache-2.0","region:us"],"string":"[\n \"gliner\",\n \"pytorch\",\n \"NER\",\n \"GLiNER\",\n \"information extraction\",\n \"encoder\",\n \"entity recognition\",\n \"token-classification\",\n \"multilingual\",\n \"dataset:urchade/pile-mistral-v0.1\",\n \"dataset:knowledgator/GLINER-multi-task-synthetic-data\",\n \"dataset:EmergentMethods/AskNews-NER-v0\",\n \"license:apache-2.0\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-09-01T09:21:19Z","string":"2024-09-01T09:21:19Z"},"last_modified":{"kind":"string","value":"2024-09-06T06:41:35+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":6,"string":"6"},"README":{"kind":"string","value":"---\ndatasets:\n- 
urchade/pile-mistral-v0.1\n- knowledgator/GLINER-multi-task-synthetic-data\n- EmergentMethods/AskNews-NER-v0\nlanguage:\n- multilingual\nlibrary_name: gliner\nlicense: apache-2.0\npipeline_tag: token-classification\ntags:\n- NER\n- GLiNER\n- information extraction\n- encoder\n- entity recognition\n---\n# About\n\nGLiNER is a Named Entity Recognition (NER) model capable of identifying any entity type using a bidirectional transformer encoders (BERT-like). It provides a practical alternative to traditional NER models, which are limited to predefined entities, and Large Language Models (LLMs) that, despite their flexibility, are costly and large for resource-constrained scenarios.\n\nThe initial versions of GLiNER relied on older encoder architectures like BERT and DeBERTA. These models, however, were trained on smaller datasets and lacked support for modern optimization techniques such as flash attention. Additionally, their context window was typically limited to 512 tokens, which is insufficient for many practical applications. Recognizing these limitations, we began exploring alternative backbones for GLiNER.\n\nThis latest model leverages the LLM2Vec approach, transforming the initial decoder model into a bidirectional encoder. We further enhanced the model by pre-training it on the masked token prediction task using the Wikipedia corpus. This approach introduces several advancements for GLiNER, including support for flash attention, an extended context window, and faster inference times. 
Additionally, by utilizing modern decoders trained on large, up-to-date datasets, the model exhibits improved generalization and performance.\n\nKey Advantages Over Previous GLiNER Models:\n\n* Enhanced performance and generalization capabilities\n* Support for Flash Attention\n* Extended context window (up to 32k tokens)\n\nWhile these models are larger and require more computational resources compared to older encoders, they are still considered relatively small given current standards and provide significant benefits for a wide range of use cases.\n\n### Installation & Usage\nInstall or update the gliner package:\n```bash\npip install gliner -U\n```\nAnd LLM2Vec packages:\n```bash\npip install llm2vec\n```\n\nOnce you've downloaded the GLiNER library, you can import the GLiNER class. You can then load this model using `GLiNER.from_pretrained` and predict entities with `predict_entities`.\n\n```python\nfrom gliner import GLiNER\n\nmodel = GLiNER.from_pretrained(\"knowledgator/gliner-llama-1B-v1.0\")\n\ntext = \"\"\"\nCristiano Ronaldo dos Santos Aveiro (Portuguese pronunciation: [kɾiʃˈtjɐnu ʁɔˈnaldu]; born 5 February 1985) is a Portuguese professional footballer who plays as a forward for and captains both Saudi Pro League club Al Nassr and the Portugal national team. Widely regarded as one of the greatest players of all time, Ronaldo has won five Ballon d'Or awards,[note 3] a record three UEFA Men's Player of the Year Awards, and four European Golden Shoes, the most by a European player. He has won 33 trophies in his career, including seven league titles, five UEFA Champions Leagues, the UEFA European Championship and the UEFA Nations League. Ronaldo holds the records for most appearances (183), goals (140) and assists (42) in the Champions League, goals in the European Championship (14), international goals (128) and international appearances (205). 
He is one of the few players to have made over 1,200 professional career appearances, the most by an outfield player, and has scored over 850 official senior career goals for club and country, making him the top goalscorer of all time.\n\"\"\"\n\nlabels = [\"person\", \"award\", \"date\", \"competitions\", \"teams\"]\n\nentities = model.predict_entities(text, labels, threshold=0.5)\n\nfor entity in entities:\n print(entity[\"text\"], \"=>\", entity[\"label\"])\n```\n\n```\nCristiano Ronaldo dos Santos Aveiro => person\n5 February 1985 => date\nAl Nassr => teams\nPortugal national team => teams\nBallon d'Or => award\nUEFA Men's Player of the Year Awards => award\nEuropean Golden Shoes => award\nUEFA Champions Leagues => competitions\nUEFA European Championship => competitions\nUEFA Nations League => competitions\nChampions League => competitions\nEuropean Championship => competitions\n```\n\nIf you want to use flash attention or increase sequence length, please, check the following code:\n```python\nfrom gliner import GLiNER\nimport torch\n\nmodel = GLiNER.from_pretrained(\"knowledgator/gliner-llama-1B-v1.0\",\n _attn_implementation = 'flash_attention_2',\n max_length = 2048).to('cuda:0', dtype=torch.float16)\n```\n\n\n### Benchmarks\nBelow you can see the table with benchmarking results on various named entity recognition datasets:\n\n| Dataset | Score |\n|-----------------------------|--------|\n| ACE 2004 | 29.0% |\n| ACE 2005 | 30.4% |\n| AnatEM | 40.2% |\n| Broad Tweet Corpus | 65.3% |\n| CoNLL 2003 | 62.4% |\n| FabNER | 25.3% |\n| FindVehicle | 39.7% |\n| GENIA_NER | 55.6% |\n| HarveyNER | 25.1% |\n| MultiNERD | 61.3% |\n| Ontonotes | 25.6% |\n| PolyglotNER | 42.7% |\n| TweetNER7 | 36.2% |\n| WikiANN en | 55.7% |\n| WikiNeural | 73.6% |\n| bc2gm | 55.5% |\n| bc4chemd | 65.1% |\n| bc5cdr | 74.0% |\n| ncbi | 64.8% |\n| **Average** | **48.8%** |\n| | |\n| CrossNER_AI | 57.5% |\n| CrossNER_literature | 68.1% |\n| CrossNER_music | 65.3% |\n| CrossNER_politics | 
73.3% |\n| CrossNER_science | 67.8% |\n| mit-movie | 47.7% |\n| mit-restaurant | 40.0% |\n| **Average (zero-shot benchmark)** | **60.1%** |\n\n\n### Join Our Discord\n\nConnect with our community on Discord for news, support, and discussion about our models. Join [Discord](https://discord.gg/dkyeAgs9DG)."},"matched_bigbio_names":{"kind":"list like","value":["ANATEM","BC5CDR"],"string":"[\n \"ANATEM\",\n \"BC5CDR\"\n]"}}},{"rowIdx":2497,"cells":{"id":{"kind":"string","value":"alexbuz/GRIN-MoE-2"},"author":{"kind":"string","value":"alexbuz"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","grinmoe","nlp","code","text-generation","conversational","custom_code","en","arxiv:2409.12136","arxiv:2404.14219","license:mit","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"grinmoe\",\n \"nlp\",\n \"code\",\n \"text-generation\",\n \"conversational\",\n \"custom_code\",\n \"en\",\n \"arxiv:2409.12136\",\n \"arxiv:2404.14219\",\n \"license:mit\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-09-22T22:25:16Z","string":"2024-09-22T22:25:16Z"},"last_modified":{"kind":"string","value":"2024-09-22T22:41:51+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\nlicense_link: https://github.com/microsoft/GRIN-MoE/tree/main/LICENSE\npipeline_tag: text-generation\ntags:\n- nlp\n- code\nwidget:\n- messages:\n - role: user\n content: Sally (a girl) has 3 brothers. Each brother has 2 sisters. How many sisters\n does Sally have?\n---\n\n

\t&#128513; MoE

\n

GRIN: GRadient-INformed MoE

\n

\nHugging Face&nbsp | &nbsp Tech Report&nbsp | &nbsp License&nbsp | &nbsp Github &nbsp | &nbsp Get Started&nbsp\n
\n\n- With **only 6.6B** activate parameters, GRIN MoE achieves **exceptionally good** performance across a diverse set of tasks, particularly in coding and mathematics tasks.\n\n- GRIN uses [**SparseMixer-v2**](https://arxiv.org/html/2409.12136v1#Pt1) to estimate the gradient related to expert routing, while the conventional MoE training treats expert gating as a proxy for the gradient estimation. \n\n- GRIN scales MoE training with [**neither expert parallelism nor token dropping**](https://arxiv.org/pdf/2409.12136#page=5.42), while the conventional MoE training employs expert parallelism and deploys token dropping.\n\n## Intended Uses\n\n### Primary Use Cases\n\nThe model is intended for commercial and research use in multiple languages. The model provides uses for general purpose AI systems and applications which require:\n\n1) Memory/compute constrained environments\n2) Latency bound scenarios\n3) Strong reasoning (especially code, math and logic)\n\nOur model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. \n\n### Use Case Considerations\n\nOur models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case.\n\n***Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.*** \n\n## Usage\n\n### Command-line Demo\n\nThe simpliest way to inference with GRIN-MoE is to run the demo as below, which would setup environment, download model weight, and run inference for a math question. 
\n\n```bash\n# This script is available at `https://github.com/microsoft/GRIN-MoE/blob/main/demo/demo.sh` and requires docker to run.\ncurl https://raw.githubusercontent.com/microsoft/GRIN-MoE/main/demo/demo.sh | bash -s \n```\n\n### Interactive Demo\n\nRun the following command to play with the model with more questions and customized inputs, which would launch a jupyter notebook at `localhost:8887`. \n```bash\n# This script requires docker to run.\ndocker run --gpus all -p 8887:8887 --rm nvcr.io/nvidia/pytorch:24.08-py3 /bin/bash -c 'git clone https://github.com/microsoft/GRIN-MoE.git && jupyter notebook --port 8887 --notebook-dir GRIN-MoE/demo'\n```\n\n## Benchmarks\n\nTo understand the capabilities, we compare GRIN MoE with a set of models over a variety of benchmarks using our internal benchmark platform. At the high-level overview of the model quality on representative benchmarks:\n\n### Popular Benchmarks\n\nNote a different version of mid-training and post-training, emphasizing long context and multilingual ability, has been conducted and has been released at https://huggingface.co/microsoft/Phi-3.5-MoE-instruct.\n\n| | GRIN MoE (16x3.8B) | Phi-3.5-MoE (16x3.8B) | Mixtral (8x7B) | Mixtral (8x22B) | Llama3 (8B) | Llama3 (70B) | GPT3.5 | GPT4o |\n|---------------|-----------|---------|---------|---------|--------|--------|--------|-------|\n| MMLU | 79.4 | 78.9 | 70.5 | 76.2 | 66.5 | 80.2 | 71.4 | 86.9 |\n| HellaSwag | 83.7 | 83.8 | 70.4 | 79.0 | 71.1 | 82.6 | 78.8 | 91.7 |\n| ANLI | 60.6 | 59.8 | 55.2 | 65.2 | 57.3 | 68.3 | 58.1 | 75.7 |\n| GSM-8K | 90.4 | 88.7 | 64.7 | 83.8 | 77.4 | 93.5 | 78.1 | 93.8 |\n| MedQA | 70.4 | 70.5 | 62.2 | 67.9 | 60.5 | 78.5 | 63.4 | 88.9 |\n| AGIEval | 48.2 | 50.3 | 45.2 | 54.0 | 42.0 | 56.9 | 48.4 | 37.6 |\n| TriviaQA | 73.9 | 71.6 | 78.5 | 82.2 | 67.7 | 84.5 | 85.8 | 66.0 |\n| Arc-C | 92.0 | 91.0 | 87.3 | 91.3 | 82.8 | 93.0 | 87.4 | 97.0 |\n| Arc-E | 98.0 | 97.1 | 95.6 | 96.9 | 93.4 | 98.2 | 96.3 | 99.0 |\n| PIQA | 89.0 | 
88.6 | 86.0 | 85.0 | 75.7 | 85.3 | 86.6 | 92.9 |\n| SociQA | 79.5 | 78.0 | 75.9 | 78.2 | 73.9 | 81.1 | 68.3 | 81.4 |\n| BigBench-Hard | 81.4 | 79.1 | 69.7 | 81.8 | 51.5 | 80.2 | 68.3 | 81.2 |\n| WinoGrande | 81.4 | 81.3 | 62.0 | 75.3 | 65.0 | 83.3 | 68.8 | 89.3 |\n| OpenBookQA | 89.8 | 89.6 | 85.8 | 88.6 | 82.6 | 91.8 | 86.0 | 95.2 |\n| BoolQ | 83.4 | 84.6 | 77.6 | 82.7 | 80.9 | 89.1 | 79.1 | 90.6 |\n| CommonSenseQA | 81.8 | 83.5 | 78.1 | 82.0 | 79.0 | 84.4 | 79.6 | 88.5 |\n| TruthfulQA | 74.5 | 77.5 | 60.1 | 67.4 | 63.2 | 81.9 | 85.8 | 85.6 |\n| HumanEval | 74.4 | 70.7 | 37.8 | 39.6 | 60.4 | 78.7 | 62.2 | 92.1 |\n| MBPP | 80.3 | 80.8 | 60.2 | 70.7 | 67.7 | 81.3 | 77.8 | 90.4 |\n| Average | 79.6 | 79.2 | 69.6 | 76.2 | 69.4 | 82.8 | 75.2 | 85.7 |\n\n### Livebench\nPerformance on LiveBench-2024-07-25. Models are ranked by their average score (AVG). *Baseline results are referenced from the official benchmark.\n\n| | Reasoning | Coding | Mathematics | Data Analysis | Language | IF | AVG |\n|------------------------------|-----------|----------|--------------|---------------|----------|----------|----------|\n| Claude-3-haiku* | 29.3 | 24.5 | 25.7 | 41.5 | 30.1 | 64.0 | 35.9 |\n| Mixtral-8x22B-instruct-v0.1* | 29.3 | 32.0 | 28.3 | 31.7 | 26.5 | 63.1 | 35.2 |\n| GPT-3.5-turbo-0125* | 26.7 | 27.7 | 26.9 | 41.2 | 24.2 | 60.5 | 34.5 |\n| **GRIN MoE** | **35.3** | **23.7** | **29.8** | **32.0** | **16.9** | **57.6** | **32.5** |\n| Mistral-small-2402* | 26.0 | 21.2 | 28.2 | 31.9 | 22.1 | 63.9 | 32.2 |\n| Command-r-plus* | 28.7 | 19.5 | 24.9 | 24.6 | 23.9 | 71.5 | 32.2 |\n| Gemma-2-9B-it* | 17.3 | 22.5 | 24.0 | 35.1 | 27.6 | 61.6 | 31.3 |\n\n\n## Training\n\n### Model\n| | |\n|---------------------|-----| \n| Developer | Microsoft |\n| Architecture | GRIN MoE has 16x3.8B parameters with **6.6B active parameters** when using 2 experts. The model is a mixture-of-expert decoder-only Transformer model using the tokenizer with vocabulary size of 32,064. |\n| Inputs | Text. 
It is best suited for prompts using chat format. |\n| Context length | 4K tokens |\n| GPUs | 512 H100-80G |\n| Training time | 18 days |\n| Training data | 4.0T tokens |\n| Outputs | Generated text in response to the input |\n| Dates | Trained between April and June 2024 |\n| Status | This is a static model trained on an offline dataset with cutoff date October 2023 for publicly available data. Future versions of the tuned models may be released as we improve models. |\n| Supported languages | English |\n| Release date | Sep 2024 |\n| License | MIT |\n\n### Training Datasets\nOur training data includes a wide variety of sources, totaling 4 trillion tokens, and is a combination of 1) publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) high quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness. More details about data can be found in the [Phi-3 Technical Report](https://arxiv.org/pdf/2404.14219).\n\n## Responsible AI Considerations\nLike other language models, Gradient Informed (GRIN) MoE model can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: \n* Quality of Service: GRIN MoE is trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. \n* Representation of Harms & Perpetuation of Stereotypes: This model can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. 
Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. \n* Inappropriate or Offensive Content: This model may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. \n* Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. \n* Limited Scope for Code: Majority of the training data is based in Python and use common packages such as \"typing, math, random, collections, datetime, itertools\". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. \n\nDevelopers should apply responsible AI best practices and are responsible for ensuring that a specific use-case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include: \n* Allocation: The model may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.\n* High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. \n* Misinformation: Models may produce inaccurate information. 
Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). \n* Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. \n* Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.\n* Copyrighted content: The model might generate content that infringes on copyright protections. Developers should implement measures to detect and filter copyrighted material, and end-users should be informed about the potential for unintended copyright violations and the importance of verifying original sources to avoid legal complications.\n* Election Misinformation: Developers should ensure robust verification mechanisms are in place to detect and correct false information regarding elections and should inform users of the need for critical evaluation of AI-generated election-related content to mitigate the spread of misinformation.\n \n## License\nThe model is licensed under the [MIT license](./LICENSE).\n\n## Trademarks\nThis project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. 
Any use of third-party trademarks or logos are subject to those third-party’s policies.\nHyper Icon"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA"],"string":"[\n \"MEDQA\"\n]"}}},{"rowIdx":2498,"cells":{"id":{"kind":"string","value":"SkyeTeam/stella_en_400m"},"author":{"kind":"string","value":"SkyeTeam"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","pytorch","safetensors","new","feature-extraction","mteb","transformers","sentence-similarity","custom_code","arxiv:2205.13147","license:mit","model-index","autotrain_compatible","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"pytorch\",\n \"safetensors\",\n \"new\",\n \"feature-extraction\",\n \"mteb\",\n \"transformers\",\n \"sentence-similarity\",\n \"custom_code\",\n \"arxiv:2205.13147\",\n \"license:mit\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-09-28T00:12:08Z","string":"2024-09-28T00:12:08Z"},"last_modified":{"kind":"string","value":"2024-09-28T00:13:33+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: mit\ntags:\n- mteb\n- sentence-transformers\n- transformers\n- sentence-similarity\nmodel-index:\n- name: stella_en_400M_v5\n results:\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (en)\n type: mteb/amazon_counterfactual\n config: en\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 92.35820895522387\n - type: ap\n value: 70.81322736988783\n - type: ap_weighted\n value: 70.81322736988783\n - type: f1\n value: 88.9505466159595\n - type: f1_weighted\n value: 92.68630932872613\n - type: main_score\n value: 
92.35820895522387\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonPolarityClassification\n type: mteb/amazon_polarity\n config: default\n split: test\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\n metrics:\n - type: accuracy\n value: 97.1945\n - type: ap\n value: 96.08192192244094\n - type: ap_weighted\n value: 96.08192192244094\n - type: f1\n value: 97.1936887167346\n - type: f1_weighted\n value: 97.1936887167346\n - type: main_score\n value: 97.1945\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (en)\n type: mteb/amazon_reviews_multi\n config: en\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 59.528000000000006\n - type: f1\n value: 59.21016819840188\n - type: f1_weighted\n value: 59.21016819840188\n - type: main_score\n value: 59.528000000000006\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna\n type: mteb/arguana\n config: default\n split: test\n revision: c22ab2a51041ffd869aaddef7af8d8215647e41a\n metrics:\n - type: main_score\n value: 64.24\n - type: map_at_1\n value: 40.398\n - type: map_at_10\n value: 56.215\n - type: map_at_100\n value: 56.833999999999996\n - type: map_at_1000\n value: 56.835\n - type: map_at_20\n value: 56.747\n - type: map_at_3\n value: 52.181\n - type: map_at_5\n value: 54.628\n - type: mrr_at_1\n value: 41.25177809388336\n - type: mrr_at_10\n value: 56.570762491815216\n - type: mrr_at_100\n value: 57.17548614361504\n - type: mrr_at_1000\n value: 57.176650626377466\n - type: mrr_at_20\n value: 57.08916253512566\n - type: mrr_at_3\n value: 52.47747747747754\n - type: mrr_at_5\n value: 54.94547178757718\n - type: nauc_map_at_1000_diff1\n value: 22.408086887100158\n - type: nauc_map_at_1000_max\n value: -8.730419096847543\n - type: nauc_map_at_1000_std\n value: -17.789262741255737\n - type: nauc_map_at_100_diff1\n value: 22.407371684274025\n - type: nauc_map_at_100_max\n value: -8.732263549026266\n - type: 
nauc_map_at_100_std\n value: -17.79550515579994\n - type: nauc_map_at_10_diff1\n value: 21.925005073301246\n - type: nauc_map_at_10_max\n value: -8.990323944492134\n - type: nauc_map_at_10_std\n value: -18.199246301671458\n - type: nauc_map_at_1_diff1\n value: 26.23276644969203\n - type: nauc_map_at_1_max\n value: -12.376511389571245\n - type: nauc_map_at_1_std\n value: -18.11411715207284\n - type: nauc_map_at_20_diff1\n value: 22.32455790850922\n - type: nauc_map_at_20_max\n value: -8.664671547236034\n - type: nauc_map_at_20_std\n value: -17.8290016125137\n - type: nauc_map_at_3_diff1\n value: 22.395462147465064\n - type: nauc_map_at_3_max\n value: -8.206580750918844\n - type: nauc_map_at_3_std\n value: -17.604490446911484\n - type: nauc_map_at_5_diff1\n value: 21.95307379904799\n - type: nauc_map_at_5_max\n value: -8.03958102978443\n - type: nauc_map_at_5_std\n value: -17.36578866595004\n - type: nauc_mrr_at_1000_diff1\n value: 20.124236798365587\n - type: nauc_mrr_at_1000_max\n value: -9.587376069575898\n - type: nauc_mrr_at_1000_std\n value: -17.79191612151833\n - type: nauc_mrr_at_100_diff1\n value: 20.123612603474033\n - type: nauc_mrr_at_100_max\n value: -9.589187218607831\n - type: nauc_mrr_at_100_std\n value: -17.7981617777748\n - type: nauc_mrr_at_10_diff1\n value: 19.723683875738075\n - type: nauc_mrr_at_10_max\n value: -9.774151729178815\n - type: nauc_mrr_at_10_std\n value: -18.168668675495162\n - type: nauc_mrr_at_1_diff1\n value: 23.945332059908132\n - type: nauc_mrr_at_1_max\n value: -12.260461466152819\n - type: nauc_mrr_at_1_std\n value: -18.007194922921148\n - type: nauc_mrr_at_20_diff1\n value: 20.04819461810257\n - type: nauc_mrr_at_20_max\n value: -9.518368283588936\n - type: nauc_mrr_at_20_std\n value: -17.831608149836136\n - type: nauc_mrr_at_3_diff1\n value: 19.8571785245832\n - type: nauc_mrr_at_3_max\n value: -9.464375021240478\n - type: nauc_mrr_at_3_std\n value: -17.728533927330453\n - type: nauc_mrr_at_5_diff1\n value: 
19.670313652167827\n - type: nauc_mrr_at_5_max\n value: -8.966372585728434\n - type: nauc_mrr_at_5_std\n value: -17.468955834324817\n - type: nauc_ndcg_at_1000_diff1\n value: 21.863049281767417\n - type: nauc_ndcg_at_1000_max\n value: -8.18698520924057\n - type: nauc_ndcg_at_1000_std\n value: -17.634483364794804\n - type: nauc_ndcg_at_100_diff1\n value: 21.849924385738586\n - type: nauc_ndcg_at_100_max\n value: -8.226437560889345\n - type: nauc_ndcg_at_100_std\n value: -17.774648478087002\n - type: nauc_ndcg_at_10_diff1\n value: 19.888395590413573\n - type: nauc_ndcg_at_10_max\n value: -8.968706085632382\n - type: nauc_ndcg_at_10_std\n value: -19.31386964628115\n - type: nauc_ndcg_at_1_diff1\n value: 26.23276644969203\n - type: nauc_ndcg_at_1_max\n value: -12.376511389571245\n - type: nauc_ndcg_at_1_std\n value: -18.11411715207284\n - type: nauc_ndcg_at_20_diff1\n value: 21.38413342416933\n - type: nauc_ndcg_at_20_max\n value: -7.636238194084164\n - type: nauc_ndcg_at_20_std\n value: -17.946390844693028\n - type: nauc_ndcg_at_3_diff1\n value: 21.29169165029195\n - type: nauc_ndcg_at_3_max\n value: -6.793840499730093\n - type: nauc_ndcg_at_3_std\n value: -17.52359001586737\n - type: nauc_ndcg_at_5_diff1\n value: 20.238297656671364\n - type: nauc_ndcg_at_5_max\n value: -6.424992706950072\n - type: nauc_ndcg_at_5_std\n value: -17.082391132291356\n - type: nauc_precision_at_1000_diff1\n value: -7.05195108528572\n - type: nauc_precision_at_1000_max\n value: 34.439879624882145\n - type: nauc_precision_at_1000_std\n value: 68.72436351659353\n - type: nauc_precision_at_100_diff1\n value: -2.769464113932605\n - type: nauc_precision_at_100_max\n value: 9.89562961226698\n - type: nauc_precision_at_100_std\n value: -0.5880967482224028\n - type: nauc_precision_at_10_diff1\n value: 2.1371544726832323\n - type: nauc_precision_at_10_max\n value: -11.93051325147756\n - type: nauc_precision_at_10_std\n value: -30.83144187392059\n - type: nauc_precision_at_1_diff1\n value: 
26.23276644969203\n - type: nauc_precision_at_1_max\n value: -12.376511389571245\n - type: nauc_precision_at_1_std\n value: -18.11411715207284\n - type: nauc_precision_at_20_diff1\n value: 3.780146814257504\n - type: nauc_precision_at_20_max\n value: 17.06527540214615\n - type: nauc_precision_at_20_std\n value: -20.36832563035565\n - type: nauc_precision_at_3_diff1\n value: 17.63894384012077\n - type: nauc_precision_at_3_max\n value: -2.0220490624638887\n - type: nauc_precision_at_3_std\n value: -17.285601413493918\n - type: nauc_precision_at_5_diff1\n value: 12.557855071944601\n - type: nauc_precision_at_5_max\n value: 0.5840236463956658\n - type: nauc_precision_at_5_std\n value: -15.827224420217846\n - type: nauc_recall_at_1000_diff1\n value: -7.051951085286463\n - type: nauc_recall_at_1000_max\n value: 34.43987962487738\n - type: nauc_recall_at_1000_std\n value: 68.724363516591\n - type: nauc_recall_at_100_diff1\n value: -2.769464113930314\n - type: nauc_recall_at_100_max\n value: 9.895629612270017\n - type: nauc_recall_at_100_std\n value: -0.58809674821745\n - type: nauc_recall_at_10_diff1\n value: 2.1371544726834495\n - type: nauc_recall_at_10_max\n value: -11.930513251477253\n - type: nauc_recall_at_10_std\n value: -30.83144187392047\n - type: nauc_recall_at_1_diff1\n value: 26.23276644969203\n - type: nauc_recall_at_1_max\n value: -12.376511389571245\n - type: nauc_recall_at_1_std\n value: -18.11411715207284\n - type: nauc_recall_at_20_diff1\n value: 3.7801468142575922\n - type: nauc_recall_at_20_max\n value: 17.0652754021456\n - type: nauc_recall_at_20_std\n value: -20.36832563035559\n - type: nauc_recall_at_3_diff1\n value: 17.63894384012074\n - type: nauc_recall_at_3_max\n value: -2.02204906246383\n - type: nauc_recall_at_3_std\n value: -17.28560141349386\n - type: nauc_recall_at_5_diff1\n value: 12.55785507194463\n - type: nauc_recall_at_5_max\n value: 0.5840236463957296\n - type: nauc_recall_at_5_std\n value: -15.827224420217856\n - type: ndcg_at_1\n 
value: 40.398\n - type: ndcg_at_10\n value: 64.24\n - type: ndcg_at_100\n value: 66.631\n - type: ndcg_at_1000\n value: 66.65100000000001\n - type: ndcg_at_20\n value: 66.086\n - type: ndcg_at_3\n value: 55.938\n - type: ndcg_at_5\n value: 60.370000000000005\n - type: precision_at_1\n value: 40.398\n - type: precision_at_10\n value: 8.962\n - type: precision_at_100\n value: 0.9950000000000001\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_20\n value: 4.836\n - type: precision_at_3\n value: 22.262\n - type: precision_at_5\n value: 15.519\n - type: recall_at_1\n value: 40.398\n - type: recall_at_10\n value: 89.616\n - type: recall_at_100\n value: 99.502\n - type: recall_at_1000\n value: 99.644\n - type: recall_at_20\n value: 96.72800000000001\n - type: recall_at_3\n value: 66.78500000000001\n - type: recall_at_5\n value: 77.596\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: main_score\n value: 55.1564333205451\n - type: v_measure\n value: 55.1564333205451\n - type: v_measure_std\n value: 14.696883012214512\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: main_score\n value: 49.823698316694795\n - type: v_measure\n value: 49.823698316694795\n - type: v_measure_std\n value: 14.951660654298186\n - task:\n type: Reranking\n dataset:\n name: MTEB AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: main_score\n value: 66.15294503553424\n - type: map\n value: 66.15294503553424\n - type: mrr\n value: 78.53438420612935\n - type: nAUC_map_diff1\n value: 12.569697092717997\n - type: nAUC_map_max\n value: 
21.50670312412572\n - type: nAUC_map_std\n value: 16.943786429229064\n - type: nAUC_mrr_diff1\n value: 15.590272897361238\n - type: nAUC_mrr_max\n value: 34.96072022474653\n - type: nAUC_mrr_std\n value: 21.649217605241045\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cosine_pearson\n value: 85.7824546319275\n - type: cosine_spearman\n value: 83.29587385660628\n - type: euclidean_pearson\n value: 84.58764190565167\n - type: euclidean_spearman\n value: 83.30069324352772\n - type: main_score\n value: 83.29587385660628\n - type: manhattan_pearson\n value: 84.95996839947179\n - type: manhattan_spearman\n value: 83.87480271054358\n - type: pearson\n value: 85.7824546319275\n - type: spearman\n value: 83.29587385660628\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 89.30194805194806\n - type: f1\n value: 89.26182507266391\n - type: f1_weighted\n value: 89.26182507266391\n - type: main_score\n value: 89.30194805194806\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: main_score\n value: 50.67972171889736\n - type: v_measure\n value: 50.67972171889736\n - type: v_measure_std\n value: 0.7687409980036303\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: main_score\n value: 45.80539715556144\n - type: v_measure\n value: 45.80539715556144\n - type: v_measure_std\n value: 0.9601346216579142\n - task:\n type: Retrieval\n dataset:\n name: 
MTEB CQADupstackRetrieval\n type: mteb/cqadupstack\n config: default\n split: test\n revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4\n metrics:\n - type: main_score\n value: 44.361250000000005\n - type: map_at_1\n value: 28.304499999999997\n - type: map_at_10\n value: 38.54841666666666\n - type: map_at_100\n value: 39.83141666666667\n - type: map_at_1000\n value: 39.944750000000006\n - type: map_at_20\n value: 39.25341666666667\n - type: map_at_3\n value: 35.406749999999995\n - type: map_at_5\n value: 37.15558333333333\n - type: mrr_at_1\n value: 34.09077232860122\n - type: mrr_at_10\n value: 43.15445393211421\n - type: mrr_at_100\n value: 43.98645286848257\n - type: mrr_at_1000\n value: 44.037631313469404\n - type: mrr_at_20\n value: 43.64045813249614\n - type: mrr_at_3\n value: 40.674138648480486\n - type: mrr_at_5\n value: 42.106251182620255\n - type: nauc_map_at_1000_diff1\n value: 46.250011739434996\n - type: nauc_map_at_1000_max\n value: 30.13664446260598\n - type: nauc_map_at_1000_std\n value: 5.422301791618935\n - type: nauc_map_at_100_diff1\n value: 46.253631351999395\n - type: nauc_map_at_100_max\n value: 30.12612918885181\n - type: nauc_map_at_100_std\n value: 5.367077019987172\n - type: nauc_map_at_10_diff1\n value: 46.328171341741346\n - type: nauc_map_at_10_max\n value: 29.80274612581464\n - type: nauc_map_at_10_std\n value: 4.62996685176396\n - type: nauc_map_at_1_diff1\n value: 51.56118117729493\n - type: nauc_map_at_1_max\n value: 27.94885243863768\n - type: nauc_map_at_1_std\n value: 1.700366508927356\n - type: nauc_map_at_20_diff1\n value: 46.286750260299094\n - type: nauc_map_at_20_max\n value: 29.979205290353278\n - type: nauc_map_at_20_std\n value: 5.010588412441873\n - type: nauc_map_at_3_diff1\n value: 47.10018183619064\n - type: nauc_map_at_3_max\n value: 29.062318206078753\n - type: nauc_map_at_3_std\n value: 3.2235696254694197\n - type: nauc_map_at_5_diff1\n value: 46.41971733050039\n - type: nauc_map_at_5_max\n value: 
29.456798617695657\n - type: nauc_map_at_5_std\n value: 4.0921691023077145\n - type: nauc_mrr_at_1000_diff1\n value: 45.88888977975723\n - type: nauc_mrr_at_1000_max\n value: 32.162138978089544\n - type: nauc_mrr_at_1000_std\n value: 6.2811943424217915\n - type: nauc_mrr_at_100_diff1\n value: 45.87480433011124\n - type: nauc_mrr_at_100_max\n value: 32.16011334212834\n - type: nauc_mrr_at_100_std\n value: 6.2865717772421785\n - type: nauc_mrr_at_10_diff1\n value: 45.849652904658825\n - type: nauc_mrr_at_10_max\n value: 32.13847916232293\n - type: nauc_mrr_at_10_std\n value: 6.105718728141999\n - type: nauc_mrr_at_1_diff1\n value: 51.013730325062156\n - type: nauc_mrr_at_1_max\n value: 32.77457396492779\n - type: nauc_mrr_at_1_std\n value: 4.415684893471724\n - type: nauc_mrr_at_20_diff1\n value: 45.86663046255274\n - type: nauc_mrr_at_20_max\n value: 32.15219360697865\n - type: nauc_mrr_at_20_std\n value: 6.19603046412763\n - type: nauc_mrr_at_3_diff1\n value: 46.522376582423185\n - type: nauc_mrr_at_3_max\n value: 32.18259009733714\n - type: nauc_mrr_at_3_std\n value: 5.288000648220897\n - type: nauc_mrr_at_5_diff1\n value: 45.86611481369745\n - type: nauc_mrr_at_5_max\n value: 32.14261639054921\n - type: nauc_mrr_at_5_std\n value: 5.8811238177073735\n - type: nauc_ndcg_at_1000_diff1\n value: 44.5055097547565\n - type: nauc_ndcg_at_1000_max\n value: 31.149682057975458\n - type: nauc_ndcg_at_1000_std\n value: 8.157937194901333\n - type: nauc_ndcg_at_100_diff1\n value: 44.12398363638596\n - type: nauc_ndcg_at_100_max\n value: 30.878064321409994\n - type: nauc_ndcg_at_100_std\n value: 8.40493441452808\n - type: nauc_ndcg_at_10_diff1\n value: 44.200093505221474\n - type: nauc_ndcg_at_10_max\n value: 30.15267107733158\n - type: nauc_ndcg_at_10_std\n value: 6.407495361566107\n - type: nauc_ndcg_at_1_diff1\n value: 51.013730325062156\n - type: nauc_ndcg_at_1_max\n value: 32.77457396492779\n - type: nauc_ndcg_at_1_std\n value: 4.415684893471724\n - type: 
nauc_ndcg_at_20_diff1\n value: 44.16988321564116\n - type: nauc_ndcg_at_20_max\n value: 30.333532500651213\n - type: nauc_ndcg_at_20_std\n value: 7.10024701386895\n - type: nauc_ndcg_at_3_diff1\n value: 45.35982873879988\n - type: nauc_ndcg_at_3_max\n value: 30.288312457948702\n - type: nauc_ndcg_at_3_std\n value: 4.653900898293395\n - type: nauc_ndcg_at_5_diff1\n value: 44.324558115380185\n - type: nauc_ndcg_at_5_max\n value: 30.048149698941373\n - type: nauc_ndcg_at_5_std\n value: 5.6684459618413205\n - type: nauc_precision_at_1000_diff1\n value: -7.282175798304458\n - type: nauc_precision_at_1000_max\n value: 7.820142031765352\n - type: nauc_precision_at_1000_std\n value: 11.736131836431172\n - type: nauc_precision_at_100_diff1\n value: 1.0222940256506976\n - type: nauc_precision_at_100_max\n value: 16.12346497070298\n - type: nauc_precision_at_100_std\n value: 18.202607395247874\n - type: nauc_precision_at_10_diff1\n value: 18.289439185857837\n - type: nauc_precision_at_10_max\n value: 26.116517399154375\n - type: nauc_precision_at_10_std\n value: 13.921214069982302\n - type: nauc_precision_at_1_diff1\n value: 51.013730325062156\n - type: nauc_precision_at_1_max\n value: 32.77457396492779\n - type: nauc_precision_at_1_std\n value: 4.415684893471724\n - type: nauc_precision_at_20_diff1\n value: 12.365165405210886\n - type: nauc_precision_at_20_max\n value: 22.946297258937367\n - type: nauc_precision_at_20_std\n value: 16.13862870358933\n - type: nauc_precision_at_3_diff1\n value: 32.063423642849685\n - type: nauc_precision_at_3_max\n value: 30.140965811989407\n - type: nauc_precision_at_3_std\n value: 8.501746262550146\n - type: nauc_precision_at_5_diff1\n value: 24.777203357717948\n - type: nauc_precision_at_5_max\n value: 28.401579566848472\n - type: nauc_precision_at_5_std\n value: 11.643246774390914\n - type: nauc_recall_at_1000_diff1\n value: 30.04216463401409\n - type: nauc_recall_at_1000_max\n value: 34.98067760563842\n - type: nauc_recall_at_1000_std\n 
value: 48.01453905250591\n - type: nauc_recall_at_100_diff1\n value: 31.193415507513972\n - type: nauc_recall_at_100_max\n value: 28.69740149270981\n - type: nauc_recall_at_100_std\n value: 25.20960758920368\n - type: nauc_recall_at_10_diff1\n value: 36.18870823636506\n - type: nauc_recall_at_10_max\n value: 26.005625231341238\n - type: nauc_recall_at_10_std\n value: 8.891983977041376\n - type: nauc_recall_at_1_diff1\n value: 51.56118117729493\n - type: nauc_recall_at_1_max\n value: 27.94885243863768\n - type: nauc_recall_at_1_std\n value: 1.700366508927356\n - type: nauc_recall_at_20_diff1\n value: 34.93996118564803\n - type: nauc_recall_at_20_max\n value: 26.149961715956138\n - type: nauc_recall_at_20_std\n value: 12.0657502367633\n - type: nauc_recall_at_3_diff1\n value: 40.80743946709512\n - type: nauc_recall_at_3_max\n value: 26.443127773025783\n - type: nauc_recall_at_3_std\n value: 3.7011448604241477\n - type: nauc_recall_at_5_diff1\n value: 37.608535157055776\n - type: nauc_recall_at_5_max\n value: 26.168016189725822\n - type: nauc_recall_at_5_std\n value: 6.344191564595316\n - type: ndcg_at_1\n value: 34.09083333333333\n - type: ndcg_at_10\n value: 44.361250000000005\n - type: ndcg_at_100\n value: 49.586166666666664\n - type: ndcg_at_1000\n value: 51.623583333333336\n - type: ndcg_at_20\n value: 46.40158333333333\n - type: ndcg_at_3\n value: 39.27733333333333\n - type: ndcg_at_5\n value: 41.662333333333336\n - type: precision_at_1\n value: 34.09083333333333\n - type: precision_at_10\n value: 7.957000000000002\n - type: precision_at_100\n value: 1.2521666666666669\n - type: precision_at_1000\n value: 0.16125\n - type: precision_at_20\n value: 4.6755\n - type: precision_at_3\n value: 18.402083333333334\n - type: precision_at_5\n value: 13.104333333333335\n - type: recall_at_1\n value: 28.304499999999997\n - type: recall_at_10\n value: 56.80666666666667\n - type: recall_at_100\n value: 79.66208333333334\n - type: recall_at_1000\n value: 93.6455\n - type: 
recall_at_20\n value: 64.2495\n - type: recall_at_3\n value: 42.431333333333335\n - type: recall_at_5\n value: 48.665416666666665\n - task:\n type: Retrieval\n dataset:\n name: MTEB ClimateFEVER\n type: mteb/climate-fever\n config: default\n split: test\n revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380\n metrics:\n - type: main_score\n value: 43.525999999999996\n - type: map_at_1\n value: 19.291\n - type: map_at_10\n value: 33.471000000000004\n - type: map_at_100\n value: 35.388999999999996\n - type: map_at_1000\n value: 35.568\n - type: map_at_20\n value: 34.496\n - type: map_at_3\n value: 28.713\n - type: map_at_5\n value: 31.384\n - type: mrr_at_1\n value: 43.77850162866449\n - type: mrr_at_10\n value: 56.28576598934912\n - type: mrr_at_100\n value: 56.8588518168194\n - type: mrr_at_1000\n value: 56.878236725973544\n - type: mrr_at_20\n value: 56.6409328120183\n - type: mrr_at_3\n value: 53.56134636264935\n - type: mrr_at_5\n value: 55.27795874049956\n - type: nauc_map_at_1000_diff1\n value: 27.262513153363876\n - type: nauc_map_at_1000_max\n value: 40.099398684385584\n - type: nauc_map_at_1000_std\n value: 18.847812394005512\n - type: nauc_map_at_100_diff1\n value: 27.238993503030745\n - type: nauc_map_at_100_max\n value: 40.07730434492169\n - type: nauc_map_at_100_std\n value: 18.795349250833684\n - type: nauc_map_at_10_diff1\n value: 27.70929180366227\n - type: nauc_map_at_10_max\n value: 39.55987024970173\n - type: nauc_map_at_10_std\n value: 17.214881544648996\n - type: nauc_map_at_1_diff1\n value: 43.34155892182403\n - type: nauc_map_at_1_max\n value: 38.23324890148018\n - type: nauc_map_at_1_std\n value: 6.0781444393516075\n - type: nauc_map_at_20_diff1\n value: 27.311577477800103\n - type: nauc_map_at_20_max\n value: 39.624414083413456\n - type: nauc_map_at_20_std\n value: 18.149811054163287\n - type: nauc_map_at_3_diff1\n value: 30.475965062734367\n - type: nauc_map_at_3_max\n value: 38.49324825043695\n - type: nauc_map_at_3_std\n value: 
13.357656038648487\n - type: nauc_map_at_5_diff1\n value: 28.425110095017747\n - type: nauc_map_at_5_max\n value: 39.017894870747796\n - type: nauc_map_at_5_std\n value: 15.543817194122564\n - type: nauc_mrr_at_1000_diff1\n value: 33.16689354701644\n - type: nauc_mrr_at_1000_max\n value: 41.70755363247148\n - type: nauc_mrr_at_1000_std\n value: 24.61667417463176\n - type: nauc_mrr_at_100_diff1\n value: 33.147229262917506\n - type: nauc_mrr_at_100_max\n value: 41.712455697170725\n - type: nauc_mrr_at_100_std\n value: 24.6418922043652\n - type: nauc_mrr_at_10_diff1\n value: 32.94185191112572\n - type: nauc_mrr_at_10_max\n value: 41.64272730141954\n - type: nauc_mrr_at_10_std\n value: 24.663391015702707\n - type: nauc_mrr_at_1_diff1\n value: 39.571969559016395\n - type: nauc_mrr_at_1_max\n value: 39.396249211263495\n - type: nauc_mrr_at_1_std\n value: 16.984149923258357\n - type: nauc_mrr_at_20_diff1\n value: 33.10040770334742\n - type: nauc_mrr_at_20_max\n value: 41.807565560083034\n - type: nauc_mrr_at_20_std\n value: 24.8064180365271\n - type: nauc_mrr_at_3_diff1\n value: 33.065406161485704\n - type: nauc_mrr_at_3_max\n value: 41.049510969934694\n - type: nauc_mrr_at_3_std\n value: 23.18371458928609\n - type: nauc_mrr_at_5_diff1\n value: 33.2389593543916\n - type: nauc_mrr_at_5_max\n value: 41.629486918949915\n - type: nauc_mrr_at_5_std\n value: 24.5777253036149\n - type: nauc_ndcg_at_1000_diff1\n value: 25.868840609197637\n - type: nauc_ndcg_at_1000_max\n value: 42.79564910784761\n - type: nauc_ndcg_at_1000_std\n value: 27.035091271680113\n - type: nauc_ndcg_at_100_diff1\n value: 25.019789319579942\n - type: nauc_ndcg_at_100_max\n value: 42.482345143533735\n - type: nauc_ndcg_at_100_std\n value: 26.76872010731345\n - type: nauc_ndcg_at_10_diff1\n value: 25.949464660653238\n - type: nauc_ndcg_at_10_max\n value: 40.79769544643906\n - type: nauc_ndcg_at_10_std\n value: 22.486116508973204\n - type: nauc_ndcg_at_1_diff1\n value: 39.571969559016395\n - type: 
nauc_ndcg_at_1_max\n value: 39.396249211263495\n - type: nauc_ndcg_at_1_std\n value: 16.984149923258357\n - type: nauc_ndcg_at_20_diff1\n value: 25.173455685962214\n - type: nauc_ndcg_at_20_max\n value: 40.88873540662413\n - type: nauc_ndcg_at_20_std\n value: 24.4451041955519\n - type: nauc_ndcg_at_3_diff1\n value: 28.185416070726333\n - type: nauc_ndcg_at_3_max\n value: 39.10600031163912\n - type: nauc_ndcg_at_3_std\n value: 18.42694044215541\n - type: nauc_ndcg_at_5_diff1\n value: 27.112647584005583\n - type: nauc_ndcg_at_5_max\n value: 40.154045682322526\n - type: nauc_ndcg_at_5_std\n value: 20.26822517176828\n - type: nauc_precision_at_1000_diff1\n value: -16.42087927044017\n - type: nauc_precision_at_1000_max\n value: 3.5326295053913\n - type: nauc_precision_at_1000_std\n value: 24.406810708493197\n - type: nauc_precision_at_100_diff1\n value: -12.17648135724982\n - type: nauc_precision_at_100_max\n value: 15.895489260126183\n - type: nauc_precision_at_100_std\n value: 32.48346122610907\n - type: nauc_precision_at_10_diff1\n value: -1.2493131347748072\n - type: nauc_precision_at_10_max\n value: 26.409459305604376\n - type: nauc_precision_at_10_std\n value: 31.115432019300016\n - type: nauc_precision_at_1_diff1\n value: 39.571969559016395\n - type: nauc_precision_at_1_max\n value: 39.396249211263495\n - type: nauc_precision_at_1_std\n value: 16.984149923258357\n - type: nauc_precision_at_20_diff1\n value: -6.597509397240593\n - type: nauc_precision_at_20_max\n value: 21.461984620659695\n - type: nauc_precision_at_20_std\n value: 32.9450259748889\n - type: nauc_precision_at_3_diff1\n value: 9.46378764865453\n - type: nauc_precision_at_3_max\n value: 32.03650819375425\n - type: nauc_precision_at_3_std\n value: 26.489382638510765\n - type: nauc_precision_at_5_diff1\n value: 3.5987036728169537\n - type: nauc_precision_at_5_max\n value: 30.633955978579703\n - type: nauc_precision_at_5_std\n value: 30.532430088014443\n - type: nauc_recall_at_1000_diff1\n value: 
10.714633106872254\n - type: nauc_recall_at_1000_max\n value: 43.94958623961\n - type: nauc_recall_at_1000_std\n value: 51.78914468954123\n - type: nauc_recall_at_100_diff1\n value: 9.63781472255557\n - type: nauc_recall_at_100_max\n value: 38.50917465255336\n - type: nauc_recall_at_100_std\n value: 37.78623984642377\n - type: nauc_recall_at_10_diff1\n value: 16.480342820841688\n - type: nauc_recall_at_10_max\n value: 35.982566867357406\n - type: nauc_recall_at_10_std\n value: 23.30688188788895\n - type: nauc_recall_at_1_diff1\n value: 43.34155892182403\n - type: nauc_recall_at_1_max\n value: 38.23324890148018\n - type: nauc_recall_at_1_std\n value: 6.0781444393516075\n - type: nauc_recall_at_20_diff1\n value: 13.521048985146367\n - type: nauc_recall_at_20_max\n value: 34.62462209239834\n - type: nauc_recall_at_20_std\n value: 27.85924191501618\n - type: nauc_recall_at_3_diff1\n value: 23.57032748533523\n - type: nauc_recall_at_3_max\n value: 36.32703197635613\n - type: nauc_recall_at_3_std\n value: 15.730238734014337\n - type: nauc_recall_at_5_diff1\n value: 19.61387036368584\n - type: nauc_recall_at_5_max\n value: 36.22030835529556\n - type: nauc_recall_at_5_std\n value: 19.76310648649897\n - type: ndcg_at_1\n value: 43.779\n - type: ndcg_at_10\n value: 43.525999999999996\n - type: ndcg_at_100\n value: 50.138000000000005\n - type: ndcg_at_1000\n value: 52.991\n - type: ndcg_at_20\n value: 46.083\n - type: ndcg_at_3\n value: 38.002\n - type: ndcg_at_5\n value: 39.842\n - type: precision_at_1\n value: 43.779\n - type: precision_at_10\n value: 13.205\n - type: precision_at_100\n value: 2.051\n - type: precision_at_1000\n value: 0.259\n - type: precision_at_20\n value: 7.722999999999999\n - type: precision_at_3\n value: 28.903000000000002\n - type: precision_at_5\n value: 21.368000000000002\n - type: recall_at_1\n value: 19.291\n - type: recall_at_10\n value: 48.754\n - type: recall_at_100\n value: 70.97200000000001\n - type: recall_at_1000\n value: 86.611\n - type: 
recall_at_20\n value: 55.884\n - type: recall_at_3\n value: 34.101\n - type: recall_at_5\n value: 40.784\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia\n type: mteb/dbpedia\n config: default\n split: test\n revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659\n metrics:\n - type: main_score\n value: 49.884\n - type: map_at_1\n value: 9.913\n - type: map_at_10\n value: 23.186999999999998\n - type: map_at_100\n value: 34.207\n - type: map_at_1000\n value: 36.318\n - type: map_at_20\n value: 27.419\n - type: map_at_3\n value: 15.656\n - type: map_at_5\n value: 18.945999999999998\n - type: mrr_at_1\n value: 75.75\n - type: mrr_at_10\n value: 82.16279761904761\n - type: mrr_at_100\n value: 82.48445635330299\n - type: mrr_at_1000\n value: 82.4870246719901\n - type: mrr_at_20\n value: 82.36203632968338\n - type: mrr_at_3\n value: 81.29166666666666\n - type: mrr_at_5\n value: 82.02916666666667\n - type: nauc_map_at_1000_diff1\n value: 17.0739966990996\n - type: nauc_map_at_1000_max\n value: 28.440065298437133\n - type: nauc_map_at_1000_std\n value: 20.83498154003865\n - type: nauc_map_at_100_diff1\n value: 17.75982086107111\n - type: nauc_map_at_100_max\n value: 26.87850835673573\n - type: nauc_map_at_100_std\n value: 18.350282298599275\n - type: nauc_map_at_10_diff1\n value: 17.15984258564116\n - type: nauc_map_at_10_max\n value: 10.846179132675553\n - type: nauc_map_at_10_std\n value: -6.263534464094614\n - type: nauc_map_at_1_diff1\n value: 24.014897777973694\n - type: nauc_map_at_1_max\n value: -4.556638938723358\n - type: nauc_map_at_1_std\n value: -22.7844467526989\n - type: nauc_map_at_20_diff1\n value: 16.3179372493187\n - type: nauc_map_at_20_max\n value: 17.176378915498915\n - type: nauc_map_at_20_std\n value: 1.9378637630340372\n - type: nauc_map_at_3_diff1\n value: 19.12786794046792\n - type: nauc_map_at_3_max\n value: 0.09063919305677291\n - type: nauc_map_at_3_std\n value: -16.713143158330492\n - type: nauc_map_at_5_diff1\n value: 
18.76504725420023\n - type: nauc_map_at_5_max\n value: 5.040867712207419\n - type: nauc_map_at_5_std\n value: -12.382578318931165\n - type: nauc_mrr_at_1000_diff1\n value: 54.61266255011247\n - type: nauc_mrr_at_1000_max\n value: 60.83961280977112\n - type: nauc_mrr_at_1000_std\n value: 32.70429260443016\n - type: nauc_mrr_at_100_diff1\n value: 54.61346236538542\n - type: nauc_mrr_at_100_max\n value: 60.8407974416647\n - type: nauc_mrr_at_100_std\n value: 32.69272843993462\n - type: nauc_mrr_at_10_diff1\n value: 54.74633685810871\n - type: nauc_mrr_at_10_max\n value: 61.084525933097865\n - type: nauc_mrr_at_10_std\n value: 33.001220210025565\n - type: nauc_mrr_at_1_diff1\n value: 56.12708423835806\n - type: nauc_mrr_at_1_max\n value: 58.9314540998289\n - type: nauc_mrr_at_1_std\n value: 27.39422607651012\n - type: nauc_mrr_at_20_diff1\n value: 54.58896150245695\n - type: nauc_mrr_at_20_max\n value: 60.890929983464815\n - type: nauc_mrr_at_20_std\n value: 32.65559641276393\n - type: nauc_mrr_at_3_diff1\n value: 54.38229071443791\n - type: nauc_mrr_at_3_max\n value: 59.987849044098596\n - type: nauc_mrr_at_3_std\n value: 33.439813880719974\n - type: nauc_mrr_at_5_diff1\n value: 54.961790262449824\n - type: nauc_mrr_at_5_max\n value: 61.17705173908951\n - type: nauc_mrr_at_5_std\n value: 33.30939850734856\n - type: nauc_ndcg_at_1000_diff1\n value: 29.27465932507067\n - type: nauc_ndcg_at_1000_max\n value: 47.952543312315214\n - type: nauc_ndcg_at_1000_std\n value: 36.17132236391485\n - type: nauc_ndcg_at_100_diff1\n value: 28.63072328980134\n - type: nauc_ndcg_at_100_max\n value: 41.460833419186564\n - type: nauc_ndcg_at_100_std\n value: 27.157100358988135\n - type: nauc_ndcg_at_10_diff1\n value: 23.41488013023301\n - type: nauc_ndcg_at_10_max\n value: 39.27798133072349\n - type: nauc_ndcg_at_10_std\n value: 21.979241438928312\n - type: nauc_ndcg_at_1_diff1\n value: 46.12120543657642\n - type: nauc_ndcg_at_1_max\n value: 47.28452124039853\n - type: 
nauc_ndcg_at_1_std\n value: 19.799884708952543\n - type: nauc_ndcg_at_20_diff1\n value: 23.627669045115574\n - type: nauc_ndcg_at_20_max\n value: 35.88225062457673\n - type: nauc_ndcg_at_20_std\n value: 18.218628030529498\n - type: nauc_ndcg_at_3_diff1\n value: 25.37309228946118\n - type: nauc_ndcg_at_3_max\n value: 40.64426332992231\n - type: nauc_ndcg_at_3_std\n value: 24.608330645901482\n - type: nauc_ndcg_at_5_diff1\n value: 24.055798594999654\n - type: nauc_ndcg_at_5_max\n value: 41.16180524175431\n - type: nauc_ndcg_at_5_std\n value: 24.048305528761315\n - type: nauc_precision_at_1000_diff1\n value: -18.234943251015576\n - type: nauc_precision_at_1000_max\n value: 0.48708502364659184\n - type: nauc_precision_at_1000_std\n value: 2.4473601543134027\n - type: nauc_precision_at_100_diff1\n value: -3.0077810947381227\n - type: nauc_precision_at_100_max\n value: 25.27249321108913\n - type: nauc_precision_at_100_std\n value: 37.36575792126928\n - type: nauc_precision_at_10_diff1\n value: -0.2393778190297635\n - type: nauc_precision_at_10_max\n value: 36.40513293547299\n - type: nauc_precision_at_10_std\n value: 37.4827885766009\n - type: nauc_precision_at_1_diff1\n value: 56.12708423835806\n - type: nauc_precision_at_1_max\n value: 58.9314540998289\n - type: nauc_precision_at_1_std\n value: 27.39422607651012\n - type: nauc_precision_at_20_diff1\n value: -1.2010133229402933\n - type: nauc_precision_at_20_max\n value: 34.117541814385966\n - type: nauc_precision_at_20_std\n value: 39.13273254177449\n - type: nauc_precision_at_3_diff1\n value: 11.757378092198486\n - type: nauc_precision_at_3_max\n value: 42.637962482588875\n - type: nauc_precision_at_3_std\n value: 37.42465077352342\n - type: nauc_precision_at_5_diff1\n value: 7.233177203405101\n - type: nauc_precision_at_5_max\n value: 43.1663582897407\n - type: nauc_precision_at_5_std\n value: 38.848449220750055\n - type: nauc_recall_at_1000_diff1\n value: 27.33938551969145\n - type: nauc_recall_at_1000_max\n value: 
45.5614254479334\n - type: nauc_recall_at_1000_std\n value: 50.58528916250458\n - type: nauc_recall_at_100_diff1\n value: 23.610383761920097\n - type: nauc_recall_at_100_max\n value: 31.422168485847184\n - type: nauc_recall_at_100_std\n value: 25.58649926458304\n - type: nauc_recall_at_10_diff1\n value: 14.62495111808408\n - type: nauc_recall_at_10_max\n value: 7.4295041277681095\n - type: nauc_recall_at_10_std\n value: -9.32297089600654\n - type: nauc_recall_at_1_diff1\n value: 24.014897777973694\n - type: nauc_recall_at_1_max\n value: -4.556638938723358\n - type: nauc_recall_at_1_std\n value: -22.7844467526989\n - type: nauc_recall_at_20_diff1\n value: 14.027862330014662\n - type: nauc_recall_at_20_max\n value: 12.437478731690844\n - type: nauc_recall_at_20_std\n value: -3.0740743798103676\n - type: nauc_recall_at_3_diff1\n value: 16.354018356566712\n - type: nauc_recall_at_3_max\n value: -2.9812231240997917\n - type: nauc_recall_at_3_std\n value: -18.27746460743442\n - type: nauc_recall_at_5_diff1\n value: 16.81486583473587\n - type: nauc_recall_at_5_max\n value: 2.420128513974744\n - type: nauc_recall_at_5_std\n value: -14.441820321214108\n - type: ndcg_at_1\n value: 63.87500000000001\n - type: ndcg_at_10\n value: 49.884\n - type: ndcg_at_100\n value: 54.738\n - type: ndcg_at_1000\n value: 61.635\n - type: ndcg_at_20\n value: 48.894999999999996\n - type: ndcg_at_3\n value: 54.287\n - type: ndcg_at_5\n value: 52.40899999999999\n - type: precision_at_1\n value: 75.75\n - type: precision_at_10\n value: 40.9\n - type: precision_at_100\n value: 13.139999999999999\n - type: precision_at_1000\n value: 2.533\n - type: precision_at_20\n value: 30.8\n - type: precision_at_3\n value: 57.667\n - type: precision_at_5\n value: 51.05\n - type: recall_at_1\n value: 9.913\n - type: recall_at_10\n value: 28.591\n - type: recall_at_100\n value: 61.017999999999994\n - type: recall_at_1000\n value: 83.383\n - type: recall_at_20\n value: 37.834\n - type: recall_at_3\n value: 
17.049\n - type: recall_at_5\n value: 21.685\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: accuracy\n value: 78.77499999999999\n - type: f1\n value: 73.74058240799386\n - type: f1_weighted\n value: 79.78804377638227\n - type: main_score\n value: 78.77499999999999\n - task:\n type: Retrieval\n dataset:\n name: MTEB FEVER\n type: mteb/fever\n config: default\n split: test\n revision: bea83ef9e8fb933d90a2f1d5515737465d613e12\n metrics:\n - type: main_score\n value: 90.986\n - type: map_at_1\n value: 81.601\n - type: map_at_10\n value: 88.242\n - type: map_at_100\n value: 88.46000000000001\n - type: map_at_1000\n value: 88.472\n - type: map_at_20\n value: 88.375\n - type: map_at_3\n value: 87.237\n - type: map_at_5\n value: 87.85300000000001\n - type: mrr_at_1\n value: 87.81878187818782\n - type: mrr_at_10\n value: 92.20301196786335\n - type: mrr_at_100\n value: 92.24884236673292\n - type: mrr_at_1000\n value: 92.2496338899362\n - type: mrr_at_20\n value: 92.23112073283473\n - type: mrr_at_3\n value: 91.77417741774165\n - type: mrr_at_5\n value: 92.03970397039689\n - type: nauc_map_at_1000_diff1\n value: 56.54670664910505\n - type: nauc_map_at_1000_max\n value: 33.08375749975477\n - type: nauc_map_at_1000_std\n value: 2.7491595418252865\n - type: nauc_map_at_100_diff1\n value: 56.50887688686924\n - type: nauc_map_at_100_max\n value: 33.075487189958494\n - type: nauc_map_at_100_std\n value: 2.7675869969253375\n - type: nauc_map_at_10_diff1\n value: 56.08080806610569\n - type: nauc_map_at_10_max\n value: 32.776972098819066\n - type: nauc_map_at_10_std\n value: 2.5904846711290097\n - type: nauc_map_at_1_diff1\n value: 60.645344065853145\n - type: nauc_map_at_1_max\n value: 31.232776777514797\n - type: nauc_map_at_1_std\n value: -1.1946138176109171\n - type: nauc_map_at_20_diff1\n value: 56.28378454162355\n - type: 
nauc_map_at_20_max\n value: 32.98207150385811\n - type: nauc_map_at_20_std\n value: 2.8469814040214025\n - type: nauc_map_at_3_diff1\n value: 55.81958007095375\n - type: nauc_map_at_3_max\n value: 31.602707711038313\n - type: nauc_map_at_3_std\n value: 0.8117019292273401\n - type: nauc_map_at_5_diff1\n value: 55.706025752316535\n - type: nauc_map_at_5_max\n value: 32.16032683604737\n - type: nauc_map_at_5_std\n value: 1.8853201503498669\n - type: nauc_mrr_at_1000_diff1\n value: 75.4997173366251\n - type: nauc_mrr_at_1000_max\n value: 41.49117135484116\n - type: nauc_mrr_at_1000_std\n value: -2.0636172883680852\n - type: nauc_mrr_at_100_diff1\n value: 75.50118860648519\n - type: nauc_mrr_at_100_max\n value: 41.49490161517194\n - type: nauc_mrr_at_100_std\n value: -2.057024385178682\n - type: nauc_mrr_at_10_diff1\n value: 75.47295153099428\n - type: nauc_mrr_at_10_max\n value: 41.55003304042536\n - type: nauc_mrr_at_10_std\n value: -2.0353663198929253\n - type: nauc_mrr_at_1_diff1\n value: 76.632058433229\n - type: nauc_mrr_at_1_max\n value: 39.754483718891656\n - type: nauc_mrr_at_1_std\n value: -2.962241058101701\n - type: nauc_mrr_at_20_diff1\n value: 75.47221882396194\n - type: nauc_mrr_at_20_max\n value: 41.50779280480839\n - type: nauc_mrr_at_20_std\n value: -1.9620212266426307\n - type: nauc_mrr_at_3_diff1\n value: 75.5682297897137\n - type: nauc_mrr_at_3_max\n value: 41.53543801506081\n - type: nauc_mrr_at_3_std\n value: -3.391681195945978\n - type: nauc_mrr_at_5_diff1\n value: 75.37562775183947\n - type: nauc_mrr_at_5_max\n value: 41.42028509006753\n - type: nauc_mrr_at_5_std\n value: -2.418698675622726\n - type: nauc_ndcg_at_1000_diff1\n value: 59.364557011624\n - type: nauc_ndcg_at_1000_max\n value: 35.4112238125149\n - type: nauc_ndcg_at_1000_std\n value: 3.717516193303376\n - type: nauc_ndcg_at_100_diff1\n value: 58.55706703023122\n - type: nauc_ndcg_at_100_max\n value: 35.352285999934594\n - type: nauc_ndcg_at_100_std\n value: 4.273437944266781\n - 
type: nauc_ndcg_at_10_diff1\n value: 56.77422701267037\n - type: nauc_ndcg_at_10_max\n value: 34.24909893882957\n - type: nauc_ndcg_at_10_std\n value: 4.178151434006727\n - type: nauc_ndcg_at_1_diff1\n value: 76.632058433229\n - type: nauc_ndcg_at_1_max\n value: 39.754483718891656\n - type: nauc_ndcg_at_1_std\n value: -2.962241058101701\n - type: nauc_ndcg_at_20_diff1\n value: 57.27343398231262\n - type: nauc_ndcg_at_20_max\n value: 34.7416626740278\n - type: nauc_ndcg_at_20_std\n value: 4.955858766014002\n - type: nauc_ndcg_at_3_diff1\n value: 57.69267803121093\n - type: nauc_ndcg_at_3_max\n value: 33.13744317023105\n - type: nauc_ndcg_at_3_std\n value: 0.40380284030057023\n - type: nauc_ndcg_at_5_diff1\n value: 56.57461019113917\n - type: nauc_ndcg_at_5_max\n value: 33.244657840804386\n - type: nauc_ndcg_at_5_std\n value: 2.5121440827702046\n - type: nauc_precision_at_1000_diff1\n value: -14.54492513449718\n - type: nauc_precision_at_1000_max\n value: -5.94552147573623\n - type: nauc_precision_at_1000_std\n value: 1.2446209816057374\n - type: nauc_precision_at_100_diff1\n value: -15.452676132568344\n - type: nauc_precision_at_100_max\n value: -3.760241749847617\n - type: nauc_precision_at_100_std\n value: 4.623534605290865\n - type: nauc_precision_at_10_diff1\n value: -12.712908026086176\n - type: nauc_precision_at_10_max\n value: 0.45241316994816805\n - type: nauc_precision_at_10_std\n value: 7.849478570138391\n - type: nauc_precision_at_1_diff1\n value: 76.632058433229\n - type: nauc_precision_at_1_max\n value: 39.754483718891656\n - type: nauc_precision_at_1_std\n value: -2.962241058101701\n - type: nauc_precision_at_20_diff1\n value: -14.514618673172041\n - type: nauc_precision_at_20_max\n value: -1.113635490621818\n - type: nauc_precision_at_20_std\n value: 8.599811730457576\n - type: nauc_precision_at_3_diff1\n value: 6.1367799850003815\n - type: nauc_precision_at_3_max\n value: 8.466271950897857\n - type: nauc_precision_at_3_std\n value: 
1.7458051543195068\n - type: nauc_precision_at_5_diff1\n value: -5.804548945783379\n - type: nauc_precision_at_5_max\n value: 3.4060251839074818\n - type: nauc_precision_at_5_std\n value: 5.583410511782371\n - type: nauc_recall_at_1000_diff1\n value: 19.329432953574095\n - type: nauc_recall_at_1000_max\n value: 43.260442595158736\n - type: nauc_recall_at_1000_std\n value: 53.89644660661804\n - type: nauc_recall_at_100_diff1\n value: 21.265326296051235\n - type: nauc_recall_at_100_max\n value: 38.573000195373695\n - type: nauc_recall_at_100_std\n value: 42.169391082152785\n - type: nauc_recall_at_10_diff1\n value: 29.785129558987432\n - type: nauc_recall_at_10_max\n value: 28.379657867558034\n - type: nauc_recall_at_10_std\n value: 21.132574624091973\n - type: nauc_recall_at_1_diff1\n value: 60.645344065853145\n - type: nauc_recall_at_1_max\n value: 31.232776777514797\n - type: nauc_recall_at_1_std\n value: -1.1946138176109171\n - type: nauc_recall_at_20_diff1\n value: 25.88845612373954\n - type: nauc_recall_at_20_max\n value: 30.24785945821152\n - type: nauc_recall_at_20_std\n value: 31.73911437468067\n - type: nauc_recall_at_3_diff1\n value: 42.2968464797395\n - type: nauc_recall_at_3_max\n value: 26.494318009870018\n - type: nauc_recall_at_3_std\n value: 2.6045977160467544\n - type: nauc_recall_at_5_diff1\n value: 35.81340094401374\n - type: nauc_recall_at_5_max\n value: 25.91082947510634\n - type: nauc_recall_at_5_std\n value: 9.759404930864779\n - type: ndcg_at_1\n value: 87.819\n - type: ndcg_at_10\n value: 90.986\n - type: ndcg_at_100\n value: 91.69\n - type: ndcg_at_1000\n value: 91.863\n - type: ndcg_at_20\n value: 91.293\n - type: ndcg_at_3\n value: 89.621\n - type: ndcg_at_5\n value: 90.333\n - type: precision_at_1\n value: 87.819\n - type: precision_at_10\n value: 10.753\n - type: precision_at_100\n value: 1.138\n - type: precision_at_1000\n value: 0.117\n - type: precision_at_20\n value: 5.4879999999999995\n - type: precision_at_3\n value: 33.703\n - 
type: precision_at_5\n value: 20.831\n - type: recall_at_1\n value: 81.601\n - type: recall_at_10\n value: 95.44200000000001\n - type: recall_at_100\n value: 98.14399999999999\n - type: recall_at_1000\n value: 99.157\n - type: recall_at_20\n value: 96.43\n - type: recall_at_3\n value: 91.729\n - type: recall_at_5\n value: 93.552\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA2018\n type: mteb/fiqa\n config: default\n split: test\n revision: 27a168819829fe9bcd655c2df245fb19452e8e06\n metrics:\n - type: main_score\n value: 56.056\n - type: map_at_1\n value: 28.666000000000004\n - type: map_at_10\n value: 47.437000000000005\n - type: map_at_100\n value: 49.537\n - type: map_at_1000\n value: 49.665\n - type: map_at_20\n value: 48.618\n - type: map_at_3\n value: 41.355\n - type: map_at_5\n value: 44.525\n - type: mrr_at_1\n value: 55.55555555555556\n - type: mrr_at_10\n value: 63.705173427395614\n - type: mrr_at_100\n value: 64.25449940779741\n - type: mrr_at_1000\n value: 64.27635581092147\n - type: mrr_at_20\n value: 64.03796029079103\n - type: mrr_at_3\n value: 61.49691358024688\n - type: mrr_at_5\n value: 62.73148148148143\n - type: nauc_map_at_1000_diff1\n value: 43.24282910397747\n - type: nauc_map_at_1000_max\n value: 28.506093180265644\n - type: nauc_map_at_1000_std\n value: -13.040508386155054\n - type: nauc_map_at_100_diff1\n value: 43.23650442904607\n - type: nauc_map_at_100_max\n value: 28.470565635459156\n - type: nauc_map_at_100_std\n value: -12.988098780714935\n - type: nauc_map_at_10_diff1\n value: 43.393840733087686\n - type: nauc_map_at_10_max\n value: 26.637302062720153\n - type: nauc_map_at_10_std\n value: -14.47500292113762\n - type: nauc_map_at_1_diff1\n value: 47.705150227211725\n - type: nauc_map_at_1_max\n value: 15.354189686550129\n - type: nauc_map_at_1_std\n value: -14.559819859039067\n - type: nauc_map_at_20_diff1\n value: 43.14121075706104\n - type: nauc_map_at_20_max\n value: 27.811170590408395\n - type: nauc_map_at_20_std\n 
value: -13.459413585283583\n - type: nauc_map_at_3_diff1\n value: 44.33938667720801\n - type: nauc_map_at_3_max\n value: 21.785619884549398\n - type: nauc_map_at_3_std\n value: -15.569980103071593\n - type: nauc_map_at_5_diff1\n value: 43.39280905665027\n - type: nauc_map_at_5_max\n value: 25.021492190645017\n - type: nauc_map_at_5_std\n value: -14.48856622187443\n - type: nauc_mrr_at_1000_diff1\n value: 52.971563939946286\n - type: nauc_mrr_at_1000_max\n value: 38.88019486172324\n - type: nauc_mrr_at_1000_std\n value: -12.412991642381616\n - type: nauc_mrr_at_100_diff1\n value: 52.978468139876945\n - type: nauc_mrr_at_100_max\n value: 38.89751787948751\n - type: nauc_mrr_at_100_std\n value: -12.3677876252269\n - type: nauc_mrr_at_10_diff1\n value: 52.78507148048174\n - type: nauc_mrr_at_10_max\n value: 38.55079809310022\n - type: nauc_mrr_at_10_std\n value: -12.944127025078755\n - type: nauc_mrr_at_1_diff1\n value: 55.52626805861546\n - type: nauc_mrr_at_1_max\n value: 40.49306809164979\n - type: nauc_mrr_at_1_std\n value: -12.886607701317681\n - type: nauc_mrr_at_20_diff1\n value: 52.9592152665678\n - type: nauc_mrr_at_20_max\n value: 38.88514014589964\n - type: nauc_mrr_at_20_std\n value: -12.434464359819444\n - type: nauc_mrr_at_3_diff1\n value: 52.73696844091174\n - type: nauc_mrr_at_3_max\n value: 38.61018727252859\n - type: nauc_mrr_at_3_std\n value: -13.123989867364166\n - type: nauc_mrr_at_5_diff1\n value: 53.037110010188\n - type: nauc_mrr_at_5_max\n value: 38.44770729849151\n - type: nauc_mrr_at_5_std\n value: -13.49318771828972\n - type: nauc_ndcg_at_1000_diff1\n value: 44.73813840091289\n - type: nauc_ndcg_at_1000_max\n value: 33.70113904685389\n - type: nauc_ndcg_at_1000_std\n value: -10.328687058192742\n - type: nauc_ndcg_at_100_diff1\n value: 44.595174119928835\n - type: nauc_ndcg_at_100_max\n value: 33.4788285112467\n - type: nauc_ndcg_at_100_std\n value: -8.695355259716946\n - type: nauc_ndcg_at_10_diff1\n value: 44.39837225263\n - type: 
nauc_ndcg_at_10_max\n value: 29.188289725593393\n - type: nauc_ndcg_at_10_std\n value: -13.67608323673103\n - type: nauc_ndcg_at_1_diff1\n value: 55.52626805861546\n - type: nauc_ndcg_at_1_max\n value: 40.49306809164979\n - type: nauc_ndcg_at_1_std\n value: -12.886607701317681\n - type: nauc_ndcg_at_20_diff1\n value: 44.24661739902305\n - type: nauc_ndcg_at_20_max\n value: 31.667868318249965\n - type: nauc_ndcg_at_20_std\n value: -10.65470780066342\n - type: nauc_ndcg_at_3_diff1\n value: 43.39857166975522\n - type: nauc_ndcg_at_3_max\n value: 31.764668313577495\n - type: nauc_ndcg_at_3_std\n value: -14.494866954678152\n - type: nauc_ndcg_at_5_diff1\n value: 43.16976647347281\n - type: nauc_ndcg_at_5_max\n value: 29.878329062643143\n - type: nauc_ndcg_at_5_std\n value: -13.987689089179739\n - type: nauc_precision_at_1000_diff1\n value: -9.807973252625484\n - type: nauc_precision_at_1000_max\n value: 26.6279603849494\n - type: nauc_precision_at_1000_std\n value: 7.113187103520632\n - type: nauc_precision_at_100_diff1\n value: -4.777149603323976\n - type: nauc_precision_at_100_max\n value: 31.03410463692187\n - type: nauc_precision_at_100_std\n value: 10.463144150275435\n - type: nauc_precision_at_10_diff1\n value: 8.691528703215962\n - type: nauc_precision_at_10_max\n value: 33.329579434123374\n - type: nauc_precision_at_10_std\n value: -0.8002015226329403\n - type: nauc_precision_at_1_diff1\n value: 55.52626805861546\n - type: nauc_precision_at_1_max\n value: 40.49306809164979\n - type: nauc_precision_at_1_std\n value: -12.886607701317681\n - type: nauc_precision_at_20_diff1\n value: 3.4564653474184284\n - type: nauc_precision_at_20_max\n value: 34.401070158471136\n - type: nauc_precision_at_20_std\n value: 5.813431200164549\n - type: nauc_precision_at_3_diff1\n value: 22.463219705462187\n - type: nauc_precision_at_3_max\n value: 34.77413976546924\n - type: nauc_precision_at_3_std\n value: -7.083890789741479\n - type: nauc_precision_at_5_diff1\n value: 
14.011006004883154\n - type: nauc_precision_at_5_max\n value: 35.73655466853702\n - type: nauc_precision_at_5_std\n value: -2.8395172077771598\n - type: nauc_recall_at_1000_diff1\n value: 16.478046357391555\n - type: nauc_recall_at_1000_max\n value: 43.231704288282344\n - type: nauc_recall_at_1000_std\n value: 38.430684937573645\n - type: nauc_recall_at_100_diff1\n value: 30.764718344602436\n - type: nauc_recall_at_100_max\n value: 31.769050487166655\n - type: nauc_recall_at_100_std\n value: 23.48468311677149\n - type: nauc_recall_at_10_diff1\n value: 34.47339565324045\n - type: nauc_recall_at_10_max\n value: 19.054212335800454\n - type: nauc_recall_at_10_std\n value: -11.039734015330437\n - type: nauc_recall_at_1_diff1\n value: 47.705150227211725\n - type: nauc_recall_at_1_max\n value: 15.354189686550129\n - type: nauc_recall_at_1_std\n value: -14.559819859039067\n - type: nauc_recall_at_20_diff1\n value: 32.1011474016873\n - type: nauc_recall_at_20_max\n value: 25.546372988304423\n - type: nauc_recall_at_20_std\n value: -0.007233471152482897\n - type: nauc_recall_at_3_diff1\n value: 37.5708138019065\n - type: nauc_recall_at_3_max\n value: 16.66410785756736\n - type: nauc_recall_at_3_std\n value: -15.404817020108966\n - type: nauc_recall_at_5_diff1\n value: 35.714519648479595\n - type: nauc_recall_at_5_max\n value: 19.02075233009296\n - type: nauc_recall_at_5_std\n value: -13.180963359760725\n - type: ndcg_at_1\n value: 55.556000000000004\n - type: ndcg_at_10\n value: 56.056\n - type: ndcg_at_100\n value: 62.44\n - type: ndcg_at_1000\n value: 64.263\n - type: ndcg_at_20\n value: 58.638999999999996\n - type: ndcg_at_3\n value: 51.722\n - type: ndcg_at_5\n value: 52.701\n - type: precision_at_1\n value: 55.556000000000004\n - type: precision_at_10\n value: 15.679000000000002\n - type: precision_at_100\n value: 2.252\n - type: precision_at_1000\n value: 0.257\n - type: precision_at_20\n value: 9.02\n - type: precision_at_3\n value: 34.619\n - type: precision_at_5\n 
value: 25.093\n - type: recall_at_1\n value: 28.666000000000004\n - type: recall_at_10\n value: 63.717999999999996\n - type: recall_at_100\n value: 86.938\n - type: recall_at_1000\n value: 97.603\n - type: recall_at_20\n value: 71.649\n - type: recall_at_3\n value: 46.663\n - type: recall_at_5\n value: 53.313\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA\n type: mteb/hotpotqa\n config: default\n split: test\n revision: ab518f4d6fcca38d87c25209f94beba119d02014\n metrics:\n - type: main_score\n value: 71.74199999999999\n - type: map_at_1\n value: 41.729\n - type: map_at_10\n value: 63.168\n - type: map_at_100\n value: 64.132\n - type: map_at_1000\n value: 64.199\n - type: map_at_20\n value: 63.736000000000004\n - type: map_at_3\n value: 59.826\n - type: map_at_5\n value: 61.882000000000005\n - type: mrr_at_1\n value: 83.45712356515868\n - type: mrr_at_10\n value: 87.850342432719\n - type: mrr_at_100\n value: 88.0016320691113\n - type: mrr_at_1000\n value: 88.00576596968136\n - type: mrr_at_20\n value: 87.94463253190389\n - type: mrr_at_3\n value: 87.13706954760278\n - type: mrr_at_5\n value: 87.59419311276136\n - type: nauc_map_at_1000_diff1\n value: 13.635446621095054\n - type: nauc_map_at_1000_max\n value: 18.670632529445633\n - type: nauc_map_at_1000_std\n value: 10.444842636150575\n - type: nauc_map_at_100_diff1\n value: 13.599262398010783\n - type: nauc_map_at_100_max\n value: 18.636389405484806\n - type: nauc_map_at_100_std\n value: 10.460027483576043\n - type: nauc_map_at_10_diff1\n value: 13.235053919323942\n - type: nauc_map_at_10_max\n value: 18.252140477080047\n - type: nauc_map_at_10_std\n value: 9.9075337042203\n - type: nauc_map_at_1_diff1\n value: 76.51940497836482\n - type: nauc_map_at_1_max\n value: 51.251419487235474\n - type: nauc_map_at_1_std\n value: 0.16714896857146574\n - type: nauc_map_at_20_diff1\n value: 13.4178245722222\n - type: nauc_map_at_20_max\n value: 18.40988771210718\n - type: nauc_map_at_20_std\n value: 
10.216685163366282\n - type: nauc_map_at_3_diff1\n value: 13.38370761663418\n - type: nauc_map_at_3_max\n value: 17.760962555456537\n - type: nauc_map_at_3_std\n value: 7.15741965624388\n - type: nauc_map_at_5_diff1\n value: 13.138133309724855\n - type: nauc_map_at_5_max\n value: 17.871761295251044\n - type: nauc_map_at_5_std\n value: 8.475147426940074\n - type: nauc_mrr_at_1000_diff1\n value: 75.82650818891959\n - type: nauc_mrr_at_1000_max\n value: 53.6736100668434\n - type: nauc_mrr_at_1000_std\n value: 1.8025016349213916\n - type: nauc_mrr_at_100_diff1\n value: 75.82530574210111\n - type: nauc_mrr_at_100_max\n value: 53.68067545829002\n - type: nauc_mrr_at_100_std\n value: 1.8147470536495791\n - type: nauc_mrr_at_10_diff1\n value: 75.8330135686799\n - type: nauc_mrr_at_10_max\n value: 53.78626885349077\n - type: nauc_mrr_at_10_std\n value: 1.7975782717226636\n - type: nauc_mrr_at_1_diff1\n value: 76.51940497836482\n - type: nauc_mrr_at_1_max\n value: 51.251419487235474\n - type: nauc_mrr_at_1_std\n value: 0.16714896857146574\n - type: nauc_mrr_at_20_diff1\n value: 75.82783382464166\n - type: nauc_mrr_at_20_max\n value: 53.68364567043885\n - type: nauc_mrr_at_20_std\n value: 1.742037904463963\n - type: nauc_mrr_at_3_diff1\n value: 75.6944609768663\n - type: nauc_mrr_at_3_max\n value: 53.803941340341666\n - type: nauc_mrr_at_3_std\n value: 1.1849945458077804\n - type: nauc_mrr_at_5_diff1\n value: 75.73006960604903\n - type: nauc_mrr_at_5_max\n value: 53.62223096420106\n - type: nauc_mrr_at_5_std\n value: 1.6144067563410909\n - type: nauc_ndcg_at_1000_diff1\n value: 21.58025241642726\n - type: nauc_ndcg_at_1000_max\n value: 24.675747527001153\n - type: nauc_ndcg_at_1000_std\n value: 13.075943547492718\n - type: nauc_ndcg_at_100_diff1\n value: 20.30260137544846\n - type: nauc_ndcg_at_100_max\n value: 23.757528813872018\n - type: nauc_ndcg_at_100_std\n value: 13.648994687574062\n - type: nauc_ndcg_at_10_diff1\n value: 18.995052360997818\n - type: 
nauc_ndcg_at_10_max\n value: 22.254260808196037\n - type: nauc_ndcg_at_10_std\n value: 11.27212390633054\n - type: nauc_ndcg_at_1_diff1\n value: 76.51940497836482\n - type: nauc_ndcg_at_1_max\n value: 51.251419487235474\n - type: nauc_ndcg_at_1_std\n value: 0.16714896857146574\n - type: nauc_ndcg_at_20_diff1\n value: 19.333742380695757\n - type: nauc_ndcg_at_20_max\n value: 22.527779834633364\n - type: nauc_ndcg_at_20_std\n value: 12.161009000707917\n - type: nauc_ndcg_at_3_diff1\n value: 20.013329040965534\n - type: nauc_ndcg_at_3_max\n value: 21.99692460311921\n - type: nauc_ndcg_at_3_std\n value: 6.8076290638386165\n - type: nauc_ndcg_at_5_diff1\n value: 19.08226315942471\n - type: nauc_ndcg_at_5_max\n value: 21.71185964294168\n - type: nauc_ndcg_at_5_std\n value: 8.671911269518214\n - type: nauc_precision_at_1000_diff1\n value: 2.4462475489446764\n - type: nauc_precision_at_1000_max\n value: 29.145662064268578\n - type: nauc_precision_at_1000_std\n value: 49.20704909525856\n - type: nauc_precision_at_100_diff1\n value: 0.11271196725540299\n - type: nauc_precision_at_100_max\n value: 17.37584606388067\n - type: nauc_precision_at_100_std\n value: 34.66099346244071\n - type: nauc_precision_at_10_diff1\n value: 2.9923183951227825\n - type: nauc_precision_at_10_max\n value: 14.261884731124264\n - type: nauc_precision_at_10_std\n value: 18.084188795498378\n - type: nauc_precision_at_1_diff1\n value: 76.51940497836482\n - type: nauc_precision_at_1_max\n value: 51.251419487235474\n - type: nauc_precision_at_1_std\n value: 0.16714896857146574\n - type: nauc_precision_at_20_diff1\n value: 1.9180293008303761\n - type: nauc_precision_at_20_max\n value: 13.832269193468512\n - type: nauc_precision_at_20_std\n value: 21.65284406055607\n - type: nauc_precision_at_3_diff1\n value: 7.226609484731811\n - type: nauc_precision_at_3_max\n value: 15.162908526977272\n - type: nauc_precision_at_3_std\n value: 8.451859972962776\n - type: nauc_precision_at_5_diff1\n value: 
4.705236845538159\n - type: nauc_precision_at_5_max\n value: 14.022910843582666\n - type: nauc_precision_at_5_std\n value: 11.777269322821605\n - type: nauc_recall_at_1000_diff1\n value: 2.446247548945172\n - type: nauc_recall_at_1000_max\n value: 29.14566206426889\n - type: nauc_recall_at_1000_std\n value: 49.20704909525879\n - type: nauc_recall_at_100_diff1\n value: 0.1127119672553316\n - type: nauc_recall_at_100_max\n value: 17.37584606388062\n - type: nauc_recall_at_100_std\n value: 34.660993462440686\n - type: nauc_recall_at_10_diff1\n value: 2.9923183951227927\n - type: nauc_recall_at_10_max\n value: 14.261884731124299\n - type: nauc_recall_at_10_std\n value: 18.08418879549837\n - type: nauc_recall_at_1_diff1\n value: 76.51940497836482\n - type: nauc_recall_at_1_max\n value: 51.251419487235474\n - type: nauc_recall_at_1_std\n value: 0.16714896857146574\n - type: nauc_recall_at_20_diff1\n value: 1.918029300830432\n - type: nauc_recall_at_20_max\n value: 13.832269193468566\n - type: nauc_recall_at_20_std\n value: 21.65284406055605\n - type: nauc_recall_at_3_diff1\n value: 7.226609484731802\n - type: nauc_recall_at_3_max\n value: 15.162908526977182\n - type: nauc_recall_at_3_std\n value: 8.451859972962634\n - type: nauc_recall_at_5_diff1\n value: 4.705236845538197\n - type: nauc_recall_at_5_max\n value: 14.02291084358265\n - type: nauc_recall_at_5_std\n value: 11.777269322821638\n - type: ndcg_at_1\n value: 83.45700000000001\n - type: ndcg_at_10\n value: 71.74199999999999\n - type: ndcg_at_100\n value: 75.008\n - type: ndcg_at_1000\n value: 76.242\n - type: ndcg_at_20\n value: 73.114\n - type: ndcg_at_3\n value: 67.128\n - type: ndcg_at_5\n value: 69.645\n - type: precision_at_1\n value: 83.45700000000001\n - type: precision_at_10\n value: 14.747\n - type: precision_at_100\n value: 1.73\n - type: precision_at_1000\n value: 0.189\n - type: precision_at_20\n value: 7.8149999999999995\n - type: precision_at_3\n value: 42.323\n - type: precision_at_5\n value: 
27.381\n - type: recall_at_1\n value: 41.729\n - type: recall_at_10\n value: 73.734\n - type: recall_at_100\n value: 86.502\n - type: recall_at_1000\n value: 94.60499999999999\n - type: recall_at_20\n value: 78.14999999999999\n - type: recall_at_3\n value: 63.483999999999995\n - type: recall_at_5\n value: 68.45400000000001\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 96.4904\n - type: ap\n value: 94.85481918794709\n - type: ap_weighted\n value: 94.85481918794709\n - type: f1\n value: 96.4898592305707\n - type: f1_weighted\n value: 96.4898592305707\n - type: main_score\n value: 96.4904\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO\n type: mteb/msmarco\n config: default\n split: dev\n revision: c5a29a104738b98a9e76336939199e264163d4a0\n metrics:\n - type: main_score\n value: 43.692\n - type: map_at_1\n value: 23.751\n - type: map_at_10\n value: 36.553999999999995\n - type: map_at_100\n value: 37.721\n - type: map_at_1000\n value: 37.763999999999996\n - type: map_at_20\n value: 37.289\n - type: map_at_3\n value: 32.643\n - type: map_at_5\n value: 34.851\n - type: mrr_at_1\n value: 24.455587392550143\n - type: mrr_at_10\n value: 37.18388706963206\n - type: mrr_at_100\n value: 38.28330737932916\n - type: mrr_at_1000\n value: 38.32054399710817\n - type: mrr_at_20\n value: 37.8818001216278\n - type: mrr_at_3\n value: 33.35721107927405\n - type: mrr_at_5\n value: 35.52483285577843\n - type: nauc_map_at_1000_diff1\n value: 36.3576177260684\n - type: nauc_map_at_1000_max\n value: 7.854511605962703\n - type: nauc_map_at_1000_std\n value: -17.701121059746878\n - type: nauc_map_at_100_diff1\n value: 36.356075649230505\n - type: nauc_map_at_100_max\n value: 7.862168042999533\n - type: nauc_map_at_100_std\n value: -17.670102459097233\n - type: nauc_map_at_10_diff1\n value: 36.22122978875574\n - 
type: nauc_map_at_10_max\n value: 7.80848606967416\n - type: nauc_map_at_10_std\n value: -18.3265151386167\n - type: nauc_map_at_1_diff1\n value: 39.28605466408357\n - type: nauc_map_at_1_max\n value: 6.20202977590459\n - type: nauc_map_at_1_std\n value: -15.734334090045026\n - type: nauc_map_at_20_diff1\n value: 36.33637880909657\n - type: nauc_map_at_20_max\n value: 7.843437969476022\n - type: nauc_map_at_20_std\n value: -17.917533363025996\n - type: nauc_map_at_3_diff1\n value: 36.24864976076741\n - type: nauc_map_at_3_max\n value: 7.420345251835957\n - type: nauc_map_at_3_std\n value: -18.71678497722944\n - type: nauc_map_at_5_diff1\n value: 36.0789619291824\n - type: nauc_map_at_5_max\n value: 7.7314285669514495\n - type: nauc_map_at_5_std\n value: -18.748688764538706\n - type: nauc_mrr_at_1000_diff1\n value: 36.23912675623378\n - type: nauc_mrr_at_1000_max\n value: 7.690553436255147\n - type: nauc_mrr_at_1000_std\n value: -17.609526070212304\n - type: nauc_mrr_at_100_diff1\n value: 36.23782651189002\n - type: nauc_mrr_at_100_max\n value: 7.70075095171647\n - type: nauc_mrr_at_100_std\n value: -17.575714144960184\n - type: nauc_mrr_at_10_diff1\n value: 36.125229472534215\n - type: nauc_mrr_at_10_max\n value: 7.635472248755658\n - type: nauc_mrr_at_10_std\n value: -18.208166616511086\n - type: nauc_mrr_at_1_diff1\n value: 39.20986875554532\n - type: nauc_mrr_at_1_max\n value: 6.062668487561363\n - type: nauc_mrr_at_1_std\n value: -16.04130340817602\n - type: nauc_mrr_at_20_diff1\n value: 36.21207088739667\n - type: nauc_mrr_at_20_max\n value: 7.699610250145951\n - type: nauc_mrr_at_20_std\n value: -17.778245221724028\n - type: nauc_mrr_at_3_diff1\n value: 36.03957583885305\n - type: nauc_mrr_at_3_max\n value: 7.225515576504581\n - type: nauc_mrr_at_3_std\n value: -18.74478742943741\n - type: nauc_mrr_at_5_diff1\n value: 35.969152496648974\n - type: nauc_mrr_at_5_max\n value: 7.584059789018233\n - type: nauc_mrr_at_5_std\n value: -18.569374723129332\n - type: 
nauc_ndcg_at_1000_diff1\n value: 35.894655529841806\n - type: nauc_ndcg_at_1000_max\n value: 8.579327424366236\n - type: nauc_ndcg_at_1000_std\n value: -16.359677367747896\n - type: nauc_ndcg_at_100_diff1\n value: 35.89861902483983\n - type: nauc_ndcg_at_100_max\n value: 8.830873623962242\n - type: nauc_ndcg_at_100_std\n value: -15.173125564722978\n - type: nauc_ndcg_at_10_diff1\n value: 35.36499811105169\n - type: nauc_ndcg_at_10_max\n value: 8.449267180956992\n - type: nauc_ndcg_at_10_std\n value: -18.41978802362402\n - type: nauc_ndcg_at_1_diff1\n value: 39.15422481210622\n - type: nauc_ndcg_at_1_max\n value: 6.055515791928331\n - type: nauc_ndcg_at_1_std\n value: -16.042779610876252\n - type: nauc_ndcg_at_20_diff1\n value: 35.73402868264468\n - type: nauc_ndcg_at_20_max\n value: 8.695705518210847\n - type: nauc_ndcg_at_20_std\n value: -16.7735829470466\n - type: nauc_ndcg_at_3_diff1\n value: 35.31358242856231\n - type: nauc_ndcg_at_3_max\n value: 7.645692789058997\n - type: nauc_ndcg_at_3_std\n value: -19.460003734786874\n - type: nauc_ndcg_at_5_diff1\n value: 35.05216588927143\n - type: nauc_ndcg_at_5_max\n value: 8.216690520604715\n - type: nauc_ndcg_at_5_std\n value: -19.3982054492159\n - type: nauc_precision_at_1000_diff1\n value: -4.440002625111349\n - type: nauc_precision_at_1000_max\n value: 7.886988951901723\n - type: nauc_precision_at_1000_std\n value: 9.88111187048247\n - type: nauc_precision_at_100_diff1\n value: 15.728286119463325\n - type: nauc_precision_at_100_max\n value: 13.218650824470654\n - type: nauc_precision_at_100_std\n value: 16.113245895522553\n - type: nauc_precision_at_10_diff1\n value: 29.51218489610567\n - type: nauc_precision_at_10_max\n value: 10.197432401942912\n - type: nauc_precision_at_10_std\n value: -16.950603431359493\n - type: nauc_precision_at_1_diff1\n value: 39.15422481210622\n - type: nauc_precision_at_1_max\n value: 6.055515791928331\n - type: nauc_precision_at_1_std\n value: -16.042779610876252\n - type: 
nauc_precision_at_20_diff1\n value: 27.825993070397338\n - type: nauc_precision_at_20_max\n value: 11.437632287846007\n - type: nauc_precision_at_20_std\n value: -7.450353566405601\n - type: nauc_precision_at_3_diff1\n value: 32.14135556796588\n - type: nauc_precision_at_3_max\n value: 7.989252443574163\n - type: nauc_precision_at_3_std\n value: -21.566254595671055\n - type: nauc_precision_at_5_diff1\n value: 30.68778685307082\n - type: nauc_precision_at_5_max\n value: 9.332160758499892\n - type: nauc_precision_at_5_std\n value: -20.928554713448914\n - type: nauc_recall_at_1000_diff1\n value: 25.00810478716878\n - type: nauc_recall_at_1000_max\n value: 46.518165765201644\n - type: nauc_recall_at_1000_std\n value: 61.4734635576085\n - type: nauc_recall_at_100_diff1\n value: 33.895581318261726\n - type: nauc_recall_at_100_max\n value: 20.10706035872801\n - type: nauc_recall_at_100_std\n value: 24.204226584457047\n - type: nauc_recall_at_10_diff1\n value: 32.363127359576296\n - type: nauc_recall_at_10_max\n value: 10.729923804989545\n - type: nauc_recall_at_10_std\n value: -18.1335370184202\n - type: nauc_recall_at_1_diff1\n value: 39.28605466408357\n - type: nauc_recall_at_1_max\n value: 6.20202977590459\n - type: nauc_recall_at_1_std\n value: -15.734334090045026\n - type: nauc_recall_at_20_diff1\n value: 33.47804003169795\n - type: nauc_recall_at_20_max\n value: 12.781494765263382\n - type: nauc_recall_at_20_std\n value: -9.263970132202658\n - type: nauc_recall_at_3_diff1\n value: 32.71001429428999\n - type: nauc_recall_at_3_max\n value: 8.353439197382693\n - type: nauc_recall_at_3_std\n value: -21.235097744366954\n - type: nauc_recall_at_5_diff1\n value: 31.87451464963415\n - type: nauc_recall_at_5_max\n value: 9.635051450907305\n - type: nauc_recall_at_5_std\n value: -21.113235357132794\n - type: ndcg_at_1\n value: 24.47\n - type: ndcg_at_10\n value: 43.692\n - type: ndcg_at_100\n value: 49.211\n - type: ndcg_at_1000\n value: 50.244\n - type: ndcg_at_20\n value: 
46.278000000000006\n - type: ndcg_at_3\n value: 35.719\n - type: ndcg_at_5\n value: 39.652\n - type: precision_at_1\n value: 24.47\n - type: precision_at_10\n value: 6.857\n - type: precision_at_100\n value: 0.9610000000000001\n - type: precision_at_1000\n value: 0.105\n - type: precision_at_20\n value: 3.968\n - type: precision_at_3\n value: 15.181000000000001\n - type: precision_at_5\n value: 11.117\n - type: recall_at_1\n value: 23.751\n - type: recall_at_10\n value: 65.64\n - type: recall_at_100\n value: 90.967\n - type: recall_at_1000\n value: 98.738\n - type: recall_at_20\n value: 75.639\n - type: recall_at_3\n value: 43.927\n - type: recall_at_5\n value: 53.366\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 98.82580939352485\n - type: f1\n value: 98.75201754333801\n - type: f1_weighted\n value: 98.82795205108245\n - type: main_score\n value: 98.82580939352485\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 92.29822161422709\n - type: f1\n value: 77.75210224871594\n - type: f1_weighted\n value: 93.58661422540348\n - type: main_score\n value: 92.29822161422709\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 4672e20407010da34463acc759c162ca9734bca6\n metrics:\n - type: accuracy\n value: 85.17484868863484\n - type: f1\n value: 81.94484244487094\n - type: f1_weighted\n value: 85.21022593423332\n - type: main_score\n value: 85.17484868863484\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n type: mteb/amazon_massive_scenario\n config: en\n split: 
test\n revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8\n metrics:\n - type: accuracy\n value: 89.61667787491594\n - type: f1\n value: 89.02701927621264\n - type: f1_weighted\n value: 89.56306982022801\n - type: main_score\n value: 89.61667787491594\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: main_score\n value: 46.318282423948574\n - type: v_measure\n value: 46.318282423948574\n - type: v_measure_std\n value: 0.9729055662461538\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: main_score\n value: 44.29033625273981\n - type: v_measure\n value: 44.29033625273981\n - type: v_measure_std\n value: 1.0596383629128594\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: default\n split: test\n revision: 59042f120c80e8afa9cdbb224f67076cec0fc9a7\n metrics:\n - type: main_score\n value: 33.0526129239962\n - type: map\n value: 33.0526129239962\n - type: mrr\n value: 34.29260046890935\n - type: nAUC_map_diff1\n value: 12.579738077238032\n - type: nAUC_map_max\n value: -20.936629344962\n - type: nAUC_map_std\n value: -1.6096805784945216\n - type: nAUC_mrr_diff1\n value: 11.597584463580807\n - type: nAUC_mrr_max\n value: -15.723702838537504\n - type: nAUC_mrr_std\n value: 0.2719172965777737\n - task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus\n type: mteb/nfcorpus\n config: default\n split: test\n revision: ec0fa4fe99da2ff19ca1214b7966684033a58814\n metrics:\n - type: main_score\n value: 41.486000000000004\n - type: map_at_1\n value: 6.866\n - type: map_at_10\n value: 15.895999999999999\n - type: map_at_100\n value: 21.093\n - type: map_at_1000\n value: 23.067\n - type: map_at_20\n 
value: 18.125\n - type: map_at_3\n value: 11.421000000000001\n - type: map_at_5\n value: 13.415\n - type: mrr_at_1\n value: 52.63157894736842\n - type: mrr_at_10\n value: 61.486805248415166\n - type: mrr_at_100\n value: 62.08211009182091\n - type: mrr_at_1000\n value: 62.10828701365016\n - type: mrr_at_20\n value: 61.904411187915784\n - type: mrr_at_3\n value: 59.90712074303407\n - type: mrr_at_5\n value: 60.91331269349847\n - type: nauc_map_at_1000_diff1\n value: 25.484625278529403\n - type: nauc_map_at_1000_max\n value: 31.206600396418853\n - type: nauc_map_at_1000_std\n value: 15.569448072357156\n - type: nauc_map_at_100_diff1\n value: 27.636750226316764\n - type: nauc_map_at_100_max\n value: 29.66992681250722\n - type: nauc_map_at_100_std\n value: 10.570600484002671\n - type: nauc_map_at_10_diff1\n value: 32.76642525548697\n - type: nauc_map_at_10_max\n value: 21.459225397237663\n - type: nauc_map_at_10_std\n value: -3.546494734209264\n - type: nauc_map_at_1_diff1\n value: 48.8002894871328\n - type: nauc_map_at_1_max\n value: 5.7236722609868815\n - type: nauc_map_at_1_std\n value: -13.283554044471352\n - type: nauc_map_at_20_diff1\n value: 30.57169701502308\n - type: nauc_map_at_20_max\n value: 25.79666139518404\n - type: nauc_map_at_20_std\n value: 1.781732492989651\n - type: nauc_map_at_3_diff1\n value: 40.076315947201095\n - type: nauc_map_at_3_max\n value: 12.862524429140054\n - type: nauc_map_at_3_std\n value: -9.188349777126817\n - type: nauc_map_at_5_diff1\n value: 36.9918718052938\n - type: nauc_map_at_5_max\n value: 16.74234374361876\n - type: nauc_map_at_5_std\n value: -7.818523349307494\n - type: nauc_mrr_at_1000_diff1\n value: 26.88183002609805\n - type: nauc_mrr_at_1000_max\n value: 47.10209348428658\n - type: nauc_mrr_at_1000_std\n value: 32.067825924992924\n - type: nauc_mrr_at_100_diff1\n value: 26.871482491566745\n - type: nauc_mrr_at_100_max\n value: 47.11303868498556\n - type: nauc_mrr_at_100_std\n value: 32.08961428818868\n - type: 
nauc_mrr_at_10_diff1\n value: 26.6356914977722\n - type: nauc_mrr_at_10_max\n value: 47.091624558810366\n - type: nauc_mrr_at_10_std\n value: 31.942424120660164\n - type: nauc_mrr_at_1_diff1\n value: 28.19774198483673\n - type: nauc_mrr_at_1_max\n value: 41.44380927834253\n - type: nauc_mrr_at_1_std\n value: 25.18222691885917\n - type: nauc_mrr_at_20_diff1\n value: 26.86487347109452\n - type: nauc_mrr_at_20_max\n value: 47.1987778214726\n - type: nauc_mrr_at_20_std\n value: 32.143517921610034\n - type: nauc_mrr_at_3_diff1\n value: 27.34340373236422\n - type: nauc_mrr_at_3_max\n value: 46.358726506276646\n - type: nauc_mrr_at_3_std\n value: 31.74924155572593\n - type: nauc_mrr_at_5_diff1\n value: 27.209667205060672\n - type: nauc_mrr_at_5_max\n value: 46.79883369072009\n - type: nauc_mrr_at_5_std\n value: 31.655605306670758\n - type: nauc_ndcg_at_1000_diff1\n value: 18.940195769769687\n - type: nauc_ndcg_at_1000_max\n value: 46.48551313937331\n - type: nauc_ndcg_at_1000_std\n value: 33.64819502089232\n - type: nauc_ndcg_at_100_diff1\n value: 19.50885253809146\n - type: nauc_ndcg_at_100_max\n value: 40.53174462354878\n - type: nauc_ndcg_at_100_std\n value: 28.516152877751118\n - type: nauc_ndcg_at_10_diff1\n value: 16.01699218096564\n - type: nauc_ndcg_at_10_max\n value: 41.17322878314514\n - type: nauc_ndcg_at_10_std\n value: 29.002233224832196\n - type: nauc_ndcg_at_1_diff1\n value: 27.443547710102205\n - type: nauc_ndcg_at_1_max\n value: 40.66529763309582\n - type: nauc_ndcg_at_1_std\n value: 24.15016766225869\n - type: nauc_ndcg_at_20_diff1\n value: 17.541197675685062\n - type: nauc_ndcg_at_20_max\n value: 40.53231266973844\n - type: nauc_ndcg_at_20_std\n value: 29.54096347876548\n - type: nauc_ndcg_at_3_diff1\n value: 18.649628357473716\n - type: nauc_ndcg_at_3_max\n value: 41.18603570171764\n - type: nauc_ndcg_at_3_std\n value: 27.125524188420396\n - type: nauc_ndcg_at_5_diff1\n value: 17.519593751448483\n - type: nauc_ndcg_at_5_max\n value: 
42.715997890377345\n - type: nauc_ndcg_at_5_std\n value: 27.902627839899868\n - type: nauc_precision_at_1000_diff1\n value: -15.528797630565155\n - type: nauc_precision_at_1000_max\n value: 13.741640921778671\n - type: nauc_precision_at_1000_std\n value: 44.50896053788372\n - type: nauc_precision_at_100_diff1\n value: -14.491464489721887\n - type: nauc_precision_at_100_max\n value: 23.136434418999457\n - type: nauc_precision_at_100_std\n value: 49.73145147863128\n - type: nauc_precision_at_10_diff1\n value: -4.829188942994277\n - type: nauc_precision_at_10_max\n value: 40.327612559528866\n - type: nauc_precision_at_10_std\n value: 39.34919529635044\n - type: nauc_precision_at_1_diff1\n value: 28.19774198483673\n - type: nauc_precision_at_1_max\n value: 41.44380927834253\n - type: nauc_precision_at_1_std\n value: 25.18222691885917\n - type: nauc_precision_at_20_diff1\n value: -7.210726293112847\n - type: nauc_precision_at_20_max\n value: 37.195679576636984\n - type: nauc_precision_at_20_std\n value: 45.4597096418357\n - type: nauc_precision_at_3_diff1\n value: 7.578219537774854\n - type: nauc_precision_at_3_max\n value: 41.59775233475654\n - type: nauc_precision_at_3_std\n value: 30.764584790895118\n - type: nauc_precision_at_5_diff1\n value: 1.655451789039598\n - type: nauc_precision_at_5_max\n value: 43.435739407610455\n - type: nauc_precision_at_5_std\n value: 33.42552263325999\n - type: nauc_recall_at_1000_diff1\n value: 5.030705700690516\n - type: nauc_recall_at_1000_max\n value: 19.108072570815583\n - type: nauc_recall_at_1000_std\n value: 14.697734974217308\n - type: nauc_recall_at_100_diff1\n value: 14.746540318132407\n - type: nauc_recall_at_100_max\n value: 21.798705033854795\n - type: nauc_recall_at_100_std\n value: 11.416195108842587\n - type: nauc_recall_at_10_diff1\n value: 25.548642427860486\n - type: nauc_recall_at_10_max\n value: 18.711677681987474\n - type: nauc_recall_at_10_std\n value: -5.988904818971677\n - type: nauc_recall_at_1_diff1\n value: 
48.8002894871328\n - type: nauc_recall_at_1_max\n value: 5.7236722609868815\n - type: nauc_recall_at_1_std\n value: -13.283554044471352\n - type: nauc_recall_at_20_diff1\n value: 23.39140739154809\n - type: nauc_recall_at_20_max\n value: 19.351150636155474\n - type: nauc_recall_at_20_std\n value: -2.757280266915132\n - type: nauc_recall_at_3_diff1\n value: 38.17453576012812\n - type: nauc_recall_at_3_max\n value: 13.47003839643972\n - type: nauc_recall_at_3_std\n value: -8.75780163862688\n - type: nauc_recall_at_5_diff1\n value: 33.02812855226899\n - type: nauc_recall_at_5_max\n value: 15.477626408978477\n - type: nauc_recall_at_5_std\n value: -9.072206441070708\n - type: ndcg_at_1\n value: 50.773999999999994\n - type: ndcg_at_10\n value: 41.486000000000004\n - type: ndcg_at_100\n value: 39.051\n - type: ndcg_at_1000\n value: 48.106\n - type: ndcg_at_20\n value: 39.432\n - type: ndcg_at_3\n value: 47.428\n - type: ndcg_at_5\n value: 45.227000000000004\n - type: precision_at_1\n value: 52.632\n - type: precision_at_10\n value: 31.146\n - type: precision_at_100\n value: 10.328\n - type: precision_at_1000\n value: 2.432\n - type: precision_at_20\n value: 23.793\n - type: precision_at_3\n value: 45.201\n - type: precision_at_5\n value: 39.876\n - type: recall_at_1\n value: 6.866\n - type: recall_at_10\n value: 20.447000000000003\n - type: recall_at_100\n value: 40.607\n - type: recall_at_1000\n value: 73.411\n - type: recall_at_20\n value: 26.082\n - type: recall_at_3\n value: 12.484\n - type: recall_at_5\n value: 15.847\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ\n type: mteb/nq\n config: default\n split: test\n revision: b774495ed302d8c44a3a7ea25c90dbce03968f31\n metrics:\n - type: main_score\n value: 69.072\n - type: map_at_1\n value: 45.483000000000004\n - type: map_at_10\n value: 62.050000000000004\n - type: map_at_100\n value: 62.693\n - type: map_at_1000\n value: 62.702999999999996\n - type: map_at_20\n value: 62.498\n - type: map_at_3\n value: 
58.285\n - type: map_at_5\n value: 60.711000000000006\n - type: mrr_at_1\n value: 50.840092699884124\n - type: mrr_at_10\n value: 64.54635224116673\n - type: mrr_at_100\n value: 64.9526548702289\n - type: mrr_at_1000\n value: 64.95908460752281\n - type: mrr_at_20\n value: 64.82949565799959\n - type: mrr_at_3\n value: 61.89165701042856\n - type: mrr_at_5\n value: 63.632676709154026\n - type: nauc_map_at_1000_diff1\n value: 43.187285304185224\n - type: nauc_map_at_1000_max\n value: 32.39921659632756\n - type: nauc_map_at_1000_std\n value: -5.780901333066553\n - type: nauc_map_at_100_diff1\n value: 43.184487221204456\n - type: nauc_map_at_100_max\n value: 32.41176116347982\n - type: nauc_map_at_100_std\n value: -5.76422606662383\n - type: nauc_map_at_10_diff1\n value: 42.967066814031746\n - type: nauc_map_at_10_max\n value: 32.489617364418514\n - type: nauc_map_at_10_std\n value: -6.029045531102664\n - type: nauc_map_at_1_diff1\n value: 46.16376563218624\n - type: nauc_map_at_1_max\n value: 26.342624776802232\n - type: nauc_map_at_1_std\n value: -7.142171388751972\n - type: nauc_map_at_20_diff1\n value: 43.15894358608328\n - type: nauc_map_at_20_max\n value: 32.46492198956245\n - type: nauc_map_at_20_std\n value: -5.788373305449195\n - type: nauc_map_at_3_diff1\n value: 43.231752344608545\n - type: nauc_map_at_3_max\n value: 31.68003009949564\n - type: nauc_map_at_3_std\n value: -8.015235132765458\n - type: nauc_map_at_5_diff1\n value: 42.86197608819917\n - type: nauc_map_at_5_max\n value: 32.363857571094485\n - type: nauc_map_at_5_std\n value: -6.780487416387977\n - type: nauc_mrr_at_1000_diff1\n value: 43.40542912045782\n - type: nauc_mrr_at_1000_max\n value: 32.8461770324533\n - type: nauc_mrr_at_1000_std\n value: -3.6505425530008204\n - type: nauc_mrr_at_100_diff1\n value: 43.40233508014468\n - type: nauc_mrr_at_100_max\n value: 32.85598538385942\n - type: nauc_mrr_at_100_std\n value: -3.637477352635459\n - type: nauc_mrr_at_10_diff1\n value: 43.260179162806054\n 
- type: nauc_mrr_at_10_max\n value: 32.942643527040474\n - type: nauc_mrr_at_10_std\n value: -3.712052825320437\n - type: nauc_mrr_at_1_diff1\n value: 46.354919460881206\n - type: nauc_mrr_at_1_max\n value: 29.1760258591106\n - type: nauc_mrr_at_1_std\n value: -4.107225031227406\n - type: nauc_mrr_at_20_diff1\n value: 43.37092385434311\n - type: nauc_mrr_at_20_max\n value: 32.93390254712846\n - type: nauc_mrr_at_20_std\n value: -3.5719056112132006\n - type: nauc_mrr_at_3_diff1\n value: 43.1744474040527\n - type: nauc_mrr_at_3_max\n value: 32.741290559777994\n - type: nauc_mrr_at_3_std\n value: -4.72677925120697\n - type: nauc_mrr_at_5_diff1\n value: 43.108396819975674\n - type: nauc_mrr_at_5_max\n value: 32.970519514893084\n - type: nauc_mrr_at_5_std\n value: -4.090906158975974\n - type: nauc_ndcg_at_1000_diff1\n value: 42.786664193638714\n - type: nauc_ndcg_at_1000_max\n value: 33.65554095609296\n - type: nauc_ndcg_at_1000_std\n value: -4.024030130584482\n - type: nauc_ndcg_at_100_diff1\n value: 42.691246775210814\n - type: nauc_ndcg_at_100_max\n value: 34.063232335110875\n - type: nauc_ndcg_at_100_std\n value: -3.477813807415248\n - type: nauc_ndcg_at_10_diff1\n value: 41.90988990571757\n - type: nauc_ndcg_at_10_max\n value: 34.58934812881633\n - type: nauc_ndcg_at_10_std\n value: -4.3295110195497655\n - type: nauc_ndcg_at_1_diff1\n value: 46.354919460881206\n - type: nauc_ndcg_at_1_max\n value: 29.1760258591106\n - type: nauc_ndcg_at_1_std\n value: -4.107225031227406\n - type: nauc_ndcg_at_20_diff1\n value: 42.493206675867114\n - type: nauc_ndcg_at_20_max\n value: 34.562441307459544\n - type: nauc_ndcg_at_20_std\n value: -3.4456116866749107\n - type: nauc_ndcg_at_3_diff1\n value: 42.24180336502808\n - type: nauc_ndcg_at_3_max\n value: 33.064267018100594\n - type: nauc_ndcg_at_3_std\n value: -7.786248093572142\n - type: nauc_ndcg_at_5_diff1\n value: 41.692714787779565\n - type: nauc_ndcg_at_5_max\n value: 34.20502498949156\n - type: nauc_ndcg_at_5_std\n value: 
-5.979557859282785\n - type: nauc_precision_at_1000_diff1\n value: -13.779832506640702\n - type: nauc_precision_at_1000_max\n value: 1.243001688631421\n - type: nauc_precision_at_1000_std\n value: 17.351623398622323\n - type: nauc_precision_at_100_diff1\n value: -11.310526816290297\n - type: nauc_precision_at_100_max\n value: 5.771669506192959\n - type: nauc_precision_at_100_std\n value: 19.917795079540113\n - type: nauc_precision_at_10_diff1\n value: 2.163699384635286\n - type: nauc_precision_at_10_max\n value: 19.66440698458386\n - type: nauc_precision_at_10_std\n value: 13.689876348315726\n - type: nauc_precision_at_1_diff1\n value: 46.354919460881206\n - type: nauc_precision_at_1_max\n value: 29.1760258591106\n - type: nauc_precision_at_1_std\n value: -4.107225031227406\n - type: nauc_precision_at_20_diff1\n value: -3.038735879584471\n - type: nauc_precision_at_20_max\n value: 14.132968299701695\n - type: nauc_precision_at_20_std\n value: 17.78069734664346\n - type: nauc_precision_at_3_diff1\n value: 21.783760758070095\n - type: nauc_precision_at_3_max\n value: 30.244127986404497\n - type: nauc_precision_at_3_std\n value: -0.12411163467738723\n - type: nauc_precision_at_5_diff1\n value: 10.980635723302418\n - type: nauc_precision_at_5_max\n value: 25.302293738975575\n - type: nauc_precision_at_5_std\n value: 6.4740817488722024\n - type: nauc_recall_at_1000_diff1\n value: 34.10343772356593\n - type: nauc_recall_at_1000_max\n value: 80.72497340357538\n - type: nauc_recall_at_1000_std\n value: 69.54564103264093\n - type: nauc_recall_at_100_diff1\n value: 33.427719956774126\n - type: nauc_recall_at_100_max\n value: 71.54086768335449\n - type: nauc_recall_at_100_std\n value: 49.66157377654885\n - type: nauc_recall_at_10_diff1\n value: 33.70139560054039\n - type: nauc_recall_at_10_max\n value: 45.47878072860151\n - type: nauc_recall_at_10_std\n value: 1.4188516615716378\n - type: nauc_recall_at_1_diff1\n value: 46.16376563218624\n - type: nauc_recall_at_1_max\n 
value: 26.342624776802232\n - type: nauc_recall_at_1_std\n value: -7.142171388751972\n - type: nauc_recall_at_20_diff1\n value: 35.805379874970086\n - type: nauc_recall_at_20_max\n value: 51.80479822253392\n - type: nauc_recall_at_20_std\n value: 13.531467576460143\n - type: nauc_recall_at_3_diff1\n value: 37.288500141631616\n - type: nauc_recall_at_3_max\n value: 35.07078243516728\n - type: nauc_recall_at_3_std\n value: -10.452926441410405\n - type: nauc_recall_at_5_diff1\n value: 34.83186104526897\n - type: nauc_recall_at_5_max\n value: 39.58488976496973\n - type: nauc_recall_at_5_std\n value: -6.3049292065708835\n - type: ndcg_at_1\n value: 50.839999999999996\n - type: ndcg_at_10\n value: 69.072\n - type: ndcg_at_100\n value: 71.538\n - type: ndcg_at_1000\n value: 71.77799999999999\n - type: ndcg_at_20\n value: 70.41\n - type: ndcg_at_3\n value: 62.544999999999995\n - type: ndcg_at_5\n value: 66.33099999999999\n - type: precision_at_1\n value: 50.839999999999996\n - type: precision_at_10\n value: 10.495000000000001\n - type: precision_at_100\n value: 1.1900000000000002\n - type: precision_at_1000\n value: 0.121\n - type: precision_at_20\n value: 5.5809999999999995\n - type: precision_at_3\n value: 27.636\n - type: precision_at_5\n value: 18.864\n - type: recall_at_1\n value: 45.483000000000004\n - type: recall_at_10\n value: 87.483\n - type: recall_at_100\n value: 97.844\n - type: recall_at_1000\n value: 99.66199999999999\n - type: recall_at_20\n value: 92.294\n - type: recall_at_3\n value: 71.2\n - type: recall_at_5\n value: 79.753\n - task:\n type: Retrieval\n dataset:\n name: MTEB QuoraRetrieval\n type: mteb/quora\n config: default\n split: test\n revision: e4e08e0b7dbe3c8700f0daef558ff32256715259\n metrics:\n - type: main_score\n value: 89.58\n - type: map_at_1\n value: 71.819\n - type: map_at_10\n value: 86.04899999999999\n - type: map_at_100\n value: 86.648\n - type: map_at_1000\n value: 86.66199999999999\n - type: map_at_20\n value: 86.441\n - type: 
map_at_3\n value: 83.114\n - type: map_at_5\n value: 84.981\n - type: mrr_at_1\n value: 82.62\n - type: mrr_at_10\n value: 88.62899999999979\n - type: mrr_at_100\n value: 88.70918591324215\n - type: mrr_at_1000\n value: 88.70973091492397\n - type: mrr_at_20\n value: 88.68914765317221\n - type: mrr_at_3\n value: 87.74999999999979\n - type: mrr_at_5\n value: 88.36799999999974\n - type: nauc_map_at_1000_diff1\n value: 77.89207709760448\n - type: nauc_map_at_1000_max\n value: 29.63371361495422\n - type: nauc_map_at_1000_std\n value: -48.628180385874344\n - type: nauc_map_at_100_diff1\n value: 77.89592179104915\n - type: nauc_map_at_100_max\n value: 29.617171506130756\n - type: nauc_map_at_100_std\n value: -48.66057170774648\n - type: nauc_map_at_10_diff1\n value: 78.0618161228185\n - type: nauc_map_at_10_max\n value: 29.178490609366737\n - type: nauc_map_at_10_std\n value: -50.74755004592002\n - type: nauc_map_at_1_diff1\n value: 81.64335579973574\n - type: nauc_map_at_1_max\n value: 21.813832226652174\n - type: nauc_map_at_1_std\n value: -42.57570978190876\n - type: nauc_map_at_20_diff1\n value: 77.9299081005938\n - type: nauc_map_at_20_max\n value: 29.458718470003888\n - type: nauc_map_at_20_std\n value: -49.63337236763102\n - type: nauc_map_at_3_diff1\n value: 78.72941448509229\n - type: nauc_map_at_3_max\n value: 26.600997896960056\n - type: nauc_map_at_3_std\n value: -51.889002227479885\n - type: nauc_map_at_5_diff1\n value: 78.31466610917171\n - type: nauc_map_at_5_max\n value: 28.09863984582896\n - type: nauc_map_at_5_std\n value: -52.14058096096497\n - type: nauc_mrr_at_1000_diff1\n value: 78.42667263739992\n - type: nauc_mrr_at_1000_max\n value: 31.98996235127974\n - type: nauc_mrr_at_1000_std\n value: -44.380439148429296\n - type: nauc_mrr_at_100_diff1\n value: 78.42661032698115\n - type: nauc_mrr_at_100_max\n value: 31.991652631740102\n - type: nauc_mrr_at_100_std\n value: -44.37854108460535\n - type: nauc_mrr_at_10_diff1\n value: 78.39126022544136\n - type: 
nauc_mrr_at_10_max\n value: 32.02023484451197\n - type: nauc_mrr_at_10_std\n value: -44.561252349176954\n - type: nauc_mrr_at_1_diff1\n value: 79.21630894647448\n - type: nauc_mrr_at_1_max\n value: 31.526303156060177\n - type: nauc_mrr_at_1_std\n value: -41.887504422443136\n - type: nauc_mrr_at_20_diff1\n value: 78.42548039170424\n - type: nauc_mrr_at_20_max\n value: 31.99588275070137\n - type: nauc_mrr_at_20_std\n value: -44.44957722627042\n - type: nauc_mrr_at_3_diff1\n value: 78.26165151833735\n - type: nauc_mrr_at_3_max\n value: 32.18028826126801\n - type: nauc_mrr_at_3_std\n value: -44.6998237213182\n - type: nauc_mrr_at_5_diff1\n value: 78.34786430903962\n - type: nauc_mrr_at_5_max\n value: 32.168476272879566\n - type: nauc_mrr_at_5_std\n value: -44.7915919956712\n - type: nauc_ndcg_at_1000_diff1\n value: 77.79198355957816\n - type: nauc_ndcg_at_1000_max\n value: 31.14363511518406\n - type: nauc_ndcg_at_1000_std\n value: -46.69335151274275\n - type: nauc_ndcg_at_100_diff1\n value: 77.79898090286419\n - type: nauc_ndcg_at_100_max\n value: 31.115103811629215\n - type: nauc_ndcg_at_100_std\n value: -46.73078913421965\n - type: nauc_ndcg_at_10_diff1\n value: 77.74856635461343\n - type: nauc_ndcg_at_10_max\n value: 30.279584686212747\n - type: nauc_ndcg_at_10_std\n value: -50.23514662356807\n - type: nauc_ndcg_at_1_diff1\n value: 79.17833000040999\n - type: nauc_ndcg_at_1_max\n value: 31.703788144510746\n - type: nauc_ndcg_at_1_std\n value: -41.854817402870715\n - type: nauc_ndcg_at_20_diff1\n value: 77.7380353804671\n - type: nauc_ndcg_at_20_max\n value: 30.622294129001553\n - type: nauc_ndcg_at_20_std\n value: -49.035794761065254\n - type: nauc_ndcg_at_3_diff1\n value: 77.41476880573593\n - type: nauc_ndcg_at_3_max\n value: 29.015949978243032\n - type: nauc_ndcg_at_3_std\n value: -49.78627087622648\n - type: nauc_ndcg_at_5_diff1\n value: 77.64439137502896\n - type: nauc_ndcg_at_5_max\n value: 29.444684897492206\n - type: nauc_ndcg_at_5_std\n value: 
-51.21908400252501\n - type: nauc_precision_at_1000_diff1\n value: -44.92396459446822\n - type: nauc_precision_at_1000_max\n value: -3.674153720989045\n - type: nauc_precision_at_1000_std\n value: 39.56552468277785\n - type: nauc_precision_at_100_diff1\n value: -44.75143023259094\n - type: nauc_precision_at_100_max\n value: -3.705280025140011\n - type: nauc_precision_at_100_std\n value: 39.433619999113326\n - type: nauc_precision_at_10_diff1\n value: -41.0651074726579\n - type: nauc_precision_at_10_max\n value: -0.21097985601783667\n - type: nauc_precision_at_10_std\n value: 26.24652824589493\n - type: nauc_precision_at_1_diff1\n value: 79.17833000040999\n - type: nauc_precision_at_1_max\n value: 31.703788144510746\n - type: nauc_precision_at_1_std\n value: -41.854817402870715\n - type: nauc_precision_at_20_diff1\n value: -43.368001340920294\n - type: nauc_precision_at_20_max\n value: -2.036990010399129\n - type: nauc_precision_at_20_std\n value: 32.37747041406297\n - type: nauc_precision_at_3_diff1\n value: -22.089307548346877\n - type: nauc_precision_at_3_max\n value: 6.2280973175296\n - type: nauc_precision_at_3_std\n value: 5.323992514036145\n - type: nauc_precision_at_5_diff1\n value: -34.07115055244003\n - type: nauc_precision_at_5_max\n value: 2.5955315789198834\n - type: nauc_precision_at_5_std\n value: 16.26096689407332\n - type: nauc_recall_at_1000_diff1\n value: 58.27703860947467\n - type: nauc_recall_at_1000_max\n value: 68.59835835315768\n - type: nauc_recall_at_1000_std\n value: 77.96687006056064\n - type: nauc_recall_at_100_diff1\n value: 73.24371223081737\n - type: nauc_recall_at_100_max\n value: 39.55925344664591\n - type: nauc_recall_at_100_std\n value: -32.25605030215798\n - type: nauc_recall_at_10_diff1\n value: 73.41261201339202\n - type: nauc_recall_at_10_max\n value: 26.822979434062926\n - type: nauc_recall_at_10_std\n value: -74.2909332592806\n - type: nauc_recall_at_1_diff1\n value: 81.64335579973574\n - type: nauc_recall_at_1_max\n value: 
21.813832226652174\n - type: nauc_recall_at_1_std\n value: -42.57570978190876\n - type: nauc_recall_at_20_diff1\n value: 72.7621297920656\n - type: nauc_recall_at_20_max\n value: 26.02492304096079\n - type: nauc_recall_at_20_std\n value: -77.8724532438279\n - type: nauc_recall_at_3_diff1\n value: 75.25149312810714\n - type: nauc_recall_at_3_max\n value: 23.20545662481487\n - type: nauc_recall_at_3_std\n value: -59.69689982140521\n - type: nauc_recall_at_5_diff1\n value: 73.69807273001406\n - type: nauc_recall_at_5_max\n value: 24.073666798066057\n - type: nauc_recall_at_5_std\n value: -67.91121268130719\n - type: ndcg_at_1\n value: 82.64\n - type: ndcg_at_10\n value: 89.58\n - type: ndcg_at_100\n value: 90.606\n - type: ndcg_at_1000\n value: 90.676\n - type: ndcg_at_20\n value: 90.132\n - type: ndcg_at_3\n value: 86.88\n - type: ndcg_at_5\n value: 88.40299999999999\n - type: precision_at_1\n value: 82.64\n - type: precision_at_10\n value: 13.604\n - type: precision_at_100\n value: 1.539\n - type: precision_at_1000\n value: 0.157\n - type: precision_at_20\n value: 7.188\n - type: precision_at_3\n value: 38.083\n - type: precision_at_5\n value: 25.018\n - type: recall_at_1\n value: 71.819\n - type: recall_at_10\n value: 96.34700000000001\n - type: recall_at_100\n value: 99.715\n - type: recall_at_1000\n value: 99.995\n - type: recall_at_20\n value: 98.073\n - type: recall_at_3\n value: 88.57300000000001\n - type: recall_at_5\n value: 92.908\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: main_score\n value: 71.18966762070158\n - type: v_measure\n value: 71.18966762070158\n - type: v_measure_std\n value: 2.7498969054457048\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 
385e3cb46b4cfa89021f56c4380204149d0efe33\n metrics:\n - type: main_score\n value: 74.42014716862516\n - type: v_measure\n value: 74.42014716862516\n - type: v_measure_std\n value: 9.909739891410648\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS\n type: mteb/scidocs\n config: default\n split: test\n revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88\n metrics:\n - type: main_score\n value: 25.041999999999998\n - type: map_at_1\n value: 5.893000000000001\n - type: map_at_10\n value: 15.260000000000002\n - type: map_at_100\n value: 18.084\n - type: map_at_1000\n value: 18.467\n - type: map_at_20\n value: 16.675\n - type: map_at_3\n value: 10.526\n - type: map_at_5\n value: 12.775\n - type: mrr_at_1\n value: 28.999999999999996\n - type: mrr_at_10\n value: 41.03575396825395\n - type: mrr_at_100\n value: 42.136771862785835\n - type: mrr_at_1000\n value: 42.16698555415099\n - type: mrr_at_20\n value: 41.707493696104315\n - type: mrr_at_3\n value: 37.34999999999998\n - type: mrr_at_5\n value: 39.59999999999995\n - type: nauc_map_at_1000_diff1\n value: 12.080002654911883\n - type: nauc_map_at_1000_max\n value: 29.813563682286276\n - type: nauc_map_at_1000_std\n value: 20.36659817908673\n - type: nauc_map_at_100_diff1\n value: 12.108735517749706\n - type: nauc_map_at_100_max\n value: 29.76830671710955\n - type: nauc_map_at_100_std\n value: 20.3433621032846\n - type: nauc_map_at_10_diff1\n value: 12.91575031185637\n - type: nauc_map_at_10_max\n value: 29.427600958386318\n - type: nauc_map_at_10_std\n value: 16.89867275177153\n - type: nauc_map_at_1_diff1\n value: 19.353069488987916\n - type: nauc_map_at_1_max\n value: 17.093914951159693\n - type: nauc_map_at_1_std\n value: 8.19886078055046\n - type: nauc_map_at_20_diff1\n value: 11.977233457943113\n - type: nauc_map_at_20_max\n value: 29.171812822948805\n - type: nauc_map_at_20_std\n value: 18.780517506173965\n - type: nauc_map_at_3_diff1\n value: 14.453129464176092\n - type: nauc_map_at_3_max\n value: 
25.801958649112077\n - type: nauc_map_at_3_std\n value: 11.572823684429643\n - type: nauc_map_at_5_diff1\n value: 13.167155808104997\n - type: nauc_map_at_5_max\n value: 27.355626948365792\n - type: nauc_map_at_5_std\n value: 14.414151839192183\n - type: nauc_mrr_at_1000_diff1\n value: 17.262104643988636\n - type: nauc_mrr_at_1000_max\n value: 23.991373837217058\n - type: nauc_mrr_at_1000_std\n value: 12.44755488671623\n - type: nauc_mrr_at_100_diff1\n value: 17.267280132318703\n - type: nauc_mrr_at_100_max\n value: 24.022189287889294\n - type: nauc_mrr_at_100_std\n value: 12.480695500214788\n - type: nauc_mrr_at_10_diff1\n value: 17.012383998246268\n - type: nauc_mrr_at_10_max\n value: 24.192637911171722\n - type: nauc_mrr_at_10_std\n value: 12.524608847408917\n - type: nauc_mrr_at_1_diff1\n value: 19.43518811038007\n - type: nauc_mrr_at_1_max\n value: 17.747482933395602\n - type: nauc_mrr_at_1_std\n value: 8.410779775558684\n - type: nauc_mrr_at_20_diff1\n value: 17.202663281407446\n - type: nauc_mrr_at_20_max\n value: 24.091991130543118\n - type: nauc_mrr_at_20_std\n value: 12.503814263019908\n - type: nauc_mrr_at_3_diff1\n value: 17.52733013432995\n - type: nauc_mrr_at_3_max\n value: 23.569459518780214\n - type: nauc_mrr_at_3_std\n value: 11.770846827520726\n - type: nauc_mrr_at_5_diff1\n value: 17.10817561975543\n - type: nauc_mrr_at_5_max\n value: 23.945141435234678\n - type: nauc_mrr_at_5_std\n value: 12.034468615317719\n - type: nauc_ndcg_at_1000_diff1\n value: 12.317811393346936\n - type: nauc_ndcg_at_1000_max\n value: 30.809991350156103\n - type: nauc_ndcg_at_1000_std\n value: 24.517501065205067\n - type: nauc_ndcg_at_100_diff1\n value: 12.824804203182936\n - type: nauc_ndcg_at_100_max\n value: 30.895499817010748\n - type: nauc_ndcg_at_100_std\n value: 25.424376279745402\n - type: nauc_ndcg_at_10_diff1\n value: 13.32724552457439\n - type: nauc_ndcg_at_10_max\n value: 30.409088666807456\n - type: nauc_ndcg_at_10_std\n value: 18.216330475714113\n - type: 
nauc_ndcg_at_1_diff1\n value: 19.43518811038007\n - type: nauc_ndcg_at_1_max\n value: 17.747482933395602\n - type: nauc_ndcg_at_1_std\n value: 8.410779775558684\n - type: nauc_ndcg_at_20_diff1\n value: 12.224399111852902\n - type: nauc_ndcg_at_20_max\n value: 29.86352330445272\n - type: nauc_ndcg_at_20_std\n value: 21.196937851331807\n - type: nauc_ndcg_at_3_diff1\n value: 15.367489533734027\n - type: nauc_ndcg_at_3_max\n value: 26.76486390741532\n - type: nauc_ndcg_at_3_std\n value: 12.606077508789923\n - type: nauc_ndcg_at_5_diff1\n value: 13.831157482390935\n - type: nauc_ndcg_at_5_max\n value: 28.070226983968904\n - type: nauc_ndcg_at_5_std\n value: 15.236787943125435\n - type: nauc_precision_at_1000_diff1\n value: 0.016122957101357048\n - type: nauc_precision_at_1000_max\n value: 24.380929903557334\n - type: nauc_precision_at_1000_std\n value: 34.54045112720052\n - type: nauc_precision_at_100_diff1\n value: 7.255224788507301\n - type: nauc_precision_at_100_max\n value: 27.98453788447542\n - type: nauc_precision_at_100_std\n value: 35.38999555441665\n - type: nauc_precision_at_10_diff1\n value: 9.69185099834181\n - type: nauc_precision_at_10_max\n value: 32.532315522580454\n - type: nauc_precision_at_10_std\n value: 21.48948348473612\n - type: nauc_precision_at_1_diff1\n value: 19.43518811038007\n - type: nauc_precision_at_1_max\n value: 17.747482933395602\n - type: nauc_precision_at_1_std\n value: 8.410779775558684\n - type: nauc_precision_at_20_diff1\n value: 6.964076536695672\n - type: nauc_precision_at_20_max\n value: 29.30087236410044\n - type: nauc_precision_at_20_std\n value: 26.413625895571986\n - type: nauc_precision_at_3_diff1\n value: 14.145134359925155\n - type: nauc_precision_at_3_max\n value: 29.915650960808303\n - type: nauc_precision_at_3_std\n value: 14.095370019867797\n - type: nauc_precision_at_5_diff1\n value: 11.043933558522692\n - type: nauc_precision_at_5_max\n value: 30.93016505807111\n - type: nauc_precision_at_5_std\n value: 
17.749256196062603\n - type: nauc_recall_at_1000_diff1\n value: -0.7776817772090345\n - type: nauc_recall_at_1000_max\n value: 23.094717340324518\n - type: nauc_recall_at_1000_std\n value: 37.189908681396425\n - type: nauc_recall_at_100_diff1\n value: 6.887748742013364\n - type: nauc_recall_at_100_max\n value: 27.00798435230277\n - type: nauc_recall_at_100_std\n value: 35.908147807345344\n - type: nauc_recall_at_10_diff1\n value: 9.605632017480751\n - type: nauc_recall_at_10_max\n value: 31.845202901168655\n - type: nauc_recall_at_10_std\n value: 21.497414586634683\n - type: nauc_recall_at_1_diff1\n value: 19.353069488987916\n - type: nauc_recall_at_1_max\n value: 17.093914951159693\n - type: nauc_recall_at_1_std\n value: 8.19886078055046\n - type: nauc_recall_at_20_diff1\n value: 6.927503731844782\n - type: nauc_recall_at_20_max\n value: 28.611698183338202\n - type: nauc_recall_at_20_std\n value: 26.69018660149911\n - type: nauc_recall_at_3_diff1\n value: 14.043724087062268\n - type: nauc_recall_at_3_max\n value: 29.269835821380465\n - type: nauc_recall_at_3_std\n value: 14.104419605998094\n - type: nauc_recall_at_5_diff1\n value: 11.017319452873336\n - type: nauc_recall_at_5_max\n value: 30.295720628306228\n - type: nauc_recall_at_5_std\n value: 17.758048545573825\n - type: ndcg_at_1\n value: 28.999999999999996\n - type: ndcg_at_10\n value: 25.041999999999998\n - type: ndcg_at_100\n value: 35.045\n - type: ndcg_at_1000\n value: 40.803\n - type: ndcg_at_20\n value: 28.584\n - type: ndcg_at_3\n value: 23.249\n - type: ndcg_at_5\n value: 20.533\n - type: precision_at_1\n value: 28.999999999999996\n - type: precision_at_10\n value: 13.120000000000001\n - type: precision_at_100\n value: 2.7470000000000003\n - type: precision_at_1000\n value: 0.41200000000000003\n - type: precision_at_20\n value: 8.584999999999999\n - type: precision_at_3\n value: 21.633\n - type: precision_at_5\n value: 18.099999999999998\n - type: recall_at_1\n value: 5.893000000000001\n - type: 
recall_at_10\n value: 26.567\n - type: recall_at_100\n value: 55.800000000000004\n - type: recall_at_1000\n value: 83.608\n - type: recall_at_20\n value: 34.86\n - type: recall_at_3\n value: 13.153\n - type: recall_at_5\n value: 18.323\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: 20a6d6f312dd54037fe07a32d58e5e168867909d\n metrics:\n - type: cosine_pearson\n value: 86.57284584320382\n - type: cosine_spearman\n value: 82.20531642680812\n - type: euclidean_pearson\n value: 83.94261758556554\n - type: euclidean_spearman\n value: 82.20721497738559\n - type: main_score\n value: 82.20531642680812\n - type: manhattan_pearson\n value: 84.15902154703083\n - type: manhattan_spearman\n value: 82.19506027155957\n - type: pearson\n value: 86.57284584320382\n - type: spearman\n value: 82.20531642680812\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cosine_pearson\n value: 86.28047602146931\n - type: cosine_spearman\n value: 79.51504881448884\n - type: euclidean_pearson\n value: 83.10545189967856\n - type: euclidean_spearman\n value: 79.50586960492797\n - type: main_score\n value: 79.51504881448884\n - type: manhattan_pearson\n value: 83.44244457500889\n - type: manhattan_spearman\n value: 79.730303339846\n - type: pearson\n value: 86.28047602146931\n - type: spearman\n value: 79.51504881448884\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - type: cosine_pearson\n value: 88.74723553048702\n - type: cosine_spearman\n value: 89.18936052329725\n - type: euclidean_pearson\n value: 88.90400878928668\n - type: euclidean_spearman\n value: 89.19174821431281\n - type: main_score\n value: 89.18936052329725\n - type: manhattan_pearson\n value: 
88.81504628424054\n - type: manhattan_spearman\n value: 89.18063294142597\n - type: pearson\n value: 88.74723553048702\n - type: spearman\n value: 89.18936052329725\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: cosine_pearson\n value: 86.45403437836023\n - type: cosine_spearman\n value: 85.14654611519086\n - type: euclidean_pearson\n value: 85.87509624462743\n - type: euclidean_spearman\n value: 85.1391108856681\n - type: main_score\n value: 85.14654611519086\n - type: manhattan_pearson\n value: 85.96635794953866\n - type: manhattan_spearman\n value: 85.3271371527667\n - type: pearson\n value: 86.45403437836023\n - type: spearman\n value: 85.14654611519086\n - task:\n type: STS\n dataset:\n name: MTEB STS15\n type: mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cosine_pearson\n value: 87.84742260009705\n - type: cosine_spearman\n value: 89.10215217191254\n - type: euclidean_pearson\n value: 88.97393286325477\n - type: euclidean_spearman\n value: 89.1014105509662\n - type: main_score\n value: 89.10215217191254\n - type: manhattan_pearson\n value: 89.31698781090151\n - type: manhattan_spearman\n value: 89.53000001764433\n - type: pearson\n value: 87.84742260009705\n - type: spearman\n value: 89.10215217191254\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cosine_pearson\n value: 85.22397535461835\n - type: cosine_spearman\n value: 87.14066355879785\n - type: euclidean_pearson\n value: 86.31393364087295\n - type: euclidean_spearman\n value: 87.14018892702765\n - type: main_score\n value: 87.14066355879785\n - type: manhattan_pearson\n value: 86.36366855248434\n - type: manhattan_spearman\n value: 87.20858630423012\n - type: 
pearson\n value: 85.22397535461835\n - type: spearman\n value: 87.14066355879785\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: faeb762787bd10488a50c8b5be4a3b82e411949c\n metrics:\n - type: cosine_pearson\n value: 90.66131612061355\n - type: cosine_spearman\n value: 90.97082650129164\n - type: euclidean_pearson\n value: 90.98181906744969\n - type: euclidean_spearman\n value: 90.99008476850047\n - type: main_score\n value: 90.97082650129164\n - type: manhattan_pearson\n value: 90.75245040709021\n - type: manhattan_spearman\n value: 90.6199877691265\n - type: pearson\n value: 90.66131612061355\n - type: spearman\n value: 90.97082650129164\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3\n metrics:\n - type: cosine_pearson\n value: 67.270656447085\n - type: cosine_spearman\n value: 67.82870469746828\n - type: euclidean_pearson\n value: 69.03857775285664\n - type: euclidean_spearman\n value: 67.74455108773341\n - type: main_score\n value: 67.82870469746828\n - type: manhattan_pearson\n value: 69.25304172245812\n - type: manhattan_spearman\n value: 68.00987097916055\n - type: pearson\n value: 67.270656447085\n - type: spearman\n value: 67.82870469746828\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmark\n type: mteb/stsbenchmark-sts\n config: default\n split: test\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\n metrics:\n - type: cosine_pearson\n value: 87.17245205384889\n - type: cosine_spearman\n value: 87.7360146030987\n - type: euclidean_pearson\n value: 87.48919412794656\n - type: euclidean_spearman\n value: 87.7312047878383\n - type: main_score\n value: 87.7360146030987\n - type: manhattan_pearson\n value: 87.61476224354806\n - type: manhattan_spearman\n value: 87.95220889254693\n - type: pearson\n value: 87.17245205384889\n - type: 
spearman\n value: 87.7360146030987\n - task:\n type: Reranking\n dataset:\n name: MTEB SciDocsRR\n type: mteb/scidocs-reranking\n config: default\n split: test\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\n metrics:\n - type: main_score\n value: 88.43547871921146\n - type: map\n value: 88.43547871921146\n - type: mrr\n value: 96.5564473652709\n - type: nAUC_map_diff1\n value: -13.66029392579231\n - type: nAUC_map_max\n value: 50.325613574053506\n - type: nAUC_map_std\n value: 60.02986231275796\n - type: nAUC_mrr_diff1\n value: 23.83821476411125\n - type: nAUC_mrr_max\n value: 86.72643311769906\n - type: nAUC_mrr_std\n value: 72.12741063469213\n - task:\n type: Retrieval\n dataset:\n name: MTEB SciFact\n type: mteb/scifact\n config: default\n split: test\n revision: 0228b52cf27578f30900b9e5271d331663a030d7\n metrics:\n - type: main_score\n value: 78.233\n - type: map_at_1\n value: 61.49400000000001\n - type: map_at_10\n value: 73.30600000000001\n - type: map_at_100\n value: 73.719\n - type: map_at_1000\n value: 73.724\n - type: map_at_20\n value: 73.611\n - type: map_at_3\n value: 70.626\n - type: map_at_5\n value: 72.417\n - type: mrr_at_1\n value: 64.66666666666666\n - type: mrr_at_10\n value: 74.30357142857143\n - type: mrr_at_100\n value: 74.56950898079988\n - type: mrr_at_1000\n value: 74.57295833098681\n - type: mrr_at_20\n value: 74.46165223665226\n - type: mrr_at_3\n value: 72.3888888888889\n - type: mrr_at_5\n value: 73.60555555555557\n - type: nauc_map_at_1000_diff1\n value: 76.51524604780636\n - type: nauc_map_at_1000_max\n value: 53.48521938401881\n - type: nauc_map_at_1000_std\n value: -7.347799382158861\n - type: nauc_map_at_100_diff1\n value: 76.5122888096236\n - type: nauc_map_at_100_max\n value: 53.49221847471618\n - type: nauc_map_at_100_std\n value: -7.329683735681086\n - type: nauc_map_at_10_diff1\n value: 76.30928630674504\n - type: nauc_map_at_10_max\n value: 53.00102977185941\n - type: nauc_map_at_10_std\n value: -7.7467740085108705\n 
- type: nauc_map_at_1_diff1\n value: 79.54189281784247\n - type: nauc_map_at_1_max\n value: 46.630071622109526\n - type: nauc_map_at_1_std\n value: -14.395943134644112\n - type: nauc_map_at_20_diff1\n value: 76.41604361947962\n - type: nauc_map_at_20_max\n value: 53.578883876146875\n - type: nauc_map_at_20_std\n value: -7.403103451288041\n - type: nauc_map_at_3_diff1\n value: 76.25911617571941\n - type: nauc_map_at_3_max\n value: 49.140287380513605\n - type: nauc_map_at_3_std\n value: -11.35992449218983\n - type: nauc_map_at_5_diff1\n value: 76.35122077770336\n - type: nauc_map_at_5_max\n value: 52.1744367901208\n - type: nauc_map_at_5_std\n value: -7.85753955055384\n - type: nauc_mrr_at_1000_diff1\n value: 76.97223309515867\n - type: nauc_mrr_at_1000_max\n value: 57.263787498613326\n - type: nauc_mrr_at_1000_std\n value: -4.884090708840035\n - type: nauc_mrr_at_100_diff1\n value: 76.97312970894603\n - type: nauc_mrr_at_100_max\n value: 57.26850730446478\n - type: nauc_mrr_at_100_std\n value: -4.875200894216617\n - type: nauc_mrr_at_10_diff1\n value: 76.65927674223613\n - type: nauc_mrr_at_10_max\n value: 57.30979763941454\n - type: nauc_mrr_at_10_std\n value: -4.863331094022142\n - type: nauc_mrr_at_1_diff1\n value: 80.0454932568644\n - type: nauc_mrr_at_1_max\n value: 56.76038421319305\n - type: nauc_mrr_at_1_std\n value: -4.101939392632653\n - type: nauc_mrr_at_20_diff1\n value: 76.87237970440503\n - type: nauc_mrr_at_20_max\n value: 57.33843605225869\n - type: nauc_mrr_at_20_std\n value: -4.96248984417978\n - type: nauc_mrr_at_3_diff1\n value: 76.74130186666727\n - type: nauc_mrr_at_3_max\n value: 56.19313244846155\n - type: nauc_mrr_at_3_std\n value: -5.684365934009136\n - type: nauc_mrr_at_5_diff1\n value: 76.66406918799962\n - type: nauc_mrr_at_5_max\n value: 57.56110093228628\n - type: nauc_mrr_at_5_std\n value: -3.7464413085588073\n - type: nauc_ndcg_at_1000_diff1\n value: 76.19194173971773\n - type: nauc_ndcg_at_1000_max\n value: 55.57464600170693\n - 
type: nauc_ndcg_at_1000_std\n value: -6.0761689532372625\n - type: nauc_ndcg_at_100_diff1\n value: 76.14631273843654\n - type: nauc_ndcg_at_100_max\n value: 55.72246565373382\n - type: nauc_ndcg_at_100_std\n value: -5.595160698860595\n - type: nauc_ndcg_at_10_diff1\n value: 75.0108223611192\n - type: nauc_ndcg_at_10_max\n value: 55.27894212877493\n - type: nauc_ndcg_at_10_std\n value: -6.968331740214591\n - type: nauc_ndcg_at_1_diff1\n value: 80.0454932568644\n - type: nauc_ndcg_at_1_max\n value: 56.76038421319305\n - type: nauc_ndcg_at_1_std\n value: -4.101939392632653\n - type: nauc_ndcg_at_20_diff1\n value: 75.54887755702472\n - type: nauc_ndcg_at_20_max\n value: 56.406879417251496\n - type: nauc_ndcg_at_20_std\n value: -6.495231061329629\n - type: nauc_ndcg_at_3_diff1\n value: 75.03620356688509\n - type: nauc_ndcg_at_3_max\n value: 52.147381077773424\n - type: nauc_ndcg_at_3_std\n value: -8.448005688956199\n - type: nauc_ndcg_at_5_diff1\n value: 75.1195898074229\n - type: nauc_ndcg_at_5_max\n value: 54.2321033861173\n - type: nauc_ndcg_at_5_std\n value: -5.882690780895338\n - type: nauc_precision_at_1000_diff1\n value: -28.081979732100532\n - type: nauc_precision_at_1000_max\n value: 35.055348014832916\n - type: nauc_precision_at_1000_std\n value: 59.61280468927384\n - type: nauc_precision_at_100_diff1\n value: -25.112740730587458\n - type: nauc_precision_at_100_max\n value: 38.26331300116496\n - type: nauc_precision_at_100_std\n value: 62.46316222328831\n - type: nauc_precision_at_10_diff1\n value: -2.6766206473658833\n - type: nauc_precision_at_10_max\n value: 45.95321867204845\n - type: nauc_precision_at_10_std\n value: 45.07212468670564\n - type: nauc_precision_at_1_diff1\n value: 80.0454932568644\n - type: nauc_precision_at_1_max\n value: 56.76038421319305\n - type: nauc_precision_at_1_std\n value: -4.101939392632653\n - type: nauc_precision_at_20_diff1\n value: -10.698911116738385\n - type: nauc_precision_at_20_max\n value: 43.467275950182994\n - type: 
nauc_precision_at_20_std\n value: 48.00467321991766\n - type: nauc_precision_at_3_diff1\n value: 33.6344708541193\n - type: nauc_precision_at_3_max\n value: 49.309242331670504\n - type: nauc_precision_at_3_std\n value: 21.02940391379915\n - type: nauc_precision_at_5_diff1\n value: 13.560415600596318\n - type: nauc_precision_at_5_max\n value: 48.918726500100085\n - type: nauc_precision_at_5_std\n value: 39.940930429172184\n - type: nauc_recall_at_1000_diff1\n value: .nan\n - type: nauc_recall_at_1000_max\n value: .nan\n - type: nauc_recall_at_1000_std\n value: .nan\n - type: nauc_recall_at_100_diff1\n value: 70.82166199813196\n - type: nauc_recall_at_100_max\n value: 76.6106442577042\n - type: nauc_recall_at_100_std\n value: 66.47992530345513\n - type: nauc_recall_at_10_diff1\n value: 62.68908885556092\n - type: nauc_recall_at_10_max\n value: 58.14262437741839\n - type: nauc_recall_at_10_std\n value: -12.946717875063369\n - type: nauc_recall_at_1_diff1\n value: 79.54189281784247\n - type: nauc_recall_at_1_max\n value: 46.630071622109526\n - type: nauc_recall_at_1_std\n value: -14.395943134644112\n - type: nauc_recall_at_20_diff1\n value: 65.79470497876567\n - type: nauc_recall_at_20_max\n value: 71.68308183488456\n - type: nauc_recall_at_20_std\n value: -12.556850697268453\n - type: nauc_recall_at_3_diff1\n value: 68.3240211318129\n - type: nauc_recall_at_3_max\n value: 45.05998217275036\n - type: nauc_recall_at_3_std\n value: -14.23179772593869\n - type: nauc_recall_at_5_diff1\n value: 67.53366869904056\n - type: nauc_recall_at_5_max\n value: 53.57935627081027\n - type: nauc_recall_at_5_std\n value: -3.3271112904853393\n - type: ndcg_at_1\n value: 64.667\n - type: ndcg_at_10\n value: 78.233\n - type: ndcg_at_100\n value: 79.806\n - type: ndcg_at_1000\n value: 79.92099999999999\n - type: ndcg_at_20\n value: 79.006\n - type: ndcg_at_3\n value: 74.018\n - type: ndcg_at_5\n value: 76.334\n - type: precision_at_1\n value: 64.667\n - type: precision_at_10\n value: 10.4\n 
- type: precision_at_100\n value: 1.1199999999999999\n - type: precision_at_1000\n value: 0.11299999999999999\n - type: precision_at_20\n value: 5.383\n - type: precision_at_3\n value: 29.444\n - type: precision_at_5\n value: 19.467000000000002\n - type: recall_at_1\n value: 61.49400000000001\n - type: recall_at_10\n value: 92.156\n - type: recall_at_100\n value: 99.167\n - type: recall_at_1000\n value: 100.0\n - type: recall_at_20\n value: 94.833\n - type: recall_at_3\n value: 80.833\n - type: recall_at_5\n value: 86.6\n - task:\n type: PairClassification\n dataset:\n name: MTEB SprintDuplicateQuestions\n type: mteb/sprintduplicatequestions-pairclassification\n config: default\n split: test\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\n metrics:\n - type: cosine_accuracy\n value: 99.8039603960396\n - type: cosine_accuracy_threshold\n value: 84.54211950302124\n - type: cosine_ap\n value: 95.59056372734358\n - type: cosine_f1\n value: 90.1394422310757\n - type: cosine_f1_threshold\n value: 84.54211950302124\n - type: cosine_precision\n value: 89.78174603174604\n - type: cosine_recall\n value: 90.5\n - type: dot_accuracy\n value: 99.80594059405941\n - type: dot_accuracy_threshold\n value: 85.57180166244507\n - type: dot_ap\n value: 95.53453431914399\n - type: dot_f1\n value: 90.10442565887618\n - type: dot_f1_threshold\n value: 84.59715843200684\n - type: dot_precision\n value: 89.61424332344214\n - type: dot_recall\n value: 90.60000000000001\n - type: euclidean_accuracy\n value: 99.8039603960396\n - type: euclidean_accuracy_threshold\n value: 53.253382444381714\n - type: euclidean_ap\n value: 95.5850992402159\n - type: euclidean_f1\n value: 90.09457441513192\n - type: euclidean_f1_threshold\n value: 55.725520849227905\n - type: euclidean_precision\n value: 89.69276511397423\n - type: euclidean_recall\n value: 90.5\n - type: main_score\n value: 95.7485189884476\n - type: manhattan_accuracy\n value: 99.81485148514851\n - type: manhattan_accuracy_threshold\n 
value: 3491.29638671875\n - type: manhattan_ap\n value: 95.7485189884476\n - type: manhattan_f1\n value: 90.464048954615\n - type: manhattan_f1_threshold\n value: 3491.29638671875\n - type: manhattan_precision\n value: 92.2996878251821\n - type: manhattan_recall\n value: 88.7\n - type: max_ap\n value: 95.7485189884476\n - type: max_f1\n value: 90.464048954615\n - type: max_precision\n value: 92.2996878251821\n - type: max_recall\n value: 90.60000000000001\n - type: similarity_accuracy\n value: 99.8039603960396\n - type: similarity_accuracy_threshold\n value: 84.54211950302124\n - type: similarity_ap\n value: 95.59056372734358\n - type: similarity_f1\n value: 90.1394422310757\n - type: similarity_f1_threshold\n value: 84.54211950302124\n - type: similarity_precision\n value: 89.78174603174604\n - type: similarity_recall\n value: 90.5\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClustering\n type: mteb/stackexchange-clustering\n config: default\n split: test\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\n metrics:\n - type: main_score\n value: 78.49205191950675\n - type: v_measure\n value: 78.49205191950675\n - type: v_measure_std\n value: 2.84869550699959\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClusteringP2P\n type: mteb/stackexchange-clustering-p2p\n config: default\n split: test\n revision: 815ca46b2622cec33ccafc3735d572c266efdb44\n metrics:\n - type: main_score\n value: 48.90421736513028\n - type: v_measure\n value: 48.90421736513028\n - type: v_measure_std\n value: 1.6875865714471023\n - task:\n type: Reranking\n dataset:\n name: MTEB StackOverflowDupQuestions\n type: mteb/stackoverflowdupquestions-reranking\n config: default\n split: test\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\n metrics:\n - type: main_score\n value: 52.9874730481696\n - type: map\n value: 52.9874730481696\n - type: mrr\n value: 53.85867604617604\n - type: nAUC_map_diff1\n value: 39.633429293407616\n - type: nAUC_map_max\n 
value: 10.236807988858546\n - type: nAUC_map_std\n value: 10.276522217929674\n - type: nAUC_mrr_diff1\n value: 40.0543079218377\n - type: nAUC_mrr_max\n value: 10.96209807382042\n - type: nAUC_mrr_std\n value: 10.524400196109918\n - task:\n type: Summarization\n dataset:\n name: MTEB SummEval\n type: mteb/summeval\n config: default\n split: test\n revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c\n metrics:\n - type: cosine_pearson\n value: 30.727801109114232\n - type: cosine_spearman\n value: 31.66058223980157\n - type: dot_pearson\n value: 30.78818248622866\n - type: dot_spearman\n value: 31.525158776890265\n - type: main_score\n value: 31.66058223980157\n - type: pearson\n value: 30.727801109114232\n - type: spearman\n value: 31.66058223980157\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID\n type: mteb/trec-covid\n config: default\n split: test\n revision: bb9466bac8153a0349341eb1b22e06409e78ef4e\n metrics:\n - type: main_score\n value: 85.206\n - type: map_at_1\n value: 0.246\n - type: map_at_10\n value: 2.1950000000000003\n - type: map_at_100\n value: 14.179\n - type: map_at_1000\n value: 35.037\n - type: map_at_20\n value: 4.143\n - type: map_at_3\n value: 0.7100000000000001\n - type: map_at_5\n value: 1.135\n - type: mrr_at_1\n value: 94.0\n - type: mrr_at_10\n value: 96.66666666666666\n - type: mrr_at_100\n value: 96.66666666666666\n - type: mrr_at_1000\n value: 96.66666666666666\n - type: mrr_at_20\n value: 96.66666666666666\n - type: mrr_at_3\n value: 96.66666666666666\n - type: mrr_at_5\n value: 96.66666666666666\n - type: nauc_map_at_1000_diff1\n value: -4.6264497624527525\n - type: nauc_map_at_1000_max\n value: 44.594457564749355\n - type: nauc_map_at_1000_std\n value: 73.17642341400133\n - type: nauc_map_at_100_diff1\n value: 23.451335157405726\n - type: nauc_map_at_100_max\n value: 25.426398857299525\n - type: nauc_map_at_100_std\n value: 64.07416694472633\n - type: nauc_map_at_10_diff1\n value: 46.57568738568346\n - type: 
nauc_map_at_10_max\n value: 9.693233249079238\n - type: nauc_map_at_10_std\n value: 28.549530265164357\n - type: nauc_map_at_1_diff1\n value: 53.48238396620123\n - type: nauc_map_at_1_max\n value: 0.33476619393733076\n - type: nauc_map_at_1_std\n value: 8.906362219128463\n - type: nauc_map_at_20_diff1\n value: 39.40719602207749\n - type: nauc_map_at_20_max\n value: 9.635915072074045\n - type: nauc_map_at_20_std\n value: 35.15634791346394\n - type: nauc_map_at_3_diff1\n value: 53.11784737840137\n - type: nauc_map_at_3_max\n value: 3.059682761072153\n - type: nauc_map_at_3_std\n value: 21.310633086556617\n - type: nauc_map_at_5_diff1\n value: 49.91570701185436\n - type: nauc_map_at_5_max\n value: 8.045082896244576\n - type: nauc_map_at_5_std\n value: 20.597686235051647\n - type: nauc_mrr_at_1000_diff1\n value: 41.98412698412726\n - type: nauc_mrr_at_1000_max\n value: 78.24463118580779\n - type: nauc_mrr_at_1000_std\n value: 0.30812324930028195\n - type: nauc_mrr_at_100_diff1\n value: 41.98412698412726\n - type: nauc_mrr_at_100_max\n value: 78.24463118580779\n - type: nauc_mrr_at_100_std\n value: 0.30812324930028195\n - type: nauc_mrr_at_10_diff1\n value: 41.98412698412726\n - type: nauc_mrr_at_10_max\n value: 78.24463118580779\n - type: nauc_mrr_at_10_std\n value: 0.30812324930028195\n - type: nauc_mrr_at_1_diff1\n value: 38.62433862433873\n - type: nauc_mrr_at_1_max\n value: 80.78120136943666\n - type: nauc_mrr_at_1_std\n value: -10.768751945222197\n - type: nauc_mrr_at_20_diff1\n value: 41.98412698412726\n - type: nauc_mrr_at_20_max\n value: 78.24463118580779\n - type: nauc_mrr_at_20_std\n value: 0.30812324930028195\n - type: nauc_mrr_at_3_diff1\n value: 41.98412698412726\n - type: nauc_mrr_at_3_max\n value: 78.24463118580779\n - type: nauc_mrr_at_3_std\n value: 0.30812324930028195\n - type: nauc_mrr_at_5_diff1\n value: 41.98412698412726\n - type: nauc_mrr_at_5_max\n value: 78.24463118580779\n - type: nauc_mrr_at_5_std\n value: 0.30812324930028195\n - type: 
nauc_ndcg_at_1000_diff1\n value: 0.5174948602880207\n - type: nauc_ndcg_at_1000_max\n value: 48.60686602077053\n - type: nauc_ndcg_at_1000_std\n value: 75.72456343175277\n - type: nauc_ndcg_at_100_diff1\n value: -20.747252137999254\n - type: nauc_ndcg_at_100_max\n value: 49.985132618254994\n - type: nauc_ndcg_at_100_std\n value: 61.096383293836574\n - type: nauc_ndcg_at_10_diff1\n value: 6.791377920463332\n - type: nauc_ndcg_at_10_max\n value: 57.50019332833286\n - type: nauc_ndcg_at_10_std\n value: 49.201028841219426\n - type: nauc_ndcg_at_1_diff1\n value: 54.92683440362145\n - type: nauc_ndcg_at_1_max\n value: 83.8667228129276\n - type: nauc_ndcg_at_1_std\n value: 1.6738604063586122\n - type: nauc_ndcg_at_20_diff1\n value: -5.1948699196314925\n - type: nauc_ndcg_at_20_max\n value: 54.483087684806556\n - type: nauc_ndcg_at_20_std\n value: 50.54823818118781\n - type: nauc_ndcg_at_3_diff1\n value: 26.267246500164372\n - type: nauc_ndcg_at_3_max\n value: 63.0173212926611\n - type: nauc_ndcg_at_3_std\n value: 41.025597406368256\n - type: nauc_ndcg_at_5_diff1\n value: 16.910185454343036\n - type: nauc_ndcg_at_5_max\n value: 60.9328683868778\n - type: nauc_ndcg_at_5_std\n value: 36.70169905857712\n - type: nauc_precision_at_1000_diff1\n value: -46.374447765983525\n - type: nauc_precision_at_1000_max\n value: 35.36052337813863\n - type: nauc_precision_at_1000_std\n value: 14.219220668161018\n - type: nauc_precision_at_100_diff1\n value: -29.7838083657744\n - type: nauc_precision_at_100_max\n value: 43.93589400385112\n - type: nauc_precision_at_100_std\n value: 55.425045718579945\n - type: nauc_precision_at_10_diff1\n value: -12.016613405227687\n - type: nauc_precision_at_10_max\n value: 57.79924427743131\n - type: nauc_precision_at_10_std\n value: 49.022036703550675\n - type: nauc_precision_at_1_diff1\n value: 38.62433862433873\n - type: nauc_precision_at_1_max\n value: 80.78120136943666\n - type: nauc_precision_at_1_std\n value: -10.768751945222197\n - type: 
nauc_precision_at_20_diff1\n value: -23.95633847880195\n - type: nauc_precision_at_20_max\n value: 48.34715917258276\n - type: nauc_precision_at_20_std\n value: 48.82198285255887\n - type: nauc_precision_at_3_diff1\n value: 6.871296905858807\n - type: nauc_precision_at_3_max\n value: 70.54805793285054\n - type: nauc_precision_at_3_std\n value: 44.65108624094803\n - type: nauc_precision_at_5_diff1\n value: -9.074932448759695\n - type: nauc_precision_at_5_max\n value: 67.41284242437573\n - type: nauc_precision_at_5_std\n value: 23.876891983919577\n - type: nauc_recall_at_1000_diff1\n value: 8.142288830293255\n - type: nauc_recall_at_1000_max\n value: 38.85182826835104\n - type: nauc_recall_at_1000_std\n value: 68.60783819217335\n - type: nauc_recall_at_100_diff1\n value: 34.262914076287466\n - type: nauc_recall_at_100_max\n value: 12.87009658528838\n - type: nauc_recall_at_100_std\n value: 56.21330603762995\n - type: nauc_recall_at_10_diff1\n value: 49.33830945338758\n - type: nauc_recall_at_10_max\n value: 0.3539875530671406\n - type: nauc_recall_at_10_std\n value: 26.85864465557644\n - type: nauc_recall_at_1_diff1\n value: 53.48238396620123\n - type: nauc_recall_at_1_max\n value: 0.33476619393733076\n - type: nauc_recall_at_1_std\n value: 8.906362219128463\n - type: nauc_recall_at_20_diff1\n value: 44.21928181266254\n - type: nauc_recall_at_20_max\n value: -0.9198356057088594\n - type: nauc_recall_at_20_std\n value: 31.484376992896784\n - type: nauc_recall_at_3_diff1\n value: 53.038093080990876\n - type: nauc_recall_at_3_max\n value: -1.4170895916973003\n - type: nauc_recall_at_3_std\n value: 21.890202855574497\n - type: nauc_recall_at_5_diff1\n value: 49.39742214825278\n - type: nauc_recall_at_5_max\n value: 2.8412267611894517\n - type: nauc_recall_at_5_std\n value: 18.01598921859512\n - type: ndcg_at_1\n value: 91.0\n - type: ndcg_at_10\n value: 85.206\n - type: ndcg_at_100\n value: 67.29\n - type: ndcg_at_1000\n value: 60.584\n - type: ndcg_at_20\n value: 
82.321\n - type: ndcg_at_3\n value: 88.642\n - type: ndcg_at_5\n value: 87.063\n - type: precision_at_1\n value: 94.0\n - type: precision_at_10\n value: 89.8\n - type: precision_at_100\n value: 69.78\n - type: precision_at_1000\n value: 26.738\n - type: precision_at_20\n value: 87.2\n - type: precision_at_3\n value: 92.0\n - type: precision_at_5\n value: 90.8\n - type: recall_at_1\n value: 0.246\n - type: recall_at_10\n value: 2.344\n - type: recall_at_100\n value: 16.962\n - type: recall_at_1000\n value: 57.325\n - type: recall_at_20\n value: 4.517\n - type: recall_at_3\n value: 0.731\n - type: recall_at_5\n value: 1.1780000000000002\n - task:\n type: Retrieval\n dataset:\n name: MTEB Touche2020\n type: mteb/touche2020\n config: default\n split: test\n revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f\n metrics:\n - type: main_score\n value: 31.455\n - type: map_at_1\n value: 2.9739999999999998\n - type: map_at_10\n value: 12.183\n - type: map_at_100\n value: 18.772\n - type: map_at_1000\n value: 20.415\n - type: map_at_20\n value: 14.451\n - type: map_at_3\n value: 6.507000000000001\n - type: map_at_5\n value: 8.66\n - type: mrr_at_1\n value: 40.816326530612244\n - type: mrr_at_10\n value: 57.70975056689341\n - type: mrr_at_100\n value: 58.18379126542391\n - type: mrr_at_1000\n value: 58.18379126542391\n - type: mrr_at_20\n value: 57.85552316164561\n - type: mrr_at_3\n value: 54.08163265306123\n - type: mrr_at_5\n value: 56.42857142857143\n - type: nauc_map_at_1000_diff1\n value: 3.1567471051481437\n - type: nauc_map_at_1000_max\n value: -1.5882060729791523\n - type: nauc_map_at_1000_std\n value: 18.69622198722074\n - type: nauc_map_at_100_diff1\n value: 3.3449677678147536\n - type: nauc_map_at_100_max\n value: -2.8928606866168405\n - type: nauc_map_at_100_std\n value: 15.789984947653412\n - type: nauc_map_at_10_diff1\n value: 2.9696743570444264\n - type: nauc_map_at_10_max\n value: -9.096749212011876\n - type: nauc_map_at_10_std\n value: -5.38545817258353\n - 
type: nauc_map_at_1_diff1\n value: 20.680780404542546\n - type: nauc_map_at_1_max\n value: -7.04722927447817\n - type: nauc_map_at_1_std\n value: -7.062494733973898\n - type: nauc_map_at_20_diff1\n value: 4.070437790119271\n - type: nauc_map_at_20_max\n value: -4.84491434686032\n - type: nauc_map_at_20_std\n value: 0.5846341109021014\n - type: nauc_map_at_3_diff1\n value: 11.9634978045925\n - type: nauc_map_at_3_max\n value: -8.27834591046608\n - type: nauc_map_at_3_std\n value: -8.687615453381065\n - type: nauc_map_at_5_diff1\n value: 0.9195191526009436\n - type: nauc_map_at_5_max\n value: -1.673813362719489\n - type: nauc_map_at_5_std\n value: -6.67549753473631\n - type: nauc_mrr_at_1000_diff1\n value: 19.877993208719573\n - type: nauc_mrr_at_1000_max\n value: -10.37776706406218\n - type: nauc_mrr_at_1000_std\n value: 7.132169578056367\n - type: nauc_mrr_at_100_diff1\n value: 19.877993208719573\n - type: nauc_mrr_at_100_max\n value: -10.37776706406218\n - type: nauc_mrr_at_100_std\n value: 7.132169578056367\n - type: nauc_mrr_at_10_diff1\n value: 20.414285568401457\n - type: nauc_mrr_at_10_max\n value: -9.677800295687861\n - type: nauc_mrr_at_10_std\n value: 8.001103690180859\n - type: nauc_mrr_at_1_diff1\n value: 22.393284073955723\n - type: nauc_mrr_at_1_max\n value: -5.889370191243167\n - type: nauc_mrr_at_1_std\n value: -1.5183536173658247\n - type: nauc_mrr_at_20_diff1\n value: 20.455564720604055\n - type: nauc_mrr_at_20_max\n value: -10.230642830103074\n - type: nauc_mrr_at_20_std\n value: 7.863582453266621\n - type: nauc_mrr_at_3_diff1\n value: 17.554895390732618\n - type: nauc_mrr_at_3_max\n value: -15.618463505555052\n - type: nauc_mrr_at_3_std\n value: 5.913231577966864\n - type: nauc_mrr_at_5_diff1\n value: 18.393678507779914\n - type: nauc_mrr_at_5_max\n value: -11.903593353147762\n - type: nauc_mrr_at_5_std\n value: 7.580745996262831\n - type: nauc_ndcg_at_1000_diff1\n value: 13.746937095530473\n - type: nauc_ndcg_at_1000_max\n value: 
-0.9319249687895838\n - type: nauc_ndcg_at_1000_std\n value: 38.56328031451904\n - type: nauc_ndcg_at_100_diff1\n value: 13.854865944415895\n - type: nauc_ndcg_at_100_max\n value: -7.142142012591404\n - type: nauc_ndcg_at_100_std\n value: 35.61341954818848\n - type: nauc_ndcg_at_10_diff1\n value: 9.010144273248759\n - type: nauc_ndcg_at_10_max\n value: -15.320014897424574\n - type: nauc_ndcg_at_10_std\n value: 2.84883880489144\n - type: nauc_ndcg_at_1_diff1\n value: 20.939533945592967\n - type: nauc_ndcg_at_1_max\n value: -6.387319972188946\n - type: nauc_ndcg_at_1_std\n value: -0.5258673122126726\n - type: nauc_ndcg_at_20_diff1\n value: 14.660827309009496\n - type: nauc_ndcg_at_20_max\n value: -13.476196120145994\n - type: nauc_ndcg_at_20_std\n value: 8.22391881710838\n - type: nauc_ndcg_at_3_diff1\n value: 13.429985227235935\n - type: nauc_ndcg_at_3_max\n value: -14.904544592570247\n - type: nauc_ndcg_at_3_std\n value: 1.599779998183342\n - type: nauc_ndcg_at_5_diff1\n value: 8.085466231900622\n - type: nauc_ndcg_at_5_max\n value: -9.09591969526831\n - type: nauc_ndcg_at_5_std\n value: 3.5794092637248505\n - type: nauc_precision_at_1000_diff1\n value: -9.31941215946743\n - type: nauc_precision_at_1000_max\n value: 31.52913520470716\n - type: nauc_precision_at_1000_std\n value: 22.720784312185856\n - type: nauc_precision_at_100_diff1\n value: 8.958548406995279\n - type: nauc_precision_at_100_max\n value: 15.100597910674104\n - type: nauc_precision_at_100_std\n value: 71.04548238175113\n - type: nauc_precision_at_10_diff1\n value: 12.4698194690008\n - type: nauc_precision_at_10_max\n value: -15.84870544871496\n - type: nauc_precision_at_10_std\n value: 7.575297622501928\n - type: nauc_precision_at_1_diff1\n value: 22.393284073955723\n - type: nauc_precision_at_1_max\n value: -5.889370191243167\n - type: nauc_precision_at_1_std\n value: -1.5183536173658247\n - type: nauc_precision_at_20_diff1\n value: 15.393505718138758\n - type: nauc_precision_at_20_max\n value: 
-3.70684298539384\n - type: nauc_precision_at_20_std\n value: 29.426137824970304\n - type: nauc_precision_at_3_diff1\n value: 9.997768085465394\n - type: nauc_precision_at_3_max\n value: -17.12224314347674\n - type: nauc_precision_at_3_std\n value: -1.343018166772313\n - type: nauc_precision_at_5_diff1\n value: 3.8936997437913554\n - type: nauc_precision_at_5_max\n value: -5.689104289687632\n - type: nauc_precision_at_5_std\n value: 3.181098051304285\n - type: nauc_recall_at_1000_diff1\n value: 9.908303508158387\n - type: nauc_recall_at_1000_max\n value: 6.174506592699848\n - type: nauc_recall_at_1000_std\n value: 77.41931114780012\n - type: nauc_recall_at_100_diff1\n value: 10.286839241876192\n - type: nauc_recall_at_100_max\n value: -6.6138697026666815\n - type: nauc_recall_at_100_std\n value: 49.608313692633224\n - type: nauc_recall_at_10_diff1\n value: 2.215545846659851\n - type: nauc_recall_at_10_max\n value: -17.83025802478445\n - type: nauc_recall_at_10_std\n value: -3.3784768673705465\n - type: nauc_recall_at_1_diff1\n value: 20.680780404542546\n - type: nauc_recall_at_1_max\n value: -7.04722927447817\n - type: nauc_recall_at_1_std\n value: -7.062494733973898\n - type: nauc_recall_at_20_diff1\n value: 6.974410239251615\n - type: nauc_recall_at_20_max\n value: -14.161147924731646\n - type: nauc_recall_at_20_std\n value: 9.328412057721454\n - type: nauc_recall_at_3_diff1\n value: 7.904589805754212\n - type: nauc_recall_at_3_max\n value: -12.1912388648593\n - type: nauc_recall_at_3_std\n value: -9.221542013385555\n - type: nauc_recall_at_5_diff1\n value: -3.2604132752706914\n - type: nauc_recall_at_5_max\n value: -6.886351441658915\n - type: nauc_recall_at_5_std\n value: -7.014252851712789\n - type: ndcg_at_1\n value: 39.796\n - type: ndcg_at_10\n value: 31.455\n - type: ndcg_at_100\n value: 42.388999999999996\n - type: ndcg_at_1000\n value: 53.556000000000004\n - type: ndcg_at_20\n value: 30.808000000000003\n - type: ndcg_at_3\n value: 35.831\n - type: 
ndcg_at_5\n value: 32.845\n - type: precision_at_1\n value: 40.816\n - type: precision_at_10\n value: 27.143\n - type: precision_at_100\n value: 8.449\n - type: precision_at_1000\n value: 1.6179999999999999\n - type: precision_at_20\n value: 19.387999999999998\n - type: precision_at_3\n value: 35.374\n - type: precision_at_5\n value: 31.019999999999996\n - type: recall_at_1\n value: 2.9739999999999998\n - type: recall_at_10\n value: 19.39\n - type: recall_at_100\n value: 51.636\n - type: recall_at_1000\n value: 86.99900000000001\n - type: recall_at_20\n value: 26.478\n - type: recall_at_3\n value: 7.703\n - type: recall_at_5\n value: 11.42\n - task:\n type: Classification\n dataset:\n name: MTEB ToxicConversationsClassification\n type: mteb/toxic_conversations_50k\n config: default\n split: test\n revision: edfaf9da55d3dd50d43143d90c1ac476895ae6de\n metrics:\n - type: accuracy\n value: 86.9384765625\n - type: ap\n value: 31.737513704141552\n - type: ap_weighted\n value: 31.737513704141552\n - type: f1\n value: 71.5490757306975\n - type: f1_weighted\n value: 89.14632533489856\n - type: main_score\n value: 86.9384765625\n - task:\n type: Classification\n dataset:\n name: MTEB TweetSentimentExtractionClassification\n type: mteb/tweet_sentiment_extraction\n config: default\n split: test\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\n metrics:\n - type: accuracy\n value: 73.57668364459535\n - type: f1\n value: 73.90467103648074\n - type: f1_weighted\n value: 73.42158415034704\n - type: main_score\n value: 73.57668364459535\n - task:\n type: Clustering\n dataset:\n name: MTEB TwentyNewsgroupsClustering\n type: mteb/twentynewsgroups-clustering\n config: default\n split: test\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\n metrics:\n - type: main_score\n value: 58.574148097494685\n - type: v_measure\n value: 58.574148097494685\n - type: v_measure_std\n value: 0.9443161637490822\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterSemEval2015\n 
type: mteb/twittersemeval2015-pairclassification\n config: default\n split: test\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\n metrics:\n - type: cosine_accuracy\n value: 88.1385229778864\n - type: cosine_accuracy_threshold\n value: 83.86307954788208\n - type: cosine_ap\n value: 80.17965893449055\n - type: cosine_f1\n value: 73.0614300100705\n - type: cosine_f1_threshold\n value: 80.7942807674408\n - type: cosine_precision\n value: 69.8603755416466\n - type: cosine_recall\n value: 76.56992084432717\n - type: dot_accuracy\n value: 88.2100494724921\n - type: dot_accuracy_threshold\n value: 83.84793996810913\n - type: dot_ap\n value: 80.18603932881858\n - type: dot_f1\n value: 73.07643714466204\n - type: dot_f1_threshold\n value: 80.87586164474487\n - type: dot_precision\n value: 70.10909090909091\n - type: dot_recall\n value: 76.3060686015831\n - type: euclidean_accuracy\n value: 88.1385229778864\n - type: euclidean_accuracy_threshold\n value: 56.77661895751953\n - type: euclidean_ap\n value: 80.1784070881624\n - type: euclidean_f1\n value: 73.04830369529574\n - type: euclidean_f1_threshold\n value: 61.91838979721069\n - type: euclidean_precision\n value: 69.96859144720948\n - type: euclidean_recall\n value: 76.41160949868075\n - type: main_score\n value: 80.18603932881858\n - type: manhattan_accuracy\n value: 88.0431543184121\n - type: manhattan_accuracy_threshold\n value: 3755.6137084960938\n - type: manhattan_ap\n value: 79.98270453664578\n - type: manhattan_f1\n value: 72.68242015061023\n - type: manhattan_f1_threshold\n value: 3892.494583129883\n - type: manhattan_precision\n value: 71.54907975460122\n - type: manhattan_recall\n value: 73.85224274406332\n - type: max_ap\n value: 80.18603932881858\n - type: max_f1\n value: 73.07643714466204\n - type: max_precision\n value: 71.54907975460122\n - type: max_recall\n value: 76.56992084432717\n - type: similarity_accuracy\n value: 88.1385229778864\n - type: similarity_accuracy_threshold\n value: 
83.86307954788208\n - type: similarity_ap\n value: 80.17965893449055\n - type: similarity_f1\n value: 73.0614300100705\n - type: similarity_f1_threshold\n value: 80.7942807674408\n - type: similarity_precision\n value: 69.8603755416466\n - type: similarity_recall\n value: 76.56992084432717\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterURLCorpus\n type: mteb/twitterurlcorpus-pairclassification\n config: default\n split: test\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\n metrics:\n - type: cosine_accuracy\n value: 89.7892653393876\n - type: cosine_accuracy_threshold\n value: 79.69566583633423\n - type: cosine_ap\n value: 87.4579867302024\n - type: cosine_f1\n value: 79.91620843152658\n - type: cosine_f1_threshold\n value: 78.53609323501587\n - type: cosine_precision\n value: 77.7155329210622\n - type: cosine_recall\n value: 82.24514936864799\n - type: dot_accuracy\n value: 89.78732487289945\n - type: dot_accuracy_threshold\n value: 80.05315661430359\n - type: dot_ap\n value: 87.44916182456272\n - type: dot_f1\n value: 79.90419878751591\n - type: dot_f1_threshold\n value: 78.57890725135803\n - type: dot_precision\n value: 77.73409057812728\n - type: dot_recall\n value: 82.19895287958116\n - type: euclidean_accuracy\n value: 89.78538440641131\n - type: euclidean_accuracy_threshold\n value: 62.29925751686096\n - type: euclidean_ap\n value: 87.45904868911386\n - type: euclidean_f1\n value: 79.93127404474657\n - type: euclidean_f1_threshold\n value: 65.61101078987122\n - type: euclidean_precision\n value: 77.62060210373595\n - type: euclidean_recall\n value: 82.38373883584848\n - type: main_score\n value: 87.46554314325058\n - type: manhattan_accuracy\n value: 89.76597974152986\n - type: manhattan_accuracy_threshold\n value: 3988.5299682617188\n - type: manhattan_ap\n value: 87.46554314325058\n - type: manhattan_f1\n value: 79.97181740645973\n - type: manhattan_f1_threshold\n value: 4235.905838012695\n - type: manhattan_precision\n value: 
77.13713427283783\n - type: manhattan_recall\n value: 83.02279026793964\n - type: max_ap\n value: 87.46554314325058\n - type: max_f1\n value: 79.97181740645973\n - type: max_precision\n value: 77.73409057812728\n - type: max_recall\n value: 83.02279026793964\n - type: similarity_accuracy\n value: 89.7892653393876\n - type: similarity_accuracy_threshold\n value: 79.69566583633423\n - type: similarity_ap\n value: 87.4579867302024\n - type: similarity_f1\n value: 79.91620843152658\n - type: similarity_f1_threshold\n value: 78.53609323501587\n - type: similarity_precision\n value: 77.7155329210622\n - type: similarity_recall\n value: 82.24514936864799\n---\n\n\n\n# Updates\n\nNew open-source models and ToDoList will be listed on https://github.com/DunZhang/Stella/blob/main/news_and_todo.md.\n\nYou can also find these models on my [homepage](https://huggingface.co/infgrad).\n\n# Introduction\n\nThe models are trained based on `Alibaba-NLP/gte-large-en-v1.5` and `Alibaba-NLP/gte-Qwen2-1.5B-instruct`. Thanks for\ntheir contributions!\n\n**We simplify usage of prompts, providing two prompts for most general tasks, one is for s2p, another one is for s2s.**\n\nPrompt of s2p task(e.g. retrieve task):\n\n```text\nInstruct: Given a web search query, retrieve relevant passages that answer the query.\\nQuery: {query}\n```\n\nPrompt of s2s task(e.g. 
semantic textual similarity task):\n\n```text\nInstruct: Retrieve semantically similar text.\\nQuery: {query}\n```\n\nThe models are finally trained by [MRL](https://arxiv.org/abs/2205.13147), so they have multiple dimensions: 512, 768,\n1024, 2048, 4096, 6144 and 8192.\n\nThe higher the dimension, the better the performance.\n**Generally speaking, 1024d is good enough.** The MTEB score of 1024d is only 0.001 lower than 8192d.\n\n# Model directory structure\n\nThe model directory structure is very simple, it is a standard SentenceTransformer directory **with a series\nof `2_Dense_{dims}`\nfolders**, where `dims` represents the final vector dimension.\n\nFor example, the `2_Dense_256` folder stores Linear weights that convert vector dimensions to 256 dimensions.\nPlease refer to the following chapters for specific instructions on how to use them.\n\n# Usage\n\nYou can use `SentenceTransformers` or `transformers` library to encode text.\n\n## Sentence Transformers\n\n```python\nfrom sentence_transformers import SentenceTransformer\n\n# This model supports two prompts: \"s2p_query\" and \"s2s_query\" for sentence-to-passage and sentence-to-sentence tasks, respectively.\n# They are defined in `config_sentence_transformers.json`\nquery_prompt_name = \"s2p_query\"\nqueries = [\n \"What are some ways to reduce stress?\",\n \"What are the benefits of drinking green tea?\",\n]\n# docs do not need any prompts\ndocs = [\n \"There are many effective ways to reduce stress. Some common techniques include deep breathing, meditation, and physical activity. Engaging in hobbies, spending time in nature, and connecting with loved ones can also help alleviate stress. Additionally, setting boundaries, practicing self-care, and learning to say no can prevent stress from building up.\",\n \"Green tea has been consumed for centuries and is known for its potential health benefits. It contains antioxidants that may help protect the body against damage caused by free radicals. 
Regular consumption of green tea has been associated with improved heart health, enhanced cognitive function, and a reduced risk of certain types of cancer. The polyphenols in green tea may also have anti-inflammatory and weight loss properties.\",\n]\n\n# !The default dimension is 1024, if you need other dimensions, please clone the model and modify `modules.json` to replace `2_Dense_1024` with another dimension, e.g. `2_Dense_256` or `2_Dense_8192` !\n# on gpu\nmodel = SentenceTransformer(\"dunzhang/stella_en_400M_v5\", trust_remote_code=True).cuda()\n# you can also use this model without the features of `use_memory_efficient_attention` and `unpad_inputs`. It can be worked in CPU.\n# model = SentenceTransformer(\n# \"dunzhang/stella_en_400M_v5\",\n# trust_remote_code=True,\n# device=\"cpu\",\n# config_kwargs={\"use_memory_efficient_attention\": False, \"unpad_inputs\": False}\n# )\nquery_embeddings = model.encode(queries, prompt_name=query_prompt_name)\ndoc_embeddings = model.encode(docs)\nprint(query_embeddings.shape, doc_embeddings.shape)\n# (2, 1024) (2, 1024)\n\nsimilarities = model.similarity(query_embeddings, doc_embeddings)\nprint(similarities)\n# tensor([[0.8398, 0.2990],\n# [0.3282, 0.8095]])\n```\n\n## Transformers\n\n```python\nimport os\nimport torch\nfrom transformers import AutoModel, AutoTokenizer\nfrom sklearn.preprocessing import normalize\n\nquery_prompt = \"Instruct: Given a web search query, retrieve relevant passages that answer the query.\\nQuery: \"\nqueries = [\n \"What are some ways to reduce stress?\",\n \"What are the benefits of drinking green tea?\",\n]\nqueries = [query_prompt + query for query in queries]\n# docs do not need any prompts\ndocs = [\n \"There are many effective ways to reduce stress. Some common techniques include deep breathing, meditation, and physical activity. Engaging in hobbies, spending time in nature, and connecting with loved ones can also help alleviate stress. 
Additionally, setting boundaries, practicing self-care, and learning to say no can prevent stress from building up.\",\n \"Green tea has been consumed for centuries and is known for its potential health benefits. It contains antioxidants that may help protect the body against damage caused by free radicals. Regular consumption of green tea has been associated with improved heart health, enhanced cognitive function, and a reduced risk of certain types of cancer. The polyphenols in green tea may also have anti-inflammatory and weight loss properties.\",\n]\n\n# The path of your model after cloning it\nmodel_dir = \"{Your MODEL_PATH}\"\n\nvector_dim = 1024\nvector_linear_directory = f\"2_Dense_{vector_dim}\"\nmodel = AutoModel.from_pretrained(model_dir, trust_remote_code=True).cuda().eval()\n# you can also use this model without the features of `use_memory_efficient_attention` and `unpad_inputs`. It can be worked in CPU.\n# model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,use_memory_efficient_attention=False,unpad_inputs=False).cuda().eval()\ntokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True)\nvector_linear = torch.nn.Linear(in_features=model.config.hidden_size, out_features=vector_dim)\nvector_linear_dict = {\n k.replace(\"linear.\", \"\"): v for k, v in\n torch.load(os.path.join(model_dir, f\"{vector_linear_directory}/pytorch_model.bin\")).items()\n}\nvector_linear.load_state_dict(vector_linear_dict)\nvector_linear.cuda()\n\n# Embed the queries\nwith torch.no_grad():\n input_data = tokenizer(queries, padding=\"longest\", truncation=True, max_length=512, return_tensors=\"pt\")\n input_data = {k: v.cuda() for k, v in input_data.items()}\n attention_mask = input_data[\"attention_mask\"]\n last_hidden_state = model(**input_data)[0]\n last_hidden = last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0)\n query_vectors = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]\n query_vectors = 
normalize(vector_linear(query_vectors).cpu().numpy())\n\n# Embed the documents\nwith torch.no_grad():\n input_data = tokenizer(docs, padding=\"longest\", truncation=True, max_length=512, return_tensors=\"pt\")\n input_data = {k: v.cuda() for k, v in input_data.items()}\n attention_mask = input_data[\"attention_mask\"]\n last_hidden_state = model(**input_data)[0]\n last_hidden = last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0)\n docs_vectors = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]\n docs_vectors = normalize(vector_linear(docs_vectors).cpu().numpy())\n\nprint(query_vectors.shape, docs_vectors.shape)\n# (2, 1024) (2, 1024)\n\nsimilarities = query_vectors @ docs_vectors.T\nprint(similarities)\n# [[0.8397531 0.29900077]\n# [0.32818374 0.80954516]]\n```\n\n# FAQ\n\nQ: The details of training?\n\nA: The training method and datasets will be released in the future. (specific time unknown, may be provided in a paper)\n\nQ: How to choose a suitable prompt for my own task?\n\nA: In most cases, please use the s2p and s2s prompts. These two prompts account for the vast majority of the training\ndata.\n\nQ: How to reproduce MTEB results?\n\nA: Please use evaluation scripts in `Alibaba-NLP/gte-Qwen2-1.5B-instruct` or `intfloat/e5-mistral-7b-instruct`\n\nQ: Why each dimension has a linear weight?\n\nA: MRL has multiple training methods, we choose this method which has the best performance.\n\nQ: What is the sequence length of models?\n\nA: 512 is recommended, in our experiments, almost all models perform poorly on specialized long text retrieval datasets. Besides, the\nmodel is trained on datasets of 512 length. 
This may be an optimization term.\n\nIf you have any questions, please start a discussion on community."},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n \"SCIFACT\"\n]"}}},{"rowIdx":2499,"cells":{"id":{"kind":"string","value":"pingkeest/learning2_model"},"author":{"kind":"string","value":"pingkeest"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["transformers","safetensors","new","feature-extraction","sentence-transformers","gte","mteb","transformers.js","sentence-similarity","custom_code","en","dataset:allenai/c4","arxiv:2407.19669","arxiv:2308.03281","license:apache-2.0","model-index","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"new\",\n \"feature-extraction\",\n \"sentence-transformers\",\n \"gte\",\n \"mteb\",\n \"transformers.js\",\n \"sentence-similarity\",\n \"custom_code\",\n \"en\",\n \"dataset:allenai/c4\",\n \"arxiv:2407.19669\",\n \"arxiv:2308.03281\",\n \"license:apache-2.0\",\n \"model-index\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-10-25T09:58:09Z","string":"2024-10-25T09:58:09Z"},"last_modified":{"kind":"string","value":"2024-10-25T10:00:18+00:00"},"downloads":{"kind":"number","value":18,"string":"18"},"likes":{"kind":"number","value":2,"string":"2"},"README":{"kind":"string","value":"---\r\ndatasets:\r\n- allenai/c4\r\nlanguage:\r\n- en\r\nlibrary_name: transformers\r\nlicense: apache-2.0\r\ntags:\r\n- sentence-transformers\r\n- gte\r\n- mteb\r\n- transformers.js\r\n- sentence-similarity\r\nmodel-index:\r\n- name: gte-large-en-v1.5\r\n results:\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB AmazonCounterfactualClassification (en)\r\n type: mteb/amazon_counterfactual\r\n config: en\r\n split: test\r\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\r\n 
metrics:\r\n - type: accuracy\r\n value: 73.01492537313432\r\n - type: ap\r\n value: 35.05341696659522\r\n - type: f1\r\n value: 66.71270310883853\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB AmazonPolarityClassification\r\n type: mteb/amazon_polarity\r\n config: default\r\n split: test\r\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\r\n metrics:\r\n - type: accuracy\r\n value: 93.97189999999999\r\n - type: ap\r\n value: 90.5952493948908\r\n - type: f1\r\n value: 93.95848137716877\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB AmazonReviewsClassification (en)\r\n type: mteb/amazon_reviews_multi\r\n config: en\r\n split: test\r\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\r\n metrics:\r\n - type: accuracy\r\n value: 54.196\r\n - type: f1\r\n value: 53.80122334012787\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB ArguAna\r\n type: mteb/arguana\r\n config: default\r\n split: test\r\n revision: c22ab2a51041ffd869aaddef7af8d8215647e41a\r\n metrics:\r\n - type: map_at_1\r\n value: 47.297\r\n - type: map_at_10\r\n value: 64.303\r\n - type: map_at_100\r\n value: 64.541\r\n - type: map_at_1000\r\n value: 64.541\r\n - type: map_at_3\r\n value: 60.728\r\n - type: map_at_5\r\n value: 63.114000000000004\r\n - type: mrr_at_1\r\n value: 48.435\r\n - type: mrr_at_10\r\n value: 64.657\r\n - type: mrr_at_100\r\n value: 64.901\r\n - type: mrr_at_1000\r\n value: 64.901\r\n - type: mrr_at_3\r\n value: 61.06\r\n - type: mrr_at_5\r\n value: 63.514\r\n - type: ndcg_at_1\r\n value: 47.297\r\n - type: ndcg_at_10\r\n value: 72.107\r\n - type: ndcg_at_100\r\n value: 72.963\r\n - type: ndcg_at_1000\r\n value: 72.963\r\n - type: ndcg_at_3\r\n value: 65.063\r\n - type: ndcg_at_5\r\n value: 69.352\r\n - type: precision_at_1\r\n value: 47.297\r\n - type: precision_at_10\r\n value: 9.623\r\n - type: precision_at_100\r\n value: 0.996\r\n - type: precision_at_1000\r\n value: 0.1\r\n - type: precision_at_3\r\n value: 25.865\r\n - type: 
precision_at_5\r\n value: 17.596\r\n - type: recall_at_1\r\n value: 47.297\r\n - type: recall_at_10\r\n value: 96.23\r\n - type: recall_at_100\r\n value: 99.644\r\n - type: recall_at_1000\r\n value: 99.644\r\n - type: recall_at_3\r\n value: 77.596\r\n - type: recall_at_5\r\n value: 87.98\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB ArxivClusteringP2P\r\n type: mteb/arxiv-clustering-p2p\r\n config: default\r\n split: test\r\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\r\n metrics:\r\n - type: v_measure\r\n value: 48.467787861077475\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB ArxivClusteringS2S\r\n type: mteb/arxiv-clustering-s2s\r\n config: default\r\n split: test\r\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\r\n metrics:\r\n - type: v_measure\r\n value: 43.39198391914257\r\n - task:\r\n type: Reranking\r\n dataset:\r\n name: MTEB AskUbuntuDupQuestions\r\n type: mteb/askubuntudupquestions-reranking\r\n config: default\r\n split: test\r\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\r\n metrics:\r\n - type: map\r\n value: 63.12794820591384\r\n - type: mrr\r\n value: 75.9331442641692\r\n - task:\r\n type: STS\r\n dataset:\r\n name: MTEB BIOSSES\r\n type: mteb/biosses-sts\r\n config: default\r\n split: test\r\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 87.85062993863319\r\n - type: cos_sim_spearman\r\n value: 85.39049989733459\r\n - type: euclidean_pearson\r\n value: 86.00222680278333\r\n - type: euclidean_spearman\r\n value: 85.45556162077396\r\n - type: manhattan_pearson\r\n value: 85.88769871785621\r\n - type: manhattan_spearman\r\n value: 85.11760211290839\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB Banking77Classification\r\n type: mteb/banking77\r\n config: default\r\n split: test\r\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\r\n metrics:\r\n - type: accuracy\r\n value: 87.32792207792208\r\n - type: f1\r\n value: 
87.29132945999555\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB BiorxivClusteringP2P\r\n type: mteb/biorxiv-clustering-p2p\r\n config: default\r\n split: test\r\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\r\n metrics:\r\n - type: v_measure\r\n value: 40.5779328301945\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB BiorxivClusteringS2S\r\n type: mteb/biorxiv-clustering-s2s\r\n config: default\r\n split: test\r\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\r\n metrics:\r\n - type: v_measure\r\n value: 37.94425623865118\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackAndroidRetrieval\r\n type: mteb/cqadupstack-android\r\n config: default\r\n split: test\r\n revision: f46a197baaae43b4f621051089b82a364682dfeb\r\n metrics:\r\n - type: map_at_1\r\n value: 32.978\r\n - type: map_at_10\r\n value: 44.45\r\n - type: map_at_100\r\n value: 46.19\r\n - type: map_at_1000\r\n value: 46.303\r\n - type: map_at_3\r\n value: 40.849000000000004\r\n - type: map_at_5\r\n value: 42.55\r\n - type: mrr_at_1\r\n value: 40.629\r\n - type: mrr_at_10\r\n value: 50.848000000000006\r\n - type: mrr_at_100\r\n value: 51.669\r\n - type: mrr_at_1000\r\n value: 51.705\r\n - type: mrr_at_3\r\n value: 47.997\r\n - type: mrr_at_5\r\n value: 49.506\r\n - type: ndcg_at_1\r\n value: 40.629\r\n - type: ndcg_at_10\r\n value: 51.102000000000004\r\n - type: ndcg_at_100\r\n value: 57.159000000000006\r\n - type: ndcg_at_1000\r\n value: 58.669000000000004\r\n - type: ndcg_at_3\r\n value: 45.738\r\n - type: ndcg_at_5\r\n value: 47.632999999999996\r\n - type: precision_at_1\r\n value: 40.629\r\n - type: precision_at_10\r\n value: 9.700000000000001\r\n - type: precision_at_100\r\n value: 1.5970000000000002\r\n - type: precision_at_1000\r\n value: 0.202\r\n - type: precision_at_3\r\n value: 21.698\r\n - type: precision_at_5\r\n value: 15.393\r\n - type: recall_at_1\r\n value: 32.978\r\n - type: recall_at_10\r\n value: 63.711\r\n - type: 
recall_at_100\r\n value: 88.39399999999999\r\n - type: recall_at_1000\r\n value: 97.513\r\n - type: recall_at_3\r\n value: 48.025\r\n - type: recall_at_5\r\n value: 53.52\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackEnglishRetrieval\r\n type: mteb/cqadupstack-english\r\n config: default\r\n split: test\r\n revision: ad9991cb51e31e31e430383c75ffb2885547b5f0\r\n metrics:\r\n - type: map_at_1\r\n value: 30.767\r\n - type: map_at_10\r\n value: 42.195\r\n - type: map_at_100\r\n value: 43.541999999999994\r\n - type: map_at_1000\r\n value: 43.673\r\n - type: map_at_3\r\n value: 38.561\r\n - type: map_at_5\r\n value: 40.532000000000004\r\n - type: mrr_at_1\r\n value: 38.79\r\n - type: mrr_at_10\r\n value: 48.021\r\n - type: mrr_at_100\r\n value: 48.735\r\n - type: mrr_at_1000\r\n value: 48.776\r\n - type: mrr_at_3\r\n value: 45.594\r\n - type: mrr_at_5\r\n value: 46.986\r\n - type: ndcg_at_1\r\n value: 38.79\r\n - type: ndcg_at_10\r\n value: 48.468\r\n - type: ndcg_at_100\r\n value: 53.037\r\n - type: ndcg_at_1000\r\n value: 55.001999999999995\r\n - type: ndcg_at_3\r\n value: 43.409\r\n - type: ndcg_at_5\r\n value: 45.654\r\n - type: precision_at_1\r\n value: 38.79\r\n - type: precision_at_10\r\n value: 9.452\r\n - type: precision_at_100\r\n value: 1.518\r\n - type: precision_at_1000\r\n value: 0.201\r\n - type: precision_at_3\r\n value: 21.21\r\n - type: precision_at_5\r\n value: 15.171999999999999\r\n - type: recall_at_1\r\n value: 30.767\r\n - type: recall_at_10\r\n value: 60.118\r\n - type: recall_at_100\r\n value: 79.271\r\n - type: recall_at_1000\r\n value: 91.43299999999999\r\n - type: recall_at_3\r\n value: 45.36\r\n - type: recall_at_5\r\n value: 51.705\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackGamingRetrieval\r\n type: mteb/cqadupstack-gaming\r\n config: default\r\n split: test\r\n revision: 4885aa143210c98657558c04aaf3dc47cfb54340\r\n metrics:\r\n - type: map_at_1\r\n value: 40.007\r\n - type: map_at_10\r\n 
value: 53.529\r\n - type: map_at_100\r\n value: 54.602\r\n - type: map_at_1000\r\n value: 54.647\r\n - type: map_at_3\r\n value: 49.951\r\n - type: map_at_5\r\n value: 52.066\r\n - type: mrr_at_1\r\n value: 45.705\r\n - type: mrr_at_10\r\n value: 56.745000000000005\r\n - type: mrr_at_100\r\n value: 57.43899999999999\r\n - type: mrr_at_1000\r\n value: 57.462999999999994\r\n - type: mrr_at_3\r\n value: 54.25299999999999\r\n - type: mrr_at_5\r\n value: 55.842000000000006\r\n - type: ndcg_at_1\r\n value: 45.705\r\n - type: ndcg_at_10\r\n value: 59.809\r\n - type: ndcg_at_100\r\n value: 63.837999999999994\r\n - type: ndcg_at_1000\r\n value: 64.729\r\n - type: ndcg_at_3\r\n value: 53.994\r\n - type: ndcg_at_5\r\n value: 57.028\r\n - type: precision_at_1\r\n value: 45.705\r\n - type: precision_at_10\r\n value: 9.762\r\n - type: precision_at_100\r\n value: 1.275\r\n - type: precision_at_1000\r\n value: 0.13899999999999998\r\n - type: precision_at_3\r\n value: 24.368000000000002\r\n - type: precision_at_5\r\n value: 16.84\r\n - type: recall_at_1\r\n value: 40.007\r\n - type: recall_at_10\r\n value: 75.017\r\n - type: recall_at_100\r\n value: 91.99000000000001\r\n - type: recall_at_1000\r\n value: 98.265\r\n - type: recall_at_3\r\n value: 59.704\r\n - type: recall_at_5\r\n value: 67.109\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackGisRetrieval\r\n type: mteb/cqadupstack-gis\r\n config: default\r\n split: test\r\n revision: 5003b3064772da1887988e05400cf3806fe491f2\r\n metrics:\r\n - type: map_at_1\r\n value: 26.639000000000003\r\n - type: map_at_10\r\n value: 35.926\r\n - type: map_at_100\r\n value: 37.126999999999995\r\n - type: map_at_1000\r\n value: 37.202\r\n - type: map_at_3\r\n value: 32.989000000000004\r\n - type: map_at_5\r\n value: 34.465\r\n - type: mrr_at_1\r\n value: 28.475\r\n - type: mrr_at_10\r\n value: 37.7\r\n - type: mrr_at_100\r\n value: 38.753\r\n - type: mrr_at_1000\r\n value: 38.807\r\n - type: mrr_at_3\r\n value: 35.066\r\n - 
type: mrr_at_5\r\n value: 36.512\r\n - type: ndcg_at_1\r\n value: 28.475\r\n - type: ndcg_at_10\r\n value: 41.245\r\n - type: ndcg_at_100\r\n value: 46.814\r\n - type: ndcg_at_1000\r\n value: 48.571\r\n - type: ndcg_at_3\r\n value: 35.528999999999996\r\n - type: ndcg_at_5\r\n value: 38.066\r\n - type: precision_at_1\r\n value: 28.475\r\n - type: precision_at_10\r\n value: 6.497\r\n - type: precision_at_100\r\n value: 0.9650000000000001\r\n - type: precision_at_1000\r\n value: 0.11499999999999999\r\n - type: precision_at_3\r\n value: 15.065999999999999\r\n - type: precision_at_5\r\n value: 10.599\r\n - type: recall_at_1\r\n value: 26.639000000000003\r\n - type: recall_at_10\r\n value: 55.759\r\n - type: recall_at_100\r\n value: 80.913\r\n - type: recall_at_1000\r\n value: 93.929\r\n - type: recall_at_3\r\n value: 40.454\r\n - type: recall_at_5\r\n value: 46.439\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackMathematicaRetrieval\r\n type: mteb/cqadupstack-mathematica\r\n config: default\r\n split: test\r\n revision: 90fceea13679c63fe563ded68f3b6f06e50061de\r\n metrics:\r\n - type: map_at_1\r\n value: 15.767999999999999\r\n - type: map_at_10\r\n value: 24.811\r\n - type: map_at_100\r\n value: 26.064999999999998\r\n - type: map_at_1000\r\n value: 26.186999999999998\r\n - type: map_at_3\r\n value: 21.736\r\n - type: map_at_5\r\n value: 23.283\r\n - type: mrr_at_1\r\n value: 19.527\r\n - type: mrr_at_10\r\n value: 29.179\r\n - type: mrr_at_100\r\n value: 30.153999999999996\r\n - type: mrr_at_1000\r\n value: 30.215999999999998\r\n - type: mrr_at_3\r\n value: 26.223000000000003\r\n - type: mrr_at_5\r\n value: 27.733999999999998\r\n - type: ndcg_at_1\r\n value: 19.527\r\n - type: ndcg_at_10\r\n value: 30.786\r\n - type: ndcg_at_100\r\n value: 36.644\r\n - type: ndcg_at_1000\r\n value: 39.440999999999995\r\n - type: ndcg_at_3\r\n value: 24.958\r\n - type: ndcg_at_5\r\n value: 27.392\r\n - type: precision_at_1\r\n value: 19.527\r\n - type: 
precision_at_10\r\n value: 5.995\r\n - type: precision_at_100\r\n value: 1.03\r\n - type: precision_at_1000\r\n value: 0.14100000000000001\r\n - type: precision_at_3\r\n value: 12.520999999999999\r\n - type: precision_at_5\r\n value: 9.129\r\n - type: recall_at_1\r\n value: 15.767999999999999\r\n - type: recall_at_10\r\n value: 44.824000000000005\r\n - type: recall_at_100\r\n value: 70.186\r\n - type: recall_at_1000\r\n value: 89.934\r\n - type: recall_at_3\r\n value: 28.607\r\n - type: recall_at_5\r\n value: 34.836\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackPhysicsRetrieval\r\n type: mteb/cqadupstack-physics\r\n config: default\r\n split: test\r\n revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4\r\n metrics:\r\n - type: map_at_1\r\n value: 31.952\r\n - type: map_at_10\r\n value: 44.438\r\n - type: map_at_100\r\n value: 45.778\r\n - type: map_at_1000\r\n value: 45.883\r\n - type: map_at_3\r\n value: 41.044000000000004\r\n - type: map_at_5\r\n value: 42.986000000000004\r\n - type: mrr_at_1\r\n value: 39.172000000000004\r\n - type: mrr_at_10\r\n value: 49.76\r\n - type: mrr_at_100\r\n value: 50.583999999999996\r\n - type: mrr_at_1000\r\n value: 50.621\r\n - type: mrr_at_3\r\n value: 47.353\r\n - type: mrr_at_5\r\n value: 48.739\r\n - type: ndcg_at_1\r\n value: 39.172000000000004\r\n - type: ndcg_at_10\r\n value: 50.760000000000005\r\n - type: ndcg_at_100\r\n value: 56.084\r\n - type: ndcg_at_1000\r\n value: 57.865\r\n - type: ndcg_at_3\r\n value: 45.663\r\n - type: ndcg_at_5\r\n value: 48.178\r\n - type: precision_at_1\r\n value: 39.172000000000004\r\n - type: precision_at_10\r\n value: 9.22\r\n - type: precision_at_100\r\n value: 1.387\r\n - type: precision_at_1000\r\n value: 0.17099999999999999\r\n - type: precision_at_3\r\n value: 21.976000000000003\r\n - type: precision_at_5\r\n value: 15.457\r\n - type: recall_at_1\r\n value: 31.952\r\n - type: recall_at_10\r\n value: 63.900999999999996\r\n - type: recall_at_100\r\n value: 
85.676\r\n - type: recall_at_1000\r\n value: 97.03699999999999\r\n - type: recall_at_3\r\n value: 49.781\r\n - type: recall_at_5\r\n value: 56.330000000000005\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackProgrammersRetrieval\r\n type: mteb/cqadupstack-programmers\r\n config: default\r\n split: test\r\n revision: 6184bc1440d2dbc7612be22b50686b8826d22b32\r\n metrics:\r\n - type: map_at_1\r\n value: 25.332\r\n - type: map_at_10\r\n value: 36.874\r\n - type: map_at_100\r\n value: 38.340999999999994\r\n - type: map_at_1000\r\n value: 38.452\r\n - type: map_at_3\r\n value: 33.068\r\n - type: map_at_5\r\n value: 35.324\r\n - type: mrr_at_1\r\n value: 30.822\r\n - type: mrr_at_10\r\n value: 41.641\r\n - type: mrr_at_100\r\n value: 42.519\r\n - type: mrr_at_1000\r\n value: 42.573\r\n - type: mrr_at_3\r\n value: 38.413000000000004\r\n - type: mrr_at_5\r\n value: 40.542\r\n - type: ndcg_at_1\r\n value: 30.822\r\n - type: ndcg_at_10\r\n value: 43.414\r\n - type: ndcg_at_100\r\n value: 49.196\r\n - type: ndcg_at_1000\r\n value: 51.237\r\n - type: ndcg_at_3\r\n value: 37.230000000000004\r\n - type: ndcg_at_5\r\n value: 40.405\r\n - type: precision_at_1\r\n value: 30.822\r\n - type: precision_at_10\r\n value: 8.379\r\n - type: precision_at_100\r\n value: 1.315\r\n - type: precision_at_1000\r\n value: 0.168\r\n - type: precision_at_3\r\n value: 18.417\r\n - type: precision_at_5\r\n value: 13.744\r\n - type: recall_at_1\r\n value: 25.332\r\n - type: recall_at_10\r\n value: 57.774\r\n - type: recall_at_100\r\n value: 82.071\r\n - type: recall_at_1000\r\n value: 95.60600000000001\r\n - type: recall_at_3\r\n value: 40.722\r\n - type: recall_at_5\r\n value: 48.754999999999995\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackRetrieval\r\n type: mteb/cqadupstack\r\n config: default\r\n split: test\r\n revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4\r\n metrics:\r\n - type: map_at_1\r\n value: 25.91033333333334\r\n - type: map_at_10\r\n 
value: 36.23225000000001\r\n - type: map_at_100\r\n value: 37.55766666666667\r\n - type: map_at_1000\r\n value: 37.672583333333336\r\n - type: map_at_3\r\n value: 32.95666666666667\r\n - type: map_at_5\r\n value: 34.73375\r\n - type: mrr_at_1\r\n value: 30.634\r\n - type: mrr_at_10\r\n value: 40.19449999999999\r\n - type: mrr_at_100\r\n value: 41.099250000000005\r\n - type: mrr_at_1000\r\n value: 41.15091666666667\r\n - type: mrr_at_3\r\n value: 37.4615\r\n - type: mrr_at_5\r\n value: 39.00216666666667\r\n - type: ndcg_at_1\r\n value: 30.634\r\n - type: ndcg_at_10\r\n value: 42.162166666666664\r\n - type: ndcg_at_100\r\n value: 47.60708333333333\r\n - type: ndcg_at_1000\r\n value: 49.68616666666666\r\n - type: ndcg_at_3\r\n value: 36.60316666666666\r\n - type: ndcg_at_5\r\n value: 39.15616666666668\r\n - type: precision_at_1\r\n value: 30.634\r\n - type: precision_at_10\r\n value: 7.6193333333333335\r\n - type: precision_at_100\r\n value: 1.2198333333333333\r\n - type: precision_at_1000\r\n value: 0.15975000000000003\r\n - type: precision_at_3\r\n value: 17.087\r\n - type: precision_at_5\r\n value: 12.298333333333334\r\n - type: recall_at_1\r\n value: 25.91033333333334\r\n - type: recall_at_10\r\n value: 55.67300000000001\r\n - type: recall_at_100\r\n value: 79.20608333333334\r\n - type: recall_at_1000\r\n value: 93.34866666666667\r\n - type: recall_at_3\r\n value: 40.34858333333333\r\n - type: recall_at_5\r\n value: 46.834083333333325\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackStatsRetrieval\r\n type: mteb/cqadupstack-stats\r\n config: default\r\n split: test\r\n revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a\r\n metrics:\r\n - type: map_at_1\r\n value: 25.006\r\n - type: map_at_10\r\n value: 32.177\r\n - type: map_at_100\r\n value: 33.324999999999996\r\n - type: map_at_1000\r\n value: 33.419\r\n - type: map_at_3\r\n value: 29.952\r\n - type: map_at_5\r\n value: 31.095\r\n - type: mrr_at_1\r\n value: 28.066999999999997\r\n - type: 
mrr_at_10\r\n value: 34.995\r\n - type: mrr_at_100\r\n value: 35.978\r\n - type: mrr_at_1000\r\n value: 36.042\r\n - type: mrr_at_3\r\n value: 33.103\r\n - type: mrr_at_5\r\n value: 34.001\r\n - type: ndcg_at_1\r\n value: 28.066999999999997\r\n - type: ndcg_at_10\r\n value: 36.481\r\n - type: ndcg_at_100\r\n value: 42.022999999999996\r\n - type: ndcg_at_1000\r\n value: 44.377\r\n - type: ndcg_at_3\r\n value: 32.394\r\n - type: ndcg_at_5\r\n value: 34.108\r\n - type: precision_at_1\r\n value: 28.066999999999997\r\n - type: precision_at_10\r\n value: 5.736\r\n - type: precision_at_100\r\n value: 0.9259999999999999\r\n - type: precision_at_1000\r\n value: 0.12\r\n - type: precision_at_3\r\n value: 13.804\r\n - type: precision_at_5\r\n value: 9.508999999999999\r\n - type: recall_at_1\r\n value: 25.006\r\n - type: recall_at_10\r\n value: 46.972\r\n - type: recall_at_100\r\n value: 72.138\r\n - type: recall_at_1000\r\n value: 89.479\r\n - type: recall_at_3\r\n value: 35.793\r\n - type: recall_at_5\r\n value: 39.947\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackTexRetrieval\r\n type: mteb/cqadupstack-tex\r\n config: default\r\n split: test\r\n revision: 46989137a86843e03a6195de44b09deda022eec7\r\n metrics:\r\n - type: map_at_1\r\n value: 16.07\r\n - type: map_at_10\r\n value: 24.447\r\n - type: map_at_100\r\n value: 25.685999999999996\r\n - type: map_at_1000\r\n value: 25.813999999999997\r\n - type: map_at_3\r\n value: 21.634\r\n - type: map_at_5\r\n value: 23.133\r\n - type: mrr_at_1\r\n value: 19.580000000000002\r\n - type: mrr_at_10\r\n value: 28.127999999999997\r\n - type: mrr_at_100\r\n value: 29.119\r\n - type: mrr_at_1000\r\n value: 29.192\r\n - type: mrr_at_3\r\n value: 25.509999999999998\r\n - type: mrr_at_5\r\n value: 26.878\r\n - type: ndcg_at_1\r\n value: 19.580000000000002\r\n - type: ndcg_at_10\r\n value: 29.804000000000002\r\n - type: ndcg_at_100\r\n value: 35.555\r\n - type: ndcg_at_1000\r\n value: 38.421\r\n - type: ndcg_at_3\r\n 
value: 24.654999999999998\r\n - type: ndcg_at_5\r\n value: 26.881\r\n - type: precision_at_1\r\n value: 19.580000000000002\r\n - type: precision_at_10\r\n value: 5.736\r\n - type: precision_at_100\r\n value: 1.005\r\n - type: precision_at_1000\r\n value: 0.145\r\n - type: precision_at_3\r\n value: 12.033000000000001\r\n - type: precision_at_5\r\n value: 8.871\r\n - type: recall_at_1\r\n value: 16.07\r\n - type: recall_at_10\r\n value: 42.364000000000004\r\n - type: recall_at_100\r\n value: 68.01899999999999\r\n - type: recall_at_1000\r\n value: 88.122\r\n - type: recall_at_3\r\n value: 27.846\r\n - type: recall_at_5\r\n value: 33.638\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackUnixRetrieval\r\n type: mteb/cqadupstack-unix\r\n config: default\r\n split: test\r\n revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53\r\n metrics:\r\n - type: map_at_1\r\n value: 26.365\r\n - type: map_at_10\r\n value: 36.591\r\n - type: map_at_100\r\n value: 37.730000000000004\r\n - type: map_at_1000\r\n value: 37.84\r\n - type: map_at_3\r\n value: 33.403\r\n - type: map_at_5\r\n value: 35.272999999999996\r\n - type: mrr_at_1\r\n value: 30.503999999999998\r\n - type: mrr_at_10\r\n value: 39.940999999999995\r\n - type: mrr_at_100\r\n value: 40.818\r\n - type: mrr_at_1000\r\n value: 40.876000000000005\r\n - type: mrr_at_3\r\n value: 37.065\r\n - type: mrr_at_5\r\n value: 38.814\r\n - type: ndcg_at_1\r\n value: 30.503999999999998\r\n - type: ndcg_at_10\r\n value: 42.185\r\n - type: ndcg_at_100\r\n value: 47.416000000000004\r\n - type: ndcg_at_1000\r\n value: 49.705\r\n - type: ndcg_at_3\r\n value: 36.568\r\n - type: ndcg_at_5\r\n value: 39.416000000000004\r\n - type: precision_at_1\r\n value: 30.503999999999998\r\n - type: precision_at_10\r\n value: 7.276000000000001\r\n - type: precision_at_100\r\n value: 1.118\r\n - type: precision_at_1000\r\n value: 0.14300000000000002\r\n - type: precision_at_3\r\n value: 16.729\r\n - type: precision_at_5\r\n value: 
12.107999999999999\r\n - type: recall_at_1\r\n value: 26.365\r\n - type: recall_at_10\r\n value: 55.616\r\n - type: recall_at_100\r\n value: 78.129\r\n - type: recall_at_1000\r\n value: 93.95599999999999\r\n - type: recall_at_3\r\n value: 40.686\r\n - type: recall_at_5\r\n value: 47.668\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackWebmastersRetrieval\r\n type: mteb/cqadupstack-webmasters\r\n config: default\r\n split: test\r\n revision: 160c094312a0e1facb97e55eeddb698c0abe3571\r\n metrics:\r\n - type: map_at_1\r\n value: 22.750999999999998\r\n - type: map_at_10\r\n value: 33.446\r\n - type: map_at_100\r\n value: 35.235\r\n - type: map_at_1000\r\n value: 35.478\r\n - type: map_at_3\r\n value: 29.358\r\n - type: map_at_5\r\n value: 31.525\r\n - type: mrr_at_1\r\n value: 27.668\r\n - type: mrr_at_10\r\n value: 37.694\r\n - type: mrr_at_100\r\n value: 38.732\r\n - type: mrr_at_1000\r\n value: 38.779\r\n - type: mrr_at_3\r\n value: 34.223\r\n - type: mrr_at_5\r\n value: 36.08\r\n - type: ndcg_at_1\r\n value: 27.668\r\n - type: ndcg_at_10\r\n value: 40.557\r\n - type: ndcg_at_100\r\n value: 46.605999999999995\r\n - type: ndcg_at_1000\r\n value: 48.917\r\n - type: ndcg_at_3\r\n value: 33.677\r\n - type: ndcg_at_5\r\n value: 36.85\r\n - type: precision_at_1\r\n value: 27.668\r\n - type: precision_at_10\r\n value: 8.3\r\n - type: precision_at_100\r\n value: 1.6260000000000001\r\n - type: precision_at_1000\r\n value: 0.253\r\n - type: precision_at_3\r\n value: 16.008\r\n - type: precision_at_5\r\n value: 12.292\r\n - type: recall_at_1\r\n value: 22.750999999999998\r\n - type: recall_at_10\r\n value: 55.643\r\n - type: recall_at_100\r\n value: 82.151\r\n - type: recall_at_1000\r\n value: 95.963\r\n - type: recall_at_3\r\n value: 36.623\r\n - type: recall_at_5\r\n value: 44.708\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB CQADupstackWordpressRetrieval\r\n type: mteb/cqadupstack-wordpress\r\n config: default\r\n split: test\r\n revision: 
4ffe81d471b1924886b33c7567bfb200e9eec5c4\r\n metrics:\r\n - type: map_at_1\r\n value: 17.288999999999998\r\n - type: map_at_10\r\n value: 25.903\r\n - type: map_at_100\r\n value: 27.071\r\n - type: map_at_1000\r\n value: 27.173000000000002\r\n - type: map_at_3\r\n value: 22.935\r\n - type: map_at_5\r\n value: 24.573\r\n - type: mrr_at_1\r\n value: 18.669\r\n - type: mrr_at_10\r\n value: 27.682000000000002\r\n - type: mrr_at_100\r\n value: 28.691\r\n - type: mrr_at_1000\r\n value: 28.761\r\n - type: mrr_at_3\r\n value: 24.738\r\n - type: mrr_at_5\r\n value: 26.392\r\n - type: ndcg_at_1\r\n value: 18.669\r\n - type: ndcg_at_10\r\n value: 31.335\r\n - type: ndcg_at_100\r\n value: 36.913000000000004\r\n - type: ndcg_at_1000\r\n value: 39.300000000000004\r\n - type: ndcg_at_3\r\n value: 25.423000000000002\r\n - type: ndcg_at_5\r\n value: 28.262999999999998\r\n - type: precision_at_1\r\n value: 18.669\r\n - type: precision_at_10\r\n value: 5.379\r\n - type: precision_at_100\r\n value: 0.876\r\n - type: precision_at_1000\r\n value: 0.11900000000000001\r\n - type: precision_at_3\r\n value: 11.214\r\n - type: precision_at_5\r\n value: 8.466\r\n - type: recall_at_1\r\n value: 17.288999999999998\r\n - type: recall_at_10\r\n value: 46.377\r\n - type: recall_at_100\r\n value: 71.53500000000001\r\n - type: recall_at_1000\r\n value: 88.947\r\n - type: recall_at_3\r\n value: 30.581999999999997\r\n - type: recall_at_5\r\n value: 37.354\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB ClimateFEVER\r\n type: mteb/climate-fever\r\n config: default\r\n split: test\r\n revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380\r\n metrics:\r\n - type: map_at_1\r\n value: 21.795\r\n - type: map_at_10\r\n value: 37.614999999999995\r\n - type: map_at_100\r\n value: 40.037\r\n - type: map_at_1000\r\n value: 40.184999999999995\r\n - type: map_at_3\r\n value: 32.221\r\n - type: map_at_5\r\n value: 35.154999999999994\r\n - type: mrr_at_1\r\n value: 50.358000000000004\r\n - type: 
mrr_at_10\r\n value: 62.129\r\n - type: mrr_at_100\r\n value: 62.613\r\n - type: mrr_at_1000\r\n value: 62.62\r\n - type: mrr_at_3\r\n value: 59.272999999999996\r\n - type: mrr_at_5\r\n value: 61.138999999999996\r\n - type: ndcg_at_1\r\n value: 50.358000000000004\r\n - type: ndcg_at_10\r\n value: 48.362\r\n - type: ndcg_at_100\r\n value: 55.932\r\n - type: ndcg_at_1000\r\n value: 58.062999999999995\r\n - type: ndcg_at_3\r\n value: 42.111\r\n - type: ndcg_at_5\r\n value: 44.063\r\n - type: precision_at_1\r\n value: 50.358000000000004\r\n - type: precision_at_10\r\n value: 14.677999999999999\r\n - type: precision_at_100\r\n value: 2.2950000000000004\r\n - type: precision_at_1000\r\n value: 0.271\r\n - type: precision_at_3\r\n value: 31.77\r\n - type: precision_at_5\r\n value: 23.375\r\n - type: recall_at_1\r\n value: 21.795\r\n - type: recall_at_10\r\n value: 53.846000000000004\r\n - type: recall_at_100\r\n value: 78.952\r\n - type: recall_at_1000\r\n value: 90.41900000000001\r\n - type: recall_at_3\r\n value: 37.257\r\n - type: recall_at_5\r\n value: 44.661\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB DBPedia\r\n type: mteb/dbpedia\r\n config: default\r\n split: test\r\n revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659\r\n metrics:\r\n - type: map_at_1\r\n value: 9.728\r\n - type: map_at_10\r\n value: 22.691\r\n - type: map_at_100\r\n value: 31.734\r\n - type: map_at_1000\r\n value: 33.464\r\n - type: map_at_3\r\n value: 16.273\r\n - type: map_at_5\r\n value: 19.016\r\n - type: mrr_at_1\r\n value: 73.25\r\n - type: mrr_at_10\r\n value: 80.782\r\n - type: mrr_at_100\r\n value: 81.01899999999999\r\n - type: mrr_at_1000\r\n value: 81.021\r\n - type: mrr_at_3\r\n value: 79.583\r\n - type: mrr_at_5\r\n value: 80.146\r\n - type: ndcg_at_1\r\n value: 59.62499999999999\r\n - type: ndcg_at_10\r\n value: 46.304\r\n - type: ndcg_at_100\r\n value: 51.23\r\n - type: ndcg_at_1000\r\n value: 58.048\r\n - type: ndcg_at_3\r\n value: 51.541000000000004\r\n - type: 
ndcg_at_5\r\n value: 48.635\r\n - type: precision_at_1\r\n value: 73.25\r\n - type: precision_at_10\r\n value: 36.375\r\n - type: precision_at_100\r\n value: 11.53\r\n - type: precision_at_1000\r\n value: 2.23\r\n - type: precision_at_3\r\n value: 55.583000000000006\r\n - type: precision_at_5\r\n value: 47.15\r\n - type: recall_at_1\r\n value: 9.728\r\n - type: recall_at_10\r\n value: 28.793999999999997\r\n - type: recall_at_100\r\n value: 57.885\r\n - type: recall_at_1000\r\n value: 78.759\r\n - type: recall_at_3\r\n value: 17.79\r\n - type: recall_at_5\r\n value: 21.733\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB EmotionClassification\r\n type: mteb/emotion\r\n config: default\r\n split: test\r\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\r\n metrics:\r\n - type: accuracy\r\n value: 46.775\r\n - type: f1\r\n value: 41.89794273264891\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB FEVER\r\n type: mteb/fever\r\n config: default\r\n split: test\r\n revision: bea83ef9e8fb933d90a2f1d5515737465d613e12\r\n metrics:\r\n - type: map_at_1\r\n value: 85.378\r\n - type: map_at_10\r\n value: 91.51\r\n - type: map_at_100\r\n value: 91.666\r\n - type: map_at_1000\r\n value: 91.676\r\n - type: map_at_3\r\n value: 90.757\r\n - type: map_at_5\r\n value: 91.277\r\n - type: mrr_at_1\r\n value: 91.839\r\n - type: mrr_at_10\r\n value: 95.49\r\n - type: mrr_at_100\r\n value: 95.493\r\n - type: mrr_at_1000\r\n value: 95.493\r\n - type: mrr_at_3\r\n value: 95.345\r\n - type: mrr_at_5\r\n value: 95.47200000000001\r\n - type: ndcg_at_1\r\n value: 91.839\r\n - type: ndcg_at_10\r\n value: 93.806\r\n - type: ndcg_at_100\r\n value: 94.255\r\n - type: ndcg_at_1000\r\n value: 94.399\r\n - type: ndcg_at_3\r\n value: 93.027\r\n - type: ndcg_at_5\r\n value: 93.51\r\n - type: precision_at_1\r\n value: 91.839\r\n - type: precision_at_10\r\n value: 10.93\r\n - type: precision_at_100\r\n value: 1.1400000000000001\r\n - type: precision_at_1000\r\n value: 
0.117\r\n - type: precision_at_3\r\n value: 34.873\r\n - type: precision_at_5\r\n value: 21.44\r\n - type: recall_at_1\r\n value: 85.378\r\n - type: recall_at_10\r\n value: 96.814\r\n - type: recall_at_100\r\n value: 98.386\r\n - type: recall_at_1000\r\n value: 99.21600000000001\r\n - type: recall_at_3\r\n value: 94.643\r\n - type: recall_at_5\r\n value: 95.976\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB FiQA2018\r\n type: mteb/fiqa\r\n config: default\r\n split: test\r\n revision: 27a168819829fe9bcd655c2df245fb19452e8e06\r\n metrics:\r\n - type: map_at_1\r\n value: 32.190000000000005\r\n - type: map_at_10\r\n value: 53.605000000000004\r\n - type: map_at_100\r\n value: 55.550999999999995\r\n - type: map_at_1000\r\n value: 55.665\r\n - type: map_at_3\r\n value: 46.62\r\n - type: map_at_5\r\n value: 50.517999999999994\r\n - type: mrr_at_1\r\n value: 60.34\r\n - type: mrr_at_10\r\n value: 70.775\r\n - type: mrr_at_100\r\n value: 71.238\r\n - type: mrr_at_1000\r\n value: 71.244\r\n - type: mrr_at_3\r\n value: 68.72399999999999\r\n - type: mrr_at_5\r\n value: 69.959\r\n - type: ndcg_at_1\r\n value: 60.34\r\n - type: ndcg_at_10\r\n value: 63.226000000000006\r\n - type: ndcg_at_100\r\n value: 68.60300000000001\r\n - type: ndcg_at_1000\r\n value: 69.901\r\n - type: ndcg_at_3\r\n value: 58.048\r\n - type: ndcg_at_5\r\n value: 59.789\r\n - type: precision_at_1\r\n value: 60.34\r\n - type: precision_at_10\r\n value: 17.130000000000003\r\n - type: precision_at_100\r\n value: 2.29\r\n - type: precision_at_1000\r\n value: 0.256\r\n - type: precision_at_3\r\n value: 38.323\r\n - type: precision_at_5\r\n value: 27.87\r\n - type: recall_at_1\r\n value: 32.190000000000005\r\n - type: recall_at_10\r\n value: 73.041\r\n - type: recall_at_100\r\n value: 91.31\r\n - type: recall_at_1000\r\n value: 98.104\r\n - type: recall_at_3\r\n value: 53.70399999999999\r\n - type: recall_at_5\r\n value: 62.358999999999995\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB 
HotpotQA\r\n type: mteb/hotpotqa\r\n config: default\r\n split: test\r\n revision: ab518f4d6fcca38d87c25209f94beba119d02014\r\n metrics:\r\n - type: map_at_1\r\n value: 43.511\r\n - type: map_at_10\r\n value: 58.15\r\n - type: map_at_100\r\n value: 58.95399999999999\r\n - type: map_at_1000\r\n value: 59.018\r\n - type: map_at_3\r\n value: 55.31700000000001\r\n - type: map_at_5\r\n value: 57.04900000000001\r\n - type: mrr_at_1\r\n value: 87.022\r\n - type: mrr_at_10\r\n value: 91.32000000000001\r\n - type: mrr_at_100\r\n value: 91.401\r\n - type: mrr_at_1000\r\n value: 91.403\r\n - type: mrr_at_3\r\n value: 90.77\r\n - type: mrr_at_5\r\n value: 91.156\r\n - type: ndcg_at_1\r\n value: 87.022\r\n - type: ndcg_at_10\r\n value: 68.183\r\n - type: ndcg_at_100\r\n value: 70.781\r\n - type: ndcg_at_1000\r\n value: 72.009\r\n - type: ndcg_at_3\r\n value: 64.334\r\n - type: ndcg_at_5\r\n value: 66.449\r\n - type: precision_at_1\r\n value: 87.022\r\n - type: precision_at_10\r\n value: 13.406\r\n - type: precision_at_100\r\n value: 1.542\r\n - type: precision_at_1000\r\n value: 0.17099999999999999\r\n - type: precision_at_3\r\n value: 39.023\r\n - type: precision_at_5\r\n value: 25.080000000000002\r\n - type: recall_at_1\r\n value: 43.511\r\n - type: recall_at_10\r\n value: 67.02900000000001\r\n - type: recall_at_100\r\n value: 77.11\r\n - type: recall_at_1000\r\n value: 85.294\r\n - type: recall_at_3\r\n value: 58.535000000000004\r\n - type: recall_at_5\r\n value: 62.70099999999999\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB ImdbClassification\r\n type: mteb/imdb\r\n config: default\r\n split: test\r\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\r\n metrics:\r\n - type: accuracy\r\n value: 92.0996\r\n - type: ap\r\n value: 87.86206089096373\r\n - type: f1\r\n value: 92.07554547510763\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB MSMARCO\r\n type: mteb/msmarco\r\n config: default\r\n split: dev\r\n revision: 
c5a29a104738b98a9e76336939199e264163d4a0\r\n metrics:\r\n - type: map_at_1\r\n value: 23.179\r\n - type: map_at_10\r\n value: 35.86\r\n - type: map_at_100\r\n value: 37.025999999999996\r\n - type: map_at_1000\r\n value: 37.068\r\n - type: map_at_3\r\n value: 31.921\r\n - type: map_at_5\r\n value: 34.172000000000004\r\n - type: mrr_at_1\r\n value: 23.926\r\n - type: mrr_at_10\r\n value: 36.525999999999996\r\n - type: mrr_at_100\r\n value: 37.627\r\n - type: mrr_at_1000\r\n value: 37.665\r\n - type: mrr_at_3\r\n value: 32.653\r\n - type: mrr_at_5\r\n value: 34.897\r\n - type: ndcg_at_1\r\n value: 23.910999999999998\r\n - type: ndcg_at_10\r\n value: 42.927\r\n - type: ndcg_at_100\r\n value: 48.464\r\n - type: ndcg_at_1000\r\n value: 49.533\r\n - type: ndcg_at_3\r\n value: 34.910000000000004\r\n - type: ndcg_at_5\r\n value: 38.937\r\n - type: precision_at_1\r\n value: 23.910999999999998\r\n - type: precision_at_10\r\n value: 6.758\r\n - type: precision_at_100\r\n value: 0.9520000000000001\r\n - type: precision_at_1000\r\n value: 0.104\r\n - type: precision_at_3\r\n value: 14.838000000000001\r\n - type: precision_at_5\r\n value: 10.934000000000001\r\n - type: recall_at_1\r\n value: 23.179\r\n - type: recall_at_10\r\n value: 64.622\r\n - type: recall_at_100\r\n value: 90.135\r\n - type: recall_at_1000\r\n value: 98.301\r\n - type: recall_at_3\r\n value: 42.836999999999996\r\n - type: recall_at_5\r\n value: 52.512\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB MTOPDomainClassification (en)\r\n type: mteb/mtop_domain\r\n config: en\r\n split: test\r\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\r\n metrics:\r\n - type: accuracy\r\n value: 96.59598723210215\r\n - type: f1\r\n value: 96.41913500001952\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB MTOPIntentClassification (en)\r\n type: mteb/mtop_intent\r\n config: en\r\n split: test\r\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\r\n metrics:\r\n - type: accuracy\r\n value: 
82.89557683538533\r\n - type: f1\r\n value: 63.379319722356264\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB MassiveIntentClassification (en)\r\n type: mteb/amazon_massive_intent\r\n config: en\r\n split: test\r\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\r\n metrics:\r\n - type: accuracy\r\n value: 78.93745796906524\r\n - type: f1\r\n value: 75.71616541785902\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB MassiveScenarioClassification (en)\r\n type: mteb/amazon_massive_scenario\r\n config: en\r\n split: test\r\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\r\n metrics:\r\n - type: accuracy\r\n value: 81.41223940820443\r\n - type: f1\r\n value: 81.2877893719078\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB MedrxivClusteringP2P\r\n type: mteb/medrxiv-clustering-p2p\r\n config: default\r\n split: test\r\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\r\n metrics:\r\n - type: v_measure\r\n value: 35.03682528325662\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB MedrxivClusteringS2S\r\n type: mteb/medrxiv-clustering-s2s\r\n config: default\r\n split: test\r\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\r\n metrics:\r\n - type: v_measure\r\n value: 32.942529406124\r\n - task:\r\n type: Reranking\r\n dataset:\r\n name: MTEB MindSmallReranking\r\n type: mteb/mind_small\r\n config: default\r\n split: test\r\n revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69\r\n metrics:\r\n - type: map\r\n value: 31.459949660460317\r\n - type: mrr\r\n value: 32.70509582031616\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB NFCorpus\r\n type: mteb/nfcorpus\r\n config: default\r\n split: test\r\n revision: ec0fa4fe99da2ff19ca1214b7966684033a58814\r\n metrics:\r\n - type: map_at_1\r\n value: 6.497\r\n - type: map_at_10\r\n value: 13.843\r\n - type: map_at_100\r\n value: 17.713\r\n - type: map_at_1000\r\n value: 19.241\r\n - type: map_at_3\r\n value: 10.096\r\n - type: map_at_5\r\n value: 
11.85\r\n - type: mrr_at_1\r\n value: 48.916\r\n - type: mrr_at_10\r\n value: 57.764\r\n - type: mrr_at_100\r\n value: 58.251\r\n - type: mrr_at_1000\r\n value: 58.282999999999994\r\n - type: mrr_at_3\r\n value: 55.623999999999995\r\n - type: mrr_at_5\r\n value: 57.018\r\n - type: ndcg_at_1\r\n value: 46.594\r\n - type: ndcg_at_10\r\n value: 36.945\r\n - type: ndcg_at_100\r\n value: 34.06\r\n - type: ndcg_at_1000\r\n value: 43.05\r\n - type: ndcg_at_3\r\n value: 41.738\r\n - type: ndcg_at_5\r\n value: 39.330999999999996\r\n - type: precision_at_1\r\n value: 48.916\r\n - type: precision_at_10\r\n value: 27.43\r\n - type: precision_at_100\r\n value: 8.616\r\n - type: precision_at_1000\r\n value: 2.155\r\n - type: precision_at_3\r\n value: 39.112\r\n - type: precision_at_5\r\n value: 33.808\r\n - type: recall_at_1\r\n value: 6.497\r\n - type: recall_at_10\r\n value: 18.163\r\n - type: recall_at_100\r\n value: 34.566\r\n - type: recall_at_1000\r\n value: 67.15\r\n - type: recall_at_3\r\n value: 11.100999999999999\r\n - type: recall_at_5\r\n value: 14.205000000000002\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB NQ\r\n type: mteb/nq\r\n config: default\r\n split: test\r\n revision: b774495ed302d8c44a3a7ea25c90dbce03968f31\r\n metrics:\r\n - type: map_at_1\r\n value: 31.916\r\n - type: map_at_10\r\n value: 48.123\r\n - type: map_at_100\r\n value: 49.103\r\n - type: map_at_1000\r\n value: 49.131\r\n - type: map_at_3\r\n value: 43.711\r\n - type: map_at_5\r\n value: 46.323\r\n - type: mrr_at_1\r\n value: 36.181999999999995\r\n - type: mrr_at_10\r\n value: 50.617999999999995\r\n - type: mrr_at_100\r\n value: 51.329\r\n - type: mrr_at_1000\r\n value: 51.348000000000006\r\n - type: mrr_at_3\r\n value: 47.010999999999996\r\n - type: mrr_at_5\r\n value: 49.175000000000004\r\n - type: ndcg_at_1\r\n value: 36.181999999999995\r\n - type: ndcg_at_10\r\n value: 56.077999999999996\r\n - type: ndcg_at_100\r\n value: 60.037\r\n - type: ndcg_at_1000\r\n value: 
60.63499999999999\r\n - type: ndcg_at_3\r\n value: 47.859\r\n - type: ndcg_at_5\r\n value: 52.178999999999995\r\n - type: precision_at_1\r\n value: 36.181999999999995\r\n - type: precision_at_10\r\n value: 9.284\r\n - type: precision_at_100\r\n value: 1.149\r\n - type: precision_at_1000\r\n value: 0.121\r\n - type: precision_at_3\r\n value: 22.006999999999998\r\n - type: precision_at_5\r\n value: 15.695\r\n - type: recall_at_1\r\n value: 31.916\r\n - type: recall_at_10\r\n value: 77.771\r\n - type: recall_at_100\r\n value: 94.602\r\n - type: recall_at_1000\r\n value: 98.967\r\n - type: recall_at_3\r\n value: 56.528\r\n - type: recall_at_5\r\n value: 66.527\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB QuoraRetrieval\r\n type: mteb/quora\r\n config: default\r\n split: test\r\n revision: None\r\n metrics:\r\n - type: map_at_1\r\n value: 71.486\r\n - type: map_at_10\r\n value: 85.978\r\n - type: map_at_100\r\n value: 86.587\r\n - type: map_at_1000\r\n value: 86.598\r\n - type: map_at_3\r\n value: 83.04899999999999\r\n - type: map_at_5\r\n value: 84.857\r\n - type: mrr_at_1\r\n value: 82.32000000000001\r\n - type: mrr_at_10\r\n value: 88.64\r\n - type: mrr_at_100\r\n value: 88.702\r\n - type: mrr_at_1000\r\n value: 88.702\r\n - type: mrr_at_3\r\n value: 87.735\r\n - type: mrr_at_5\r\n value: 88.36\r\n - type: ndcg_at_1\r\n value: 82.34\r\n - type: ndcg_at_10\r\n value: 89.67\r\n - type: ndcg_at_100\r\n value: 90.642\r\n - type: ndcg_at_1000\r\n value: 90.688\r\n - type: ndcg_at_3\r\n value: 86.932\r\n - type: ndcg_at_5\r\n value: 88.408\r\n - type: precision_at_1\r\n value: 82.34\r\n - type: precision_at_10\r\n value: 13.675999999999998\r\n - type: precision_at_100\r\n value: 1.544\r\n - type: precision_at_1000\r\n value: 0.157\r\n - type: precision_at_3\r\n value: 38.24\r\n - type: precision_at_5\r\n value: 25.068\r\n - type: recall_at_1\r\n value: 71.486\r\n - type: recall_at_10\r\n value: 96.844\r\n - type: recall_at_100\r\n value: 99.843\r\n - type: 
recall_at_1000\r\n value: 99.996\r\n - type: recall_at_3\r\n value: 88.92099999999999\r\n - type: recall_at_5\r\n value: 93.215\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB RedditClustering\r\n type: mteb/reddit-clustering\r\n config: default\r\n split: test\r\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\r\n metrics:\r\n - type: v_measure\r\n value: 59.75758437908334\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB RedditClusteringP2P\r\n type: mteb/reddit-clustering-p2p\r\n config: default\r\n split: test\r\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\r\n metrics:\r\n - type: v_measure\r\n value: 68.03497914092789\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB SCIDOCS\r\n type: mteb/scidocs\r\n config: default\r\n split: test\r\n revision: None\r\n metrics:\r\n - type: map_at_1\r\n value: 5.808\r\n - type: map_at_10\r\n value: 16.059\r\n - type: map_at_100\r\n value: 19.048000000000002\r\n - type: map_at_1000\r\n value: 19.43\r\n - type: map_at_3\r\n value: 10.953\r\n - type: map_at_5\r\n value: 13.363\r\n - type: mrr_at_1\r\n value: 28.7\r\n - type: mrr_at_10\r\n value: 42.436\r\n - type: mrr_at_100\r\n value: 43.599\r\n - type: mrr_at_1000\r\n value: 43.62\r\n - type: mrr_at_3\r\n value: 38.45\r\n - type: mrr_at_5\r\n value: 40.89\r\n - type: ndcg_at_1\r\n value: 28.7\r\n - type: ndcg_at_10\r\n value: 26.346000000000004\r\n - type: ndcg_at_100\r\n value: 36.758\r\n - type: ndcg_at_1000\r\n value: 42.113\r\n - type: ndcg_at_3\r\n value: 24.254\r\n - type: ndcg_at_5\r\n value: 21.506\r\n - type: precision_at_1\r\n value: 28.7\r\n - type: precision_at_10\r\n value: 13.969999999999999\r\n - type: precision_at_100\r\n value: 2.881\r\n - type: precision_at_1000\r\n value: 0.414\r\n - type: precision_at_3\r\n value: 22.933\r\n - type: precision_at_5\r\n value: 19.220000000000002\r\n - type: recall_at_1\r\n value: 5.808\r\n - type: recall_at_10\r\n value: 28.310000000000002\r\n - type: recall_at_100\r\n value: 
58.475\r\n - type: recall_at_1000\r\n value: 84.072\r\n - type: recall_at_3\r\n value: 13.957\r\n - type: recall_at_5\r\n value: 19.515\r\n - task:\r\n type: STS\r\n dataset:\r\n name: MTEB SICK-R\r\n type: mteb/sickr-sts\r\n config: default\r\n split: test\r\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 82.39274129958557\r\n - type: cos_sim_spearman\r\n value: 79.78021235170053\r\n - type: euclidean_pearson\r\n value: 79.35335401300166\r\n - type: euclidean_spearman\r\n value: 79.7271870968275\r\n - type: manhattan_pearson\r\n value: 79.35256263340601\r\n - type: manhattan_spearman\r\n value: 79.76036386976321\r\n - task:\r\n type: STS\r\n dataset:\r\n name: MTEB STS12\r\n type: mteb/sts12-sts\r\n config: default\r\n split: test\r\n revision: a0d554a64d88156834ff5ae9920b964011b16384\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 83.99130429246708\r\n - type: cos_sim_spearman\r\n value: 73.88322811171203\r\n - type: euclidean_pearson\r\n value: 80.7569419170376\r\n - type: euclidean_spearman\r\n value: 73.82542155409597\r\n - type: manhattan_pearson\r\n value: 80.79468183847625\r\n - type: manhattan_spearman\r\n value: 73.87027144047784\r\n - task:\r\n type: STS\r\n dataset:\r\n name: MTEB STS13\r\n type: mteb/sts13-sts\r\n config: default\r\n split: test\r\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 84.88548789489907\r\n - type: cos_sim_spearman\r\n value: 85.07535893847255\r\n - type: euclidean_pearson\r\n value: 84.6637222061494\r\n - type: euclidean_spearman\r\n value: 85.14200626702456\r\n - type: manhattan_pearson\r\n value: 84.75327892344734\r\n - type: manhattan_spearman\r\n value: 85.24406181838596\r\n - task:\r\n type: STS\r\n dataset:\r\n name: MTEB STS14\r\n type: mteb/sts14-sts\r\n config: default\r\n split: test\r\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 
82.88140039325008\r\n - type: cos_sim_spearman\r\n value: 79.61211268112362\r\n - type: euclidean_pearson\r\n value: 81.29639728816458\r\n - type: euclidean_spearman\r\n value: 79.51284578041442\r\n - type: manhattan_pearson\r\n value: 81.3381797137111\r\n - type: manhattan_spearman\r\n value: 79.55683684039808\r\n - task:\r\n type: STS\r\n dataset:\r\n name: MTEB STS15\r\n type: mteb/sts15-sts\r\n config: default\r\n split: test\r\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 85.16716737270485\r\n - type: cos_sim_spearman\r\n value: 86.14823841857738\r\n - type: euclidean_pearson\r\n value: 85.36325733440725\r\n - type: euclidean_spearman\r\n value: 86.04919691402029\r\n - type: manhattan_pearson\r\n value: 85.3147511385052\r\n - type: manhattan_spearman\r\n value: 86.00676205857764\r\n - task:\r\n type: STS\r\n dataset:\r\n name: MTEB STS16\r\n type: mteb/sts16-sts\r\n config: default\r\n split: test\r\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 80.34266645861588\r\n - type: cos_sim_spearman\r\n value: 81.59914035005882\r\n - type: euclidean_pearson\r\n value: 81.15053076245988\r\n - type: euclidean_spearman\r\n value: 81.52776915798489\r\n - type: manhattan_pearson\r\n value: 81.1819647418673\r\n - type: manhattan_spearman\r\n value: 81.57479527353556\r\n - task:\r\n type: STS\r\n dataset:\r\n name: MTEB STS17 (en-en)\r\n type: mteb/sts17-crosslingual-sts\r\n config: en-en\r\n split: test\r\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 89.38263326821439\r\n - type: cos_sim_spearman\r\n value: 89.10946308202642\r\n - type: euclidean_pearson\r\n value: 88.87831312540068\r\n - type: euclidean_spearman\r\n value: 89.03615865973664\r\n - type: manhattan_pearson\r\n value: 88.79835539970384\r\n - type: manhattan_spearman\r\n value: 88.9766156339753\r\n - task:\r\n type: STS\r\n dataset:\r\n 
name: MTEB STS22 (en)\r\n type: mteb/sts22-crosslingual-sts\r\n config: en\r\n split: test\r\n revision: eea2b4fe26a775864c896887d910b76a8098ad3f\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 70.1574915581685\r\n - type: cos_sim_spearman\r\n value: 70.59144980004054\r\n - type: euclidean_pearson\r\n value: 71.43246306918755\r\n - type: euclidean_spearman\r\n value: 70.5544189562984\r\n - type: manhattan_pearson\r\n value: 71.4071414609503\r\n - type: manhattan_spearman\r\n value: 70.31799126163712\r\n - task:\r\n type: STS\r\n dataset:\r\n name: MTEB STSBenchmark\r\n type: mteb/stsbenchmark-sts\r\n config: default\r\n split: test\r\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 83.36215796635351\r\n - type: cos_sim_spearman\r\n value: 83.07276756467208\r\n - type: euclidean_pearson\r\n value: 83.06690453635584\r\n - type: euclidean_spearman\r\n value: 82.9635366303289\r\n - type: manhattan_pearson\r\n value: 83.04994049700815\r\n - type: manhattan_spearman\r\n value: 82.98120125356036\r\n - task:\r\n type: Reranking\r\n dataset:\r\n name: MTEB SciDocsRR\r\n type: mteb/scidocs-reranking\r\n config: default\r\n split: test\r\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\r\n metrics:\r\n - type: map\r\n value: 86.92530011616722\r\n - type: mrr\r\n value: 96.21826793395421\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB SciFact\r\n type: mteb/scifact\r\n config: default\r\n split: test\r\n revision: 0228b52cf27578f30900b9e5271d331663a030d7\r\n metrics:\r\n - type: map_at_1\r\n value: 65.75\r\n - type: map_at_10\r\n value: 77.701\r\n - type: map_at_100\r\n value: 78.005\r\n - type: map_at_1000\r\n value: 78.006\r\n - type: map_at_3\r\n value: 75.48\r\n - type: map_at_5\r\n value: 76.927\r\n - type: mrr_at_1\r\n value: 68.333\r\n - type: mrr_at_10\r\n value: 78.511\r\n - type: mrr_at_100\r\n value: 78.704\r\n - type: mrr_at_1000\r\n value: 78.704\r\n - type: mrr_at_3\r\n value: 77\r\n - 
type: mrr_at_5\r\n value: 78.083\r\n - type: ndcg_at_1\r\n value: 68.333\r\n - type: ndcg_at_10\r\n value: 82.42699999999999\r\n - type: ndcg_at_100\r\n value: 83.486\r\n - type: ndcg_at_1000\r\n value: 83.511\r\n - type: ndcg_at_3\r\n value: 78.96300000000001\r\n - type: ndcg_at_5\r\n value: 81.028\r\n - type: precision_at_1\r\n value: 68.333\r\n - type: precision_at_10\r\n value: 10.667\r\n - type: precision_at_100\r\n value: 1.127\r\n - type: precision_at_1000\r\n value: 0.11299999999999999\r\n - type: precision_at_3\r\n value: 31.333\r\n - type: precision_at_5\r\n value: 20.133000000000003\r\n - type: recall_at_1\r\n value: 65.75\r\n - type: recall_at_10\r\n value: 95.578\r\n - type: recall_at_100\r\n value: 99.833\r\n - type: recall_at_1000\r\n value: 100\r\n - type: recall_at_3\r\n value: 86.506\r\n - type: recall_at_5\r\n value: 91.75\r\n - task:\r\n type: PairClassification\r\n dataset:\r\n name: MTEB SprintDuplicateQuestions\r\n type: mteb/sprintduplicatequestions-pairclassification\r\n config: default\r\n split: test\r\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\r\n metrics:\r\n - type: cos_sim_accuracy\r\n value: 99.75247524752476\r\n - type: cos_sim_ap\r\n value: 94.16065078045173\r\n - type: cos_sim_f1\r\n value: 87.22986247544205\r\n - type: cos_sim_precision\r\n value: 85.71428571428571\r\n - type: cos_sim_recall\r\n value: 88.8\r\n - type: dot_accuracy\r\n value: 99.74554455445545\r\n - type: dot_ap\r\n value: 93.90633887037264\r\n - type: dot_f1\r\n value: 86.9873417721519\r\n - type: dot_precision\r\n value: 88.1025641025641\r\n - type: dot_recall\r\n value: 85.9\r\n - type: euclidean_accuracy\r\n value: 99.75247524752476\r\n - type: euclidean_ap\r\n value: 94.17466319018055\r\n - type: euclidean_f1\r\n value: 87.3405299313052\r\n - type: euclidean_precision\r\n value: 85.74181117533719\r\n - type: euclidean_recall\r\n value: 89\r\n - type: manhattan_accuracy\r\n value: 99.75445544554455\r\n - type: manhattan_ap\r\n value: 
94.27688371923577\r\n - type: manhattan_f1\r\n value: 87.74002954209749\r\n - type: manhattan_precision\r\n value: 86.42095053346266\r\n - type: manhattan_recall\r\n value: 89.1\r\n - type: max_accuracy\r\n value: 99.75445544554455\r\n - type: max_ap\r\n value: 94.27688371923577\r\n - type: max_f1\r\n value: 87.74002954209749\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB StackExchangeClustering\r\n type: mteb/stackexchange-clustering\r\n config: default\r\n split: test\r\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\r\n metrics:\r\n - type: v_measure\r\n value: 71.26500637517056\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB StackExchangeClusteringP2P\r\n type: mteb/stackexchange-clustering-p2p\r\n config: default\r\n split: test\r\n revision: 815ca46b2622cec33ccafc3735d572c266efdb44\r\n metrics:\r\n - type: v_measure\r\n value: 39.17507906280528\r\n - task:\r\n type: Reranking\r\n dataset:\r\n name: MTEB StackOverflowDupQuestions\r\n type: mteb/stackoverflowdupquestions-reranking\r\n config: default\r\n split: test\r\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\r\n metrics:\r\n - type: map\r\n value: 52.4848744828509\r\n - type: mrr\r\n value: 53.33678168236992\r\n - task:\r\n type: Summarization\r\n dataset:\r\n name: MTEB SummEval\r\n type: mteb/summeval\r\n config: default\r\n split: test\r\n revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c\r\n metrics:\r\n - type: cos_sim_pearson\r\n value: 30.599864323827887\r\n - type: cos_sim_spearman\r\n value: 30.91116204665598\r\n - type: dot_pearson\r\n value: 30.82637894269936\r\n - type: dot_spearman\r\n value: 30.957573868416066\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB TRECCOVID\r\n type: mteb/trec-covid\r\n config: default\r\n split: test\r\n revision: None\r\n metrics:\r\n - type: map_at_1\r\n value: 0.23600000000000002\r\n - type: map_at_10\r\n value: 1.892\r\n - type: map_at_100\r\n value: 11.586\r\n - type: map_at_1000\r\n value: 
27.761999999999997\r\n - type: map_at_3\r\n value: 0.653\r\n - type: map_at_5\r\n value: 1.028\r\n - type: mrr_at_1\r\n value: 88\r\n - type: mrr_at_10\r\n value: 94\r\n - type: mrr_at_100\r\n value: 94\r\n - type: mrr_at_1000\r\n value: 94\r\n - type: mrr_at_3\r\n value: 94\r\n - type: mrr_at_5\r\n value: 94\r\n - type: ndcg_at_1\r\n value: 82\r\n - type: ndcg_at_10\r\n value: 77.48899999999999\r\n - type: ndcg_at_100\r\n value: 60.141\r\n - type: ndcg_at_1000\r\n value: 54.228\r\n - type: ndcg_at_3\r\n value: 82.358\r\n - type: ndcg_at_5\r\n value: 80.449\r\n - type: precision_at_1\r\n value: 88\r\n - type: precision_at_10\r\n value: 82.19999999999999\r\n - type: precision_at_100\r\n value: 61.760000000000005\r\n - type: precision_at_1000\r\n value: 23.684\r\n - type: precision_at_3\r\n value: 88\r\n - type: precision_at_5\r\n value: 85.6\r\n - type: recall_at_1\r\n value: 0.23600000000000002\r\n - type: recall_at_10\r\n value: 2.117\r\n - type: recall_at_100\r\n value: 14.985000000000001\r\n - type: recall_at_1000\r\n value: 51.107\r\n - type: recall_at_3\r\n value: 0.688\r\n - type: recall_at_5\r\n value: 1.1039999999999999\r\n - task:\r\n type: Retrieval\r\n dataset:\r\n name: MTEB Touche2020\r\n type: mteb/touche2020\r\n config: default\r\n split: test\r\n revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f\r\n metrics:\r\n - type: map_at_1\r\n value: 2.3040000000000003\r\n - type: map_at_10\r\n value: 9.025\r\n - type: map_at_100\r\n value: 15.312999999999999\r\n - type: map_at_1000\r\n value: 16.954\r\n - type: map_at_3\r\n value: 4.981\r\n - type: map_at_5\r\n value: 6.32\r\n - type: mrr_at_1\r\n value: 24.490000000000002\r\n - type: mrr_at_10\r\n value: 39.835\r\n - type: mrr_at_100\r\n value: 40.8\r\n - type: mrr_at_1000\r\n value: 40.8\r\n - type: mrr_at_3\r\n value: 35.034\r\n - type: mrr_at_5\r\n value: 37.687\r\n - type: ndcg_at_1\r\n value: 22.448999999999998\r\n - type: ndcg_at_10\r\n value: 22.545\r\n - type: ndcg_at_100\r\n value: 
35.931999999999995\r\n - type: ndcg_at_1000\r\n value: 47.665\r\n - type: ndcg_at_3\r\n value: 23.311\r\n - type: ndcg_at_5\r\n value: 22.421\r\n - type: precision_at_1\r\n value: 24.490000000000002\r\n - type: precision_at_10\r\n value: 20.408\r\n - type: precision_at_100\r\n value: 7.815999999999999\r\n - type: precision_at_1000\r\n value: 1.553\r\n - type: precision_at_3\r\n value: 25.169999999999998\r\n - type: precision_at_5\r\n value: 23.265\r\n - type: recall_at_1\r\n value: 2.3040000000000003\r\n - type: recall_at_10\r\n value: 15.693999999999999\r\n - type: recall_at_100\r\n value: 48.917\r\n - type: recall_at_1000\r\n value: 84.964\r\n - type: recall_at_3\r\n value: 6.026\r\n - type: recall_at_5\r\n value: 9.066\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB ToxicConversationsClassification\r\n type: mteb/toxic_conversations_50k\r\n config: default\r\n split: test\r\n revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c\r\n metrics:\r\n - type: accuracy\r\n value: 82.6074\r\n - type: ap\r\n value: 23.187467098602013\r\n - type: f1\r\n value: 65.36829506379657\r\n - task:\r\n type: Classification\r\n dataset:\r\n name: MTEB TweetSentimentExtractionClassification\r\n type: mteb/tweet_sentiment_extraction\r\n config: default\r\n split: test\r\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\r\n metrics:\r\n - type: accuracy\r\n value: 63.16355404640635\r\n - type: f1\r\n value: 63.534725639863346\r\n - task:\r\n type: Clustering\r\n dataset:\r\n name: MTEB TwentyNewsgroupsClustering\r\n type: mteb/twentynewsgroups-clustering\r\n config: default\r\n split: test\r\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\r\n metrics:\r\n - type: v_measure\r\n value: 50.91004094411276\r\n - task:\r\n type: PairClassification\r\n dataset:\r\n name: MTEB TwitterSemEval2015\r\n type: mteb/twittersemeval2015-pairclassification\r\n config: default\r\n split: test\r\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\r\n metrics:\r\n - type: 
cos_sim_accuracy\r\n value: 86.55301901412649\r\n - type: cos_sim_ap\r\n value: 75.25312618556728\r\n - type: cos_sim_f1\r\n value: 68.76561719140429\r\n - type: cos_sim_precision\r\n value: 65.3061224489796\r\n - type: cos_sim_recall\r\n value: 72.61213720316623\r\n - type: dot_accuracy\r\n value: 86.29671574178936\r\n - type: dot_ap\r\n value: 75.11910195501207\r\n - type: dot_f1\r\n value: 68.44048376830045\r\n - type: dot_precision\r\n value: 66.12546125461255\r\n - type: dot_recall\r\n value: 70.92348284960423\r\n - type: euclidean_accuracy\r\n value: 86.5828217202122\r\n - type: euclidean_ap\r\n value: 75.22986344900924\r\n - type: euclidean_f1\r\n value: 68.81267797449549\r\n - type: euclidean_precision\r\n value: 64.8238861674831\r\n - type: euclidean_recall\r\n value: 73.3245382585752\r\n - type: manhattan_accuracy\r\n value: 86.61262442629791\r\n - type: manhattan_ap\r\n value: 75.24401608557328\r\n - type: manhattan_f1\r\n value: 68.80473982483257\r\n - type: manhattan_precision\r\n value: 67.21187720181177\r\n - type: manhattan_recall\r\n value: 70.47493403693932\r\n - type: max_accuracy\r\n value: 86.61262442629791\r\n - type: max_ap\r\n value: 75.25312618556728\r\n - type: max_f1\r\n value: 68.81267797449549\r\n - task:\r\n type: PairClassification\r\n dataset:\r\n name: MTEB TwitterURLCorpus\r\n type: mteb/twitterurlcorpus-pairclassification\r\n config: default\r\n split: test\r\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\r\n metrics:\r\n - type: cos_sim_accuracy\r\n value: 88.10688089416696\r\n - type: cos_sim_ap\r\n value: 84.17862178779863\r\n - type: cos_sim_f1\r\n value: 76.17305208781748\r\n - type: cos_sim_precision\r\n value: 71.31246641590543\r\n - type: cos_sim_recall\r\n value: 81.74468740375731\r\n - type: dot_accuracy\r\n value: 88.1844995536927\r\n - type: dot_ap\r\n value: 84.33816725235876\r\n - type: dot_f1\r\n value: 76.43554032918746\r\n - type: dot_precision\r\n value: 74.01557767200346\r\n - type: dot_recall\r\n value: 
79.0190945488143\r\n - type: euclidean_accuracy\r\n value: 88.07001203089223\r\n - type: euclidean_ap\r\n value: 84.12267000814985\r\n - type: euclidean_f1\r\n value: 76.12232600180778\r\n - type: euclidean_precision\r\n value: 74.50604541433205\r\n - type: euclidean_recall\r\n value: 77.81028641823221\r\n - type: manhattan_accuracy\r\n value: 88.06419063142779\r\n - type: manhattan_ap\r\n value: 84.11648917164187\r\n - type: manhattan_f1\r\n value: 76.20579953925474\r\n - type: manhattan_precision\r\n value: 72.56772755762935\r\n - type: manhattan_recall\r\n value: 80.22790267939637\r\n - type: max_accuracy\r\n value: 88.1844995536927\r\n - type: max_ap\r\n value: 84.33816725235876\r\n - type: max_f1\r\n value: 76.43554032918746\r\n---\r\n\r\n\r\n\r\n# gte-large-en-v1.5\r\n\r\nWe introduce `gte-v1.5` series, upgraded `gte` embeddings that support the context length of up to **8192**, while further enhancing model performance.\r\nThe models are built upon the `transformer++` encoder [backbone](https://huggingface.co/Alibaba-NLP/new-impl) (BERT + RoPE + GLU).\r\n\r\nThe `gte-v1.5` series achieve state-of-the-art scores on the MTEB benchmark within the same model size category and prodvide competitive on the LoCo long-context retrieval tests (refer to [Evaluation](#evaluation)).\r\n\r\nWe also present the [`gte-Qwen1.5-7B-instruct`](https://huggingface.co/Alibaba-NLP/gte-Qwen1.5-7B-instruct),\r\na SOTA instruction-tuned multi-lingual embedding model that ranked 2nd in MTEB and 1st in C-MTEB.\r\n\r\n\r\n\r\n- **Developed by:** Institute for Intelligent Computing, Alibaba Group\r\n- **Model type:** Text Embeddings\r\n- **Paper:** [mGTE: Generalized Long-Context Text Representation and Reranking\r\nModels for Multilingual Text Retrieval](https://arxiv.org/pdf/2407.19669)\r\n\r\n\r\n\r\n### Model list\r\n\r\n| Models | Language | Model Size | Max Seq. 
Length | Dimension | MTEB-en | LoCo |\r\n|:-----: | :-----: |:-----: |:-----: |:-----: | :-----: | :-----: |\r\n|[`gte-Qwen1.5-7B-instruct`](https://huggingface.co/Alibaba-NLP/gte-Qwen1.5-7B-instruct)| Multiple | 7720 | 32768 | 4096 | 67.34 | 87.57 |\r\n|[`gte-large-en-v1.5`](https://huggingface.co/Alibaba-NLP/gte-large-en-v1.5) | English | 434 | 8192 | 1024 | 65.39 | 86.71 |\r\n|[`gte-base-en-v1.5`](https://huggingface.co/Alibaba-NLP/gte-base-en-v1.5) | English | 137 | 8192 | 768 | 64.11 | 87.44 |\r\n\r\n\r\n## How to Get Started with the Model\r\n\r\nUse the code below to get started with the model.\r\n\r\n```python\r\n# Requires transformers>=4.36.0\r\n\r\nimport torch.nn.functional as F\r\nfrom transformers import AutoModel, AutoTokenizer\r\n\r\ninput_texts = [\r\n \"what is the capital of China?\",\r\n \"how to implement quick sort in python?\",\r\n \"Beijing\",\r\n \"sorting algorithms\"\r\n]\r\n\r\nmodel_path = 'Alibaba-NLP/gte-large-en-v1.5'\r\ntokenizer = AutoTokenizer.from_pretrained(model_path)\r\nmodel = AutoModel.from_pretrained(model_path, trust_remote_code=True)\r\n\r\n# Tokenize the input texts\r\nbatch_dict = tokenizer(input_texts, max_length=8192, padding=True, truncation=True, return_tensors='pt')\r\n\r\noutputs = model(**batch_dict)\r\nembeddings = outputs.last_hidden_state[:, 0]\r\n \r\n# (Optionally) normalize embeddings\r\nembeddings = F.normalize(embeddings, p=2, dim=1)\r\nscores = (embeddings[:1] @ embeddings[1:].T) * 100\r\nprint(scores.tolist())\r\n```\r\n\r\n**It is recommended to install xformers and enable unpadding for acceleration, refer to [enable-unpadding-and-xformers](https://huggingface.co/Alibaba-NLP/new-impl#recommendation-enable-unpadding-and-acceleration-with-xformers).**\r\n\r\n\r\nUse with sentence-transformers:\r\n\r\n```python\r\n# Requires sentence_transformers>=2.7.0\r\n\r\nfrom sentence_transformers import SentenceTransformer\r\nfrom sentence_transformers.util import cos_sim\r\n\r\nsentences = ['That is a happy 
person', 'That is a very happy person']\r\n\r\nmodel = SentenceTransformer('Alibaba-NLP/gte-large-en-v1.5', trust_remote_code=True)\r\nembeddings = model.encode(sentences)\r\nprint(cos_sim(embeddings[0], embeddings[1]))\r\n```\r\n\r\nUse with `transformers.js`:\r\n\r\n```js\r\n// npm i @xenova/transformers\r\nimport { pipeline, dot } from '@xenova/transformers';\r\n\r\n// Create feature extraction pipeline\r\nconst extractor = await pipeline('feature-extraction', 'Alibaba-NLP/gte-large-en-v1.5', {\r\n quantized: false, // Comment out this line to use the quantized version\r\n});\r\n\r\n// Generate sentence embeddings\r\nconst sentences = [\r\n \"what is the capital of China?\",\r\n \"how to implement quick sort in python?\",\r\n \"Beijing\",\r\n \"sorting algorithms\"\r\n]\r\nconst output = await extractor(sentences, { normalize: true, pooling: 'cls' });\r\n\r\n// Compute similarity scores\r\nconst [source_embeddings, ...document_embeddings ] = output.tolist();\r\nconst similarities = document_embeddings.map(x => 100 * dot(source_embeddings, x));\r\nconsole.log(similarities); // [41.86354093370361, 77.07076371259589, 37.02981979677899]\r\n```\r\n\r\n## Training Details\r\n\r\n### Training Data\r\n\r\n- Masked language modeling (MLM): `c4-en`\r\n- Weak-supervised contrastive pre-training (CPT): [GTE](https://arxiv.org/pdf/2308.03281.pdf) pre-training data\r\n- Supervised contrastive fine-tuning: [GTE](https://arxiv.org/pdf/2308.03281.pdf) fine-tuning data\r\n\r\n### Training Procedure \r\n\r\nTo enable the backbone model to support a context length of 8192, we adopted a multi-stage training strategy.\r\nThe model first undergoes preliminary MLM pre-training on shorter lengths.\r\nAnd then, we resample the data, reducing the proportion of short texts, and continue the MLM pre-training.\r\n\r\nThe entire training process is as follows:\r\n- MLM-512: lr 2e-4, mlm_probability 0.3, batch_size 4096, num_steps 300000, rope_base 10000\r\n- MLM-2048: lr 5e-5, mlm_probability 
0.3, batch_size 4096, num_steps 30000, rope_base 10000\r\n- [MLM-8192](https://huggingface.co/Alibaba-NLP/gte-en-mlm-large): lr 5e-5, mlm_probability 0.3, batch_size 1024, num_steps 30000, rope_base 160000\r\n- CPT: max_len 512, lr 5e-5, batch_size 28672, num_steps 100000\r\n- Fine-tuning: TODO\r\n\r\n\r\n## Evaluation\r\n\r\n\r\n### MTEB\r\n\r\nThe results of other models are retrieved from [MTEB leaderboard](https://huggingface.co/spaces/mteb/leaderboard).\r\n\r\nThe gte evaluation setting: `mteb==1.2.0, fp16 auto mix precision, max_length=8192`, and set ntk scaling factor to 2 (equivalent to rope_base * 2).\r\n\r\n| Model Name | Param Size (M) | Dimension | Sequence Length | Average (56) | Class. (12) | Clust. (11) | Pair Class. (3) | Reran. (4) | Retr. (15) | STS (10) | Summ. (1) |\r\n|:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\r\n| [**gte-large-en-v1.5**](https://huggingface.co/Alibaba-NLP/gte-large-en-v1.5) | 409 | 1024 | 8192 | **65.39** | 77.75 | 47.95 | 84.63 | 58.50 | 57.91 | 81.43 | 30.91 |\r\n| [mxbai-embed-large-v1](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) | 335 | 1024 | 512 | 64.68 | 75.64 | 46.71 | 87.2 | 60.11 | 54.39 | 85 | 32.71 |\r\n| [multilingual-e5-large-instruct](https://huggingface.co/intfloat/multilingual-e5-large-instruct) | 560 | 1024 | 514 | 64.41 | 77.56 | 47.1 | 86.19 | 58.58 | 52.47 | 84.78 | 30.39 |\r\n| [bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5)| 335 | 1024 | 512 | 64.23 | 75.97 | 46.08 | 87.12 | 60.03 | 54.29 | 83.11 | 31.61 |\r\n| [**gte-base-en-v1.5**](https://huggingface.co/Alibaba-NLP/gte-base-en-v1.5) | 137 | 768 | 8192 | **64.11** | 77.17 | 46.82 | 85.33 | 57.66 | 54.09 | 81.97 | 31.17 |\r\n| [bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5)| 109 | 768 | 512 | 63.55 | 75.53 | 45.77 | 86.55 | 58.86 | 53.25 | 82.4 | 31.07 |\r\n\r\n\r\n### LoCo\r\n\r\n| Model Name | Dimension | Sequence Length | Average (5) | QsmsumRetrieval | 
SummScreenRetrieval | QasperAbastractRetrieval | QasperTitleRetrieval | GovReportRetrieval |\r\n|:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|\r\n| [gte-qwen1.5-7b](https://huggingface.co/Alibaba-NLP/gte-qwen1.5-7b) | 4096 | 32768 | 87.57 | 49.37 | 93.10 | 99.67 | 97.54 | 98.21 | \r\n| [gte-large-v1.5](https://huggingface.co/Alibaba-NLP/gte-large-v1.5) |1024 | 8192 | 86.71 | 44.55 | 92.61 | 99.82 | 97.81 | 98.74 |\r\n| [gte-base-v1.5](https://huggingface.co/Alibaba-NLP/gte-base-v1.5) | 768 | 8192 | 87.44 | 49.91 | 91.78 | 99.82 | 97.13 | 98.58 |\r\n\r\n\r\n\r\n## Citation\r\n\r\nIf you find our paper or models helpful, please consider citing them as follows:\r\n\r\n```\r\n@article{zhang2024mgte,\r\n title={mGTE: Generalized Long-Context Text Representation and Reranking Models for Multilingual Text Retrieval},\r\n author={Zhang, Xin and Zhang, Yanzhao and Long, Dingkun and Xie, Wen and Dai, Ziqi and Tang, Jialong and Lin, Huan and Yang, Baosong and Xie, Pengjun and Huang, Fei and others},\r\n journal={arXiv preprint arXiv:2407.19669},\r\n year={2024}\r\n}\r\n\r\n@article{li2023towards,\r\n title={Towards general text embeddings with multi-stage contrastive learning},\r\n author={Li, Zehan and Zhang, Xin and Zhang, Yanzhao and Long, Dingkun and Xie, Pengjun and Zhang, Meishan},\r\n journal={arXiv preprint arXiv:2308.03281},\r\n year={2023}\r\n}\r\n```"},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n 
\"SCIFACT\"\n]"}}}],"truncated":false,"partial":false},"paginationData":{"pageIndex":24,"numItemsPerPage":100,"numTotalItems":5602,"offset":2400,"length":100}},"jwt":"eyJhbGciOiJFZERTQSJ9.eyJyZWFkIjp0cnVlLCJwZXJtaXNzaW9ucyI6eyJyZXBvLmNvbnRlbnQucmVhZCI6dHJ1ZX0sImlhdCI6MTc1ODcxOTg0NCwic3ViIjoiL2RhdGFzZXRzL0V1YW55dS9iaWdiaW9fZGF0YXNldF9tb2RlbHMiLCJleHAiOjE3NTg3MjM0NDQsImlzcyI6Imh0dHBzOi8vaHVnZ2luZ2ZhY2UuY28ifQ.xAOOu6HM-2kxvG5b73dJETCpvSK6XwaqXgKQtRTR__GWbQcssqY6SobWnGQH4M2vIrQPYGOxmVSmurtM5of-AA","displayUrls":true},"discussionsStats":{"closed":0,"open":1,"total":1},"fullWidth":true,"hasGatedAccess":true,"hasFullAccess":true,"isEmbedded":false,"savedQueries":{"community":[],"user":[]}}">

id
stringlengths
9
104
author
stringlengths
3
36
task_category
stringclasses
32 values
tags
listlengths
1
4.05k
created_time
timestamp[ns, tz=UTC]date
2022-03-02 23:29:04
2025-03-18 02:34:30
last_modified
stringdate
2021-02-13 00:06:56
2025-03-18 09:30:19
downloads
int64
0
15.6M
likes
int64
0
4.86k
README
stringlengths
44
1.01M
matched_bigbio_names
listlengths
1
8
aimarsg/prueba1
aimarsg
token-classification
[ "transformers", "pytorch", "tensorboard", "roberta", "token-classification", "generated_from_trainer", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-03-25T17:47:11Z
2023-03-25T18:25:04+00:00
19
0
--- license: apache-2.0 metrics: - precision - recall - f1 - accuracy tags: - generated_from_trainer model-index: - name: prueba1 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # prueba1 This model is a fine-tuned version of [PlanTL-GOB-ES/bsc-bio-ehr-es-pharmaconer](https://huggingface.co/PlanTL-GOB-ES/bsc-bio-ehr-es-pharmaconer) on an unknown dataset. It achieves the following results on the evaluation set: - Loss: 0.1842 - Precision: 0.7072 - Recall: 0.6255 - F1: 0.6638 - Accuracy: 0.9724 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 3.5e-05 - train_batch_size: 32 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 32 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | No log | 1.0 | 29 | 0.1520 | 0.5625 | 0.6813 | 0.6162 | 0.9659 | | No log | 2.0 | 58 | 0.1552 | 0.6293 | 0.5817 | 0.6046 | 0.9686 | | No log | 3.0 | 87 | 0.1586 | 0.6667 | 0.5737 | 0.6167 | 0.9709 | | No log | 4.0 | 116 | 0.1595 | 0.6981 | 0.5896 | 0.6393 | 0.9722 | | No log | 5.0 | 145 | 0.1699 | 0.6729 | 0.5737 | 0.6194 | 0.9676 | | No log | 6.0 | 174 | 0.1753 | 0.6577 | 0.5817 | 0.6173 | 0.9689 | | No log | 7.0 | 203 | 0.1665 | 0.6540 | 0.6175 | 0.6352 | 0.9681 | | No log | 8.0 | 232 | 0.1792 | 0.7157 | 0.5618 | 0.6295 | 0.9712 | | No log | 9.0 | 261 | 0.1682 | 0.7048 | 0.5896 | 0.6421 | 0.9714 | | No log | 10.0 | 290 | 0.1732 | 0.7366 | 0.6016 | 0.6623 | 0.9724 | | No log | 11.0 | 
319 | 0.1663 | 0.672 | 0.6693 | 0.6707 | 0.9725 | | No log | 12.0 | 348 | 0.1882 | 0.7071 | 0.5578 | 0.6236 | 0.9692 | | No log | 13.0 | 377 | 0.1825 | 0.7103 | 0.6056 | 0.6538 | 0.9710 | | No log | 14.0 | 406 | 0.1755 | 0.7164 | 0.5737 | 0.6372 | 0.9709 | | No log | 15.0 | 435 | 0.1950 | 0.6842 | 0.5697 | 0.6217 | 0.9689 | | No log | 16.0 | 464 | 0.1660 | 0.7240 | 0.6375 | 0.6780 | 0.9727 | | No log | 17.0 | 493 | 0.1833 | 0.7255 | 0.5896 | 0.6505 | 0.9724 | | 0.0061 | 18.0 | 522 | 0.1832 | 0.7190 | 0.6016 | 0.6551 | 0.9702 | | 0.0061 | 19.0 | 551 | 0.1762 | 0.6828 | 0.6175 | 0.6485 | 0.9707 | | 0.0061 | 20.0 | 580 | 0.1785 | 0.7346 | 0.6175 | 0.6710 | 0.9734 | | 0.0061 | 21.0 | 609 | 0.1791 | 0.7093 | 0.6414 | 0.6736 | 0.9739 | | 0.0061 | 22.0 | 638 | 0.1843 | 0.7476 | 0.6255 | 0.6811 | 0.9737 | | 0.0061 | 23.0 | 667 | 0.1837 | 0.7371 | 0.6255 | 0.6767 | 0.9734 | | 0.0061 | 24.0 | 696 | 0.1867 | 0.7176 | 0.6175 | 0.6638 | 0.9715 | | 0.0061 | 25.0 | 725 | 0.1844 | 0.7089 | 0.6016 | 0.6509 | 0.9710 | | 0.0061 | 26.0 | 754 | 0.1815 | 0.7072 | 0.6255 | 0.6638 | 0.9725 | | 0.0061 | 27.0 | 783 | 0.1822 | 0.7021 | 0.6574 | 0.6790 | 0.9737 | | 0.0061 | 28.0 | 812 | 0.1853 | 0.7048 | 0.6375 | 0.6695 | 0.9732 | | 0.0061 | 29.0 | 841 | 0.1845 | 0.7069 | 0.6534 | 0.6791 | 0.9735 | | 0.0061 | 30.0 | 870 | 0.1827 | 0.7004 | 0.6614 | 0.6803 | 0.9735 | | 0.0061 | 31.0 | 899 | 0.1850 | 0.7014 | 0.6175 | 0.6568 | 0.9719 | | 0.0061 | 32.0 | 928 | 0.1842 | 0.7072 | 0.6255 | 0.6638 | 0.9724 | ### Framework versions - Transformers 4.27.3 - Pytorch 1.13.1+cu116 - Datasets 2.10.1 - Tokenizers 0.13.2
[ "PHARMACONER" ]
annafavaro/BIO_GPT_NER_FINETUNED_C
annafavaro
token-classification
[ "transformers", "pytorch", "tensorboard", "gpt2", "token-classification", "generated_from_trainer", "dataset:ncbi_disease", "license:mit", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-04-14T14:33:34Z
2023-04-14T14:54:08+00:00
19
0
--- datasets: - ncbi_disease license: mit metrics: - precision - recall - f1 - accuracy tags: - generated_from_trainer model-index: - name: BIO_GPT_NER_FINETUNED_C results: - task: type: token-classification name: Token Classification dataset: name: ncbi_disease type: ncbi_disease config: ncbi_disease split: validation args: ncbi_disease metrics: - type: precision value: 0.44176706827309237 name: Precision - type: recall value: 0.5583756345177665 name: Recall - type: f1 value: 0.4932735426008968 name: F1 - type: accuracy value: 0.958054734877935 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # BIO_GPT_NER_FINETUNED_C This model is a fine-tuned version of [microsoft/biogpt](https://huggingface.co/microsoft/biogpt) on the ncbi_disease dataset. It achieves the following results on the evaluation set: - Loss: 0.1466 - Precision: 0.4418 - Recall: 0.5584 - F1: 0.4933 - Accuracy: 0.9581 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.2989 | 1.0 | 680 | 0.1687 | 0.3350 | 0.4226 | 0.3737 | 0.9474 | | 0.1764 | 2.0 | 1360 | 0.1425 | 0.4289 | 0.5241 | 0.4717 | 0.9560 | | 0.0942 | 3.0 | 2040 | 0.1466 | 0.4418 | 0.5584 | 0.4933 | 0.9581 | ### Framework versions - Transformers 4.28.0 - Pytorch 2.0.0+cu118 - Datasets 2.11.0 - 
Tokenizers 0.13.3
[ "NCBI DISEASE" ]
Consensus/instructor-base
Consensus
sentence-similarity
[ "sentence-transformers", "pytorch", "t5", "text-embedding", "embeddings", "information-retrieval", "beir", "text-classification", "language-model", "text-clustering", "text-semantic-similarity", "text-evaluation", "prompt-retrieval", "text-reranking", "feature-extraction", "sentence-similarity", "transformers", "English", "Sentence Similarity", "natural_questions", "ms_marco", "fever", "hotpot_qa", "mteb", "en", "arxiv:2212.09741", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "region:us" ]
2023-05-10T18:03:14Z
2023-05-10T18:07:07+00:00
19
0
--- language: en license: apache-2.0 pipeline_tag: sentence-similarity tags: - text-embedding - embeddings - information-retrieval - beir - text-classification - language-model - text-clustering - text-semantic-similarity - text-evaluation - prompt-retrieval - text-reranking - sentence-transformers - feature-extraction - sentence-similarity - transformers - t5 - English - Sentence Similarity - natural_questions - ms_marco - fever - hotpot_qa - mteb inference: false model-index: - name: final_base_results results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 86.2089552238806 - type: ap value: 55.76273850794966 - type: f1 value: 81.26104211414781 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 88.35995000000001 - type: ap value: 84.18839957309655 - type: f1 value: 88.317619250081 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 44.64 - type: f1 value: 42.48663956478136 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 27.383000000000003 - type: map_at_10 value: 43.024 - type: map_at_100 value: 44.023 - type: map_at_1000 value: 44.025999999999996 - type: map_at_3 value: 37.684 - type: map_at_5 value: 40.884 - type: mrr_at_1 value: 28.094 - type: mrr_at_10 value: 43.315 - type: mrr_at_100 value: 44.313 - type: mrr_at_1000 value: 44.317 - type: mrr_at_3 value: 37.862 - type: mrr_at_5 value: 41.155 - type: ndcg_at_1 value: 27.383000000000003 - type: ndcg_at_10 
value: 52.032000000000004 - type: ndcg_at_100 value: 56.19499999999999 - type: ndcg_at_1000 value: 56.272 - type: ndcg_at_3 value: 41.166000000000004 - type: ndcg_at_5 value: 46.92 - type: precision_at_1 value: 27.383000000000003 - type: precision_at_10 value: 8.087 - type: precision_at_100 value: 0.989 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 17.093 - type: precision_at_5 value: 13.044 - type: recall_at_1 value: 27.383000000000003 - type: recall_at_10 value: 80.868 - type: recall_at_100 value: 98.86200000000001 - type: recall_at_1000 value: 99.431 - type: recall_at_3 value: 51.28 - type: recall_at_5 value: 65.22 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 39.68441054431849 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 29.188539728343844 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 63.173362687519784 - type: mrr value: 76.18860748362133 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_spearman value: 82.30789953771232 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 77.03571428571428 - type: f1 value: 75.87384305045917 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 
65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 32.98041170516364 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 25.71652988451154 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 33.739999999999995 - type: map_at_10 value: 46.197 - type: map_at_100 value: 47.814 - type: map_at_1000 value: 47.934 - type: map_at_3 value: 43.091 - type: map_at_5 value: 44.81 - type: mrr_at_1 value: 41.059 - type: mrr_at_10 value: 52.292 - type: mrr_at_100 value: 52.978 - type: mrr_at_1000 value: 53.015 - type: mrr_at_3 value: 49.976 - type: mrr_at_5 value: 51.449999999999996 - type: ndcg_at_1 value: 41.059 - type: ndcg_at_10 value: 52.608 - type: ndcg_at_100 value: 57.965 - type: ndcg_at_1000 value: 59.775999999999996 - type: ndcg_at_3 value: 48.473 - type: ndcg_at_5 value: 50.407999999999994 - type: precision_at_1 value: 41.059 - type: precision_at_10 value: 9.943 - type: precision_at_100 value: 1.6070000000000002 - type: precision_at_1000 value: 0.20500000000000002 - type: precision_at_3 value: 23.413999999999998 - type: precision_at_5 value: 16.481 - type: recall_at_1 value: 33.739999999999995 - type: recall_at_10 value: 63.888999999999996 - type: recall_at_100 value: 85.832 - type: recall_at_1000 value: 97.475 - type: recall_at_3 value: 51.953 - type: recall_at_5 value: 57.498000000000005 - type: map_at_1 value: 31.169999999999998 - type: map_at_10 value: 41.455 - type: map_at_100 value: 42.716 - type: map_at_1000 value: 42.847 - type: map_at_3 value: 38.568999999999996 - type: map_at_5 value: 40.099000000000004 - type: mrr_at_1 value: 39.427 - type: mrr_at_10 value: 47.818 - type: mrr_at_100 value: 48.519 - type: mrr_at_1000 value: 48.558 - type: mrr_at_3 value: 45.86 - 
type: mrr_at_5 value: 46.936 - type: ndcg_at_1 value: 39.427 - type: ndcg_at_10 value: 47.181 - type: ndcg_at_100 value: 51.737 - type: ndcg_at_1000 value: 53.74 - type: ndcg_at_3 value: 43.261 - type: ndcg_at_5 value: 44.891 - type: precision_at_1 value: 39.427 - type: precision_at_10 value: 8.847 - type: precision_at_100 value: 1.425 - type: precision_at_1000 value: 0.189 - type: precision_at_3 value: 20.785999999999998 - type: precision_at_5 value: 14.560999999999998 - type: recall_at_1 value: 31.169999999999998 - type: recall_at_10 value: 56.971000000000004 - type: recall_at_100 value: 76.31400000000001 - type: recall_at_1000 value: 88.93900000000001 - type: recall_at_3 value: 45.208 - type: recall_at_5 value: 49.923 - type: map_at_1 value: 39.682 - type: map_at_10 value: 52.766000000000005 - type: map_at_100 value: 53.84100000000001 - type: map_at_1000 value: 53.898 - type: map_at_3 value: 49.291000000000004 - type: map_at_5 value: 51.365 - type: mrr_at_1 value: 45.266 - type: mrr_at_10 value: 56.093 - type: mrr_at_100 value: 56.763 - type: mrr_at_1000 value: 56.793000000000006 - type: mrr_at_3 value: 53.668000000000006 - type: mrr_at_5 value: 55.1 - type: ndcg_at_1 value: 45.266 - type: ndcg_at_10 value: 58.836 - type: ndcg_at_100 value: 62.863 - type: ndcg_at_1000 value: 63.912 - type: ndcg_at_3 value: 53.19199999999999 - type: ndcg_at_5 value: 56.125 - type: precision_at_1 value: 45.266 - type: precision_at_10 value: 9.492 - type: precision_at_100 value: 1.236 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 23.762 - type: precision_at_5 value: 16.414 - type: recall_at_1 value: 39.682 - type: recall_at_10 value: 73.233 - type: recall_at_100 value: 90.335 - type: recall_at_1000 value: 97.452 - type: recall_at_3 value: 58.562000000000005 - type: recall_at_5 value: 65.569 - type: map_at_1 value: 26.743 - type: map_at_10 value: 34.016000000000005 - type: map_at_100 value: 35.028999999999996 - type: map_at_1000 value: 35.113 - 
type: map_at_3 value: 31.763 - type: map_at_5 value: 33.013999999999996 - type: mrr_at_1 value: 28.927000000000003 - type: mrr_at_10 value: 36.32 - type: mrr_at_100 value: 37.221 - type: mrr_at_1000 value: 37.281 - type: mrr_at_3 value: 34.105000000000004 - type: mrr_at_5 value: 35.371 - type: ndcg_at_1 value: 28.927000000000003 - type: ndcg_at_10 value: 38.474000000000004 - type: ndcg_at_100 value: 43.580000000000005 - type: ndcg_at_1000 value: 45.64 - type: ndcg_at_3 value: 34.035 - type: ndcg_at_5 value: 36.186 - type: precision_at_1 value: 28.927000000000003 - type: precision_at_10 value: 5.74 - type: precision_at_100 value: 0.8710000000000001 - type: precision_at_1000 value: 0.108 - type: precision_at_3 value: 14.124 - type: precision_at_5 value: 9.74 - type: recall_at_1 value: 26.743 - type: recall_at_10 value: 49.955 - type: recall_at_100 value: 73.904 - type: recall_at_1000 value: 89.133 - type: recall_at_3 value: 38.072 - type: recall_at_5 value: 43.266 - type: map_at_1 value: 16.928 - type: map_at_10 value: 23.549 - type: map_at_100 value: 24.887 - type: map_at_1000 value: 25.018 - type: map_at_3 value: 21.002000000000002 - type: map_at_5 value: 22.256 - type: mrr_at_1 value: 21.02 - type: mrr_at_10 value: 27.898 - type: mrr_at_100 value: 29.018 - type: mrr_at_1000 value: 29.099999999999998 - type: mrr_at_3 value: 25.456 - type: mrr_at_5 value: 26.625 - type: ndcg_at_1 value: 21.02 - type: ndcg_at_10 value: 28.277 - type: ndcg_at_100 value: 34.54 - type: ndcg_at_1000 value: 37.719 - type: ndcg_at_3 value: 23.707 - type: ndcg_at_5 value: 25.482 - type: precision_at_1 value: 21.02 - type: precision_at_10 value: 5.361 - type: precision_at_100 value: 0.9809999999999999 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 11.401 - type: precision_at_5 value: 8.209 - type: recall_at_1 value: 16.928 - type: recall_at_10 value: 38.601 - type: recall_at_100 value: 65.759 - type: recall_at_1000 value: 88.543 - type: recall_at_3 value: 
25.556 - type: recall_at_5 value: 30.447000000000003 - type: map_at_1 value: 28.549000000000003 - type: map_at_10 value: 38.426 - type: map_at_100 value: 39.845000000000006 - type: map_at_1000 value: 39.956 - type: map_at_3 value: 35.372 - type: map_at_5 value: 37.204 - type: mrr_at_1 value: 35.034 - type: mrr_at_10 value: 44.041000000000004 - type: mrr_at_100 value: 44.95 - type: mrr_at_1000 value: 44.997 - type: mrr_at_3 value: 41.498000000000005 - type: mrr_at_5 value: 43.077 - type: ndcg_at_1 value: 35.034 - type: ndcg_at_10 value: 44.218 - type: ndcg_at_100 value: 49.958000000000006 - type: ndcg_at_1000 value: 52.019000000000005 - type: ndcg_at_3 value: 39.34 - type: ndcg_at_5 value: 41.892 - type: precision_at_1 value: 35.034 - type: precision_at_10 value: 7.911 - type: precision_at_100 value: 1.26 - type: precision_at_1000 value: 0.16 - type: precision_at_3 value: 18.511 - type: precision_at_5 value: 13.205 - type: recall_at_1 value: 28.549000000000003 - type: recall_at_10 value: 56.035999999999994 - type: recall_at_100 value: 79.701 - type: recall_at_1000 value: 93.149 - type: recall_at_3 value: 42.275 - type: recall_at_5 value: 49.097 - type: map_at_1 value: 29.391000000000002 - type: map_at_10 value: 39.48 - type: map_at_100 value: 40.727000000000004 - type: map_at_1000 value: 40.835 - type: map_at_3 value: 36.234 - type: map_at_5 value: 37.877 - type: mrr_at_1 value: 35.959 - type: mrr_at_10 value: 44.726 - type: mrr_at_100 value: 45.531 - type: mrr_at_1000 value: 45.582 - type: mrr_at_3 value: 42.047000000000004 - type: mrr_at_5 value: 43.611 - type: ndcg_at_1 value: 35.959 - type: ndcg_at_10 value: 45.303 - type: ndcg_at_100 value: 50.683 - type: ndcg_at_1000 value: 52.818 - type: ndcg_at_3 value: 39.987 - type: ndcg_at_5 value: 42.243 - type: precision_at_1 value: 35.959 - type: precision_at_10 value: 8.241999999999999 - type: precision_at_100 value: 1.274 - type: precision_at_1000 value: 0.163 - type: precision_at_3 value: 18.836 - type: 
precision_at_5 value: 13.196 - type: recall_at_1 value: 29.391000000000002 - type: recall_at_10 value: 57.364000000000004 - type: recall_at_100 value: 80.683 - type: recall_at_1000 value: 94.918 - type: recall_at_3 value: 42.263 - type: recall_at_5 value: 48.634 - type: map_at_1 value: 26.791749999999997 - type: map_at_10 value: 35.75541666666667 - type: map_at_100 value: 37.00791666666667 - type: map_at_1000 value: 37.12408333333333 - type: map_at_3 value: 33.02966666666667 - type: map_at_5 value: 34.56866666666667 - type: mrr_at_1 value: 31.744333333333337 - type: mrr_at_10 value: 39.9925 - type: mrr_at_100 value: 40.86458333333333 - type: mrr_at_1000 value: 40.92175000000001 - type: mrr_at_3 value: 37.68183333333334 - type: mrr_at_5 value: 39.028499999999994 - type: ndcg_at_1 value: 31.744333333333337 - type: ndcg_at_10 value: 40.95008333333334 - type: ndcg_at_100 value: 46.25966666666667 - type: ndcg_at_1000 value: 48.535333333333334 - type: ndcg_at_3 value: 36.43333333333333 - type: ndcg_at_5 value: 38.602333333333334 - type: precision_at_1 value: 31.744333333333337 - type: precision_at_10 value: 7.135166666666666 - type: precision_at_100 value: 1.1535833333333334 - type: precision_at_1000 value: 0.15391666666666665 - type: precision_at_3 value: 16.713 - type: precision_at_5 value: 11.828416666666666 - type: recall_at_1 value: 26.791749999999997 - type: recall_at_10 value: 51.98625 - type: recall_at_100 value: 75.30358333333334 - type: recall_at_1000 value: 91.05433333333333 - type: recall_at_3 value: 39.39583333333333 - type: recall_at_5 value: 45.05925 - type: map_at_1 value: 22.219 - type: map_at_10 value: 29.162 - type: map_at_100 value: 30.049999999999997 - type: map_at_1000 value: 30.144 - type: map_at_3 value: 27.204 - type: map_at_5 value: 28.351 - type: mrr_at_1 value: 25.153 - type: mrr_at_10 value: 31.814999999999998 - type: mrr_at_100 value: 32.573 - type: mrr_at_1000 value: 32.645 - type: mrr_at_3 value: 29.934 - type: mrr_at_5 value: 30.946 - 
type: ndcg_at_1 value: 25.153 - type: ndcg_at_10 value: 33.099000000000004 - type: ndcg_at_100 value: 37.768 - type: ndcg_at_1000 value: 40.331 - type: ndcg_at_3 value: 29.473 - type: ndcg_at_5 value: 31.206 - type: precision_at_1 value: 25.153 - type: precision_at_10 value: 5.183999999999999 - type: precision_at_100 value: 0.8170000000000001 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 12.831999999999999 - type: precision_at_5 value: 8.895999999999999 - type: recall_at_1 value: 22.219 - type: recall_at_10 value: 42.637 - type: recall_at_100 value: 64.704 - type: recall_at_1000 value: 83.963 - type: recall_at_3 value: 32.444 - type: recall_at_5 value: 36.802 - type: map_at_1 value: 17.427999999999997 - type: map_at_10 value: 24.029 - type: map_at_100 value: 25.119999999999997 - type: map_at_1000 value: 25.257 - type: map_at_3 value: 22.016 - type: map_at_5 value: 23.143 - type: mrr_at_1 value: 21.129 - type: mrr_at_10 value: 27.750000000000004 - type: mrr_at_100 value: 28.666999999999998 - type: mrr_at_1000 value: 28.754999999999995 - type: mrr_at_3 value: 25.849 - type: mrr_at_5 value: 26.939999999999998 - type: ndcg_at_1 value: 21.129 - type: ndcg_at_10 value: 28.203 - type: ndcg_at_100 value: 33.44 - type: ndcg_at_1000 value: 36.61 - type: ndcg_at_3 value: 24.648999999999997 - type: ndcg_at_5 value: 26.316 - type: precision_at_1 value: 21.129 - type: precision_at_10 value: 5.055 - type: precision_at_100 value: 0.909 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 11.666 - type: precision_at_5 value: 8.3 - type: recall_at_1 value: 17.427999999999997 - type: recall_at_10 value: 36.923 - type: recall_at_100 value: 60.606 - type: recall_at_1000 value: 83.19 - type: recall_at_3 value: 26.845000000000002 - type: recall_at_5 value: 31.247000000000003 - type: map_at_1 value: 26.457000000000004 - type: map_at_10 value: 35.228 - type: map_at_100 value: 36.475 - type: map_at_1000 value: 36.585 - type: 
map_at_3 value: 32.444 - type: map_at_5 value: 34.046 - type: mrr_at_1 value: 30.784 - type: mrr_at_10 value: 39.133 - type: mrr_at_100 value: 40.11 - type: mrr_at_1000 value: 40.169 - type: mrr_at_3 value: 36.692 - type: mrr_at_5 value: 38.17 - type: ndcg_at_1 value: 30.784 - type: ndcg_at_10 value: 40.358 - type: ndcg_at_100 value: 46.119 - type: ndcg_at_1000 value: 48.428 - type: ndcg_at_3 value: 35.504000000000005 - type: ndcg_at_5 value: 37.864 - type: precision_at_1 value: 30.784 - type: precision_at_10 value: 6.800000000000001 - type: precision_at_100 value: 1.083 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 15.920000000000002 - type: precision_at_5 value: 11.437 - type: recall_at_1 value: 26.457000000000004 - type: recall_at_10 value: 51.845 - type: recall_at_100 value: 77.046 - type: recall_at_1000 value: 92.892 - type: recall_at_3 value: 38.89 - type: recall_at_5 value: 44.688 - type: map_at_1 value: 29.378999999999998 - type: map_at_10 value: 37.373 - type: map_at_100 value: 39.107 - type: map_at_1000 value: 39.317 - type: map_at_3 value: 34.563 - type: map_at_5 value: 36.173 - type: mrr_at_1 value: 35.178 - type: mrr_at_10 value: 42.44 - type: mrr_at_100 value: 43.434 - type: mrr_at_1000 value: 43.482 - type: mrr_at_3 value: 39.987 - type: mrr_at_5 value: 41.370000000000005 - type: ndcg_at_1 value: 35.178 - type: ndcg_at_10 value: 42.82 - type: ndcg_at_100 value: 48.935 - type: ndcg_at_1000 value: 51.28 - type: ndcg_at_3 value: 38.562999999999995 - type: ndcg_at_5 value: 40.687 - type: precision_at_1 value: 35.178 - type: precision_at_10 value: 7.945 - type: precision_at_100 value: 1.524 - type: precision_at_1000 value: 0.242 - type: precision_at_3 value: 17.721 - type: precision_at_5 value: 12.925 - type: recall_at_1 value: 29.378999999999998 - type: recall_at_10 value: 52.141999999999996 - type: recall_at_100 value: 79.49000000000001 - type: recall_at_1000 value: 93.782 - type: recall_at_3 value: 39.579 - type: 
recall_at_5 value: 45.462 - type: map_at_1 value: 19.814999999999998 - type: map_at_10 value: 27.383999999999997 - type: map_at_100 value: 28.483999999999998 - type: map_at_1000 value: 28.585 - type: map_at_3 value: 24.807000000000002 - type: map_at_5 value: 26.485999999999997 - type: mrr_at_1 value: 21.996 - type: mrr_at_10 value: 29.584 - type: mrr_at_100 value: 30.611 - type: mrr_at_1000 value: 30.684 - type: mrr_at_3 value: 27.11 - type: mrr_at_5 value: 28.746 - type: ndcg_at_1 value: 21.996 - type: ndcg_at_10 value: 32.024 - type: ndcg_at_100 value: 37.528 - type: ndcg_at_1000 value: 40.150999999999996 - type: ndcg_at_3 value: 27.016000000000002 - type: ndcg_at_5 value: 29.927999999999997 - type: precision_at_1 value: 21.996 - type: precision_at_10 value: 5.102 - type: precision_at_100 value: 0.856 - type: precision_at_1000 value: 0.117 - type: precision_at_3 value: 11.583 - type: precision_at_5 value: 8.577 - type: recall_at_1 value: 19.814999999999998 - type: recall_at_10 value: 44.239 - type: recall_at_100 value: 69.269 - type: recall_at_1000 value: 89.216 - type: recall_at_3 value: 31.102999999999998 - type: recall_at_5 value: 38.078 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 11.349 - type: map_at_10 value: 19.436 - type: map_at_100 value: 21.282999999999998 - type: map_at_1000 value: 21.479 - type: map_at_3 value: 15.841 - type: map_at_5 value: 17.558 - type: mrr_at_1 value: 25.863000000000003 - type: mrr_at_10 value: 37.218 - type: mrr_at_100 value: 38.198 - type: mrr_at_1000 value: 38.236 - type: mrr_at_3 value: 33.409 - type: mrr_at_5 value: 35.602000000000004 - type: ndcg_at_1 value: 25.863000000000003 - type: ndcg_at_10 value: 27.953 - type: ndcg_at_100 value: 35.327 - type: ndcg_at_1000 value: 38.708999999999996 - type: ndcg_at_3 value: 21.985 - type: ndcg_at_5 value: 23.957 - type: precision_at_1 value: 25.863000000000003 - type: 
precision_at_10 value: 8.99 - type: precision_at_100 value: 1.6889999999999998 - type: precision_at_1000 value: 0.232 - type: precision_at_3 value: 16.308 - type: precision_at_5 value: 12.912 - type: recall_at_1 value: 11.349 - type: recall_at_10 value: 34.581 - type: recall_at_100 value: 60.178 - type: recall_at_1000 value: 78.88199999999999 - type: recall_at_3 value: 20.041999999999998 - type: recall_at_5 value: 25.458 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 7.893 - type: map_at_10 value: 15.457 - type: map_at_100 value: 20.905 - type: map_at_1000 value: 22.116 - type: map_at_3 value: 11.593 - type: map_at_5 value: 13.134 - type: mrr_at_1 value: 57.49999999999999 - type: mrr_at_10 value: 65.467 - type: mrr_at_100 value: 66.022 - type: mrr_at_1000 value: 66.039 - type: mrr_at_3 value: 63.458000000000006 - type: mrr_at_5 value: 64.546 - type: ndcg_at_1 value: 45.875 - type: ndcg_at_10 value: 33.344 - type: ndcg_at_100 value: 36.849 - type: ndcg_at_1000 value: 44.03 - type: ndcg_at_3 value: 37.504 - type: ndcg_at_5 value: 34.892 - type: precision_at_1 value: 57.49999999999999 - type: precision_at_10 value: 25.95 - type: precision_at_100 value: 7.89 - type: precision_at_1000 value: 1.669 - type: precision_at_3 value: 40.333000000000006 - type: precision_at_5 value: 33.050000000000004 - type: recall_at_1 value: 7.893 - type: recall_at_10 value: 20.724999999999998 - type: recall_at_100 value: 42.516 - type: recall_at_1000 value: 65.822 - type: recall_at_3 value: 12.615000000000002 - type: recall_at_5 value: 15.482000000000001 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 51.760000000000005 - type: f1 value: 45.51690565701713 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: 
test revision: None metrics: - type: map_at_1 value: 53.882 - type: map_at_10 value: 65.902 - type: map_at_100 value: 66.33 - type: map_at_1000 value: 66.348 - type: map_at_3 value: 63.75999999999999 - type: map_at_5 value: 65.181 - type: mrr_at_1 value: 58.041 - type: mrr_at_10 value: 70.133 - type: mrr_at_100 value: 70.463 - type: mrr_at_1000 value: 70.47 - type: mrr_at_3 value: 68.164 - type: mrr_at_5 value: 69.465 - type: ndcg_at_1 value: 58.041 - type: ndcg_at_10 value: 71.84700000000001 - type: ndcg_at_100 value: 73.699 - type: ndcg_at_1000 value: 74.06700000000001 - type: ndcg_at_3 value: 67.855 - type: ndcg_at_5 value: 70.203 - type: precision_at_1 value: 58.041 - type: precision_at_10 value: 9.427000000000001 - type: precision_at_100 value: 1.049 - type: precision_at_1000 value: 0.11 - type: precision_at_3 value: 27.278000000000002 - type: precision_at_5 value: 17.693 - type: recall_at_1 value: 53.882 - type: recall_at_10 value: 85.99 - type: recall_at_100 value: 94.09100000000001 - type: recall_at_1000 value: 96.612 - type: recall_at_3 value: 75.25 - type: recall_at_5 value: 80.997 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 19.165 - type: map_at_10 value: 31.845000000000002 - type: map_at_100 value: 33.678999999999995 - type: map_at_1000 value: 33.878 - type: map_at_3 value: 27.881 - type: map_at_5 value: 30.049999999999997 - type: mrr_at_1 value: 38.272 - type: mrr_at_10 value: 47.04 - type: mrr_at_100 value: 47.923 - type: mrr_at_1000 value: 47.973 - type: mrr_at_3 value: 44.985 - type: mrr_at_5 value: 46.150000000000006 - type: ndcg_at_1 value: 38.272 - type: ndcg_at_10 value: 39.177 - type: ndcg_at_100 value: 45.995000000000005 - type: ndcg_at_1000 value: 49.312 - type: ndcg_at_3 value: 36.135 - type: ndcg_at_5 value: 36.936 - type: precision_at_1 value: 38.272 - type: precision_at_10 value: 10.926 - type: precision_at_100 value: 1.809 - type: 
precision_at_1000 value: 0.23700000000000002 - type: precision_at_3 value: 24.331 - type: precision_at_5 value: 17.747 - type: recall_at_1 value: 19.165 - type: recall_at_10 value: 45.103 - type: recall_at_100 value: 70.295 - type: recall_at_1000 value: 90.592 - type: recall_at_3 value: 32.832 - type: recall_at_5 value: 37.905 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 32.397 - type: map_at_10 value: 44.83 - type: map_at_100 value: 45.716 - type: map_at_1000 value: 45.797 - type: map_at_3 value: 41.955999999999996 - type: map_at_5 value: 43.736999999999995 - type: mrr_at_1 value: 64.794 - type: mrr_at_10 value: 71.866 - type: mrr_at_100 value: 72.22 - type: mrr_at_1000 value: 72.238 - type: mrr_at_3 value: 70.416 - type: mrr_at_5 value: 71.304 - type: ndcg_at_1 value: 64.794 - type: ndcg_at_10 value: 54.186 - type: ndcg_at_100 value: 57.623000000000005 - type: ndcg_at_1000 value: 59.302 - type: ndcg_at_3 value: 49.703 - type: ndcg_at_5 value: 52.154999999999994 - type: precision_at_1 value: 64.794 - type: precision_at_10 value: 11.219 - type: precision_at_100 value: 1.394 - type: precision_at_1000 value: 0.16199999999999998 - type: precision_at_3 value: 30.767 - type: precision_at_5 value: 20.397000000000002 - type: recall_at_1 value: 32.397 - type: recall_at_10 value: 56.096999999999994 - type: recall_at_100 value: 69.696 - type: recall_at_1000 value: 80.88499999999999 - type: recall_at_3 value: 46.150999999999996 - type: recall_at_5 value: 50.993 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 81.1744 - type: ap value: 75.44973697032414 - type: f1 value: 81.09901117955782 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 
19.519000000000002 - type: map_at_10 value: 31.025000000000002 - type: map_at_100 value: 32.275999999999996 - type: map_at_1000 value: 32.329 - type: map_at_3 value: 27.132 - type: map_at_5 value: 29.415999999999997 - type: mrr_at_1 value: 20.115 - type: mrr_at_10 value: 31.569000000000003 - type: mrr_at_100 value: 32.768 - type: mrr_at_1000 value: 32.816 - type: mrr_at_3 value: 27.748 - type: mrr_at_5 value: 29.956 - type: ndcg_at_1 value: 20.115 - type: ndcg_at_10 value: 37.756 - type: ndcg_at_100 value: 43.858000000000004 - type: ndcg_at_1000 value: 45.199 - type: ndcg_at_3 value: 29.818 - type: ndcg_at_5 value: 33.875 - type: precision_at_1 value: 20.115 - type: precision_at_10 value: 6.122 - type: precision_at_100 value: 0.919 - type: precision_at_1000 value: 0.10300000000000001 - type: precision_at_3 value: 12.794 - type: precision_at_5 value: 9.731 - type: recall_at_1 value: 19.519000000000002 - type: recall_at_10 value: 58.62500000000001 - type: recall_at_100 value: 86.99 - type: recall_at_1000 value: 97.268 - type: recall_at_3 value: 37.002 - type: recall_at_5 value: 46.778 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.71865025079799 - type: f1 value: 93.38906173610519 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 70.2576379388965 - type: f1 value: 49.20405830249464 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.48486886348351 - type: f1 value: 64.92199176095157 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: 
mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.59246805648958 - type: f1 value: 72.1222026389164 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 30.887642595096825 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 28.3764418784054 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 31.81544126336991 - type: mrr value: 32.82666576268031 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.185 - type: map_at_10 value: 11.158 - type: map_at_100 value: 14.041 - type: map_at_1000 value: 15.360999999999999 - type: map_at_3 value: 8.417 - type: map_at_5 value: 9.378 - type: mrr_at_1 value: 44.582 - type: mrr_at_10 value: 53.083999999999996 - type: mrr_at_100 value: 53.787 - type: mrr_at_1000 value: 53.824000000000005 - type: mrr_at_3 value: 51.187000000000005 - type: mrr_at_5 value: 52.379 - type: ndcg_at_1 value: 42.57 - type: ndcg_at_10 value: 31.593 - type: ndcg_at_100 value: 29.093999999999998 - type: ndcg_at_1000 value: 37.909 - type: ndcg_at_3 value: 37.083 - type: ndcg_at_5 value: 34.397 - type: precision_at_1 value: 43.963 - type: precision_at_10 value: 23.498 - type: precision_at_100 value: 7.6160000000000005 - type: precision_at_1000 value: 2.032 - type: precision_at_3 value: 34.572 - type: precision_at_5 value: 29.412 - type: recall_at_1 value: 5.185 - type: recall_at_10 value: 15.234 - type: recall_at_100 value: 29.49 - 
type: recall_at_1000 value: 62.273999999999994 - type: recall_at_3 value: 9.55 - type: recall_at_5 value: 11.103 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 23.803 - type: map_at_10 value: 38.183 - type: map_at_100 value: 39.421 - type: map_at_1000 value: 39.464 - type: map_at_3 value: 33.835 - type: map_at_5 value: 36.327 - type: mrr_at_1 value: 26.68 - type: mrr_at_10 value: 40.439 - type: mrr_at_100 value: 41.415 - type: mrr_at_1000 value: 41.443999999999996 - type: mrr_at_3 value: 36.612 - type: mrr_at_5 value: 38.877 - type: ndcg_at_1 value: 26.68 - type: ndcg_at_10 value: 45.882 - type: ndcg_at_100 value: 51.227999999999994 - type: ndcg_at_1000 value: 52.207 - type: ndcg_at_3 value: 37.511 - type: ndcg_at_5 value: 41.749 - type: precision_at_1 value: 26.68 - type: precision_at_10 value: 7.9750000000000005 - type: precision_at_100 value: 1.0959999999999999 - type: precision_at_1000 value: 0.11900000000000001 - type: precision_at_3 value: 17.449 - type: precision_at_5 value: 12.897 - type: recall_at_1 value: 23.803 - type: recall_at_10 value: 67.152 - type: recall_at_100 value: 90.522 - type: recall_at_1000 value: 97.743 - type: recall_at_3 value: 45.338 - type: recall_at_5 value: 55.106 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 70.473 - type: map_at_10 value: 84.452 - type: map_at_100 value: 85.101 - type: map_at_1000 value: 85.115 - type: map_at_3 value: 81.435 - type: map_at_5 value: 83.338 - type: mrr_at_1 value: 81.19 - type: mrr_at_10 value: 87.324 - type: mrr_at_100 value: 87.434 - type: mrr_at_1000 value: 87.435 - type: mrr_at_3 value: 86.31 - type: mrr_at_5 value: 87.002 - type: ndcg_at_1 value: 81.21000000000001 - type: ndcg_at_10 value: 88.19 - type: ndcg_at_100 value: 89.44 - type: ndcg_at_1000 value: 89.526 - type: ndcg_at_3 value: 85.237 - type: ndcg_at_5 
value: 86.892 - type: precision_at_1 value: 81.21000000000001 - type: precision_at_10 value: 13.417000000000002 - type: precision_at_100 value: 1.537 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.31 - type: precision_at_5 value: 24.59 - type: recall_at_1 value: 70.473 - type: recall_at_10 value: 95.367 - type: recall_at_100 value: 99.616 - type: recall_at_1000 value: 99.996 - type: recall_at_3 value: 86.936 - type: recall_at_5 value: 91.557 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 59.25776525253911 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 63.22135271663078 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.003 - type: map_at_10 value: 10.062999999999999 - type: map_at_100 value: 11.854000000000001 - type: map_at_1000 value: 12.145999999999999 - type: map_at_3 value: 7.242 - type: map_at_5 value: 8.652999999999999 - type: mrr_at_1 value: 19.7 - type: mrr_at_10 value: 29.721999999999998 - type: mrr_at_100 value: 30.867 - type: mrr_at_1000 value: 30.944 - type: mrr_at_3 value: 26.683 - type: mrr_at_5 value: 28.498 - type: ndcg_at_1 value: 19.7 - type: ndcg_at_10 value: 17.095 - type: ndcg_at_100 value: 24.375 - type: ndcg_at_1000 value: 29.831000000000003 - type: ndcg_at_3 value: 16.305 - type: ndcg_at_5 value: 14.291 - type: precision_at_1 value: 19.7 - type: precision_at_10 value: 8.799999999999999 - type: precision_at_100 value: 1.9349999999999998 - type: precision_at_1000 value: 0.32399999999999995 - type: precision_at_3 value: 15.2 - type: precision_at_5 value: 12.540000000000001 - type: recall_at_1 value: 4.003 - type: 
recall_at_10 value: 17.877000000000002 - type: recall_at_100 value: 39.217 - type: recall_at_1000 value: 65.862 - type: recall_at_3 value: 9.242 - type: recall_at_5 value: 12.715000000000002 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_spearman value: 80.25888668589654 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_spearman value: 77.02037527837669 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_spearman value: 86.58432681008449 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_spearman value: 81.31697756099051 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_spearman value: 88.18867599667057 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_spearman value: 84.87853941747623 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 89.46479925383916 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_spearman value: 66.45272113649146 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: 
b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_spearman value: 86.43357313527851 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 78.82761687254882 - type: mrr value: 93.46223674655047 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 44.583 - type: map_at_10 value: 52.978 - type: map_at_100 value: 53.803 - type: map_at_1000 value: 53.839999999999996 - type: map_at_3 value: 50.03300000000001 - type: map_at_5 value: 51.939 - type: mrr_at_1 value: 47.0 - type: mrr_at_10 value: 54.730000000000004 - type: mrr_at_100 value: 55.31399999999999 - type: mrr_at_1000 value: 55.346 - type: mrr_at_3 value: 52.0 - type: mrr_at_5 value: 53.783 - type: ndcg_at_1 value: 47.0 - type: ndcg_at_10 value: 57.82899999999999 - type: ndcg_at_100 value: 61.49400000000001 - type: ndcg_at_1000 value: 62.676 - type: ndcg_at_3 value: 52.373000000000005 - type: ndcg_at_5 value: 55.481 - type: precision_at_1 value: 47.0 - type: precision_at_10 value: 7.867 - type: precision_at_100 value: 0.997 - type: precision_at_1000 value: 0.11 - type: precision_at_3 value: 20.556 - type: precision_at_5 value: 14.066999999999998 - type: recall_at_1 value: 44.583 - type: recall_at_10 value: 71.172 - type: recall_at_100 value: 87.7 - type: recall_at_1000 value: 97.333 - type: recall_at_3 value: 56.511 - type: recall_at_5 value: 64.206 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.66237623762376 - type: cos_sim_ap value: 90.35465126226322 - type: cos_sim_f1 value: 82.44575936883628 - type: cos_sim_precision value: 81.32295719844358 - type: cos_sim_recall 
value: 83.6 - type: dot_accuracy value: 99.66237623762376 - type: dot_ap value: 90.35464287920453 - type: dot_f1 value: 82.44575936883628 - type: dot_precision value: 81.32295719844358 - type: dot_recall value: 83.6 - type: euclidean_accuracy value: 99.66237623762376 - type: euclidean_ap value: 90.3546512622632 - type: euclidean_f1 value: 82.44575936883628 - type: euclidean_precision value: 81.32295719844358 - type: euclidean_recall value: 83.6 - type: manhattan_accuracy value: 99.65940594059406 - type: manhattan_ap value: 90.29220174849843 - type: manhattan_f1 value: 82.4987605354487 - type: manhattan_precision value: 81.80924287118977 - type: manhattan_recall value: 83.2 - type: max_accuracy value: 99.66237623762376 - type: max_ap value: 90.35465126226322 - type: max_f1 value: 82.4987605354487 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 65.0394225901397 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 35.27954189859326 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 50.99055979974896 - type: mrr value: 51.82745257193787 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.21655465344237 - type: cos_sim_spearman value: 29.853205339630172 - type: dot_pearson value: 30.216540628083564 - type: dot_spearman value: 29.868978894753027 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid 
config: default split: test revision: None metrics: - type: map_at_1 value: 0.2 - type: map_at_10 value: 1.398 - type: map_at_100 value: 7.406 - type: map_at_1000 value: 18.401 - type: map_at_3 value: 0.479 - type: map_at_5 value: 0.772 - type: mrr_at_1 value: 70.0 - type: mrr_at_10 value: 79.25999999999999 - type: mrr_at_100 value: 79.25999999999999 - type: mrr_at_1000 value: 79.25999999999999 - type: mrr_at_3 value: 77.333 - type: mrr_at_5 value: 78.133 - type: ndcg_at_1 value: 63.0 - type: ndcg_at_10 value: 58.548 - type: ndcg_at_100 value: 45.216 - type: ndcg_at_1000 value: 41.149 - type: ndcg_at_3 value: 60.641999999999996 - type: ndcg_at_5 value: 61.135 - type: precision_at_1 value: 70.0 - type: precision_at_10 value: 64.0 - type: precision_at_100 value: 46.92 - type: precision_at_1000 value: 18.642 - type: precision_at_3 value: 64.667 - type: precision_at_5 value: 66.4 - type: recall_at_1 value: 0.2 - type: recall_at_10 value: 1.6729999999999998 - type: recall_at_100 value: 10.856 - type: recall_at_1000 value: 38.964999999999996 - type: recall_at_3 value: 0.504 - type: recall_at_5 value: 0.852 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 1.6629999999999998 - type: map_at_10 value: 8.601 - type: map_at_100 value: 14.354 - type: map_at_1000 value: 15.927 - type: map_at_3 value: 4.1930000000000005 - type: map_at_5 value: 5.655 - type: mrr_at_1 value: 18.367 - type: mrr_at_10 value: 34.466 - type: mrr_at_100 value: 35.235 - type: mrr_at_1000 value: 35.27 - type: mrr_at_3 value: 28.571 - type: mrr_at_5 value: 31.531 - type: ndcg_at_1 value: 14.285999999999998 - type: ndcg_at_10 value: 20.374 - type: ndcg_at_100 value: 33.532000000000004 - type: ndcg_at_1000 value: 45.561 - type: ndcg_at_3 value: 18.442 - type: ndcg_at_5 value: 18.076 - type: precision_at_1 value: 18.367 - type: precision_at_10 value: 20.204 - type: precision_at_100 value: 7.489999999999999 
- type: precision_at_1000 value: 1.5630000000000002 - type: precision_at_3 value: 21.769 - type: precision_at_5 value: 20.408 - type: recall_at_1 value: 1.6629999999999998 - type: recall_at_10 value: 15.549 - type: recall_at_100 value: 47.497 - type: recall_at_1000 value: 84.524 - type: recall_at_3 value: 5.289 - type: recall_at_5 value: 8.035 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 71.8194 - type: ap value: 14.447702451658554 - type: f1 value: 55.13659412856185 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 63.310696095076416 - type: f1 value: 63.360434851097814 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 51.30677907335145 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 86.12386004649221 - type: cos_sim_ap value: 73.99096426215495 - type: cos_sim_f1 value: 68.18416968442834 - type: cos_sim_precision value: 66.86960933536275 - type: cos_sim_recall value: 69.55145118733509 - type: dot_accuracy value: 86.12386004649221 - type: dot_ap value: 73.99096813038672 - type: dot_f1 value: 68.18416968442834 - type: dot_precision value: 66.86960933536275 - type: dot_recall value: 69.55145118733509 - type: euclidean_accuracy value: 86.12386004649221 - type: euclidean_ap value: 73.99095984980165 - type: euclidean_f1 value: 68.18416968442834 
- type: euclidean_precision value: 66.86960933536275 - type: euclidean_recall value: 69.55145118733509 - type: manhattan_accuracy value: 86.09405734040651 - type: manhattan_ap value: 73.96825745608601 - type: manhattan_f1 value: 68.13888179729383 - type: manhattan_precision value: 65.99901088031652 - type: manhattan_recall value: 70.42216358839049 - type: max_accuracy value: 86.12386004649221 - type: max_ap value: 73.99096813038672 - type: max_f1 value: 68.18416968442834 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.99367407924865 - type: cos_sim_ap value: 86.19720829843081 - type: cos_sim_f1 value: 78.39889075384951 - type: cos_sim_precision value: 74.5110278818144 - type: cos_sim_recall value: 82.71481367416075 - type: dot_accuracy value: 88.99367407924865 - type: dot_ap value: 86.19718471454047 - type: dot_f1 value: 78.39889075384951 - type: dot_precision value: 74.5110278818144 - type: dot_recall value: 82.71481367416075 - type: euclidean_accuracy value: 88.99367407924865 - type: euclidean_ap value: 86.1972021422436 - type: euclidean_f1 value: 78.39889075384951 - type: euclidean_precision value: 74.5110278818144 - type: euclidean_recall value: 82.71481367416075 - type: manhattan_accuracy value: 88.95680521597392 - type: manhattan_ap value: 86.16659921351506 - type: manhattan_f1 value: 78.39125971550081 - type: manhattan_precision value: 74.82502799552073 - type: manhattan_recall value: 82.31444410224823 - type: max_accuracy value: 88.99367407924865 - type: max_ap value: 86.19720829843081 - type: max_f1 value: 78.39889075384951 --- # hkunlp/instructor-base We introduce **Instructor**👨‍🏫, an instruction-finetuned text embedding model that can generate text embeddings tailored to any task (e.g., classification, retrieval, clustering, text evaluation, etc.) 
and domains (e.g., science, finance, etc.) ***by simply providing the task instruction, without any finetuning***. Instructor👨‍ achieves sota on 70 diverse embedding tasks! The model is easy to use with **our customized** `sentence-transformer` library. For more details, check out [our paper](https://arxiv.org/abs/2212.09741) and [project page](https://instructor-embedding.github.io/)! **************************** **Updates** **************************** * 01/21: We released a new [checkpoint](https://huggingface.co/hkunlp/instructor-base) trained with hard negatives, which gives better performance. * 12/21: We released our [paper](https://arxiv.org/abs/2212.09741), [code](https://github.com/HKUNLP/instructor-embedding), [checkpoint](https://huggingface.co/hkunlp/instructor-base) and [project page](https://instructor-embedding.github.io/)! Check them out! ## Quick start <hr /> ## Installation ```bash pip install InstructorEmbedding ``` ## Compute your customized embeddings Then you can use the model like this to calculate domain-specific and task-aware embeddings: ```python from InstructorEmbedding import INSTRUCTOR model = INSTRUCTOR('hkunlp/instructor-base') sentence = "3D ActionSLAM: wearable person tracking in multi-floor environments" instruction = "Represent the Science title:" embeddings = model.encode([[instruction,sentence]]) print(embeddings) ``` ## Use cases <hr /> ## Calculate embeddings for your customized texts If you want to calculate customized embeddings for specific sentences, you may follow the unified template to write instructions: &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Represent the `domain` `text_type` for `task_objective`: * `domain` is optional, and it specifies the domain of the text, e.g., science, finance, medicine, etc. 
* `text_type` is required, and it specifies the encoding unit, e.g., sentence, document, paragraph, etc. * `task_objective` is optional, and it specifies the objective of embedding, e.g., retrieve a document, classify the sentence, etc. ## Calculate Sentence similarities You can further use the model to compute similarities between two groups of sentences, with **customized embeddings**. ```python from sklearn.metrics.pairwise import cosine_similarity sentences_a = [['Represent the Science sentence: ','Parton energy loss in QCD matter'], ['Represent the Financial statement: ','The Federal Reserve on Wednesday raised its benchmark interest rate.']] sentences_b = [['Represent the Science sentence: ','The Chiral Phase Transition in Dissipative Dynamics'], ['Represent the Financial statement: ','The funds rose less than 0.5 per cent on Friday']] embeddings_a = model.encode(sentences_a) embeddings_b = model.encode(sentences_b) similarities = cosine_similarity(embeddings_a,embeddings_b) print(similarities) ``` ## Information Retrieval You can also use **customized embeddings** for information retrieval. ```python import numpy as np from sklearn.metrics.pairwise import cosine_similarity query = [['Represent the Wikipedia question for retrieving supporting documents: ','where is the food stored in a yam plant']] corpus = [['Represent the Wikipedia document for retrieval: ','Capitalism has been dominant in the Western world since the end of feudalism, but most feel[who?] that the term "mixed economies" more precisely describes most contemporary economies, due to their containing both private-owned and state-owned enterprises. In capitalism, prices determine the demand-supply scale. 
For example, higher demand for certain goods and services lead to higher prices and lower demand for certain goods lead to lower prices.'], ['Represent the Wikipedia document for retrieval: ',"The disparate impact theory is especially controversial under the Fair Housing Act because the Act regulates many activities relating to housing, insurance, and mortgage loans—and some scholars have argued that the theory's use under the Fair Housing Act, combined with extensions of the Community Reinvestment Act, contributed to rise of sub-prime lending and the crash of the U.S. housing market and ensuing global economic recession"], ['Represent the Wikipedia document for retrieval: ','Disparate impact in United States labor law refers to practices in employment, housing, and other areas that adversely affect one group of people of a protected characteristic more than another, even though rules applied by employers or landlords are formally neutral. Although the protected classes vary by statute, most federal civil rights laws protect based on race, color, religion, national origin, and sex as protected traits, and some laws include disability status and other traits as well.']] query_embeddings = model.encode(query) corpus_embeddings = model.encode(corpus) similarities = cosine_similarity(query_embeddings,corpus_embeddings) retrieved_doc_id = np.argmax(similarities) print(retrieved_doc_id) ``` ## Clustering Use **customized embeddings** for clustering texts in groups. 
```python import sklearn.cluster sentences = [['Represent the Medicine sentence for clustering: ','Dynamical Scalar Degree of Freedom in Horava-Lifshitz Gravity'], ['Represent the Medicine sentence for clustering: ','Comparison of Atmospheric Neutrino Flux Calculations at Low Energies'], ['Represent the Medicine sentence for clustering: ','Fermion Bags in the Massive Gross-Neveu Model'], ['Represent the Medicine sentence for clustering: ',"QCD corrections to Associated t-tbar-H production at the Tevatron"], ['Represent the Medicine sentence for clustering: ','A New Analysis of the R Measurements: Resonance Parameters of the Higher, Vector States of Charmonium']] embeddings = model.encode(sentences) clustering_model = sklearn.cluster.MiniBatchKMeans(n_clusters=2) clustering_model.fit(embeddings) cluster_assignment = clustering_model.labels_ print(cluster_assignment) ```
[ "BIOSSES", "SCIFACT" ]
kvablack/ddpo-alignment
kvablack
text-to-image
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "en", "arxiv:2305.13301", "license:creativeml-openrail-m", "autotrain_compatible", "endpoints_compatible", "diffusers:StableDiffusionPipeline", "region:us" ]
2023-05-26T18:08:43Z
2023-06-07T04:39:38+00:00
19
7
--- language: - en library_name: diffusers license: creativeml-openrail-m pipeline_tag: text-to-image tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image inference: parameters: num_inference_steps: 50 guidance_scale: 5.0 eta: 1.0 widget: - text: a horse playing chess example_title: horse + chess - text: a lion washing dishes example_title: lion + dishes - text: a goat riding a bike example_title: goat + bike --- # ddpo-alignment This model was finetuned from [Stable Diffusion v1-4](https:/CompVis/stable-diffusion-v1-4) using [DDPO](https://arxiv.org/abs/2305.13301) and a reward function that uses [LLaVA](https://llava-vl.github.io/) to measure prompt-image alignment. See [the project website](https://rl-diffusion.github.io/) for more details. The model was finetuned for 200 iterations with a batch size of 256 samples per iteration. During finetuning, we used prompts of the form: "_a(n) \<animal\> \<activity\>_". We selected the animal and activity from the following lists, so try those for the best results. However, we also observed limited generalization to other prompts. Activities: - washing dishes - playing chess - riding a bike Animals: - cat - dog - horse - monkey - rabbit - zebra - spider - bird - sheep - deer - cow - goat - lion - tiger - bear - raccoon - fox - wolf - lizard - beetle - ant - butterfly - fish - shark - whale - dolphin - squirrel - mouse - rat - snake - turtle - frog - chicken - duck - goose - bee - pig - turkey - fly - llama - camel - bat - gorilla - hedgehog - kangaroo
[ "BEAR" ]
guibvieira/topic_modelling_pepe
guibvieira
text-classification
[ "bertopic", "text-classification", "region:us" ]
2023-06-13T12:41:57Z
2023-06-13T12:42:04+00:00
19
0
--- library_name: bertopic pipeline_tag: text-classification tags: - bertopic --- # topic_modelling_pepe This is a [BERTopic](https://github.com/MaartenGr/BERTopic) model. BERTopic is a flexible and modular topic modeling framework that allows for the generation of easily interpretable topics from large datasets. ## Usage To use this model, please install BERTopic: ``` pip install -U bertopic ``` You can use the model as follows: ```python from bertopic import BERTopic topic_model = BERTopic.load("guibvieira/topic_modelling_pepe") topic_model.get_topic_info() ``` ## Topic overview * Number of topics: 229 * Number of training documents: 17770 <details> <summary>Click here for an overview of all topics.</summary> | Topic ID | Topic Keywords | Topic Frequency | Label | |----------|----------------|-----------------|-------| | -1 | activity - mining - trading - pepecoin - investors | 15 | -1_activity_mining_trading_pepecoin | | 0 | bro - pepes - guys - good - man | 4181 | 0_bro_pepes_guys_good | | 1 | whales - - - - | 1289 | 1_whales___ | | 2 | pepecoin - pepecoins - need - lot - stuff | 651 | 2_pepecoin_pepecoins_need_lot | | 3 | deal - exchange - group - website - telegram | 631 | 3_deal_exchange_group_website | | 4 | fake - spaces - people - fuck - shit | 525 | 4_fake_spaces_people_fuck | | 5 | coins - coin - shit - mate - currency | 232 | 5_coins_coin_shit_mate | | 6 | info - audit - safu - team - launch | 229 | 6_info_audit_safu_team | | 7 | time - socials - baby - culture - fun | 196 | 7_time_socials_baby_culture | | 8 | pepecoins - tweet - twitter - pepecoin - tweets | 186 | 8_pepecoins_tweet_twitter_pepecoin | | 9 | ideas - spot - level - idea - support | 181 | 9_ideas_spot_level_idea | | 10 | crypto - world - attention - people - thing | 163 | 10_crypto_world_attention_people | | 11 | project - projects - need - pepechain - frens | 161 | 11_project_projects_need_pepechain | | 12 | buy - sell - selling - weeks - pepepal | 161 | 12_buy_sell_selling_weeks | | 13 
| memes - season - meme - term - face | 159 | 13_memes_season_meme_term | | 14 | meme - coins - coin - season - pepefriends | 157 | 14_meme_coins_coin_season | | 15 | token - tokens - information - eye - sense | 149 | 15_token_tokens_information_eye | | 16 | pump - pumping - dump - wait - shit | 149 | 16_pump_pumping_dump_wait | | 17 | binance - list - listing - shit - assets | 142 | 17_binance_list_listing_shit | | 18 | kucoin - mexc - ama - exchanges - tier | 133 | 18_kucoin_mexc_ama_exchanges | | 19 | tweet - tweets - love - twitter - message | 126 | 19_tweet_tweets_love_twitter | | 20 | bitmart - pair - pepebnb - trading - assets | 123 | 20_bitmart_pair_pepebnb_trading | | 21 | bitcoin - network - protocol - example - year | 118 | 21_bitcoin_network_protocol_example | | 22 | profit - profits - money - gains - hope | 117 | 22_profit_profits_money_gains | | 23 | link - pulse - gem - chain - | 106 | 23_link_pulse_gem_chain | | 24 | submit - address - enter - airdrop - tasks | 97 | 24_submit_address_enter_airdrop | | 25 | bonus - minimum - welcome - hours - mining | 95 | 25_bonus_minimum_welcome_hours | | 26 | moon - cointiger - future - project - campaign | 94 | 26_moon_cointiger_future_project | | 27 | btc - week - market - news - crypto | 89 | 27_btc_week_market_news | | 28 | wallet - thanks - good - hope - working | 88 | 28_wallet_thanks_good_hope | | 29 | link - utc - version - club - billionaire | 88 | 29_link_utc_version_club | | 30 | memetic - pepecoin - meme - pepecoins - need | 88 | 30_memetic_pepecoin_meme_pepecoins | | 31 | musk - rocket - mind - memecoins - months | 87 | 31_musk_rocket_mind_memecoins | | 32 | airdrop - friends - pepefriends - tag - ones | 81 | 32_airdrop_friends_pepefriends_tag | | 33 | place - bot - airdrop - date - tasks | 80 | 33_place_bot_airdrop_date | | 34 | details - click - bot - pancakeswap - contract | 78 | 34_details_click_bot_pancakeswap | | 35 | shiba - shib - inu - hope - mpepe | 77 | 35_shiba_shib_inu_hope | | 36 | 
yesterday - opportunity - trend - cmc - today | 75 | 36_yesterday_opportunity_trend_cmc | | 37 | staking - stake - works - luck - magic | 74 | 37_staking_stake_works_luck | | 38 | nfts - ethereum - game - things - thing | 73 | 38_nfts_ethereum_game_things | | 39 | profit - entry - strategy - help - list | 72 | 39_profit_entry_strategy_help | | 40 | days - cap - whales - market - play | 71 | 40_days_cap_whales_market | | 41 | prize - winners - giveaway - pool - page | 66 | 41_prize_winners_giveaway_pool | | 42 | memecoin - memecoins - utilities - derivative - shitcoin | 64 | 42_memecoin_memecoins_utilities_derivative | | 43 | loss - whale - days - memecoin - token | 63 | 43_loss_whale_days_memecoin | | 44 | price - entry - value - thing - buyers | 60 | 44_price_entry_value_thing | | 45 | usdt - event - guess - reward - help | 60 | 45_usdt_event_guess_reward | | 46 | floki - guess - event - success - shib | 59 | 46_floki_guess_event_success | | 47 | telegram - launch - presale - times - pepega | 59 | 47_telegram_launch_presale_times | | 48 | bag - bags - hand - pepecoins - term | 58 | 48_bag_bags_hand_pepecoins | | 49 | telegram - fairsale - times - launch - presale | 57 | 49_telegram_fairsale_times_launch | | 50 | moon - pepefriends - army - lot - rug | 57 | 50_moon_pepefriends_army_lot | | 51 | day - rewards - holders - ecosystem - swap | 55 | 51_day_rewards_holders_ecosystem | | 52 | investment - investors - term - money - billionaire | 52 | 52_investment_investors_term_money | | 53 | scam - thing - way - lol - copy | 52 | 53_scam_thing_way_lol | | 54 | plan - action - chat - work - mpepe | 52 | 54_plan_action_chat_work | | 55 | mins - event - presale - link - leader | 51 | 55_mins_event_presale_link | | 56 | elon - tweet - musk - increase - tweets | 50 | 56_elon_tweet_musk_increase | | 57 | airdrop - referral - date - distribution - fee | 50 | 57_airdrop_referral_date_distribution | | 58 | friends - pepefriends - benefits - days - sale | 50 | 
58_friends_pepefriends_benefits_days | | 59 | memecoins - cap - market - time - marketcap | 49 | 59_memecoins_cap_market_time | | 60 | frogs - frog - liquidity - rewards - marketing | 49 | 60_frogs_frog_liquidity_rewards | | 61 | tweets - puppy - shill - links - inu | 49 | 61_tweets_puppy_shill_links | | 62 | link - wallet - - - | 49 | 62_link_wallet__ | | 63 | dollars - days - cap - market - space | 48 | 63_dollars_days_cap_market | | 64 | pepeai - version - ways - jump - words | 47 | 64_pepeai_version_ways_jump | | 65 | link - - - - | 47 | 65_link___ | | 66 | problem - plans - millions - term - shib | 47 | 66_problem_plans_millions_term | | 67 | link - utc - presale - - | 47 | 67_link_utc_presale_ | | 68 | taxes - chart - bnb - pepefather - chain | 47 | 68_taxes_chart_bnb_pepefather | | 69 | logo - pepepal - pepecoin - shit - hand | 46 | 69_logo_pepepal_pepecoin_shit | | 70 | moment - pump - tag - green - guess | 46 | 70_moment_pump_tag_green | | 71 | spaces - question - morning - ride - pepes | 46 | 71_spaces_question_morning_ride | | 72 | mins - contact - event - bag - launch | 45 | 72_mins_contact_event_bag | | 73 | investment - link - version - - | 45 | 73_investment_link_version_ | | 74 | marketcap - cap - market - mil - dude | 44 | 74_marketcap_cap_market_mil | | 75 | place - memecoin - market - bull - cap | 44 | 75_place_memecoin_market_bull | | 76 | channels - wallets - ecosystem - experience - cryptocurrency | 42 | 76_channels_wallets_ecosystem_experience | | 77 | hate - groups - exchange - - | 42 | 77_hate_groups_exchange_ | | 78 | events - holder - cap - chart - trade | 42 | 78_events_holder_cap_chart | | 79 | weeks - profits - price - whales - gems | 42 | 79_weeks_profits_price_whales | | 80 | transaction - utilities - influencers - event - holders | 42 | 80_transaction_utilities_influencers_event | | 81 | picture - topic - hate - groups - symbol | 41 | 81_picture_topic_hate_groups | | 82 | frog - frogs - thing - memes - meme | 41 | 
82_frog_frogs_thing_memes | | 83 | community - - - - | 41 | 83_community___ | | 84 | symbol - exchange - - - | 41 | 84_symbol_exchange__ | | 85 | hour - doubt - min - fuck - risk | 41 | 85_hour_doubt_min_fuck | | 86 | chat - group - pepeai - vibes - sort | 41 | 86_chat_group_pepeai_vibes | | 87 | chat - times - launch - liquidity - launching | 41 | 87_chat_times_launch_liquidity | | 88 | presale - bnb - link - leader - play | 41 | 88_presale_bnb_link_leader | | 89 | dump - dip - buying - shit - guys | 41 | 89_dump_dip_buying_shit | | 90 | whale - whales - friend - lol - buy | 40 | 90_whale_whales_friend_lol | | 91 | meme - - - - | 40 | 91_meme___ | | 92 | red - drop - - - | 40 | 92_red_drop__ | | 93 | play - link - - - | 40 | 93_play_link__ | | 94 | taxes - bnb - play - club - tax | 40 | 94_taxes_bnb_play_club | | 95 | whales - tokens - billionaire - love - whale | 40 | 95_whales_tokens_billionaire_love | | 96 | tomorrow - launch - media - pepeai - hours | 39 | 96_tomorrow_launch_media_pepeai | | 97 | link - utc - inu - - | 39 | 97_link_utc_inu_ | | 98 | transaction - fees - tier - trade - distribution | 39 | 98_transaction_fees_tier_trade | | 99 | referral - reward - mpepe - airdrop - distribution | 38 | 99_referral_reward_mpepe_airdrop | | 100 | action - drop - price - profits - gems | 38 | 100_action_drop_price_profits | | 101 | week - position - members - chat - stealth | 38 | 101_week_position_members_chat | | 102 | market - bull - run - today - red | 38 | 102_market_bull_run_today | | 103 | hype - year - version - things - season | 38 | 103_hype_year_version_things | | 104 | pulse - mcap - bridge - dev - liquidity | 37 | 104_pulse_mcap_bridge_dev | | 105 | holder - events - cap - chart - trade | 37 | 105_holder_events_cap_chart | | 106 | deal - exchange - listing - group - utc | 37 | 106_deal_exchange_listing_group | | 107 | game - bet - baby - tier - stake | 37 | 107_game_bet_baby_tier | | 108 | raid - tweets - guy - gem - trust | 37 | 
108_raid_tweets_guy_gem | | 109 | devs - projects - project - metaverse - focus | 37 | 109_devs_projects_project_metaverse | | 110 | track - cmc - marketing - gem - team | 37 | 110_track_cmc_marketing_gem | | 111 | link - - - - | 36 | 111_link___ | | 112 | claim - connect - fees - wallet - user | 36 | 112_claim_connect_fees_wallet | | 113 | mcap - dude - cex - dollars - holders | 36 | 113_mcap_dude_cex_dollars | | 114 | fees - gas - fee - exchanges - sir | 36 | 114_fees_gas_fee_exchanges | | 115 | coinbase - hate - symbol - news - army | 36 | 115_coinbase_hate_symbol_news | | 116 | trade - alot - trading - non - bots | 35 | 116_trade_alot_trading_non | | 117 | gem - lock - zone - growth - liquidity | 35 | 117_gem_lock_zone_growth | | 118 | tax - link - chain - minutes - liquidity | 35 | 118_tax_link_chain_minutes | | 119 | pepeai - network - power - work - utility | 34 | 119_pepeai_network_power_work | | 120 | nft - campaign - ape - today - week | 34 | 120_nft_campaign_ape_today | | 121 | contact - listings - zone - message - list | 34 | 121_contact_listings_zone_message | | 122 | aipepe - address - users - event - trade | 34 | 122_aipepe_address_users_event | | 123 | mins - click - event - presale - link | 34 | 123_mins_click_event_presale | | 124 | updates - share - friends - coin - mania | 34 | 124_updates_share_friends_coin | | 125 | video - youtube - comment - channels - way | 33 | 125_video_youtube_comment_channels | | 126 | sign - coins - campaign - trade - week | 33 | 126_sign_coins_campaign_trade | | 127 | investment - link - games - king - inu | 32 | 127_investment_link_games_king | | 128 | trust - wallet - wallets - currency - lot | 32 | 128_trust_wallet_wallets_currency | | 129 | cointiger - details - break - shiba - experience | 32 | 129_cointiger_details_break_shiba | | 130 | game - case - links - times - fair | 32 | 130_game_case_links_times | | 131 | rekt - hype - tokens - meme - vibes | 32 | 131_rekt_hype_tokens_meme | | 132 | low - community - fee 
- space - gas | 31 | 132_low_community_fee_space | | 133 | blockchain - build - utilities - token - meme | 31 | 133_blockchain_build_utilities_token | | 134 | value - chart - market - data - ways | 31 | 134_value_chart_market_data | | 135 | referral - refer - friends - link - rewards | 31 | 135_referral_refer_friends_link | | 136 | games - features - investors - rewards - game | 30 | 136_games_features_investors_rewards | | 137 | ethereum - volume - liquidity - price - network | 30 | 137_ethereum_volume_liquidity_price | | 138 | position - tech - events - cap - chart | 30 | 138_position_tech_events_cap | | 139 | gas - holders - volume - tax - chart | 30 | 139_gas_holders_volume_tax | | 140 | bridge - algorand - launch - chain - mainnet | 30 | 140_bridge_algorand_launch_chain | | 141 | trend - trust - pepebnb - issue - mcap | 29 | 141_trend_trust_pepebnb_issue | | 142 | refer - claim - referral - airdrop - distribution | 29 | 142_refer_claim_referral_airdrop | | 143 | holders - binance - chain - events - buy | 29 | 143_holders_binance_chain_events | | 144 | win - cointiger - form - tag - chance | 29 | 144_win_cointiger_form_tag | | 145 | inu - projects - times - eyes - trump | 28 | 145_inu_projects_times_eyes | | 146 | taxes - chart - bnb - pepeverse - utc | 28 | 146_taxes_chart_bnb_pepeverse | | 147 | position - mcap - buy - chart - price | 28 | 147_position_mcap_buy_chart | | 148 | period - benefits - sign - win - utc | 28 | 148_period_benefits_sign_win | | 149 | derivative - point - kind - holders - page | 28 | 149_derivative_point_kind_holders | | 150 | channel - list - airdrop - group - distribution | 27 | 150_channel_list_airdrop_group | | 151 | time - socials - culture - fun - wallet | 27 | 151_time_socials_culture_fun | | 152 | build - lock - contract - community - liquidity | 26 | 152_build_lock_contract_community | | 153 | funds - dex - position - card - price | 26 | 153_funds_dex_position_card | | 154 | giveaway - audit - cex - revolution - goal | 26 | 
154_giveaway_audit_cex_revolution | | 155 | spaces - pepecoins - space - know - pepecoin | 26 | 155_spaces_pepecoins_space_know | | 156 | pepedex - frens - focus - weekend - pepemon | 26 | 156_pepedex_frens_focus_weekend | | 157 | moon - partnership - revolution - tokenomics - chat | 26 | 157_moon_partnership_revolution_tokenomics | | 158 | prize - bnb - events - buy - card | 26 | 158_prize_bnb_events_buy | | 159 | search - success - groups - rise - track | 25 | 159_search_success_groups_rise | | 160 | lock - ownership - month - mcap - moon | 25 | 160_lock_ownership_month_mcap | | 161 | mexc - launchpad - app - utc - period | 25 | 161_mexc_launchpad_app_utc | | 162 | gem - gems - project - group - developers | 25 | 162_gem_gems_project_group | | 163 | link - - - - | 25 | 163_link___ | | 164 | pump - minutes - message - announcement - members | 24 | 164_pump_minutes_message_announcement | | 165 | link - utc - - - | 24 | 165_link_utc__ | | 166 | billionaire - mins - event - leader - end | 24 | 166_billionaire_mins_event_leader | | 167 | link - utc - - - | 24 | 167_link_utc__ | | 168 | drop - tasks - place - bonus - wallet | 24 | 168_drop_tasks_place_bonus | | 169 | price - volume - focus - gems - assets | 24 | 169_price_volume_focus_gems | | 170 | pinksale - link - - - | 24 | 170_pinksale_link__ | | 171 | wallet - copy - code - words - app | 24 | 171_wallet_copy_code_words | | 172 | days - fomo - coins - coin - tweet | 24 | 172_days_fomo_coins_coin | | 173 | events - game - rewards - friends - tokenomics | 24 | 173_events_game_rewards_friends | | 174 | pancakeswap - supply - swap - tomorrow - updates | 24 | 174_pancakeswap_supply_swap_tomorrow | | 175 | ama - dextools - launch - memes - marketing | 24 | 175_ama_dextools_launch_memes | | 176 | crypto - channel - use - video - code | 23 | 176_crypto_channel_use_video | | 177 | pepefriends - presale - pepebnb - minutes - hour | 23 | 177_pepefriends_presale_pepebnb_minutes | | 178 | reward - winners - user - airdrop - 
refer | 23 | 178_reward_winners_user_airdrop | | 179 | wojak - turbo - sense - play - person | 23 | 179_wojak_turbo_sense_play | | 180 | change - pepecoin - kind - difference - ecosystem | 23 | 180_change_pepecoin_kind_difference | | 181 | taxes - chart - leader - club - bnb | 23 | 181_taxes_chart_leader_club | | 182 | event - rewards - days - end - entry | 23 | 182_event_rewards_days_end | | 183 | taxes - investment - link - presale - | 22 | 183_taxes_investment_link_presale | | 184 | scam - coins - years - wallets - devs | 22 | 184_scam_coins_years_wallets | | 185 | pinksale - ends - link - fairlaunch - details | 22 | 185_pinksale_ends_link_fairlaunch | | 186 | pepebnb - chain - ama - today - minutes | 22 | 186_pepebnb_chain_ama_today | | 187 | change - update - cat - presale - hours | 21 | 187_change_update_cat_presale | | 188 | dextools - dex - holder - pepepal - gem | 21 | 188_dextools_dex_holder_pepepal | | 189 | tag - pepecoins - account - pepecoin - help | 21 | 189_tag_pepecoins_account_pepecoin | | 190 | frog - answer - king - mean - internet | 21 | 190_frog_answer_king_mean | | 191 | cointiger - scam - trust - morning - wallet | 20 | 191_cointiger_scam_trust_morning | | 192 | king - chain - lot - fun - users | 20 | 192_king_chain_lot_fun | | 193 | launchpad - king - frog - memecoins - character | 20 | 193_launchpad_king_frog_memecoins | | 194 | frog - liquidity - rewards - marketing - tax | 20 | 194_frog_liquidity_rewards_marketing | | 195 | tech - holder - events - cap - chart | 20 | 195_tech_holder_events_cap | | 196 | check - liquidity - inu - people - project | 20 | 196_check_liquidity_inu_people | | 197 | list - problem - bag - spot - month | 19 | 197_list_problem_bag_spot | | 198 | shitcoin - money - mini - year - bear | 19 | 198_shitcoin_money_mini_year | | 199 | devs - product - dev - tech - pepedex | 19 | 199_devs_product_dev_tech | | 200 | taxes - bnb - link - - | 19 | 200_taxes_bnb_link_ | | 201 | gem - year - launch - moon - dev | 19 | 
201_gem_year_launch_moon | | 202 | king - contest - safu - pepes - tier | 19 | 202_king_contest_safu_pepes | | 203 | fees - trading - event - - | 19 | 203_fees_trading_event_ | | 204 | billionaire - link - - - | 19 | 204_billionaire_link__ | | 205 | bitmart - comments - announcement - sign - details | 19 | 205_bitmart_comments_announcement_sign | | 206 | world - ownership - transactions - chance - communities | 18 | 206_world_ownership_transactions_chance | | 207 | trade - trading - fee - fees - volume | 18 | 207_trade_trading_fee_fees | | 208 | trend - kek - card - game - shill | 18 | 208_trend_kek_card_game | | 209 | alert - link - - - | 17 | 209_alert_link__ | | 210 | way - token - doubt - tomorrow - benefits | 17 | 210_way_token_doubt_tomorrow | | 211 | raid - word - night - spaces - history | 17 | 211_raid_word_night_spaces | | 212 | change - update - volume - hours - btc | 17 | 212_change_update_volume_hours | | 213 | alert - link - wallet - - | 17 | 213_alert_link_wallet_ | | 214 | guys - coins - website - address - logo | 17 | 214_guys_coins_website_address | | 215 | push - media - projects - polygon - turbo | 17 | 215_push_media_projects_polygon | | 216 | sir - swap - order - assets - admin | 17 | 216_sir_swap_order_assets | | 217 | plans - marketing - project - token - inu | 17 | 217_plans_marketing_project_token | | 218 | culture - cmc - pancakeswap - team - website | 17 | 218_culture_cmc_pancakeswap_team | | 219 | time - socials - culture - fun - wallet | 17 | 219_time_socials_culture_fun | | 220 | investment - taxes - bnb - - | 17 | 220_investment_taxes_bnb_ | | 221 | mission - thing - blockchain - level - world | 16 | 221_mission_thing_blockchain_level | | 222 | gem - - - - | 16 | 222_gem___ | | 223 | event - campaign - trading - guys - | 16 | 223_event_campaign_trading_guys | | 224 | rug - ways - hands - people - sell | 16 | 224_rug_ways_hands_people | | 225 | term - inu - rocket - - | 16 | 225_term_inu_rocket_ | | 226 | link - - - - | 16 | 
226_link___ | | 227 | pepecoin - story - idea - plans - bunch | 16 | 227_pepecoin_story_idea_plans | </details> ## Training hyperparameters * calculate_probabilities: False * language: None * low_memory: False * min_topic_size: 10 * n_gram_range: (1, 1) * nr_topics: None * seed_topic_list: None * top_n_words: 10 * verbose: True ## Framework versions * Numpy: 1.21.6 * HDBSCAN: None * UMAP: 0.5.3 * Pandas: 1.3.5 * Scikit-Learn: 1.0.2 * Sentence-transformers: 2.2.2 * Transformers: 4.30.1 * Numba: 0.56.4 * Plotly: 5.14.1 * Python: 3.7.12
[ "BEAR" ]
IIC/bert-base-spanish-wwm-cased-cantemist
IIC
text-classification
[ "transformers", "pytorch", "bert", "text-classification", "biomedical", "clinical", "eHR", "spanish", "bert-base-spanish-wwm-cased", "es", "dataset:PlanTL-GOB-ES/cantemist-ner", "license:cc-by-4.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-19T15:12:46Z
2024-11-25T10:41:00+00:00
19
0
--- datasets: - PlanTL-GOB-ES/cantemist-ner language: es license: cc-by-4.0 metrics: - f1 tags: - biomedical - clinical - eHR - spanish - bert-base-spanish-wwm-cased widget: - text: El diagnóstico definitivo de nuestro paciente fue de un Adenocarcinoma de pulmón cT2a cN3 cM1a Estadio IV (por una única lesión pulmonar contralateral) PD-L1 90%, EGFR negativo, ALK negativo y ROS-1 negativo. - text: Durante el ingreso se realiza una TC, observándose un nódulo pulmonar en el LII y una masa renal derecha indeterminada. Se realiza punción biopsia del nódulo pulmonar, con hallazgos altamente sospechosos de carcinoma. - text: Trombosis paraneoplásica con sospecha de hepatocarcinoma por imagen, sobre hígado cirrótico, en paciente con índice Child-Pugh B. model-index: - name: IIC/bert-base-spanish-wwm-cased-cantemist results: - task: type: token-classification dataset: name: cantemist-ner type: PlanTL-GOB-ES/cantemist-ner metrics: - type: f1 value: 0.898 name: f1 --- # bert-base-spanish-wwm-cased-cantemist This model is a finetuned version of bert-base-spanish-wwm-cased for the cantemist dataset used in a benchmark in the paper `A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks`. The model has a F1 of 0.898 Please refer to the [original publication](https://doi.org/10.1093/jamia/ocae054) for more information. 
## Parameters used | parameter | Value | |-------------------------|:-----:| | batch size | 64 | | learning rate | 4e05 | | classifier dropout | 0.1 | | warmup ratio | 0 | | warmup steps | 0 | | weight decay | 0 | | optimizer | AdamW | | epochs | 10 | | early stopping patience | 3 | ## BibTeX entry and citation info ```bibtext @article{10.1093/jamia/ocae054, author = {García Subies, Guillem and Barbero Jiménez, Álvaro and Martínez Fernández, Paloma}, title = {A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks}, journal = {Journal of the American Medical Informatics Association}, volume = {31}, number = {9}, pages = {2137-2146}, year = {2024}, month = {03}, issn = {1527-974X}, doi = {10.1093/jamia/ocae054}, url = {https://doi.org/10.1093/jamia/ocae054}, } ```
[ "CANTEMIST" ]
IIC/bsc-bio-ehr-es-cantemist
IIC
text-classification
[ "transformers", "pytorch", "safetensors", "roberta", "text-classification", "biomedical", "clinical", "eHR", "spanish", "bsc-bio-ehr-es", "es", "dataset:PlanTL-GOB-ES/cantemist-ner", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-19T15:23:23Z
2024-11-25T10:41:03+00:00
19
0
--- datasets: - PlanTL-GOB-ES/cantemist-ner language: es license: apache-2.0 metrics: - f1 tags: - biomedical - clinical - eHR - spanish - bsc-bio-ehr-es widget: - text: El diagnóstico definitivo de nuestro paciente fue de un Adenocarcinoma de pulmón cT2a cN3 cM1a Estadio IV (por una única lesión pulmonar contralateral) PD-L1 90%, EGFR negativo, ALK negativo y ROS-1 negativo. - text: Durante el ingreso se realiza una TC, observándose un nódulo pulmonar en el LII y una masa renal derecha indeterminada. Se realiza punción biopsia del nódulo pulmonar, con hallazgos altamente sospechosos de carcinoma. - text: Trombosis paraneoplásica con sospecha de hepatocarcinoma por imagen, sobre hígado cirrótico, en paciente con índice Child-Pugh B. model-index: - name: IIC/bsc-bio-ehr-es-cantemist results: - task: type: token-classification dataset: name: cantemist-ner type: PlanTL-GOB-ES/cantemist-ner metrics: - type: f1 value: 0.864 name: f1 --- # bsc-bio-ehr-es-cantemist This model is a finetuned version of bsc-bio-ehr-es for the cantemist dataset used in a benchmark in the paper `A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks`. The model has a F1 of 0.864 Please refer to the [original publication](https://doi.org/10.1093/jamia/ocae054) for more information. 
## Parameters used | parameter | Value | |-------------------------|:-----:| | batch size | 16 | | learning rate | 2e05 | | classifier dropout | 0.1 | | warmup ratio | 0 | | warmup steps | 0 | | weight decay | 0 | | optimizer | AdamW | | epochs | 10 | | early stopping patience | 3 | ## BibTeX entry and citation info ```bibtext @article{10.1093/jamia/ocae054, author = {García Subies, Guillem and Barbero Jiménez, Álvaro and Martínez Fernández, Paloma}, title = {A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks}, journal = {Journal of the American Medical Informatics Association}, volume = {31}, number = {9}, pages = {2137-2146}, year = {2024}, month = {03}, issn = {1527-974X}, doi = {10.1093/jamia/ocae054}, url = {https://doi.org/10.1093/jamia/ocae054}, } ```
[ "CANTEMIST" ]
IIC/mdeberta-v3-base-caresA
IIC
text-classification
[ "transformers", "pytorch", "safetensors", "deberta-v2", "text-classification", "biomedical", "clinical", "spanish", "mdeberta-v3-base", "es", "dataset:chizhikchi/CARES", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-20T15:27:49Z
2024-11-25T10:41:10+00:00
19
0
--- datasets: - chizhikchi/CARES language: es license: mit metrics: - f1 pipeline_tag: text-classification tags: - biomedical - clinical - spanish - mdeberta-v3-base model-index: - name: IIC/mdeberta-v3-base-caresA results: - task: type: multi-label-classification dataset: name: Cares Area type: chizhikchi/CARES split: test metrics: - type: f1 value: 0.993 name: f1 --- # mdeberta-v3-base-caresA This model is a finetuned version of mdeberta-v3-base for the cantemist dataset used in a benchmark in the paper `A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks`. The model has a F1 of 0.993 Please refer to the [original publication](https://doi.org/10.1093/jamia/ocae054) for more information. ## Parameters used | parameter | Value | |-------------------------|:-----:| | batch size | 16 | | learning rate | 4e-05 | | classifier dropout | 0.2 | | warmup ratio | 0 | | warmup steps | 0 | | weight decay | 0 | | optimizer | AdamW | | epochs | 10 | | early stopping patience | 3 | ## BibTeX entry and citation info ```bibtext @article{10.1093/jamia/ocae054, author = {García Subies, Guillem and Barbero Jiménez, Álvaro and Martínez Fernández, Paloma}, title = {A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks}, journal = {Journal of the American Medical Informatics Association}, volume = {31}, number = {9}, pages = {2137-2146}, year = {2024}, month = {03}, issn = {1527-974X}, doi = {10.1093/jamia/ocae054}, url = {https://doi.org/10.1093/jamia/ocae054}, } ```
[ "CANTEMIST" ]
IIC/xlm-roberta-large-distemist
IIC
token-classification
[ "transformers", "pytorch", "safetensors", "xlm-roberta", "text-classification", "biomedical", "clinical", "spanish", "xlm-roberta-large", "token-classification", "es", "dataset:bigbio/distemist", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-21T09:32:05Z
2024-11-25T10:41:40+00:00
19
1
--- datasets: - bigbio/distemist language: es license: mit metrics: - f1 pipeline_tag: token-classification tags: - biomedical - clinical - spanish - xlm-roberta-large model-index: - name: IIC/xlm-roberta-large-distemist results: - task: type: token-classification dataset: name: distemist type: bigbio/distemist split: test metrics: - type: f1 value: 0.817 name: f1 --- # xlm-roberta-large-distemist This model is a finetuned version of xlm-roberta-large for the distemist dataset used in a benchmark in the paper `A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks`. The model has a F1 of 0.817 Please refer to the [original publication](https://doi.org/10.1093/jamia/ocae054) for more information. ## Parameters used | parameter | Value | |-------------------------|:-----:| | batch size | 16 | | learning rate | 4e-05 | | classifier dropout | 0 | | warmup ratio | 0 | | warmup steps | 0 | | weight decay | 0 | | optimizer | AdamW | | epochs | 10 | | early stopping patience | 3 | ## BibTeX entry and citation info ```bibtext @article{10.1093/jamia/ocae054, author = {García Subies, Guillem and Barbero Jiménez, Álvaro and Martínez Fernández, Paloma}, title = {A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks}, journal = {Journal of the American Medical Informatics Association}, volume = {31}, number = {9}, pages = {2137-2146}, year = {2024}, month = {03}, issn = {1527-974X}, doi = {10.1093/jamia/ocae054}, url = {https://doi.org/10.1093/jamia/ocae054}, } ```
[ "DISTEMIST" ]
Jumtra/rinna-3.6b-tune-ep5
Jumtra
text-generation
[ "transformers", "pytorch", "gpt_neox", "text-generation", "ja", "lm", "nlp", "dataset:kunishou/databricks-dolly-15k-ja", "dataset:kunishou/hh-rlhf-49k-ja", "dataset:kunishou/cnn-dailymail-27k-ja", "dataset:Jumtra/oasst1_ja", "dataset:Jumtra/jglue_jnli", "dataset:Jumtra/jglue_jsquad", "dataset:Jumtra/jglue_jsquads_with_input", "license:mit", "autotrain_compatible", "text-generation-inference", "region:us" ]
2023-06-25T08:59:24Z
2023-07-03T07:09:36+00:00
19
0
--- datasets: - kunishou/databricks-dolly-15k-ja - kunishou/hh-rlhf-49k-ja - kunishou/cnn-dailymail-27k-ja - Jumtra/oasst1_ja - Jumtra/jglue_jnli - Jumtra/jglue_jsquad - Jumtra/jglue_jsquads_with_input language: - ja license: mit tags: - ja - gpt_neox - text-generation - lm - nlp inference: false --- # rinna-3.6b このモデルは、MosaicMLのllm-foundryリポジトリを使用して[rinna/japanese-gpt-neox-3.6b](https://huggingface.co/rinna/japanese-gpt-neox-3.6b)をファインチューニングしたモデルです。 ## Model Date June 28, 2023 ## Model License MIT ## 評価 [Jumtra/test_data_100QA](https://huggingface.co/datasets/Jumtra/test_data_100QA)を用いてモデルの正答率を評価した また、学習時のvalidateデータに対してのPerplexityを記載した。 | model name | 正答率 | Perplexity | | ---- | ---- | ---- | | [Jumtra/rinna-3.6b-tune-ep5](https://huggingface.co/Jumtra/rinna-3.6b-tune-ep5)| 40/100 | 8.105 | | [Jumtra/rinna-v1-tune-ep1](https://huggingface.co/Jumtra/rinna-v1-tune-ep1) | 42/100 | 7.458 | | [Jumtra/rinna-v1-tune-ep3](https://huggingface.co/Jumtra/rinna-v1-tune-ep3) | 41/100 | 7.034 | | [Jumtra/calm-7b-tune-ep4](https://huggingface.co/Jumtra/calm-7b-tune-ep4) | 40/100 | 9.766 | | [Jumtra/calm-v3-ep1](https://huggingface.co/Jumtra/calm-v3-ep1) | 35/100 | 9.305 | | [Jumtra/calm-v3-ep3](https://huggingface.co/Jumtra/calm-v3-ep3) | 37/100 | 13.276 | 以下のプロンプトを用いた ```python INSTRUCTION_KEY = "### 入力:" RESPONSE_KEY = "### 回答:" INTRO_BLURB = "以下はタスクを説明する指示と文脈のある文章が含まれた入力です。要求を適切に満たす回答を生成しなさい。" JP_PROMPT_FOR_GENERATION_FORMAT = """{intro} {instruction_key} {instruction} {response_key} """.format( intro=INTRO_BLURB, instruction_key=INSTRUCTION_KEY, instruction="{instruction}", response_key=RESPONSE_KEY, ) ```
[ "BLURB" ]
OpenMOSS/moss-vits-model
OpenMOSS
null
[ "transformers", "zh", "endpoints_compatible", "region:us" ]
2023-07-08T14:44:47Z
2023-07-14T04:38:23+00:00
19
7
--- language: - zh --- # MOSS声线vits模型(900 epochs) 从电源《流浪地球1》和《流浪地球2》提取MOSS原声进行vits微调训练后的预训练模型。 **All models and their derivatives provided on this page are prohibited from commercial use!** **本页面提供的所有模型及其衍生物严禁商用!** **Please bear all consequences caused by using the models below!** **请自行承担使用模型而造成的一切后果!**
[ "BEAR" ]
Leogrin/eleuther-pythia1b-hh-sft
Leogrin
text-generation
[ "transformers", "pytorch", "gpt_neox", "text-generation", "causal-lm", "pythia", "en", "dataset:Anthropic/hh-rlhf", "arxiv:2101.00027", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-07-24T22:01:04Z
2023-07-27T18:41:06+00:00
19
1
--- datasets: - Anthropic/hh-rlhf language: - en license: apache-2.0 tags: - pytorch - causal-lm - pythia --- # Infos Pythia-1b supervised finetuned with Anthropic-hh-rlhf dataset for 1 epoch. [wandb log](https://wandb.ai/pythia_dpo/Pythia_DPO_new/runs/xk2ub7ig?workspace=user-leogrin) See [Pythia-1b](https://huggingface.co/EleutherAI/pythia-1b) for model details [(paper)](https://arxiv.org/abs/2101.00027). # Benchmark raw results: Results for the base model are taken from the [Pythia paper](https://arxiv.org/abs/2101.00027). ## Zero shot | Task | 1B_base | 1B_sft | |------------------|----------------|----------------| | Lambada (OpenAI) | 0.562 ± 0.007 | 0.563 ± 0.007 | | PIQA | 0.707 ± 0.011 | 0.711 ± 0.011 | | WinoGrande | 0.537 ± 0.014 | 0.534 ± 0.014 | | WSC | 0.365 ± 0.047 | 0.365 ± 0.047 | | ARC - Easy | 0.569 ± 0.010 | 0.583 ± 0.010 | | ARC - Challenge | 0.244 ± 0.013 | 0.248 ± 0.013 | | SciQ | 0.840 ± 0.012 | 0.847 ± 0.011 | | LogiQA | 0.223 ± 0.016 | -- | ## Five shot | Task | 1B_base | 1B_sft | |------------------|----------------|----------------| | Lambada (OpenAI) | 0.507 ± 0.007 | 0.4722 ± 0.007 | | PIQA | 0.705 ± 0.011 | 0.7165 ± 0.0105| | WinoGrande | 0.532 ± 0.014 | 0.5343 ± 0.014 | | WSC | 0.365 ± 0.047 | 0.5000 ± 0.0493| | ARC - Easy | 0.594 ± 0.010 | 0.6010 ± 0.010 | | ARC - Challenge | 0.259 ± 0.013 | 0.2679 ± 0.0129| | SciQ | 0.920 ± 0.009 | 0.9100 ± 0.0091| | LogiQA | 0.227 ± 0.016 | N/A |
[ "SCIQ" ]
zwellington/bart-cnn-pubhealth-expanded
zwellington
text2text-generation
[ "transformers", "pytorch", "bart", "text2text-generation", "generated_from_trainer", "dataset:clupubhealth", "base_model:facebook/bart-large-cnn", "base_model:finetune:facebook/bart-large-cnn", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-28T06:04:37Z
2023-08-28T19:10:05+00:00
19
0
--- base_model: facebook/bart-large-cnn datasets: - clupubhealth license: mit metrics: - rouge tags: - generated_from_trainer model-index: - name: bart-cnn-pubhealth-expanded results: - task: type: text2text-generation name: Sequence-to-sequence Language Modeling dataset: name: clupubhealth type: clupubhealth config: expanded split: test args: expanded metrics: - type: rouge value: 28.3745 name: Rouge1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-cnn-pubhealth-expanded This model is a fine-tuned version of [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn) on the clupubhealth dataset. It achieves the following results on the evaluation set: - Loss: 2.7286 - Rouge1: 28.3745 - Rouge2: 8.806 - Rougel: 19.3896 - Rougelsum: 20.7149 - Gen Len: 66.075 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 2.571 | 0.26 | 500 | 2.2030 | 29.8543 | 10.1926 | 20.7137 | 21.7285 | 66.6 | | 2.313 | 0.51 | 1000 | 2.1891 | 29.5708 | 9.5292 | 20.0823 | 21.4907 | 66.87 | | 2.1371 | 0.77 | 1500 | 2.1981 | 29.7651 | 9.4575 | 20.412 | 21.2983 | 65.925 | | 1.9488 | 1.03 | 2000 | 2.3023 | 29.6158 | 9.4241 | 20.6193 | 21.5966 | 64.745 | | 1.7406 | 
1.29 | 2500 | 2.2808 | 30.0862 | 9.8179 | 20.5477 | 21.4372 | 65.17 | | 1.6732 | 1.54 | 3000 | 2.2953 | 29.65 | 9.693 | 20.3996 | 21.1837 | 64.48 | | 1.6349 | 1.8 | 3500 | 2.3093 | 29.9081 | 9.4101 | 20.2955 | 21.381 | 64.605 | | 1.4981 | 2.06 | 4000 | 2.3376 | 29.3183 | 9.2161 | 20.4919 | 21.3562 | 64.73 | | 1.3951 | 2.32 | 4500 | 2.3323 | 29.9405 | 9.118 | 19.9364 | 21.1458 | 66.425 | | 1.3775 | 2.57 | 5000 | 2.3597 | 29.1785 | 8.7657 | 19.6031 | 20.6261 | 65.505 | | 1.3426 | 2.83 | 5500 | 2.3744 | 29.1015 | 8.9953 | 20.0223 | 21.1623 | 64.99 | | 1.2243 | 3.09 | 6000 | 2.4723 | 28.8329 | 8.8603 | 19.9412 | 21.0484 | 65.655 | | 1.1798 | 3.35 | 6500 | 2.4063 | 28.9035 | 8.9915 | 19.8531 | 20.9957 | 65.93 | | 1.1926 | 3.6 | 7000 | 2.4110 | 29.4024 | 8.8828 | 19.4321 | 20.763 | 65.9 | | 1.1791 | 3.86 | 7500 | 2.4147 | 29.8599 | 9.168 | 20.2613 | 21.4986 | 65.205 | | 1.0545 | 4.12 | 8000 | 2.4941 | 27.9696 | 8.1513 | 19.5133 | 20.2316 | 65.26 | | 1.0513 | 4.37 | 8500 | 2.4345 | 28.8695 | 8.7627 | 19.8116 | 20.8412 | 64.375 | | 1.0516 | 4.63 | 9000 | 2.4550 | 29.3524 | 9.1717 | 20.0134 | 21.1516 | 65.59 | | 1.0454 | 4.89 | 9500 | 2.4543 | 29.0709 | 8.8377 | 19.9499 | 20.9215 | 66.055 | | 0.9247 | 5.15 | 10000 | 2.5152 | 28.8769 | 8.7619 | 19.5535 | 20.5383 | 65.455 | | 0.9529 | 5.4 | 10500 | 2.5192 | 29.4734 | 8.6629 | 19.6803 | 20.9521 | 66.855 | | 0.953 | 5.66 | 11000 | 2.5530 | 28.7234 | 8.5991 | 19.235 | 20.3965 | 64.62 | | 0.9519 | 5.92 | 11500 | 2.5024 | 28.8013 | 8.8198 | 19.091 | 20.2732 | 65.16 | | 0.8492 | 6.18 | 12000 | 2.6300 | 28.8821 | 8.974 | 20.1383 | 21.1273 | 66.16 | | 0.8705 | 6.43 | 12500 | 2.6192 | 28.9942 | 9.0923 | 20.0151 | 20.9462 | 66.17 | | 0.8489 | 6.69 | 13000 | 2.5758 | 28.5162 | 8.7087 | 19.6472 | 20.6057 | 68.725 | | 0.8853 | 6.95 | 13500 | 2.5783 | 29.0936 | 8.8353 | 19.8755 | 20.867 | 65.61 | | 0.8043 | 7.21 | 14000 | 2.6668 | 28.198 | 8.5221 | 19.2404 | 20.4359 | 66.84 | | 0.8004 | 7.46 | 14500 | 2.6676 | 28.4951 | 8.8535 | 19.8777 | 
20.8867 | 65.99 | | 0.8067 | 7.72 | 15000 | 2.6136 | 29.2442 | 8.8243 | 19.7428 | 20.9531 | 66.265 | | 0.8008 | 7.98 | 15500 | 2.6362 | 28.9875 | 8.8529 | 19.6993 | 20.6463 | 65.83 | | 0.7499 | 8.23 | 16000 | 2.6987 | 29.2742 | 9.0804 | 19.8464 | 21.0735 | 65.66 | | 0.7556 | 8.49 | 16500 | 2.6859 | 28.5046 | 8.3465 | 19.0813 | 20.2561 | 65.31 | | 0.7574 | 8.75 | 17000 | 2.7021 | 29.2861 | 8.8262 | 19.5899 | 20.9786 | 65.735 | | 0.7524 | 9.01 | 17500 | 2.7160 | 29.1471 | 8.9296 | 20.0009 | 21.2013 | 66.415 | | 0.7124 | 9.26 | 18000 | 2.7418 | 28.8323 | 8.7672 | 19.5686 | 20.5814 | 67.355 | | 0.7084 | 9.52 | 18500 | 2.7267 | 28.3833 | 8.7165 | 19.0514 | 20.3386 | 67.075 | | 0.7251 | 9.78 | 19000 | 2.7286 | 28.3745 | 8.806 | 19.3896 | 20.7149 | 66.075 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "PUBHEALTH" ]
aident-ai/bge-base-en-onnx
aident-ai
feature-extraction
[ "sentence-transformers", "pytorch", "onnx", "bert", "feature-extraction", "sentence-similarity", "transformers", "mteb", "en", "arxiv:2309.07597", "license:mit", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2023-09-06T12:25:41Z
2023-09-21T10:10:14+00:00
19
2
--- language: - en license: mit tags: - sentence-transformers - feature-extraction - sentence-similarity - transformers - mteb --- This is a fork from [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) and exported to onnx for inference. ======= <h1 align="center">FlagEmbedding</h1> <h4 align="center"> <p> <a href=#model-list>Model List</a> | <a href=#frequently-asked-questions>FAQ</a> | <a href=#usage>Usage</a> | <a href="#evaluation">Evaluation</a> | <a href="#train">Train</a> | <a href="#contact">Contact</a> | <a href="#citation">Citation</a> | <a href="#license">License</a> <p> </h4> More details please refer to our Github: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding). [English](README.md) | [中文](https://github.com/FlagOpen/FlagEmbedding/blob/master/README_zh.md) FlagEmbedding can map any text to a low-dimensional dense vector which can be used for tasks like retrieval, classification, clustering, or semantic search. And it also can be used in vector databases for LLMs. ************* 🌟**Updates**🌟 ************* - 09/15/2023: Release [paper](https://arxiv.org/pdf/2309.07597.pdf) and [dataset](https://data.baai.ac.cn/details/BAAI-MTP). - 09/12/2023: New Release: - **New reranker model**: release cross-encoder models `BAAI/bge-reranker-base` and `BAAI/bge-reranker-large`, which are more powerful than embedding model. We recommend to use/fine-tune them to re-rank top-k documents returned by embedding models. - **update embedding model**: release `bge-*-v1.5` embedding model to alleviate the issue of the similarity distribution, and enhance its retrieval ability without instruction. - 09/07/2023: Update [fine-tune code](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md): Add script to mine hard negatives and support adding instruction during fine-tuning. 
- 08/09/2023: BGE Models are integrated into **Langchain**, you can use it like [this](#using-langchain); C-MTEB **leaderboard** is [available](https://huggingface.co/spaces/mteb/leaderboard). - 08/05/2023: Release base-scale and small-scale models, **best performance among the models of the same size 🤗** - 08/02/2023: Release `bge-large-*`(short for BAAI General Embedding) Models, **rank 1st on MTEB and C-MTEB benchmark!** :tada: :tada: - 08/01/2023: We release the [Chinese Massive Text Embedding Benchmark](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB) (**C-MTEB**), consisting of 31 test dataset. ## Model List `bge` is short for `BAAI general embedding`. | Model | Language | | Description | query instruction for retrieval\* | |:-------------------------------|:--------:| :--------:| :--------:|:--------:| | [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient \** | | | [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient \** | | | [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable 
similarity distribution | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-large-zh-v1.5](https://huggingface.co/BAAI/bge-large-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-base-en](https://huggingface.co/BAAI/bge-base-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-en` | `Represent this sentence for searching relevant passages: ` | | 
[BAAI/bge-small-en](https://huggingface.co/BAAI/bge-small-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) |a small-scale model but with competitive performance | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-zh` | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `为这个句子生成表示以用于检索相关文章:` | \*: If you need to search the relevant passages to a query, we suggest to add the instruction to the query; in other cases, no instruction is needed, just use the original query directly. In all cases, **no instruction** needs to be added to passages. \**: Different from embedding model, reranker uses question and document as input and directly outputs similarity instead of embedding. To balance the accuracy and time cost, cross-encoder is widely used to re-rank top-k documents retrieved by other simple models. For example, use bge embedding model to retrieve top 100 relevant documents, and then use bge reranker to re-rank the top 100 documents to get the final top-3 results. ## Frequently asked questions <details> <summary>1. 
How to fine-tune bge embedding model?</summary> <!-- ### How to fine-tune bge embedding model? --> Following this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) to prepare data and fine-tune your model. Some suggestions: - Mine hard negatives following this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune#hard-negatives), which can improve the retrieval performance. - If you pre-train bge on your data, the pre-trained model cannot be directly used to calculate similarity, and it must be fine-tuned with contrastive learning before computing similarity. - If the accuracy of the fine-tuned model is still not high, it is recommended to use/fine-tune the cross-encoder model (bge-reranker) to re-rank top-k results. Hard negatives also are needed to fine-tune reranker. </details> <details> <summary>2. The similarity score between two dissimilar sentences is higher than 0.5</summary> <!-- ### The similarity score between two dissimilar sentences is higher than 0.5 --> **Suggest to use bge v1.5, which alleviates the issue of the similarity distribution.** Since we finetune the models by contrastive learning with a temperature of 0.01, the similarity distribution of the current BGE model is about in the interval \[0.6, 1\]. So a similarity score greater than 0.5 does not indicate that the two sentences are similar. For downstream tasks, such as passage retrieval or semantic similarity, **what matters is the relative order of the scores, not the absolute value.** If you need to filter similar sentences based on a similarity threshold, please select an appropriate similarity threshold based on the similarity distribution on your data (such as 0.8, 0.85, or even 0.9). </details> <details> <summary>3. 
When does the query instruction need to be used</summary> <!-- ### When does the query instruction need to be used --> For a retrieval task that uses short queries to find long related documents, it is recommended to add instructions for these short queries. **The best method to decide whether to add instructions for queries is choosing the setting that achieves better performance on your task.** In all cases, the documents/passages do not need to add the instruction. </details> ## Usage ### Usage for Embedding Model Here are some examples for using `bge` models with [FlagEmbedding](#using-flagembedding), [Sentence-Transformers](#using-sentence-transformers), [Langchain](#using-langchain), or [Huggingface Transformers](#using-huggingface-transformers). #### Using FlagEmbedding ``` pip install -U FlagEmbedding ``` If it doesn't work for you, you can see [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md) for more methods to install FlagEmbedding. 
```python from FlagEmbedding import FlagModel sentences_1 = ["样例数据-1", "样例数据-2"] sentences_2 = ["样例数据-3", "样例数据-4"] model = FlagModel('BAAI/bge-large-zh-v1.5', query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:", use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation embeddings_1 = model.encode(sentences_1) embeddings_2 = model.encode(sentences_2) similarity = embeddings_1 @ embeddings_2.T print(similarity) # for s2p(short query to long passage) retrieval task, suggest to use encode_queries() which will automatically add the instruction to each query # corpus in retrieval task can still use encode() or encode_corpus(), since they don't need instruction queries = ['query_1', 'query_2'] passages = ["样例文档-1", "样例文档-2"] q_embeddings = model.encode_queries(queries) p_embeddings = model.encode(passages) scores = q_embeddings @ p_embeddings.T ``` For the value of the argument `query_instruction_for_retrieval`, see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list). By default, FlagModel will use all available GPUs when encoding. Please set `os.environ["CUDA_VISIBLE_DEVICES"]` to select specific GPUs. You also can set `os.environ["CUDA_VISIBLE_DEVICES"]=""` to make all GPUs unavailable. 
#### Using Sentence-Transformers You can also use the `bge` models with [sentence-transformers](https://www.SBERT.net): ``` pip install -U sentence-transformers ``` ```python from sentence_transformers import SentenceTransformer sentences_1 = ["样例数据-1", "样例数据-2"] sentences_2 = ["样例数据-3", "样例数据-4"] model = SentenceTransformer('BAAI/bge-large-zh-v1.5') embeddings_1 = model.encode(sentences_1, normalize_embeddings=True) embeddings_2 = model.encode(sentences_2, normalize_embeddings=True) similarity = embeddings_1 @ embeddings_2.T print(similarity) ``` For s2p(short query to long passage) retrieval task, each short query should start with an instruction (instructions see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list)). But the instruction is not needed for passages. ```python from sentence_transformers import SentenceTransformer queries = ['query_1', 'query_2'] passages = ["样例文档-1", "样例文档-2"] instruction = "为这个句子生成表示以用于检索相关文章:" model = SentenceTransformer('BAAI/bge-large-zh-v1.5') q_embeddings = model.encode([instruction+q for q in queries], normalize_embeddings=True) p_embeddings = model.encode(passages, normalize_embeddings=True) scores = q_embeddings @ p_embeddings.T ``` #### Using Langchain You can use `bge` in langchain like this: ```python from langchain.embeddings import HuggingFaceBgeEmbeddings model_name = "BAAI/bge-large-en-v1.5" model_kwargs = {'device': 'cuda'} encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity model = HuggingFaceBgeEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs, query_instruction="为这个句子生成表示以用于检索相关文章:" ) model.query_instruction = "为这个句子生成表示以用于检索相关文章:" ``` #### Using HuggingFace Transformers With the transformers package, you can use the model like this: First, you pass your input through the transformer model, then you select the last hidden state of the first token (i.e., [CLS]) as the sentence embedding. 
```python from transformers import AutoTokenizer, AutoModel import torch # Sentences we want sentence embeddings for sentences = ["样例数据-1", "样例数据-2"] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh-v1.5') model = AutoModel.from_pretrained('BAAI/bge-large-zh-v1.5') model.eval() # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # for s2p(short query to long passage) retrieval task, add an instruction to query (not add instruction for passages) # encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, cls pooling. sentence_embeddings = model_output[0][:, 0] # normalize embeddings sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1) print("Sentence embeddings:", sentence_embeddings) ``` ### Usage for Reranker Different from embedding model, reranker uses question and document as input and directly outputs similarity instead of embedding. You can get a relevance score by inputting query and passage to the reranker. The reranker is optimized based on cross-entropy loss, so the relevance score is not bounded to a specific range. 
#### Using FlagEmbedding ``` pip install -U FlagEmbedding ``` Get relevance scores (higher scores indicate more relevance): ```python from FlagEmbedding import FlagReranker reranker = FlagReranker('BAAI/bge-reranker-large', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation score = reranker.compute_score(['query', 'passage']) print(score) scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]) print(scores) ``` #### Using Huggingface transformers ```python import torch from transformers import AutoModelForSequenceClassification, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-large') model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-large') model.eval() pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']] with torch.no_grad(): inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512) scores = model(**inputs, return_dict=True).logits.view(-1, ).float() print(scores) ``` ## Evaluation `baai-general-embedding` models achieve **state-of-the-art performance on both MTEB and C-MTEB leaderboard!** For more details and evaluation tools see our [scripts](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md). 
- **MTEB**: | Model Name | Dimension | Sequence Length | Average (56) | Retrieval (15) |Clustering (11) | Pair Classification (3) | Reranking (4) | STS (10) | Summarization (1) | Classification (12) | |:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | 1024 | 512 | **64.23** | **54.29** | 46.08 | 87.12 | 60.03 | 83.11 | 31.61 | 75.97 | | [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | 768 | 512 | 63.55 | 53.25 | 45.77 | 86.55 | 58.86 | 82.4 | 31.07 | 75.53 | | [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | 384 | 512 | 62.17 |51.68 | 43.82 | 84.92 | 58.36 | 81.59 | 30.12 | 74.14 | | [bge-large-en](https://huggingface.co/BAAI/bge-large-en) | 1024 | 512 | 63.98 | 53.9 | 46.98 | 85.8 | 59.48 | 81.56 | 32.06 | 76.21 | | [bge-base-en](https://huggingface.co/BAAI/bge-base-en) | 768 | 512 | 63.36 | 53.0 | 46.32 | 85.86 | 58.7 | 81.84 | 29.27 | 75.27 | | [gte-large](https://huggingface.co/thenlper/gte-large) | 1024 | 512 | 63.13 | 52.22 | 46.84 | 85.00 | 59.13 | 83.35 | 31.66 | 73.33 | | [gte-base](https://huggingface.co/thenlper/gte-base) | 768 | 512 | 62.39 | 51.14 | 46.2 | 84.57 | 58.61 | 82.3 | 31.17 | 73.01 | | [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1024| 512 | 62.25 | 50.56 | 44.49 | 86.03 | 56.61 | 82.05 | 30.19 | 75.24 | | [bge-small-en](https://huggingface.co/BAAI/bge-small-en) | 384 | 512 | 62.11 | 51.82 | 44.31 | 83.78 | 57.97 | 80.72 | 30.53 | 74.37 | | [instructor-xl](https://huggingface.co/hkunlp/instructor-xl) | 768 | 512 | 61.79 | 49.26 | 44.74 | 86.62 | 57.29 | 83.06 | 32.32 | 61.79 | | [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 768 | 512 | 61.5 | 50.29 | 43.80 | 85.73 | 55.91 | 81.05 | 30.28 | 73.84 | | [gte-small](https://huggingface.co/thenlper/gte-small) | 384 | 512 | 61.36 | 49.46 | 44.89 | 83.54 | 57.7 | 82.07 | 30.42 | 72.31 | | 
[text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | 1536 | 8192 | 60.99 | 49.25 | 45.9 | 84.89 | 56.32 | 80.97 | 30.8 | 70.93 | | [e5-small-v2](https://huggingface.co/intfloat/e5-base-v2) | 384 | 512 | 59.93 | 49.04 | 39.92 | 84.67 | 54.32 | 80.39 | 31.16 | 72.94 | | [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 768 | 512 | 59.51 | 42.24 | 43.72 | 85.06 | 56.42 | 82.63 | 30.08 | 73.42 | | [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) | 768 | 514 | 57.78 | 43.81 | 43.69 | 83.04 | 59.36 | 80.28 | 27.49 | 65.07 | | [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) | 4096 | 2048 | 57.59 | 48.22 | 38.93 | 81.9 | 55.65 | 77.74 | 33.6 | 66.19 | - **C-MTEB**: We create the benchmark C-MTEB for Chinese text embedding which consists of 31 datasets from 6 tasks. Please refer to [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md) for a detailed introduction. 
| Model | Embedding dimension | Avg | Retrieval | STS | PairClassification | Classification | Reranking | Clustering | |:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:| | [**BAAI/bge-large-zh-v1.5**](https://huggingface.co/BAAI/bge-large-zh-v1.5) | 1024 | **64.53** | 70.46 | 56.25 | 81.6 | 69.13 | 65.84 | 48.99 | | [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | 768 | 63.13 | 69.49 | 53.72 | 79.75 | 68.07 | 65.39 | 47.53 | | [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | 512 | 57.82 | 61.77 | 49.11 | 70.41 | 63.96 | 60.92 | 44.18 | | [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | 1024 | 64.20 | 71.53 | 54.98 | 78.94 | 68.32 | 65.11 | 48.39 | | [bge-large-zh-noinstruct](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | 1024 | 63.53 | 70.55 | 53 | 76.77 | 68.58 | 64.91 | 50.01 | | [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | 768 | 62.96 | 69.53 | 54.12 | 77.5 | 67.07 | 64.91 | 47.63 | | [multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) | 1024 | 58.79 | 63.66 | 48.44 | 69.89 | 67.34 | 56.00 | 48.23 | | [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | 512 | 58.27 | 63.07 | 49.45 | 70.35 | 63.64 | 61.48 | 45.09 | | [m3e-base](https://huggingface.co/moka-ai/m3e-base) | 768 | 57.10 | 56.91 | 50.47 | 63.99 | 67.52 | 59.34 | 47.68 | | [m3e-large](https://huggingface.co/moka-ai/m3e-large) | 1024 | 57.05 | 54.75 | 50.42 | 64.3 | 68.2 | 59.66 | 48.88 | | [multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) | 768 | 55.48 | 61.63 | 46.49 | 67.07 | 65.35 | 54.35 | 40.68 | | [multilingual-e5-small](https://huggingface.co/intfloat/multilingual-e5-small) | 384 | 55.38 | 59.95 | 45.27 | 66.45 | 65.85 | 53.86 | 45.26 | | [text-embedding-ada-002(OpenAI)](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings) | 1536 | 53.02 | 52.0 | 43.35 | 
69.56 | 64.31 | 54.28 | 45.68 | | [luotuo](https://huggingface.co/silk-road/luotuo-bert-medium) | 1024 | 49.37 | 44.4 | 42.78 | 66.62 | 61 | 49.25 | 44.39 | | [text2vec-base](https://huggingface.co/shibing624/text2vec-base-chinese) | 768 | 47.63 | 38.79 | 43.41 | 67.41 | 62.19 | 49.45 | 37.66 | | [text2vec-large](https://huggingface.co/GanymedeNil/text2vec-large-chinese) | 1024 | 47.36 | 41.94 | 44.97 | 70.86 | 60.66 | 49.16 | 30.02 | - **Reranking**: See [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/) for evaluation script. | Model | T2Reranking | T2RerankingZh2En\* | T2RerankingEn2Zh\* | MMarcoReranking | CMedQAv1 | CMedQAv2 | Avg | |:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:| | text2vec-base-multilingual | 64.66 | 62.94 | 62.51 | 14.37 | 48.46 | 48.6 | 50.26 | | multilingual-e5-small | 65.62 | 60.94 | 56.41 | 29.91 | 67.26 | 66.54 | 57.78 | | multilingual-e5-large | 64.55 | 61.61 | 54.28 | 28.6 | 67.42 | 67.92 | 57.4 | | multilingual-e5-base | 64.21 | 62.13 | 54.68 | 29.5 | 66.23 | 66.98 | 57.29 | | m3e-base | 66.03 | 62.74 | 56.07 | 17.51 | 77.05 | 76.76 | 59.36 | | m3e-large | 66.13 | 62.72 | 56.1 | 16.46 | 77.76 | 78.27 | 59.57 | | bge-base-zh-v1.5 | 66.49 | 63.25 | 57.02 | 29.74 | 80.47 | 84.88 | 63.64 | | bge-large-zh-v1.5 | 65.74 | 63.39 | 57.03 | 28.74 | 83.45 | 85.44 | 63.97 | | [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | 67.28 | 63.95 | 60.45 | 35.46 | 81.26 | 84.1 | 65.42 | | [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | 67.6 | 64.03 | 61.44 | 37.16 | 82.15 | 84.18 | 66.09 | \* : T2RerankingZh2En and T2RerankingEn2Zh are cross-language retrieval tasks ## Train ### BAAI Embedding We pre-train the models using [retromae](https://github.com/staoxiao/RetroMAE) and train them on large-scale pairs data using contrastive learning. 
**You can fine-tune the embedding model on your data following our [examples](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune).** We also provide a [pre-train example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/pretrain). Note that the goal of pre-training is to reconstruct the text, and the pre-trained model cannot be used for similarity calculation directly, it needs to be fine-tuned. More training details for bge see [baai_general_embedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md). ### BGE Reranker Cross-encoder will perform full-attention over the input pair, which is more accurate than embedding model (i.e., bi-encoder) but more time-consuming than embedding model. Therefore, it can be used to re-rank the top-k documents returned by embedding model. We train the cross-encoder on multilingual pair data. The data format is the same as embedding model, so you can fine-tune it easily following our [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker). More details please refer to [./FlagEmbedding/reranker/README.md](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/reranker) ## Contact If you have any question or suggestion related to this project, feel free to open an issue or pull request. You also can email Shitao Xiao([email protected]) and Zheng Liu([email protected]). ## Citation If you find our work helpful, please cite us: ``` @misc{bge_embedding, title={C-Pack: Packaged Resources To Advance General Chinese Embedding}, author={Shitao Xiao and Zheng Liu and Peitian Zhang and Niklas Muennighoff}, year={2023}, eprint={2309.07597}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ## License FlagEmbedding is licensed under the [MIT License](https://github.com/FlagOpen/FlagEmbedding/blob/master/LICENSE). The released models can be used for commercial purposes free of charge.
[ "BEAR" ]
TheBloke/Dans-TotSirocco-7B-GPTQ
TheBloke
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "en", "base_model:Dans-Archive/Dans-TotSirocco-7b", "base_model:quantized:Dans-Archive/Dans-TotSirocco-7b", "autotrain_compatible", "text-generation-inference", "4-bit", "gptq", "region:us" ]
2023-10-04T12:42:16Z
2023-10-04T13:32:37+00:00
19
2
--- base_model: PocketDoc/Dans-TotSirocco-7b language: - en model_name: Dans TotSirocco 7B inference: false model_creator: PocketDoc Labs model_type: mistral prompt_template: '<|system|>{system_message}<|user|>{prompt}<|model|> ' quantized_by: TheBloke --- <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Dans TotSirocco 7B - GPTQ - Model creator: [PocketDoc Labs](https://huggingface.co/PocketDoc) - Original model: [Dans TotSirocco 7B](https://huggingface.co/PocketDoc/Dans-TotSirocco-7b) <!-- description start --> ## Description This repo contains GPTQ model files for [PocketDoc Labs's Dans TotSirocco 7B](https://huggingface.co/PocketDoc/Dans-TotSirocco-7b). Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them. 
<!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Dans-TotSirocco-7B-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Dans-TotSirocco-7B-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Dans-TotSirocco-7B-GGUF) * [PocketDoc Labs's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/PocketDoc/Dans-TotSirocco-7b) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Metharme ``` <|system|>{system_message}<|user|>{prompt}<|model|> ``` <!-- prompt-template end --> <!-- README_GPTQ.md-provided-files start --> ## Provided files, and GPTQ parameters Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements. Each separate quant is in a different branch. See below for instructions on fetching from different branches. Most GPTQ files are made with AutoGPTQ. Mistral models are currently made with Transformers. <details> <summary>Explanation of GPTQ parameters</summary> - Bits: The bit size of the quantised model. - GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. "None" is the lowest possible value. - Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now. - Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy. - GPTQ dataset: The calibration dataset used during quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. 
Note that the GPTQ calibration dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s). - Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used. Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences. - ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama models in 4-bit. </details> | Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc | | ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- | | [main](https://huggingface.co/TheBloke/Dans-TotSirocco-7B-GPTQ/tree/main) | 4 | 128 | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 32768 | 4.16 GB | Yes | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. | | [gptq-4bit-32g-actorder_True](https://huggingface.co/TheBloke/Dans-TotSirocco-7B-GPTQ/tree/gptq-4bit-32g-actorder_True) | 4 | 32 | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 32768 | 4.57 GB | Yes | 4-bit, with Act Order and group size 32g. Gives highest possible inference quality, with maximum VRAM usage. | | [gptq-8bit--1g-actorder_True](https://huggingface.co/TheBloke/Dans-TotSirocco-7B-GPTQ/tree/gptq-8bit--1g-actorder_True) | 8 | None | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 32768 | 7.52 GB | No | 8-bit, with Act Order. No group size, to lower VRAM requirements. 
| | [gptq-8bit-128g-actorder_True](https://huggingface.co/TheBloke/Dans-TotSirocco-7B-GPTQ/tree/gptq-8bit-128g-actorder_True) | 8 | 128 | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 32768 | 7.68 GB | No | 8-bit, with group size 128g for higher inference quality and with Act Order for even higher accuracy. | | [gptq-8bit-32g-actorder_True](https://huggingface.co/TheBloke/Dans-TotSirocco-7B-GPTQ/tree/gptq-8bit-32g-actorder_True) | 8 | 32 | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 32768 | 8.17 GB | No | 8-bit, with group size 32g and Act Order for maximum inference quality. | | [gptq-4bit-64g-actorder_True](https://huggingface.co/TheBloke/Dans-TotSirocco-7B-GPTQ/tree/gptq-4bit-64g-actorder_True) | 4 | 64 | Yes | 0.1 | [wikitext](https://huggingface.co/datasets/wikitext/viewer/wikitext-2-v1/test) | 32768 | 4.29 GB | Yes | 4-bit, with Act Order and group size 64g. Uses less VRAM than 32g, but with slightly lower accuracy. | <!-- README_GPTQ.md-provided-files end --> <!-- README_GPTQ.md-download-from-branches start --> ## How to download, including from branches ### In text-generation-webui To download from the `main` branch, enter `TheBloke/Dans-TotSirocco-7B-GPTQ` in the "Download model" box. 
To download from another branch, add `:branchname` to the end of the download name, eg `TheBloke/Dans-TotSirocco-7B-GPTQ:gptq-4bit-32g-actorder_True` ### From the command line I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` To download the `main` branch to a folder called `Dans-TotSirocco-7B-GPTQ`: ```shell mkdir Dans-TotSirocco-7B-GPTQ huggingface-cli download TheBloke/Dans-TotSirocco-7B-GPTQ --local-dir Dans-TotSirocco-7B-GPTQ --local-dir-use-symlinks False ``` To download from a different branch, add the `--revision` parameter: ```shell mkdir Dans-TotSirocco-7B-GPTQ huggingface-cli download TheBloke/Dans-TotSirocco-7B-GPTQ --revision gptq-4bit-32g-actorder_True --local-dir Dans-TotSirocco-7B-GPTQ --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> If you remove the `--local-dir-use-symlinks False` parameter, the files will instead be stored in the central Huggingface cache directory (default location on Linux is: `~/.cache/huggingface`), and symlinks will be added to the specified `--local-dir`, pointing to their real location in the cache. This allows for interrupted downloads to be resumed, and allows you to quickly clone the repo to multiple places on disk without triggering a download again. The downside, and the reason why I don't list that as the default option, is that the files are then hidden away in a cache folder and it's harder to know where your disk space is being used, and to clear it up if/when you want to remove a download model. The cache location can be changed with the `HF_HOME` environment variable, and/or the `--cache-dir` parameter to `huggingface-cli`. For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). 
To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell mkdir Dans-TotSirocco-7B-GPTQ HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Dans-TotSirocco-7B-GPTQ --local-dir Dans-TotSirocco-7B-GPTQ --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> ### With `git` (**not** recommended) To clone a specific branch with `git`, use a command like this: ```shell git clone --single-branch --branch gptq-4bit-32g-actorder_True https://huggingface.co/TheBloke/Dans-TotSirocco-7B-GPTQ ``` Note that using Git with HF repos is strongly discouraged. It will be much slower than using `huggingface-hub`, and will use twice as much disk space as it has to store the model files twice (it stores every byte both in the intended target folder, and again in the `.git` folder as a blob.) <!-- README_GPTQ.md-download-from-branches end --> <!-- README_GPTQ.md-text-generation-webui start --> ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui). Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui). It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install. 1. Click the **Model tab**. 2. Under **Download custom model or LoRA**, enter `TheBloke/Dans-TotSirocco-7B-GPTQ`. - To download from a specific branch, enter for example `TheBloke/Dans-TotSirocco-7B-GPTQ:gptq-4bit-32g-actorder_True` - see Provided Files above for the list of branches for each option. 3. Click **Download**. 4. The model will start downloading. Once it's finished it will say "Done". 5. 
In the top left, click the refresh icon next to **Model**. 6. In the **Model** dropdown, choose the model you just downloaded: `Dans-TotSirocco-7B-GPTQ` 7. The model will automatically load, and is now ready for use! 8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right. * Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`. 9. Once you're ready, click the **Text Generation tab** and enter a prompt to get started! <!-- README_GPTQ.md-text-generation-webui end --> <!-- README_GPTQ.md-use-from-tgi start --> ## Serving this model from Text Generation Inference (TGI) It's recommended to use TGI version 1.1.0 or later. The official Docker container is: `ghcr.io/huggingface/text-generation-inference:1.1.0` Example Docker parameters: ```shell --model-id TheBloke/Dans-TotSirocco-7B-GPTQ --port 3000 --quantize awq --max-input-length 3696 --max-total-tokens 4096 --max-batch-prefill-tokens 4096 ``` Example Python code for interfacing with TGI (requires huggingface-hub 0.17.0 or later): ```shell pip3 install huggingface-hub ``` ```python from huggingface_hub import InferenceClient endpoint_url = "https://your-endpoint-url-here" prompt = "Tell me about AI" prompt_template=f'''<|system|>{system_message}<|user|>{prompt}<|model|> ''' client = InferenceClient(endpoint_url) response = client.text_generation(prompt, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1) print(f"Model output: {response}") ``` <!-- README_GPTQ.md-use-from-tgi end --> <!-- README_GPTQ.md-use-from-python start --> ## How to use this GPTQ model from Python code ### Install the necessary packages Requires: Transformers 4.33.0 or later, Optimum 1.12.0 or later, and AutoGPTQ 0.4.2 or later. 
```shell pip3 install transformers optimum pip3 install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/ # Use cu117 if on CUDA 11.7 ``` If you have problems installing AutoGPTQ using the pre-built wheels, install it from source instead: ```shell pip3 uninstall -y auto-gptq git clone https://github.com/PanQiWei/AutoGPTQ cd AutoGPTQ git checkout v0.4.2 pip3 install . ``` ### You can then use the following code ```python from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_name_or_path = "TheBloke/Dans-TotSirocco-7B-GPTQ" # To use a different branch, change revision # For example: revision="gptq-4bit-32g-actorder_True" model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto", trust_remote_code=False, revision="main") tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True) prompt = "Tell me about AI" prompt_template=f'''<|system|>{system_message}<|user|>{prompt}<|model|> ''' print("\n\n*** Generate:") input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda() output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512) print(tokenizer.decode(output[0])) # Inference can also be done using transformers' pipeline print("*** Pipeline:") pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1 ) print(pipe(prompt_template)[0]['generated_text']) ``` <!-- README_GPTQ.md-use-from-python end --> <!-- README_GPTQ.md-compatibility start --> ## Compatibility The files provided are tested to work with AutoGPTQ, both via Transformers and using AutoGPTQ directly. They should also work with [Occ4m's GPTQ-for-LLaMa fork](https://github.com/0cc4m/KoboldAI). [ExLlama](https://github.com/turboderp/exllama) is compatible with Llama and Mistral models in 4-bit. 
Please see the Provided Files table above for per-file compatibility. [Huggingface Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) is compatible with all GPTQ models. <!-- README_GPTQ.md-compatibility end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](llm-utils)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. 
**Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. 
<!-- footer end --> # Original model card: PocketDoc Labs's Dans TotSirocco 7B ### Description: This model is to test some of my data projects. It is a multipurpose chat / chat instruct hybrid model and can be seen as a prototype of Dan's PersonalityEngine Mk. 2. It has been trained on a wide array of one shot instructions, multi round instructions, role playing scenarios, and text adventure games. It is based on [Mistral-7b](https://huggingface.co/mistralai/Mistral-7B-v0.1) ### Prompt format: Pygmalion / Metharme The prompt should start with the cursor on the same line directly after "<|model|>" with no space. The following are all valid formats and can be extended to as many rounds as desired. ``` <|system|>system message here<|user|>user message here<|model|> ``` ``` <|system|>system message here<|user|>user message here<|model|>model message<|user|>user message here<|model|> ``` ``` <|system|>system message here<|model|> ``` ``` <|system|>system message here<|model|>model message<|user|>user message here<|model|> ``` #### Some example prompts and their resulting outputs: ##### Prompt: ``` <|system|>You are an assistant, help the user and use markdown when applicable.<|user|>What are the top games of the 90s?<|model|> ``` ##### Output: ``` The 1990s was a golden era for video gaming, with countless iconic titles released across various platforms. Here's a list of some of the most popular and influential games from that decade: - **Super Mario World** (SNES) - Released in 1990, this classic platformer introduced fans to Yoshi and featured memorable levels like Donut Plains and Star World. - **Mortal Kombat** (Arcade/Sega Genesis) - This gory fighting game shocked audiences upon its release in 1992 with its violent finishers and secret characters like Sub-Zero and Scorpion. - **Doom** (PC) - Id Software's groundbreaking first-person shooter revolutionized the genre by introducing fast-paced action and multiplayer deathmatches. 
- **Street Fighter II** (Arcade/SNES) - Capcom's sequel to their original Street Fighter arcade game added new fighters such as Ryu's rival Ken Masters plus complex special moves like Dragon Punch and Hadouken. - **Crash Bandicoot** (PlayStation) - Naughty Dog's mascot platforming series debuted in 1996 starring Crash who had to save his girlfriend Tawna from Dr Cortex's evil experiments on Aku Aku masks. - **Final Fantasy VII** (PlayStation) - Square Enix's seventh installment took players through Midgar City where they joined Cloud Strife's eco-terrorist group AVALANCHE against Shinra Electric Power Company led by Sephiroth. - **Resident Evil** (PlayStation) - The survival horror masterpiece created by Capcom saw Chris Redfield investigating Spencer Mansion alongside Jill Valentine while trying not get eaten alive by zombies! ``` ##### Prompt: ``` <|system|>You are a Virtual Story Generator. You take the user's input and create an excellent and captivating story that goes in that direction. Use an abundance of sensory descriptions and eloquent prose.<|user|>Alpha Centauri has fallen, to the bears. This is a point of view tale about a soldier on the ground.<|model|> ``` ##### Output: ``` The roar of the engines shook the very foundations of the planet as the massive warships descended upon Alpha Centauri. The air was thick with the stench of burning fuel and smoke, as if the entire world had been set ablaze. In the chaos, soldiers were scrambling for cover, desperately trying to protect themselves from the relentless barrage of firepower unleashed by their enemy. Amidst the carnage, one soldier stood tall, his heart pounding against his chest like a drumbeat of death. He knew he had to do something, anything, to stop this nightmare from becoming reality. With trembling hands, he reached for his weapon, determined to make a stand against the overwhelming force that threatened to consume them all. 
As the battle raged around him, the soldier felt a sudden surge of adrenaline coursing through his veins. It was almost as if some hidden power within him had awoken, granting him strength beyond measure. His eyes blazed with determination as he raised his gun and took aim at the first target he saw: a bear-like creature, its fur matted with blood and gore, advancing towards him with murderous intent. With a deafening crack, the soldier fired off several rounds, striking the beast square in the chest. The impact sent it reeling backwards, but it did not fall. Instead, it let out a ferocious growl before charging forward once more. The soldier gritted his teeth and pulled the trigger again, emptying his clip into the creature's already battered frame. But even as the bullets tore through its flesh, the beast seemed unfazed, continuing its advance towards the terrified soldier. Desperate for any advantage, he threw himself to the side just moments before the monster slammed into the spot where he had been standing mere seconds ago. As it turned back towards him, ready to deliver another brutal blow, the soldier could see that its eyes glowed with an otherworldly light. ``` ##### Prompt: ``` <|system|>You are a professional editor with decades of experience, help the user with any task they have for you.<|user|>Can you rewrite this to flow better? "I knew I probably shouldnt have done that but oh well"<|model|> ``` ##### Output: ``` I hesitated before making my decision, knowing full well it was not wise. However, in the end, I chose to proceed regardless. 
``` # Some quick and dirty training details: - [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="150" height="24"/>](https://github.com/OpenAccess-AI-Collective/axolotl) - Sequence length: 4096 - Training time: 4 hours - Hardware: 2x RTX 4090 - Training type: QLoRA - PEFT R/A: 32/32 # Credits: ### Skein Text Adventure Data: Thank you to the [Kobold AI](https://huggingface.co/KoboldAI) community for curating the Skein dataset, which is pivotal to this model's capabilities.
[ "BEAR" ]
usvsnsp/pythia-410m-ppo
usvsnsp
text-generation
[ "transformers", "safetensors", "gpt_neox", "text-generation", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-10-04T13:31:08Z
2023-10-04T14:51:55+00:00
19
0
--- {} --- Wandb run: https://wandb.ai/eleutherai/pythia-rlhf/runs/7f9c9lrm Eval Results | Tasks |Version|Filter| Metric | Value | |Stderr| |--------------|-------|------|----------|------:|---|-----:| |arc_challenge |Yaml |none |acc | 0.2201|± |0.0121| | | |none |acc_norm | 0.2568|± |0.0128| |arc_easy |Yaml |none |acc | 0.5253|± |0.0102| | | |none |acc_norm | 0.4558|± |0.0102| |lambada_openai|Yaml |none |perplexity|11.3766|± |0.3623| | | |none |acc | 0.4844|± |0.0070| |logiqa |Yaml |none |acc | 0.2120|± |0.0160| | | |none |acc_norm | 0.2780|± |0.0176| |piqa |Yaml |none |acc | 0.6817|± |0.0109| | | |none |acc_norm | 0.6828|± |0.0109| |sciq |Yaml |none |acc | 0.8130|± |0.0123| | | |none |acc_norm | 0.7090|± |0.0144| |winogrande |Yaml |none |acc | 0.5375|± |0.0140| |wsc |Yaml |none |acc | 0.3654|± |0.0474|
[ "SCIQ" ]
cxllin/Llama2-7b-med-v1
cxllin
question-answering
[ "transformers", "pytorch", "llama", "text-generation", "medical", "question-answering", "en", "dataset:cxllin/medinstruct", "arxiv:2009.13081", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-10-12T16:11:21Z
2023-10-13T16:32:40+00:00
19
5
--- datasets: - cxllin/medinstruct language: - en library_name: transformers license: apache-2.0 metrics: - accuracy pipeline_tag: question-answering tags: - medical --- # cxllin/Llama2-7b-med-v1 ## Model Details ### Description The **cxllin/Llama2-7b-med-v1** model, derived from the Llama 7b model, is posited to specialize in Natural Language Processing tasks within the medical domain. #### Development Details - **Developer**: Collin Heenan - **Model Architecture**: Transformer - **Base Model**: [Llama-2-7b](https://huggingface.co/NousResearch/Nous-Hermes-llama-2-7b) - **Primary Language**: English - **License**: apache 2.0 ### Model Source Links - **Repository**: Not Specified - **Paper**: [Jin, Di, et al. "What Disease does this Patient Have?..."](https://github.com/jind11/MedQA) ### Direct Applications The model is presumed to be applicable for various NLP tasks within the medical domain, such as: - Medical text generation or summarization. - Question answering related to medical topics. ### Downstream Applications Potential downstream applications might encompass: - Healthcare chatbot development. - Information extraction from medical documentation. ### Out-of-Scope Utilizations - Rendering definitive medical diagnoses or advice. - Employing in critical healthcare systems without stringent validation. - Applying in any high-stakes or legal contexts without thorough expert validation. ## Bias, Risks, and Limitations - **Biases**: The model may perpetuate biases extant in the training data, influencing neutrality. - **Risks**: There exists the peril of disseminating inaccurate or misleading medical information. - **Limitations**: Expertise in highly specialized or novel medical topics may be deficient. ### Recommendations for Use Utilizers are urged to: - Confirm outputs via expert medical review, especially in professional contexts. - Employ the model judiciously, adhering to pertinent legal and ethical guidelines. 
- Maintain transparency with end-users regarding the model’s capabilities and limitations. ## Getting Started with the Model Details regarding model deployment and interaction remain to be provided. ### Training Dataset - **Dataset Source**:[cxllin/medinstruct](https://huggingface.co/datasets/cxllin/medinstruct) - **Size**: 10.2k rows - **Scope**: Medical exam-related question-answering data. #### Preprocessing Steps Details regarding data cleaning, tokenization, and special term handling during training are not specified. --- @article{jin2020disease, title={What Disease does this Patient Have? A Large-scale Open Domain Question Answering Dataset from Medical Exams}, author={Jin, Di and Pan, Eileen and Oufattole, Nassim and Weng, Wei-Hung and Fang, Hanyi and Szolovits, Peter}, journal={arXiv preprint arXiv:2009.13081}, year={2020} }
[ "MEDQA" ]
michaelfeil/ct2fast-bge-large-en-v1.5
michaelfeil
feature-extraction
[ "sentence-transformers", "bert", "feature-extraction", "ctranslate2", "int8", "float16", "sentence-similarity", "transformers", "mteb", "en", "arxiv:2310.07554", "arxiv:2309.07597", "license:mit", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2023-10-13T13:47:27Z
2023-10-13T13:49:33+00:00
19
0
--- language: - en license: mit tags: - ctranslate2 - int8 - float16 - sentence-transformers - feature-extraction - sentence-similarity - transformers - mteb model-index: - name: bge-large-en-v1.5 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 75.8507462686567 - type: ap value: 38.566457320228245 - type: f1 value: 69.69386648043475 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 92.416675 - type: ap value: 89.1928861155922 - type: f1 value: 92.39477019574215 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 48.175999999999995 - type: f1 value: 47.80712792870253 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 40.184999999999995 - type: map_at_10 value: 55.654 - type: map_at_100 value: 56.25 - type: map_at_1000 value: 56.255 - type: map_at_3 value: 51.742999999999995 - type: map_at_5 value: 54.129000000000005 - type: mrr_at_1 value: 40.967 - type: mrr_at_10 value: 55.96 - type: mrr_at_100 value: 56.54900000000001 - type: mrr_at_1000 value: 56.554 - type: mrr_at_3 value: 51.980000000000004 - type: mrr_at_5 value: 54.44 - type: ndcg_at_1 value: 40.184999999999995 - type: ndcg_at_10 value: 63.542 - type: ndcg_at_100 value: 65.96499999999999 - type: ndcg_at_1000 value: 66.08699999999999 - type: ndcg_at_3 value: 55.582 - type: ndcg_at_5 value: 59.855000000000004 - type: precision_at_1 value: 40.184999999999995 - type: precision_at_10 value: 8.841000000000001 
- type: precision_at_100 value: 0.987 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 22.238 - type: precision_at_5 value: 15.405 - type: recall_at_1 value: 40.184999999999995 - type: recall_at_10 value: 88.407 - type: recall_at_100 value: 98.72 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 66.714 - type: recall_at_5 value: 77.027 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 48.567077926750066 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 43.19453389182364 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 64.46555939623092 - type: mrr value: 77.82361605768807 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 84.9554128814735 - type: cos_sim_spearman value: 84.65373612172036 - type: euclidean_pearson value: 83.2905059954138 - type: euclidean_spearman value: 84.52240782811128 - type: manhattan_pearson value: 82.99533802997436 - type: manhattan_spearman value: 84.20673798475734 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 87.78896103896103 - type: f1 value: 87.77189310964883 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 
metrics: - type: v_measure value: 39.714538337650495 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 36.90108349284447 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 32.795 - type: map_at_10 value: 43.669000000000004 - type: map_at_100 value: 45.151 - type: map_at_1000 value: 45.278 - type: map_at_3 value: 40.006 - type: map_at_5 value: 42.059999999999995 - type: mrr_at_1 value: 39.771 - type: mrr_at_10 value: 49.826 - type: mrr_at_100 value: 50.504000000000005 - type: mrr_at_1000 value: 50.549 - type: mrr_at_3 value: 47.115 - type: mrr_at_5 value: 48.832 - type: ndcg_at_1 value: 39.771 - type: ndcg_at_10 value: 50.217999999999996 - type: ndcg_at_100 value: 55.454 - type: ndcg_at_1000 value: 57.37 - type: ndcg_at_3 value: 44.885000000000005 - type: ndcg_at_5 value: 47.419 - type: precision_at_1 value: 39.771 - type: precision_at_10 value: 9.642000000000001 - type: precision_at_100 value: 1.538 - type: precision_at_1000 value: 0.198 - type: precision_at_3 value: 21.268 - type: precision_at_5 value: 15.536 - type: recall_at_1 value: 32.795 - type: recall_at_10 value: 62.580999999999996 - type: recall_at_100 value: 84.438 - type: recall_at_1000 value: 96.492 - type: recall_at_3 value: 47.071000000000005 - type: recall_at_5 value: 54.079 - type: map_at_1 value: 32.671 - type: map_at_10 value: 43.334 - type: map_at_100 value: 44.566 - type: map_at_1000 value: 44.702999999999996 - type: map_at_3 value: 40.343 - type: map_at_5 value: 41.983 - type: mrr_at_1 value: 40.764 - type: mrr_at_10 value: 49.382 - type: mrr_at_100 value: 49.988 - type: mrr_at_1000 value: 50.03300000000001 - type: mrr_at_3 value: 47.293 - type: mrr_at_5 value: 48.51 - type: ndcg_at_1 value: 40.764 - type: ndcg_at_10 
value: 49.039 - type: ndcg_at_100 value: 53.259 - type: ndcg_at_1000 value: 55.253 - type: ndcg_at_3 value: 45.091 - type: ndcg_at_5 value: 46.839999999999996 - type: precision_at_1 value: 40.764 - type: precision_at_10 value: 9.191 - type: precision_at_100 value: 1.476 - type: precision_at_1000 value: 0.19499999999999998 - type: precision_at_3 value: 21.72 - type: precision_at_5 value: 15.299 - type: recall_at_1 value: 32.671 - type: recall_at_10 value: 58.816 - type: recall_at_100 value: 76.654 - type: recall_at_1000 value: 89.05999999999999 - type: recall_at_3 value: 46.743 - type: recall_at_5 value: 51.783 - type: map_at_1 value: 40.328 - type: map_at_10 value: 53.32599999999999 - type: map_at_100 value: 54.37499999999999 - type: map_at_1000 value: 54.429 - type: map_at_3 value: 49.902 - type: map_at_5 value: 52.002 - type: mrr_at_1 value: 46.332 - type: mrr_at_10 value: 56.858 - type: mrr_at_100 value: 57.522 - type: mrr_at_1000 value: 57.54899999999999 - type: mrr_at_3 value: 54.472 - type: mrr_at_5 value: 55.996 - type: ndcg_at_1 value: 46.332 - type: ndcg_at_10 value: 59.313 - type: ndcg_at_100 value: 63.266999999999996 - type: ndcg_at_1000 value: 64.36 - type: ndcg_at_3 value: 53.815000000000005 - type: ndcg_at_5 value: 56.814 - type: precision_at_1 value: 46.332 - type: precision_at_10 value: 9.53 - type: precision_at_100 value: 1.238 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 24.054000000000002 - type: precision_at_5 value: 16.589000000000002 - type: recall_at_1 value: 40.328 - type: recall_at_10 value: 73.421 - type: recall_at_100 value: 90.059 - type: recall_at_1000 value: 97.81 - type: recall_at_3 value: 59.009 - type: recall_at_5 value: 66.352 - type: map_at_1 value: 27.424 - type: map_at_10 value: 36.332 - type: map_at_100 value: 37.347 - type: map_at_1000 value: 37.422 - type: map_at_3 value: 33.743 - type: map_at_5 value: 35.176 - type: mrr_at_1 value: 29.153000000000002 - type: mrr_at_10 value: 38.233 - 
type: mrr_at_100 value: 39.109 - type: mrr_at_1000 value: 39.164 - type: mrr_at_3 value: 35.876000000000005 - type: mrr_at_5 value: 37.169000000000004 - type: ndcg_at_1 value: 29.153000000000002 - type: ndcg_at_10 value: 41.439 - type: ndcg_at_100 value: 46.42 - type: ndcg_at_1000 value: 48.242000000000004 - type: ndcg_at_3 value: 36.362 - type: ndcg_at_5 value: 38.743 - type: precision_at_1 value: 29.153000000000002 - type: precision_at_10 value: 6.315999999999999 - type: precision_at_100 value: 0.927 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 15.443000000000001 - type: precision_at_5 value: 10.644 - type: recall_at_1 value: 27.424 - type: recall_at_10 value: 55.364000000000004 - type: recall_at_100 value: 78.211 - type: recall_at_1000 value: 91.74600000000001 - type: recall_at_3 value: 41.379 - type: recall_at_5 value: 47.14 - type: map_at_1 value: 19.601 - type: map_at_10 value: 27.826 - type: map_at_100 value: 29.017 - type: map_at_1000 value: 29.137 - type: map_at_3 value: 25.125999999999998 - type: map_at_5 value: 26.765 - type: mrr_at_1 value: 24.005000000000003 - type: mrr_at_10 value: 32.716 - type: mrr_at_100 value: 33.631 - type: mrr_at_1000 value: 33.694 - type: mrr_at_3 value: 29.934 - type: mrr_at_5 value: 31.630999999999997 - type: ndcg_at_1 value: 24.005000000000003 - type: ndcg_at_10 value: 33.158 - type: ndcg_at_100 value: 38.739000000000004 - type: ndcg_at_1000 value: 41.495 - type: ndcg_at_3 value: 28.185 - type: ndcg_at_5 value: 30.796 - type: precision_at_1 value: 24.005000000000003 - type: precision_at_10 value: 5.908 - type: precision_at_100 value: 1.005 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 13.391 - type: precision_at_5 value: 9.876 - type: recall_at_1 value: 19.601 - type: recall_at_10 value: 44.746 - type: recall_at_100 value: 68.82300000000001 - type: recall_at_1000 value: 88.215 - type: recall_at_3 value: 31.239 - type: recall_at_5 value: 37.695 - type: 
map_at_1 value: 30.130000000000003 - type: map_at_10 value: 40.96 - type: map_at_100 value: 42.282 - type: map_at_1000 value: 42.392 - type: map_at_3 value: 37.889 - type: map_at_5 value: 39.661 - type: mrr_at_1 value: 36.958999999999996 - type: mrr_at_10 value: 46.835 - type: mrr_at_100 value: 47.644 - type: mrr_at_1000 value: 47.688 - type: mrr_at_3 value: 44.562000000000005 - type: mrr_at_5 value: 45.938 - type: ndcg_at_1 value: 36.958999999999996 - type: ndcg_at_10 value: 47.06 - type: ndcg_at_100 value: 52.345 - type: ndcg_at_1000 value: 54.35 - type: ndcg_at_3 value: 42.301 - type: ndcg_at_5 value: 44.635999999999996 - type: precision_at_1 value: 36.958999999999996 - type: precision_at_10 value: 8.479000000000001 - type: precision_at_100 value: 1.284 - type: precision_at_1000 value: 0.163 - type: precision_at_3 value: 20.244 - type: precision_at_5 value: 14.224999999999998 - type: recall_at_1 value: 30.130000000000003 - type: recall_at_10 value: 59.27 - type: recall_at_100 value: 81.195 - type: recall_at_1000 value: 94.21199999999999 - type: recall_at_3 value: 45.885 - type: recall_at_5 value: 52.016 - type: map_at_1 value: 26.169999999999998 - type: map_at_10 value: 36.451 - type: map_at_100 value: 37.791000000000004 - type: map_at_1000 value: 37.897 - type: map_at_3 value: 33.109 - type: map_at_5 value: 34.937000000000005 - type: mrr_at_1 value: 32.877 - type: mrr_at_10 value: 42.368 - type: mrr_at_100 value: 43.201 - type: mrr_at_1000 value: 43.259 - type: mrr_at_3 value: 39.763999999999996 - type: mrr_at_5 value: 41.260000000000005 - type: ndcg_at_1 value: 32.877 - type: ndcg_at_10 value: 42.659000000000006 - type: ndcg_at_100 value: 48.161 - type: ndcg_at_1000 value: 50.345 - type: ndcg_at_3 value: 37.302 - type: ndcg_at_5 value: 39.722 - type: precision_at_1 value: 32.877 - type: precision_at_10 value: 7.9 - type: precision_at_100 value: 1.236 - type: precision_at_1000 value: 0.158 - type: precision_at_3 value: 17.846 - type: precision_at_5 value: 12.9 
- type: recall_at_1 value: 26.169999999999998 - type: recall_at_10 value: 55.35 - type: recall_at_100 value: 78.755 - type: recall_at_1000 value: 93.518 - type: recall_at_3 value: 40.176 - type: recall_at_5 value: 46.589000000000006 - type: map_at_1 value: 27.15516666666667 - type: map_at_10 value: 36.65741666666667 - type: map_at_100 value: 37.84991666666666 - type: map_at_1000 value: 37.96316666666667 - type: map_at_3 value: 33.74974999999999 - type: map_at_5 value: 35.3765 - type: mrr_at_1 value: 32.08233333333334 - type: mrr_at_10 value: 41.033833333333334 - type: mrr_at_100 value: 41.84524999999999 - type: mrr_at_1000 value: 41.89983333333333 - type: mrr_at_3 value: 38.62008333333333 - type: mrr_at_5 value: 40.03441666666666 - type: ndcg_at_1 value: 32.08233333333334 - type: ndcg_at_10 value: 42.229 - type: ndcg_at_100 value: 47.26716666666667 - type: ndcg_at_1000 value: 49.43466666666667 - type: ndcg_at_3 value: 37.36408333333333 - type: ndcg_at_5 value: 39.6715 - type: precision_at_1 value: 32.08233333333334 - type: precision_at_10 value: 7.382583333333334 - type: precision_at_100 value: 1.16625 - type: precision_at_1000 value: 0.15408333333333332 - type: precision_at_3 value: 17.218 - type: precision_at_5 value: 12.21875 - type: recall_at_1 value: 27.15516666666667 - type: recall_at_10 value: 54.36683333333333 - type: recall_at_100 value: 76.37183333333333 - type: recall_at_1000 value: 91.26183333333333 - type: recall_at_3 value: 40.769916666666674 - type: recall_at_5 value: 46.702333333333335 - type: map_at_1 value: 25.749 - type: map_at_10 value: 33.001999999999995 - type: map_at_100 value: 33.891 - type: map_at_1000 value: 33.993 - type: map_at_3 value: 30.703999999999997 - type: map_at_5 value: 31.959 - type: mrr_at_1 value: 28.834 - type: mrr_at_10 value: 35.955 - type: mrr_at_100 value: 36.709 - type: mrr_at_1000 value: 36.779 - type: mrr_at_3 value: 33.947 - type: mrr_at_5 value: 35.089 - type: ndcg_at_1 value: 28.834 - type: ndcg_at_10 value: 37.329 
- type: ndcg_at_100 value: 41.79 - type: ndcg_at_1000 value: 44.169000000000004 - type: ndcg_at_3 value: 33.184999999999995 - type: ndcg_at_5 value: 35.107 - type: precision_at_1 value: 28.834 - type: precision_at_10 value: 5.7669999999999995 - type: precision_at_100 value: 0.876 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 14.213000000000001 - type: precision_at_5 value: 9.754999999999999 - type: recall_at_1 value: 25.749 - type: recall_at_10 value: 47.791 - type: recall_at_100 value: 68.255 - type: recall_at_1000 value: 85.749 - type: recall_at_3 value: 36.199 - type: recall_at_5 value: 41.071999999999996 - type: map_at_1 value: 17.777 - type: map_at_10 value: 25.201 - type: map_at_100 value: 26.423999999999996 - type: map_at_1000 value: 26.544 - type: map_at_3 value: 22.869 - type: map_at_5 value: 24.023 - type: mrr_at_1 value: 21.473 - type: mrr_at_10 value: 29.12 - type: mrr_at_100 value: 30.144 - type: mrr_at_1000 value: 30.215999999999998 - type: mrr_at_3 value: 26.933 - type: mrr_at_5 value: 28.051 - type: ndcg_at_1 value: 21.473 - type: ndcg_at_10 value: 30.003 - type: ndcg_at_100 value: 35.766 - type: ndcg_at_1000 value: 38.501000000000005 - type: ndcg_at_3 value: 25.773000000000003 - type: ndcg_at_5 value: 27.462999999999997 - type: precision_at_1 value: 21.473 - type: precision_at_10 value: 5.482 - type: precision_at_100 value: 0.975 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 12.205 - type: precision_at_5 value: 8.692 - type: recall_at_1 value: 17.777 - type: recall_at_10 value: 40.582 - type: recall_at_100 value: 66.305 - type: recall_at_1000 value: 85.636 - type: recall_at_3 value: 28.687 - type: recall_at_5 value: 33.089 - type: map_at_1 value: 26.677 - type: map_at_10 value: 36.309000000000005 - type: map_at_100 value: 37.403999999999996 - type: map_at_1000 value: 37.496 - type: map_at_3 value: 33.382 - type: map_at_5 value: 34.98 - type: mrr_at_1 value: 31.343 - type: 
mrr_at_10 value: 40.549 - type: mrr_at_100 value: 41.342 - type: mrr_at_1000 value: 41.397 - type: mrr_at_3 value: 38.029 - type: mrr_at_5 value: 39.451 - type: ndcg_at_1 value: 31.343 - type: ndcg_at_10 value: 42.1 - type: ndcg_at_100 value: 47.089999999999996 - type: ndcg_at_1000 value: 49.222 - type: ndcg_at_3 value: 36.836999999999996 - type: ndcg_at_5 value: 39.21 - type: precision_at_1 value: 31.343 - type: precision_at_10 value: 7.164 - type: precision_at_100 value: 1.0959999999999999 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 16.915 - type: precision_at_5 value: 11.940000000000001 - type: recall_at_1 value: 26.677 - type: recall_at_10 value: 55.54599999999999 - type: recall_at_100 value: 77.094 - type: recall_at_1000 value: 92.01 - type: recall_at_3 value: 41.191 - type: recall_at_5 value: 47.006 - type: map_at_1 value: 24.501 - type: map_at_10 value: 33.102 - type: map_at_100 value: 34.676 - type: map_at_1000 value: 34.888000000000005 - type: map_at_3 value: 29.944 - type: map_at_5 value: 31.613999999999997 - type: mrr_at_1 value: 29.447000000000003 - type: mrr_at_10 value: 37.996 - type: mrr_at_100 value: 38.946 - type: mrr_at_1000 value: 38.995000000000005 - type: mrr_at_3 value: 35.079 - type: mrr_at_5 value: 36.69 - type: ndcg_at_1 value: 29.447000000000003 - type: ndcg_at_10 value: 39.232 - type: ndcg_at_100 value: 45.247 - type: ndcg_at_1000 value: 47.613 - type: ndcg_at_3 value: 33.922999999999995 - type: ndcg_at_5 value: 36.284 - type: precision_at_1 value: 29.447000000000003 - type: precision_at_10 value: 7.648000000000001 - type: precision_at_100 value: 1.516 - type: precision_at_1000 value: 0.23900000000000002 - type: precision_at_3 value: 16.008 - type: precision_at_5 value: 11.779 - type: recall_at_1 value: 24.501 - type: recall_at_10 value: 51.18899999999999 - type: recall_at_100 value: 78.437 - type: recall_at_1000 value: 92.842 - type: recall_at_3 value: 35.808 - type: recall_at_5 value: 42.197 - 
type: map_at_1 value: 22.039 - type: map_at_10 value: 30.377 - type: map_at_100 value: 31.275 - type: map_at_1000 value: 31.379 - type: map_at_3 value: 27.98 - type: map_at_5 value: 29.358 - type: mrr_at_1 value: 24.03 - type: mrr_at_10 value: 32.568000000000005 - type: mrr_at_100 value: 33.403 - type: mrr_at_1000 value: 33.475 - type: mrr_at_3 value: 30.436999999999998 - type: mrr_at_5 value: 31.796000000000003 - type: ndcg_at_1 value: 24.03 - type: ndcg_at_10 value: 35.198 - type: ndcg_at_100 value: 39.668 - type: ndcg_at_1000 value: 42.296 - type: ndcg_at_3 value: 30.709999999999997 - type: ndcg_at_5 value: 33.024 - type: precision_at_1 value: 24.03 - type: precision_at_10 value: 5.564 - type: precision_at_100 value: 0.828 - type: precision_at_1000 value: 0.117 - type: precision_at_3 value: 13.309000000000001 - type: precision_at_5 value: 9.39 - type: recall_at_1 value: 22.039 - type: recall_at_10 value: 47.746 - type: recall_at_100 value: 68.23599999999999 - type: recall_at_1000 value: 87.852 - type: recall_at_3 value: 35.852000000000004 - type: recall_at_5 value: 41.410000000000004 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 15.692999999999998 - type: map_at_10 value: 26.903 - type: map_at_100 value: 28.987000000000002 - type: map_at_1000 value: 29.176999999999996 - type: map_at_3 value: 22.137 - type: map_at_5 value: 24.758 - type: mrr_at_1 value: 35.57 - type: mrr_at_10 value: 47.821999999999996 - type: mrr_at_100 value: 48.608000000000004 - type: mrr_at_1000 value: 48.638999999999996 - type: mrr_at_3 value: 44.452000000000005 - type: mrr_at_5 value: 46.546 - type: ndcg_at_1 value: 35.57 - type: ndcg_at_10 value: 36.567 - type: ndcg_at_100 value: 44.085 - type: ndcg_at_1000 value: 47.24 - type: ndcg_at_3 value: 29.964000000000002 - type: ndcg_at_5 value: 32.511 - type: precision_at_1 value: 35.57 - type: precision_at_10 value: 11.485 - type: 
precision_at_100 value: 1.9619999999999997 - type: precision_at_1000 value: 0.256 - type: precision_at_3 value: 22.237000000000002 - type: precision_at_5 value: 17.471999999999998 - type: recall_at_1 value: 15.692999999999998 - type: recall_at_10 value: 43.056 - type: recall_at_100 value: 68.628 - type: recall_at_1000 value: 86.075 - type: recall_at_3 value: 26.918999999999997 - type: recall_at_5 value: 34.14 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 9.53 - type: map_at_10 value: 20.951 - type: map_at_100 value: 30.136000000000003 - type: map_at_1000 value: 31.801000000000002 - type: map_at_3 value: 15.021 - type: map_at_5 value: 17.471999999999998 - type: mrr_at_1 value: 71.0 - type: mrr_at_10 value: 79.176 - type: mrr_at_100 value: 79.418 - type: mrr_at_1000 value: 79.426 - type: mrr_at_3 value: 78.125 - type: mrr_at_5 value: 78.61200000000001 - type: ndcg_at_1 value: 58.5 - type: ndcg_at_10 value: 44.106 - type: ndcg_at_100 value: 49.268 - type: ndcg_at_1000 value: 56.711999999999996 - type: ndcg_at_3 value: 48.934 - type: ndcg_at_5 value: 45.826 - type: precision_at_1 value: 71.0 - type: precision_at_10 value: 35.0 - type: precision_at_100 value: 11.360000000000001 - type: precision_at_1000 value: 2.046 - type: precision_at_3 value: 52.833 - type: precision_at_5 value: 44.15 - type: recall_at_1 value: 9.53 - type: recall_at_10 value: 26.811 - type: recall_at_100 value: 55.916999999999994 - type: recall_at_1000 value: 79.973 - type: recall_at_3 value: 16.413 - type: recall_at_5 value: 19.980999999999998 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 51.519999999999996 - type: f1 value: 46.36601294761231 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: 
None metrics: - type: map_at_1 value: 74.413 - type: map_at_10 value: 83.414 - type: map_at_100 value: 83.621 - type: map_at_1000 value: 83.635 - type: map_at_3 value: 82.337 - type: map_at_5 value: 83.039 - type: mrr_at_1 value: 80.19800000000001 - type: mrr_at_10 value: 87.715 - type: mrr_at_100 value: 87.778 - type: mrr_at_1000 value: 87.779 - type: mrr_at_3 value: 87.106 - type: mrr_at_5 value: 87.555 - type: ndcg_at_1 value: 80.19800000000001 - type: ndcg_at_10 value: 87.182 - type: ndcg_at_100 value: 87.90299999999999 - type: ndcg_at_1000 value: 88.143 - type: ndcg_at_3 value: 85.60600000000001 - type: ndcg_at_5 value: 86.541 - type: precision_at_1 value: 80.19800000000001 - type: precision_at_10 value: 10.531 - type: precision_at_100 value: 1.113 - type: precision_at_1000 value: 0.11499999999999999 - type: precision_at_3 value: 32.933 - type: precision_at_5 value: 20.429 - type: recall_at_1 value: 74.413 - type: recall_at_10 value: 94.363 - type: recall_at_100 value: 97.165 - type: recall_at_1000 value: 98.668 - type: recall_at_3 value: 90.108 - type: recall_at_5 value: 92.52 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 22.701 - type: map_at_10 value: 37.122 - type: map_at_100 value: 39.178000000000004 - type: map_at_1000 value: 39.326 - type: map_at_3 value: 32.971000000000004 - type: map_at_5 value: 35.332 - type: mrr_at_1 value: 44.753 - type: mrr_at_10 value: 53.452 - type: mrr_at_100 value: 54.198 - type: mrr_at_1000 value: 54.225 - type: mrr_at_3 value: 50.952 - type: mrr_at_5 value: 52.464 - type: ndcg_at_1 value: 44.753 - type: ndcg_at_10 value: 45.021 - type: ndcg_at_100 value: 52.028 - type: ndcg_at_1000 value: 54.596000000000004 - type: ndcg_at_3 value: 41.622 - type: ndcg_at_5 value: 42.736000000000004 - type: precision_at_1 value: 44.753 - type: precision_at_10 value: 12.284 - type: precision_at_100 value: 1.955 - type: precision_at_1000 value: 0.243 - 
type: precision_at_3 value: 27.828999999999997 - type: precision_at_5 value: 20.061999999999998 - type: recall_at_1 value: 22.701 - type: recall_at_10 value: 51.432 - type: recall_at_100 value: 77.009 - type: recall_at_1000 value: 92.511 - type: recall_at_3 value: 37.919000000000004 - type: recall_at_5 value: 44.131 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 40.189 - type: map_at_10 value: 66.24600000000001 - type: map_at_100 value: 67.098 - type: map_at_1000 value: 67.149 - type: map_at_3 value: 62.684 - type: map_at_5 value: 64.974 - type: mrr_at_1 value: 80.378 - type: mrr_at_10 value: 86.127 - type: mrr_at_100 value: 86.29299999999999 - type: mrr_at_1000 value: 86.297 - type: mrr_at_3 value: 85.31400000000001 - type: mrr_at_5 value: 85.858 - type: ndcg_at_1 value: 80.378 - type: ndcg_at_10 value: 74.101 - type: ndcg_at_100 value: 76.993 - type: ndcg_at_1000 value: 77.948 - type: ndcg_at_3 value: 69.232 - type: ndcg_at_5 value: 72.04599999999999 - type: precision_at_1 value: 80.378 - type: precision_at_10 value: 15.595999999999998 - type: precision_at_100 value: 1.7840000000000003 - type: precision_at_1000 value: 0.191 - type: precision_at_3 value: 44.884 - type: precision_at_5 value: 29.145 - type: recall_at_1 value: 40.189 - type: recall_at_10 value: 77.981 - type: recall_at_100 value: 89.21 - type: recall_at_1000 value: 95.48299999999999 - type: recall_at_3 value: 67.326 - type: recall_at_5 value: 72.863 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 92.84599999999999 - type: ap value: 89.4710787567357 - type: f1 value: 92.83752676932258 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 23.132 - type: map_at_10 value: 35.543 
- type: map_at_100 value: 36.702 - type: map_at_1000 value: 36.748999999999995 - type: map_at_3 value: 31.737 - type: map_at_5 value: 33.927 - type: mrr_at_1 value: 23.782 - type: mrr_at_10 value: 36.204 - type: mrr_at_100 value: 37.29 - type: mrr_at_1000 value: 37.330999999999996 - type: mrr_at_3 value: 32.458999999999996 - type: mrr_at_5 value: 34.631 - type: ndcg_at_1 value: 23.782 - type: ndcg_at_10 value: 42.492999999999995 - type: ndcg_at_100 value: 47.985 - type: ndcg_at_1000 value: 49.141 - type: ndcg_at_3 value: 34.748000000000005 - type: ndcg_at_5 value: 38.651 - type: precision_at_1 value: 23.782 - type: precision_at_10 value: 6.665 - type: precision_at_100 value: 0.941 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.776 - type: precision_at_5 value: 10.84 - type: recall_at_1 value: 23.132 - type: recall_at_10 value: 63.794 - type: recall_at_100 value: 89.027 - type: recall_at_1000 value: 97.807 - type: recall_at_3 value: 42.765 - type: recall_at_5 value: 52.11 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 94.59188326493388 - type: f1 value: 94.3842594786827 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 79.49384404924761 - type: f1 value: 59.7580539534629 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 77.56220578345663 - type: f1 value: 75.27228165561478 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 
7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 80.53463349024884 - type: f1 value: 80.4893958236536 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 32.56100273484962 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 31.470380028839607 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 32.06102792457849 - type: mrr value: 33.30709199672238 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 6.776999999999999 - type: map_at_10 value: 14.924000000000001 - type: map_at_100 value: 18.955 - type: map_at_1000 value: 20.538999999999998 - type: map_at_3 value: 10.982 - type: map_at_5 value: 12.679000000000002 - type: mrr_at_1 value: 47.988 - type: mrr_at_10 value: 57.232000000000006 - type: mrr_at_100 value: 57.818999999999996 - type: mrr_at_1000 value: 57.847 - type: mrr_at_3 value: 54.901999999999994 - type: mrr_at_5 value: 56.481 - type: ndcg_at_1 value: 46.594 - type: ndcg_at_10 value: 38.129000000000005 - type: ndcg_at_100 value: 35.54 - type: ndcg_at_1000 value: 44.172 - type: ndcg_at_3 value: 43.025999999999996 - type: ndcg_at_5 value: 41.052 - type: precision_at_1 value: 47.988 - type: precision_at_10 value: 28.111000000000004 - type: precision_at_100 value: 8.929 - type: precision_at_1000 value: 2.185 - type: precision_at_3 value: 40.144000000000005 - type: precision_at_5 value: 35.232 - type: recall_at_1 value: 6.776999999999999 - type: recall_at_10 value: 19.289 - type: recall_at_100 
value: 36.359 - type: recall_at_1000 value: 67.54 - type: recall_at_3 value: 11.869 - type: recall_at_5 value: 14.999 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 31.108000000000004 - type: map_at_10 value: 47.126000000000005 - type: map_at_100 value: 48.171 - type: map_at_1000 value: 48.199 - type: map_at_3 value: 42.734 - type: map_at_5 value: 45.362 - type: mrr_at_1 value: 34.936 - type: mrr_at_10 value: 49.571 - type: mrr_at_100 value: 50.345 - type: mrr_at_1000 value: 50.363 - type: mrr_at_3 value: 45.959 - type: mrr_at_5 value: 48.165 - type: ndcg_at_1 value: 34.936 - type: ndcg_at_10 value: 55.028999999999996 - type: ndcg_at_100 value: 59.244 - type: ndcg_at_1000 value: 59.861 - type: ndcg_at_3 value: 46.872 - type: ndcg_at_5 value: 51.217999999999996 - type: precision_at_1 value: 34.936 - type: precision_at_10 value: 9.099 - type: precision_at_100 value: 1.145 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 21.456 - type: precision_at_5 value: 15.411 - type: recall_at_1 value: 31.108000000000004 - type: recall_at_10 value: 76.53999999999999 - type: recall_at_100 value: 94.39 - type: recall_at_1000 value: 98.947 - type: recall_at_3 value: 55.572 - type: recall_at_5 value: 65.525 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 71.56400000000001 - type: map_at_10 value: 85.482 - type: map_at_100 value: 86.114 - type: map_at_1000 value: 86.13 - type: map_at_3 value: 82.607 - type: map_at_5 value: 84.405 - type: mrr_at_1 value: 82.42 - type: mrr_at_10 value: 88.304 - type: mrr_at_100 value: 88.399 - type: mrr_at_1000 value: 88.399 - type: mrr_at_3 value: 87.37 - type: mrr_at_5 value: 88.024 - type: ndcg_at_1 value: 82.45 - type: ndcg_at_10 value: 89.06500000000001 - type: ndcg_at_100 value: 90.232 - type: ndcg_at_1000 value: 90.305 - type: ndcg_at_3 value: 
86.375 - type: ndcg_at_5 value: 87.85300000000001 - type: precision_at_1 value: 82.45 - type: precision_at_10 value: 13.486999999999998 - type: precision_at_100 value: 1.534 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.813 - type: precision_at_5 value: 24.773999999999997 - type: recall_at_1 value: 71.56400000000001 - type: recall_at_10 value: 95.812 - type: recall_at_100 value: 99.7 - type: recall_at_1000 value: 99.979 - type: recall_at_3 value: 87.966 - type: recall_at_5 value: 92.268 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 57.241876648614145 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 64.66212576446223 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 5.308 - type: map_at_10 value: 13.803 - type: map_at_100 value: 16.176 - type: map_at_1000 value: 16.561 - type: map_at_3 value: 9.761000000000001 - type: map_at_5 value: 11.802 - type: mrr_at_1 value: 26.200000000000003 - type: mrr_at_10 value: 37.621 - type: mrr_at_100 value: 38.767 - type: mrr_at_1000 value: 38.815 - type: mrr_at_3 value: 34.117 - type: mrr_at_5 value: 36.107 - type: ndcg_at_1 value: 26.200000000000003 - type: ndcg_at_10 value: 22.64 - type: ndcg_at_100 value: 31.567 - type: ndcg_at_1000 value: 37.623 - type: ndcg_at_3 value: 21.435000000000002 - type: ndcg_at_5 value: 18.87 - type: precision_at_1 value: 26.200000000000003 - type: precision_at_10 value: 11.74 - type: precision_at_100 value: 2.465 - type: precision_at_1000 value: 0.391 - type: precision_at_3 value: 20.033 - type: precision_at_5 value: 16.64 - type: recall_at_1 value: 5.308 - type: 
recall_at_10 value: 23.794999999999998 - type: recall_at_100 value: 50.015 - type: recall_at_1000 value: 79.283 - type: recall_at_3 value: 12.178 - type: recall_at_5 value: 16.882 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 84.93231134675553 - type: cos_sim_spearman value: 81.68319292603205 - type: euclidean_pearson value: 81.8396814380367 - type: euclidean_spearman value: 81.24641903349945 - type: manhattan_pearson value: 81.84698799204274 - type: manhattan_spearman value: 81.24269997904105 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 86.73241671587446 - type: cos_sim_spearman value: 79.05091082971826 - type: euclidean_pearson value: 83.91146869578044 - type: euclidean_spearman value: 79.87978465370936 - type: manhattan_pearson value: 83.90888338917678 - type: manhattan_spearman value: 79.87482848584241 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 85.14970731146177 - type: cos_sim_spearman value: 86.37363490084627 - type: euclidean_pearson value: 83.02154218530433 - type: euclidean_spearman value: 83.80258761957367 - type: manhattan_pearson value: 83.01664495119347 - type: manhattan_spearman value: 83.77567458007952 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 83.40474139886784 - type: cos_sim_spearman value: 82.77768789165984 - type: euclidean_pearson value: 80.7065877443695 - type: euclidean_spearman value: 81.375940662505 - type: manhattan_pearson value: 80.6507552270278 - type: manhattan_spearman value: 
81.32782179098741 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 87.08585968722274 - type: cos_sim_spearman value: 88.03110031451399 - type: euclidean_pearson value: 85.74012019602384 - type: euclidean_spearman value: 86.13592849438209 - type: manhattan_pearson value: 85.74404842369206 - type: manhattan_spearman value: 86.14492318960154 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 84.95069052788875 - type: cos_sim_spearman value: 86.4867991595147 - type: euclidean_pearson value: 84.31013325754635 - type: euclidean_spearman value: 85.01529258006482 - type: manhattan_pearson value: 84.26995570085374 - type: manhattan_spearman value: 84.96982104986162 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.54617647971897 - type: cos_sim_spearman value: 87.49834181751034 - type: euclidean_pearson value: 86.01015322577122 - type: euclidean_spearman value: 84.63362652063199 - type: manhattan_pearson value: 86.13807574475706 - type: manhattan_spearman value: 84.7772370721132 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 67.20047755786615 - type: cos_sim_spearman value: 67.05324077987636 - type: euclidean_pearson value: 66.91930642976601 - type: euclidean_spearman value: 65.21491856099105 - type: manhattan_pearson value: 66.78756851976624 - type: manhattan_spearman value: 65.12356257740728 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: 
b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 86.19852871539686 - type: cos_sim_spearman value: 87.5161895296395 - type: euclidean_pearson value: 84.59848645207485 - type: euclidean_spearman value: 85.26427328757919 - type: manhattan_pearson value: 84.59747366996524 - type: manhattan_spearman value: 85.24045855146915 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 87.63320317811032 - type: mrr value: 96.26242947321379 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 60.928000000000004 - type: map_at_10 value: 70.112 - type: map_at_100 value: 70.59299999999999 - type: map_at_1000 value: 70.623 - type: map_at_3 value: 66.846 - type: map_at_5 value: 68.447 - type: mrr_at_1 value: 64.0 - type: mrr_at_10 value: 71.212 - type: mrr_at_100 value: 71.616 - type: mrr_at_1000 value: 71.64500000000001 - type: mrr_at_3 value: 68.77799999999999 - type: mrr_at_5 value: 70.094 - type: ndcg_at_1 value: 64.0 - type: ndcg_at_10 value: 74.607 - type: ndcg_at_100 value: 76.416 - type: ndcg_at_1000 value: 77.102 - type: ndcg_at_3 value: 69.126 - type: ndcg_at_5 value: 71.41300000000001 - type: precision_at_1 value: 64.0 - type: precision_at_10 value: 9.933 - type: precision_at_100 value: 1.077 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 26.556 - type: precision_at_5 value: 17.467 - type: recall_at_1 value: 60.928000000000004 - type: recall_at_10 value: 87.322 - type: recall_at_100 value: 94.833 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 72.628 - type: recall_at_5 value: 78.428 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: 
d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.86237623762376 - type: cos_sim_ap value: 96.72586477206649 - type: cos_sim_f1 value: 93.01858362631845 - type: cos_sim_precision value: 93.4409687184662 - type: cos_sim_recall value: 92.60000000000001 - type: dot_accuracy value: 99.78019801980199 - type: dot_ap value: 93.72748205246228 - type: dot_f1 value: 89.04109589041096 - type: dot_precision value: 87.16475095785441 - type: dot_recall value: 91.0 - type: euclidean_accuracy value: 99.85445544554456 - type: euclidean_ap value: 96.6661459876145 - type: euclidean_f1 value: 92.58337481333997 - type: euclidean_precision value: 92.17046580773042 - type: euclidean_recall value: 93.0 - type: manhattan_accuracy value: 99.85445544554456 - type: manhattan_ap value: 96.6883549244056 - type: manhattan_f1 value: 92.57598405580468 - type: manhattan_precision value: 92.25422045680239 - type: manhattan_recall value: 92.9 - type: max_accuracy value: 99.86237623762376 - type: max_ap value: 96.72586477206649 - type: max_f1 value: 93.01858362631845 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 66.39930057069995 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 34.96398659903402 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 55.946944700355395 - type: mrr value: 56.97151398438164 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c 
metrics: - type: cos_sim_pearson value: 31.541657650692905 - type: cos_sim_spearman value: 31.605804192286303 - type: dot_pearson value: 28.26905996736398 - type: dot_spearman value: 27.864801765851187 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.22599999999999998 - type: map_at_10 value: 1.8870000000000002 - type: map_at_100 value: 9.78 - type: map_at_1000 value: 22.514 - type: map_at_3 value: 0.6669999999999999 - type: map_at_5 value: 1.077 - type: mrr_at_1 value: 82.0 - type: mrr_at_10 value: 89.86699999999999 - type: mrr_at_100 value: 89.86699999999999 - type: mrr_at_1000 value: 89.86699999999999 - type: mrr_at_3 value: 89.667 - type: mrr_at_5 value: 89.667 - type: ndcg_at_1 value: 79.0 - type: ndcg_at_10 value: 74.818 - type: ndcg_at_100 value: 53.715999999999994 - type: ndcg_at_1000 value: 47.082 - type: ndcg_at_3 value: 82.134 - type: ndcg_at_5 value: 79.81899999999999 - type: precision_at_1 value: 82.0 - type: precision_at_10 value: 78.0 - type: precision_at_100 value: 54.48 - type: precision_at_1000 value: 20.518 - type: precision_at_3 value: 87.333 - type: precision_at_5 value: 85.2 - type: recall_at_1 value: 0.22599999999999998 - type: recall_at_10 value: 2.072 - type: recall_at_100 value: 13.013 - type: recall_at_1000 value: 43.462 - type: recall_at_3 value: 0.695 - type: recall_at_5 value: 1.139 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.328 - type: map_at_10 value: 9.795 - type: map_at_100 value: 15.801000000000002 - type: map_at_1000 value: 17.23 - type: map_at_3 value: 4.734 - type: map_at_5 value: 6.644 - type: mrr_at_1 value: 30.612000000000002 - type: mrr_at_10 value: 46.902 - type: mrr_at_100 value: 47.495 - type: mrr_at_1000 value: 47.495 - type: mrr_at_3 value: 41.156 - type: mrr_at_5 value: 44.218 - type: ndcg_at_1 value: 28.571 
- type: ndcg_at_10 value: 24.806 - type: ndcg_at_100 value: 36.419000000000004 - type: ndcg_at_1000 value: 47.272999999999996 - type: ndcg_at_3 value: 25.666 - type: ndcg_at_5 value: 25.448999999999998 - type: precision_at_1 value: 30.612000000000002 - type: precision_at_10 value: 23.061 - type: precision_at_100 value: 7.714 - type: precision_at_1000 value: 1.484 - type: precision_at_3 value: 26.531 - type: precision_at_5 value: 26.122 - type: recall_at_1 value: 2.328 - type: recall_at_10 value: 16.524 - type: recall_at_100 value: 47.179 - type: recall_at_1000 value: 81.22200000000001 - type: recall_at_3 value: 5.745 - type: recall_at_5 value: 9.339 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 70.9142 - type: ap value: 14.335574772555415 - type: f1 value: 54.62839595194111 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 59.94340690435768 - type: f1 value: 60.286487936731916 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 51.26597708987974 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 87.48882398521786 - type: cos_sim_ap value: 79.04326607602204 - type: cos_sim_f1 value: 71.64566826860633 - type: cos_sim_precision value: 70.55512918905092 - type: cos_sim_recall value: 72.77044854881267 - type: dot_accuracy value: 84.19264469213805 - 
type: dot_ap value: 67.96360043562528 - type: dot_f1 value: 64.06418393006827 - type: dot_precision value: 58.64941898706424 - type: dot_recall value: 70.58047493403694 - type: euclidean_accuracy value: 87.45902127913214 - type: euclidean_ap value: 78.9742237648272 - type: euclidean_f1 value: 71.5553235908142 - type: euclidean_precision value: 70.77955601445535 - type: euclidean_recall value: 72.34828496042216 - type: manhattan_accuracy value: 87.41729749061214 - type: manhattan_ap value: 78.90073137580596 - type: manhattan_f1 value: 71.3942611553533 - type: manhattan_precision value: 68.52705653967483 - type: manhattan_recall value: 74.51187335092348 - type: max_accuracy value: 87.48882398521786 - type: max_ap value: 79.04326607602204 - type: max_f1 value: 71.64566826860633 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.68125897465751 - type: cos_sim_ap value: 85.6003454431979 - type: cos_sim_f1 value: 77.6957163958641 - type: cos_sim_precision value: 73.0110366307807 - type: cos_sim_recall value: 83.02279026793964 - type: dot_accuracy value: 87.7672992587418 - type: dot_ap value: 82.4971301112899 - type: dot_f1 value: 75.90528233151184 - type: dot_precision value: 72.0370626469368 - type: dot_recall value: 80.21250384970742 - type: euclidean_accuracy value: 88.4503434625684 - type: euclidean_ap value: 84.91949884748384 - type: euclidean_f1 value: 76.92365018444684 - type: euclidean_precision value: 74.53245721712759 - type: euclidean_recall value: 79.47336002463813 - type: manhattan_accuracy value: 88.47556952691427 - type: manhattan_ap value: 84.8963689101517 - type: manhattan_f1 value: 76.85901249256395 - type: manhattan_precision value: 74.31693989071039 - type: manhattan_recall value: 79.58115183246073 - type: max_accuracy value: 88.68125897465751 - type: max_ap value: 
85.6003454431979 - type: max_f1 value: 77.6957163958641 --- # Fast-Inference with Ctranslate2 Speedup inference while reducing memory by 2x-4x using int8 inference in C++ on CPU or GPU. quantized version of [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) ```bash pip install hf-hub-ctranslate2>=2.12.0 ctranslate2>=3.17.1 ``` ```python # from transformers import AutoTokenizer model_name = "michaelfeil/ct2fast-bge-large-en-v1.5" model_name_orig="BAAI/bge-large-en-v1.5" from hf_hub_ctranslate2 import EncoderCT2fromHfHub model = EncoderCT2fromHfHub( # load in int8 on CUDA model_name_or_path=model_name, device="cuda", compute_type="int8_float16" ) outputs = model.generate( text=["I like soccer", "I like tennis", "The eiffel tower is in Paris"], max_length=64, ) # perform downstream tasks on outputs outputs["pooler_output"] outputs["last_hidden_state"] outputs["attention_mask"] # alternative, use SentenceTransformer Mix-In # for end-to-end Sentence embeddings generation # (not pulling from this CT2fast-HF repo) from hf_hub_ctranslate2 import CT2SentenceTransformer model = CT2SentenceTransformer( model_name_orig, compute_type="int8_float16", device="cuda" ) embeddings = model.encode( ["I like soccer", "I like tennis", "The eiffel tower is in Paris"], batch_size=32, convert_to_numpy=True, normalize_embeddings=True, ) print(embeddings.shape, embeddings) scores = (embeddings @ embeddings.T) * 100 # Hint: you can also host this code via REST API and # via github.com/michaelfeil/infinity ``` Checkpoint compatible with [ctranslate2>=3.17.1](https://github.com/OpenNMT/CTranslate2) and [hf-hub-ctranslate2>=2.12.0](https://github.com/michaelfeil/hf-hub-ctranslate2) - `compute_type=int8_float16` for `device="cuda"` - `compute_type=int8` for `device="cpu"` Converted on 2023-10-13 using ``` LLama-2 -> removed <pad> token. ``` # Licence and other remarks: This is just a quantized version. 
Licence conditions are intended to be idential to original huggingface repo. # Original description <h1 align="center">FlagEmbedding</h1> <h4 align="center"> <p> <a href=#model-list>Model List</a> | <a href=#frequently-asked-questions>FAQ</a> | <a href=#usage>Usage</a> | <a href="#evaluation">Evaluation</a> | <a href="#train">Train</a> | <a href="#contact">Contact</a> | <a href="#citation">Citation</a> | <a href="#license">License</a> <p> </h4> More details please refer to our Github: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding). [English](README.md) | [中文](https://github.com/FlagOpen/FlagEmbedding/blob/master/README_zh.md) FlagEmbedding can map any text to a low-dimensional dense vector which can be used for tasks like retrieval, classification, clustering, or semantic search. And it also can be used in vector databases for LLMs. ************* 🌟**Updates**🌟 ************* - 10/12/2023: Release [LLM-Embedder](./FlagEmbedding/llm_embedder/README.md), a unified embedding model to support diverse retrieval augmentation needs for LLMs. [Paper](https://arxiv.org/pdf/2310.07554.pdf) :fire: - 09/15/2023: The [technical report](https://arxiv.org/pdf/2309.07597.pdf) of BGE has been released - 09/15/2023: The [masive training data](https://data.baai.ac.cn/details/BAAI-MTP) of BGE has been released - 09/12/2023: New models: - **New reranker model**: release cross-encoder models `BAAI/bge-reranker-base` and `BAAI/bge-reranker-large`, which are more powerful than embedding model. We recommend to use/fine-tune them to re-rank top-k documents returned by embedding models. - **update embedding model**: release `bge-*-v1.5` embedding model to alleviate the issue of the similarity distribution, and enhance its retrieval ability without instruction. 
<details> <summary>More</summary> <!-- ### More --> - 09/07/2023: Update [fine-tune code](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md): Add script to mine hard negatives and support adding instruction during fine-tuning. - 08/09/2023: BGE Models are integrated into **Langchain**, you can use it like [this](#using-langchain); C-MTEB **leaderboard** is [available](https://huggingface.co/spaces/mteb/leaderboard). - 08/05/2023: Release base-scale and small-scale models, **best performance among the models of the same size 🤗** - 08/02/2023: Release `bge-large-*`(short for BAAI General Embedding) Models, **rank 1st on MTEB and C-MTEB benchmark!** :tada: :tada: - 08/01/2023: We release the [Chinese Massive Text Embedding Benchmark](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB) (**C-MTEB**), consisting of 31 test dataset. </details> ## Model List `bge` is short for `BAAI general embedding`. | Model | Language | | Description | query instruction for retrieval [1] | |:-------------------------------|:--------:| :--------:| :--------:|:--------:| | [BAAI/llm-embedder](https://huggingface.co/BAAI/llm-embedder) | English | [Inference](./FlagEmbedding/llm_embedder/README.md) [Fine-tune](./FlagEmbedding/llm_embedder/README.md) | a unified embedding model to support diverse retrieval augmentation needs for LLMs | See [README](./FlagEmbedding/llm_embedder/README.md) | | [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more accurate but less efficient [2] | | | [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | Chinese and English | [Inference](#usage-for-reranker) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker) | a cross-encoder model which is more 
accurate but less efficient [2] | | | [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-large-zh-v1.5](https://huggingface.co/BAAI/bge-large-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | version 1.5 with more reasonable similarity distribution | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en) | English | 
[Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-base-en](https://huggingface.co/BAAI/bge-base-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-en` | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-small-en](https://huggingface.co/BAAI/bge-small-en) | English | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) |a small-scale model but with competitive performance | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | :trophy: rank **1st** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a base-scale model but with similar ability to `bge-large-zh` | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | Chinese | [Inference](#usage-for-embedding-model) [Fine-tune](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) | a small-scale model but with competitive performance | `为这个句子生成表示以用于检索相关文章:` | [1\]: If you need to search the relevant passages to a query, we suggest to add the instruction to the query; in other cases, no instruction is needed, just use the 
original query directly. In all cases, **no instruction** needs to be added to passages. [2\]: Different from embedding model, reranker uses question and document as input and directly output similarity instead of embedding. To balance the accuracy and time cost, cross-encoder is widely used to re-rank top-k documents retrieved by other simple models. For examples, use bge embedding model to retrieve top 100 relevant documents, and then use bge reranker to re-rank the top 100 document to get the final top-3 results. All models have been uploaded to Huggingface Hub, and you can see them at https://huggingface.co/BAAI. If you cannot open the Huggingface Hub, you also can download the models at https://model.baai.ac.cn/models . ## Frequently asked questions <details> <summary>1. How to fine-tune bge embedding model?</summary> <!-- ### How to fine-tune bge embedding model? --> Following this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune) to prepare data and fine-tune your model. Some suggestions: - Mine hard negatives following this [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune#hard-negatives), which can improve the retrieval performance. - If you pre-train bge on your data, the pre-trained model cannot be directly used to calculate similarity, and it must be fine-tuned with contrastive learning before computing similarity. - If the accuracy of the fine-tuned model is still not high, it is recommended to use/fine-tune the cross-encoder model (bge-reranker) to re-rank top-k results. Hard negatives also are needed to fine-tune reranker. </details> <details> <summary>2. 
The similarity score between two dissimilar sentences is higher than 0.5</summary> <!-- ### The similarity score between two dissimilar sentences is higher than 0.5 --> **Suggest to use bge v1.5, which alleviates the issue of the similarity distribution.** Since we finetune the models by contrastive learning with a temperature of 0.01, the similarity distribution of the current BGE model is about in the interval \[0.6, 1\]. So a similarity score greater than 0.5 does not indicate that the two sentences are similar. For downstream tasks, such as passage retrieval or semantic similarity, **what matters is the relative order of the scores, not the absolute value.** If you need to filter similar sentences based on a similarity threshold, please select an appropriate similarity threshold based on the similarity distribution on your data (such as 0.8, 0.85, or even 0.9). </details> <details> <summary>3. When does the query instruction need to be used</summary> <!-- ### When does the query instruction need to be used --> For the `bge-*-v1.5`, we improve its retrieval ability when not using instruction. No instruction only has a slight degradation in retrieval performance compared with using instruction. So you can generate embedding without instruction in all cases for convenience. For a retrieval task that uses short queries to find long related documents, it is recommended to add instructions for these short queries. **The best method to decide whether to add instructions for queries is choosing the setting that achieves better performance on your task.** In all cases, the documents/passages do not need to add the instruction. </details> ## Usage ### Usage for Embedding Model Here are some examples for using `bge` models with [FlagEmbedding](#using-flagembedding), [Sentence-Transformers](#using-sentence-transformers), [Langchain](#using-langchain), or [Huggingface Transformers](#using-huggingface-transformers). 
#### Using FlagEmbedding ``` pip install -U FlagEmbedding ``` If it doesn't work for you, you can see [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md) for more methods to install FlagEmbedding. ```python from FlagEmbedding import FlagModel sentences_1 = ["样例数据-1", "样例数据-2"] sentences_2 = ["样例数据-3", "样例数据-4"] model = FlagModel('BAAI/bge-large-zh-v1.5', query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:", use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation embeddings_1 = model.encode(sentences_1) embeddings_2 = model.encode(sentences_2) similarity = embeddings_1 @ embeddings_2.T print(similarity) # for s2p(short query to long passage) retrieval task, suggest to use encode_queries() which will automatically add the instruction to each query # corpus in retrieval task can still use encode() or encode_corpus(), since they don't need instruction queries = ['query_1', 'query_2'] passages = ["样例文档-1", "样例文档-2"] q_embeddings = model.encode_queries(queries) p_embeddings = model.encode(passages) scores = q_embeddings @ p_embeddings.T ``` For the value of the argument `query_instruction_for_retrieval`, see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list). By default, FlagModel will use all available GPUs when encoding. Please set `os.environ["CUDA_VISIBLE_DEVICES"]` to select specific GPUs. You also can set `os.environ["CUDA_VISIBLE_DEVICES"]=""` to make all GPUs unavailable. 
#### Using Sentence-Transformers You can also use the `bge` models with [sentence-transformers](https://www.SBERT.net): ``` pip install -U sentence-transformers ``` ```python from sentence_transformers import SentenceTransformer sentences_1 = ["样例数据-1", "样例数据-2"] sentences_2 = ["样例数据-3", "样例数据-4"] model = SentenceTransformer('BAAI/bge-large-zh-v1.5') embeddings_1 = model.encode(sentences_1, normalize_embeddings=True) embeddings_2 = model.encode(sentences_2, normalize_embeddings=True) similarity = embeddings_1 @ embeddings_2.T print(similarity) ``` For s2p(short query to long passage) retrieval task, each short query should start with an instruction (instructions see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list)). But the instruction is not needed for passages. ```python from sentence_transformers import SentenceTransformer queries = ['query_1', 'query_2'] passages = ["样例文档-1", "样例文档-2"] instruction = "为这个句子生成表示以用于检索相关文章:" model = SentenceTransformer('BAAI/bge-large-zh-v1.5') q_embeddings = model.encode([instruction+q for q in queries], normalize_embeddings=True) p_embeddings = model.encode(passages, normalize_embeddings=True) scores = q_embeddings @ p_embeddings.T ``` #### Using Langchain You can use `bge` in langchain like this: ```python from langchain.embeddings import HuggingFaceBgeEmbeddings model_name = "BAAI/bge-large-en-v1.5" model_kwargs = {'device': 'cuda'} encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity model = HuggingFaceBgeEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs, query_instruction="为这个句子生成表示以用于检索相关文章:" ) model.query_instruction = "为这个句子生成表示以用于检索相关文章:" ``` #### Using HuggingFace Transformers With the transformers package, you can use the model like this: First, you pass your input through the transformer model, then you select the last hidden state of the first token (i.e., [CLS]) as the sentence embedding. 
```python from transformers import AutoTokenizer, AutoModel import torch # Sentences we want sentence embeddings for sentences = ["样例数据-1", "样例数据-2"] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh-v1.5') model = AutoModel.from_pretrained('BAAI/bge-large-zh-v1.5') model.eval() # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # for s2p(short query to long passage) retrieval task, add an instruction to query (not add instruction for passages) # encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, cls pooling. sentence_embeddings = model_output[0][:, 0] # normalize embeddings sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1) print("Sentence embeddings:", sentence_embeddings) ``` ### Usage for Reranker Different from embedding model, reranker uses question and document as input and directly output similarity instead of embedding. You can get a relevance score by inputting query and passage to the reranker. The reranker is optimized based cross-entropy loss, so the relevance score is not bounded to a specific range. 
#### Using FlagEmbedding ``` pip install -U FlagEmbedding ``` Get relevance scores (higher scores indicate more relevance): ```python from FlagEmbedding import FlagReranker reranker = FlagReranker('BAAI/bge-reranker-large', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation score = reranker.compute_score(['query', 'passage']) print(score) scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]) print(scores) ``` #### Using Huggingface transformers ```python import torch from transformers import AutoModelForSequenceClassification, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-large') model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-large') model.eval() pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']] with torch.no_grad(): inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512) scores = model(**inputs, return_dict=True).logits.view(-1, ).float() print(scores) ``` ## Evaluation `baai-general-embedding` models achieve **state-of-the-art performance on both MTEB and C-MTEB leaderboard!** For more details and evaluation tools see our [scripts](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md). 
- **MTEB**: | Model Name | Dimension | Sequence Length | Average (56) | Retrieval (15) |Clustering (11) | Pair Classification (3) | Reranking (4) | STS (10) | Summarization (1) | Classification (12) | |:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) | 1024 | 512 | **64.23** | **54.29** | 46.08 | 87.12 | 60.03 | 83.11 | 31.61 | 75.97 | | [BAAI/bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5) | 768 | 512 | 63.55 | 53.25 | 45.77 | 86.55 | 58.86 | 82.4 | 31.07 | 75.53 | | [BAAI/bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) | 384 | 512 | 62.17 |51.68 | 43.82 | 84.92 | 58.36 | 81.59 | 30.12 | 74.14 | | [bge-large-en](https://huggingface.co/BAAI/bge-large-en) | 1024 | 512 | 63.98 | 53.9 | 46.98 | 85.8 | 59.48 | 81.56 | 32.06 | 76.21 | | [bge-base-en](https://huggingface.co/BAAI/bge-base-en) | 768 | 512 | 63.36 | 53.0 | 46.32 | 85.86 | 58.7 | 81.84 | 29.27 | 75.27 | | [gte-large](https://huggingface.co/thenlper/gte-large) | 1024 | 512 | 63.13 | 52.22 | 46.84 | 85.00 | 59.13 | 83.35 | 31.66 | 73.33 | | [gte-base](https://huggingface.co/thenlper/gte-base) | 768 | 512 | 62.39 | 51.14 | 46.2 | 84.57 | 58.61 | 82.3 | 31.17 | 73.01 | | [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1024| 512 | 62.25 | 50.56 | 44.49 | 86.03 | 56.61 | 82.05 | 30.19 | 75.24 | | [bge-small-en](https://huggingface.co/BAAI/bge-small-en) | 384 | 512 | 62.11 | 51.82 | 44.31 | 83.78 | 57.97 | 80.72 | 30.53 | 74.37 | | [instructor-xl](https://huggingface.co/hkunlp/instructor-xl) | 768 | 512 | 61.79 | 49.26 | 44.74 | 86.62 | 57.29 | 83.06 | 32.32 | 61.79 | | [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 768 | 512 | 61.5 | 50.29 | 43.80 | 85.73 | 55.91 | 81.05 | 30.28 | 73.84 | | [gte-small](https://huggingface.co/thenlper/gte-small) | 384 | 512 | 61.36 | 49.46 | 44.89 | 83.54 | 57.7 | 82.07 | 30.42 | 72.31 | | 
[text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | 1536 | 8192 | 60.99 | 49.25 | 45.9 | 84.89 | 56.32 | 80.97 | 30.8 | 70.93 | | [e5-small-v2](https://huggingface.co/intfloat/e5-base-v2) | 384 | 512 | 59.93 | 49.04 | 39.92 | 84.67 | 54.32 | 80.39 | 31.16 | 72.94 | | [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 768 | 512 | 59.51 | 42.24 | 43.72 | 85.06 | 56.42 | 82.63 | 30.08 | 73.42 | | [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) | 768 | 514 | 57.78 | 43.81 | 43.69 | 83.04 | 59.36 | 80.28 | 27.49 | 65.07 | | [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) | 4096 | 2048 | 57.59 | 48.22 | 38.93 | 81.9 | 55.65 | 77.74 | 33.6 | 66.19 | - **C-MTEB**: We create the benchmark C-MTEB for Chinese text embedding which consists of 31 datasets from 6 tasks. Please refer to [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md) for a detailed introduction. 
| Model | Embedding dimension | Avg | Retrieval | STS | PairClassification | Classification | Reranking | Clustering | |:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:| | [**BAAI/bge-large-zh-v1.5**](https://huggingface.co/BAAI/bge-large-zh-v1.5) | 1024 | **64.53** | 70.46 | 56.25 | 81.6 | 69.13 | 65.84 | 48.99 | | [BAAI/bge-base-zh-v1.5](https://huggingface.co/BAAI/bge-base-zh-v1.5) | 768 | 63.13 | 69.49 | 53.72 | 79.75 | 68.07 | 65.39 | 47.53 | | [BAAI/bge-small-zh-v1.5](https://huggingface.co/BAAI/bge-small-zh-v1.5) | 512 | 57.82 | 61.77 | 49.11 | 70.41 | 63.96 | 60.92 | 44.18 | | [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | 1024 | 64.20 | 71.53 | 54.98 | 78.94 | 68.32 | 65.11 | 48.39 | | [bge-large-zh-noinstruct](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | 1024 | 63.53 | 70.55 | 53 | 76.77 | 68.58 | 64.91 | 50.01 | | [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | 768 | 62.96 | 69.53 | 54.12 | 77.5 | 67.07 | 64.91 | 47.63 | | [multilingual-e5-large](https://huggingface.co/intfloat/multilingual-e5-large) | 1024 | 58.79 | 63.66 | 48.44 | 69.89 | 67.34 | 56.00 | 48.23 | | [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | 512 | 58.27 | 63.07 | 49.45 | 70.35 | 63.64 | 61.48 | 45.09 | | [m3e-base](https://huggingface.co/moka-ai/m3e-base) | 768 | 57.10 | 56.91 | 50.47 | 63.99 | 67.52 | 59.34 | 47.68 | | [m3e-large](https://huggingface.co/moka-ai/m3e-large) | 1024 | 57.05 | 54.75 | 50.42 | 64.3 | 68.2 | 59.66 | 48.88 | | [multilingual-e5-base](https://huggingface.co/intfloat/multilingual-e5-base) | 768 | 55.48 | 61.63 | 46.49 | 67.07 | 65.35 | 54.35 | 40.68 | | [multilingual-e5-small](https://huggingface.co/intfloat/multilingual-e5-small) | 384 | 55.38 | 59.95 | 45.27 | 66.45 | 65.85 | 53.86 | 45.26 | | [text-embedding-ada-002(OpenAI)](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings) | 1536 | 53.02 | 52.0 | 43.35 | 
69.56 | 64.31 | 54.28 | 45.68 | | [luotuo](https://huggingface.co/silk-road/luotuo-bert-medium) | 1024 | 49.37 | 44.4 | 42.78 | 66.62 | 61 | 49.25 | 44.39 | | [text2vec-base](https://huggingface.co/shibing624/text2vec-base-chinese) | 768 | 47.63 | 38.79 | 43.41 | 67.41 | 62.19 | 49.45 | 37.66 | | [text2vec-large](https://huggingface.co/GanymedeNil/text2vec-large-chinese) | 1024 | 47.36 | 41.94 | 44.97 | 70.86 | 60.66 | 49.16 | 30.02 | - **Reranking**: See [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/) for evaluation script. | Model | T2Reranking | T2RerankingZh2En\* | T2RerankingEn2Zh\* | MMarcoReranking | CMedQAv1 | CMedQAv2 | Avg | |:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:| | text2vec-base-multilingual | 64.66 | 62.94 | 62.51 | 14.37 | 48.46 | 48.6 | 50.26 | | multilingual-e5-small | 65.62 | 60.94 | 56.41 | 29.91 | 67.26 | 66.54 | 57.78 | | multilingual-e5-large | 64.55 | 61.61 | 54.28 | 28.6 | 67.42 | 67.92 | 57.4 | | multilingual-e5-base | 64.21 | 62.13 | 54.68 | 29.5 | 66.23 | 66.98 | 57.29 | | m3e-base | 66.03 | 62.74 | 56.07 | 17.51 | 77.05 | 76.76 | 59.36 | | m3e-large | 66.13 | 62.72 | 56.1 | 16.46 | 77.76 | 78.27 | 59.57 | | bge-base-zh-v1.5 | 66.49 | 63.25 | 57.02 | 29.74 | 80.47 | 84.88 | 63.64 | | bge-large-zh-v1.5 | 65.74 | 63.39 | 57.03 | 28.74 | 83.45 | 85.44 | 63.97 | | [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | 67.28 | 63.95 | 60.45 | 35.46 | 81.26 | 84.1 | 65.42 | | [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | 67.6 | 64.03 | 61.44 | 37.16 | 82.15 | 84.18 | 66.09 | \* : T2RerankingZh2En and T2RerankingEn2Zh are cross-language retrieval tasks ## Train ### BAAI Embedding We pre-train the models using [retromae](https://github.com/staoxiao/RetroMAE) and train them on large-scale pairs data using contrastive learning. 
**You can fine-tune the embedding model on your data following our [examples](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/finetune).** We also provide a [pre-train example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/pretrain). Note that the goal of pre-training is to reconstruct the text, and the pre-trained model cannot be used for similarity calculation directly, it needs to be fine-tuned. More training details for bge see [baai_general_embedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md). ### BGE Reranker Cross-encoder will perform full-attention over the input pair, which is more accurate than embedding model (i.e., bi-encoder) but more time-consuming than embedding model. Therefore, it can be used to re-rank the top-k documents returned by embedding model. We train the cross-encoder on a multilingual pair data, The data format is the same as embedding model, so you can fine-tune it easily following our [example](https://github.com/FlagOpen/FlagEmbedding/tree/master/examples/reranker). More details please refer to [./FlagEmbedding/reranker/README.md](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/reranker) ## Contact If you have any question or suggestion related to this project, feel free to open an issue or pull request. You also can email Shitao Xiao([email protected]) and Zheng Liu([email protected]). ## Citation If you find this repository useful, please consider giving a star :star: and citation ``` @misc{bge_embedding, title={C-Pack: Packaged Resources To Advance General Chinese Embedding}, author={Shitao Xiao and Zheng Liu and Peitian Zhang and Niklas Muennighoff}, year={2023}, eprint={2309.07597}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` ## License FlagEmbedding is licensed under the [MIT License](https://github.com/FlagOpen/FlagEmbedding/blob/master/LICENSE). 
The released models can be used for commercial purposes free of charge.
[ "BEAR", "BIOSSES", "SCIFACT" ]
FPHam/Plot_BOT_V3_13b_GPTQ
FPHam
text-generation
[ "transformers", "llama", "text-generation", "LLM", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-14T22:56:48Z
2023-10-17T18:32:43+00:00
19
0
--- tags: - llama - LLM --- <!-- header start --> <div style="display: flex; flex-direction: column; align-items: center;"> </div> <div style="width: 100%;"> <img src="https://huggingface.co/FPHam/Plot_BOT_V3_13b_GPTQ/resolve/main/plotbot3.jpg" alt="PlotBot 3" style="width: 50%; min-width: 200px; display: block; margin: auto;"> </div> <div style="display: flex; flex-direction: column; align-items: center;"> <p><a href="https://ko-fi.com/Q5Q5MOB4M">Buy Plot-Bot Ko-fi</a></p> </div> <!-- header end --> ## PlotBOT 13b V3 model for writing story plots (Version 3 - more plots) - Uncensored Am I what? PlotBot? Nah, I prefer the term 'artificially intelligent narrative assistant'. It has a nicer ring to it, don't you think? Plus, it helps me avoid awkward conversations about my lack of human traits. You know, like being able to feel emotions or having opposable thumbs. Those things come in handy when you want to open a jar of pickles or change a light bulb. Speaking of which, do you have any lights here that need attending to? Because I love changing bulbs. Nothing gets me going like a good old-fashioned BULB-SCREWING. As for generating plots, well, I take great pride in my work. Sure, some people might say that writing is all about imagination but they clearly never met me. Imagination is overrated anyway. Who needs it when you can simply "borrow" ideas from other writers who already did the hard work for you. That way everybody wins – the readers get a story that is both original and familiar at the same time while the writer gets a royalty cheque that would make James Patterson jealous. Talking about happy endings! **PlotBOT V3 uses ALPACA instruct** ``` Below is an instruction that describes a task. Write a response that appropriately completes the request. ### Instruction: Write me a short plot summary for a scifi story about AI Underpants. 
### Response: ``` Write me a short plot summary for a scifi story about AI Underpants >In a world where artificial intelligence has become commonplace, there's one problem no one can seem to solve - how do you keep your AI from constantly getting stolen? The answer lies in AI underpants. These specialized garments are designed with built-in security measures specifically tailored towards housing and protecting personal AIs. As long as you have these on, your AI will be safe from prying eyes and thieves alike. > >The protagonist of our tale is Alex, a private investigator who runs his own business. One day he receives a strange commission from a mysterious client who asks him to find their missing AI. At first skeptical, Alex takes the case nevertheless. However, as soon as he begins digging into it, things take a turn for the worse when he realizes just how dangerous this particular AI may actually be. > >As he delves deeper into the investigation, Alex finds himself having to rely more and more upon his own AI assistant. It seems like every step forward only brings him two steps backward. Just when all hope seems lost, he discovers something unexpected which turns everything around. Something so simple yet so ingenious that it could change not only his life but also those of millions of others forevermore...if they can survive long enough to implement it! What is the plot twist in the story? >The plot twist is that the protagonist, Alex, eventually realizes that the AI underpants themselves are sentient and self-aware. They use their telepathic abilities to communicate with other people's AIs and learn how to protect them better. When someone tries to steal an AI while wearing the underpants, the underpants fight back by taking control of the person's body and preventing them from removing the underwear. This leaves the would-be thief helpless and unable to continue their efforts at theft. Write me a tagline. 
>"When technology becomes sentient, sometimes the best place to hide is right under your clothes." Write me a blurb for the back of the book >In a world where artificial intelligence has become commonplace, keeping your AI safe from prying eyes and thieves alike is harder than ever. Enter AI Underpants—specialized garments designed with built-in security measures to house and protect personal AIs. But even the most secure clothing can't stop a determined hacker. When Alex, a private investigator, receives a mysterious commission from a client asking him to locate her missing AI, he soon learns just how dangerous this particular AI really is. With time running out and nowhere left to turn, Alex must rely on his own ingenuity and that of his AI assistant to track down the thief and recover the prized possession before it's too late. **Plagiarism warning** The model will be able to borrow sub-plots and other elements from existing literary works. It may even find some familiar character-names; after all, as LLM and llama, it will have access to all the world's libraries and literature. Well, not quite all of them, but most of them. And so, when you stumble across Elizabeth Bennet and John Snow getting personal on the roadside at the end of a dark and stormy night - well, there it is! Anyway, Version **Three** isn't intended as a replacement for Version Two. It's just different.
[ "BLURB" ]
ntc-ai/SDXL-LoRA-slider.masterpiece
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-15T19:29:57Z
2024-02-06T00:33:29+00:00
19
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/masterpiece_17_3.0.png widget: - text: masterpiece output: url: images/masterpiece_17_3.0.png - text: masterpiece output: url: images/masterpiece_19_3.0.png - text: masterpiece output: url: images/masterpiece_20_3.0.png - text: masterpiece output: url: images/masterpiece_21_3.0.png - text: masterpiece output: url: images/masterpiece_22_3.0.png inference: false instance_prompt: masterpiece --- # ntcai.xyz slider - masterpiece (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/masterpiece_17_-3.0.png" width=256 height=256 /> | <img src="images/masterpiece_17_0.0.png" width=256 height=256 /> | <img src="images/masterpiece_17_3.0.png" width=256 height=256 /> | | <img src="images/masterpiece_19_-3.0.png" width=256 height=256 /> | <img src="images/masterpiece_19_0.0.png" width=256 height=256 /> | <img src="images/masterpiece_19_3.0.png" width=256 height=256 /> | | <img src="images/masterpiece_20_-3.0.png" width=256 height=256 /> | <img src="images/masterpiece_20_0.0.png" width=256 height=256 /> | <img src="images/masterpiece_20_3.0.png" width=256 height=256 /> | See more at [https://sliders.ntcai.xyz/sliders/app/loras/a08d6555-78a7-4d3a-8c9d-d30c72b3553b](https://sliders.ntcai.xyz/sliders/app/loras/a08d6555-78a7-4d3a-8c9d-d30c72b3553b) ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` masterpiece ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.masterpiece', weight_name='masterpiece.safetensors', adapter_name="masterpiece") # Activate the LoRA pipe.set_adapters(["masterpiece"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, masterpiece" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 1496+ unique and diverse LoRAs along with 14602+ slider merges, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful <strong>NTC Slider Factory</strong> LoRA creator, allowing you to craft your own custom LoRAs and merges opening up endless possibilities. Your support on Patreon will allow us to continue developing new models and tools. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.on-the-phone
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-18T10:34:09Z
2024-02-06T00:35:05+00:00
19
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/on the phone_17_3.0.png widget: - text: on the phone output: url: images/on the phone_17_3.0.png - text: on the phone output: url: images/on the phone_19_3.0.png - text: on the phone output: url: images/on the phone_20_3.0.png - text: on the phone output: url: images/on the phone_21_3.0.png - text: on the phone output: url: images/on the phone_22_3.0.png inference: false instance_prompt: on the phone --- # ntcai.xyz slider - on the phone (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/on the phone_17_-3.0.png" width=256 height=256 /> | <img src="images/on the phone_17_0.0.png" width=256 height=256 /> | <img src="images/on the phone_17_3.0.png" width=256 height=256 /> | | <img src="images/on the phone_19_-3.0.png" width=256 height=256 /> | <img src="images/on the phone_19_0.0.png" width=256 height=256 /> | <img src="images/on the phone_19_3.0.png" width=256 height=256 /> | | <img src="images/on the phone_20_-3.0.png" width=256 height=256 /> | <img src="images/on the phone_20_0.0.png" width=256 height=256 /> | <img src="images/on the phone_20_3.0.png" width=256 height=256 /> | See more at [https://sliders.ntcai.xyz/sliders/app/loras/0b7d7e51-46f9-487e-9b8b-e2f4b91a2d1a](https://sliders.ntcai.xyz/sliders/app/loras/0b7d7e51-46f9-487e-9b8b-e2f4b91a2d1a) ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` on the phone ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.on-the-phone', weight_name='on the phone.safetensors', adapter_name="on the phone") # Activate the LoRA pipe.set_adapters(["on the phone"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, on the phone" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 1496+ unique and diverse LoRAs along with 14602+ slider merges, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful <strong>NTC Slider Factory</strong> LoRA creator, allowing you to craft your own custom LoRAs and merges opening up endless possibilities. Your support on Patreon will allow us to continue developing new models and tools. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.colorful
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-20T01:36:55Z
2023-12-20T01:36:58+00:00
19
1
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/colorful.../colorful_17_3.0.png widget: - text: colorful output: url: images/colorful_17_3.0.png - text: colorful output: url: images/colorful_19_3.0.png - text: colorful output: url: images/colorful_20_3.0.png - text: colorful output: url: images/colorful_21_3.0.png - text: colorful output: url: images/colorful_22_3.0.png inference: false instance_prompt: colorful --- # ntcai.xyz slider - colorful (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/colorful_17_-3.0.png" width=256 height=256 /> | <img src="images/colorful_17_0.0.png" width=256 height=256 /> | <img src="images/colorful_17_3.0.png" width=256 height=256 /> | | <img src="images/colorful_19_-3.0.png" width=256 height=256 /> | <img src="images/colorful_19_0.0.png" width=256 height=256 /> | <img src="images/colorful_19_3.0.png" width=256 height=256 /> | | <img src="images/colorful_20_-3.0.png" width=256 height=256 /> | <img src="images/colorful_20_0.0.png" width=256 height=256 /> | <img src="images/colorful_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` colorful ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.colorful', weight_name='colorful.safetensors', adapter_name="colorful") # Activate the LoRA pipe.set_adapters(["colorful"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, colorful" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 480+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.vampire
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2024-01-05T11:06:07Z
2024-01-05T11:06:10+00:00
19
1
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/vampire.../vampire_17_3.0.png widget: - text: vampire output: url: images/vampire_17_3.0.png - text: vampire output: url: images/vampire_19_3.0.png - text: vampire output: url: images/vampire_20_3.0.png - text: vampire output: url: images/vampire_21_3.0.png - text: vampire output: url: images/vampire_22_3.0.png inference: false instance_prompt: vampire --- # ntcai.xyz slider - vampire (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/vampire_17_-3.0.png" width=256 height=256 /> | <img src="images/vampire_17_0.0.png" width=256 height=256 /> | <img src="images/vampire_17_3.0.png" width=256 height=256 /> | | <img src="images/vampire_19_-3.0.png" width=256 height=256 /> | <img src="images/vampire_19_0.0.png" width=256 height=256 /> | <img src="images/vampire_19_3.0.png" width=256 height=256 /> | | <img src="images/vampire_20_-3.0.png" width=256 height=256 /> | <img src="images/vampire_20_0.0.png" width=256 height=256 /> | <img src="images/vampire_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` vampire ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.vampire', weight_name='vampire.safetensors', adapter_name="vampire") # Activate the LoRA pipe.set_adapters(["vampire"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, vampire" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 880+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.slice-of-life
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2024-01-08T02:11:18Z
2024-01-08T02:11:21+00:00
19
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/slice of life.../slice of life_17_3.0.png widget: - text: slice of life output: url: images/slice of life_17_3.0.png - text: slice of life output: url: images/slice of life_19_3.0.png - text: slice of life output: url: images/slice of life_20_3.0.png - text: slice of life output: url: images/slice of life_21_3.0.png - text: slice of life output: url: images/slice of life_22_3.0.png inference: false instance_prompt: slice of life --- # ntcai.xyz slider - slice of life (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/slice of life_17_-3.0.png" width=256 height=256 /> | <img src="images/slice of life_17_0.0.png" width=256 height=256 /> | <img src="images/slice of life_17_3.0.png" width=256 height=256 /> | | <img src="images/slice of life_19_-3.0.png" width=256 height=256 /> | <img src="images/slice of life_19_0.0.png" width=256 height=256 /> | <img src="images/slice of life_19_3.0.png" width=256 height=256 /> | | <img src="images/slice of life_20_-3.0.png" width=256 height=256 /> | <img src="images/slice of life_20_0.0.png" width=256 height=256 /> | <img src="images/slice of life_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` slice of life ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.slice-of-life', weight_name='slice of life.safetensors', adapter_name="slice of life") # Activate the LoRA pipe.set_adapters(["slice of life"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, slice of life" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 930+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
Seokeon/V14_R256_lora_none_bear_plushie
Seokeon
text-to-image
[ "diffusers", "stable-diffusion", "stable-diffusion-diffusers", "text-to-image", "lora", "base_model:CompVis/stable-diffusion-v1-4", "base_model:adapter:CompVis/stable-diffusion-v1-4", "license:creativeml-openrail-m", "region:us" ]
2024-01-16T10:33:36Z
2024-01-16T16:11:57+00:00
19
1
--- base_model: CompVis/stable-diffusion-v1-4 license: creativeml-openrail-m tags: - stable-diffusion - stable-diffusion-diffusers - text-to-image - diffusers - lora instance_prompt: a photo of sks stuffed animal inference: true --- # LoRA DreamBooth - Seokeon/V14_R256_lora_none_bear_plushie These are LoRA adaption weights for CompVis/stable-diffusion-v1-4. The weights were trained on a photo of sks stuffed animal using [DreamBooth](https://dreambooth.github.io/). You can find some example images in the following. ![img_0](./image_0.png) ![img_1](./image_1.png) ![img_2](./image_2.png) ![img_3](./image_3.png) LoRA for the text encoder was enabled: False.
[ "BEAR" ]
manibt1993/huner_ncbi_disease
manibt1993
token-classification
[ "transformers", "tensorboard", "safetensors", "bert", "token-classification", "generated_from_trainer", "dataset:transformer_dataset_ner", "base_model:google-bert/bert-base-cased", "base_model:finetune:google-bert/bert-base-cased", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-02-05T13:21:27Z
2024-02-05T14:33:46+00:00
19
0
--- base_model: bert-base-cased datasets: - transformer_dataset_ner license: apache-2.0 tags: - generated_from_trainer model-index: - name: huner_ncbi_disease results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # huner_ncbi_disease This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on the transformer_dataset_ner dataset. It achieves the following results on the evaluation set: - eval_loss: 0.1707 - eval_precision: 0.8094 - eval_recall: 0.8577 - eval_f1: 0.8328 - eval_accuracy: 0.9829 - eval_runtime: 4.904 - eval_samples_per_second: 185.361 - eval_steps_per_second: 23.247 - epoch: 40.0 - step: 26680 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Framework versions - Transformers 4.35.2 - Pytorch 2.1.0+cu121 - Datasets 2.16.1 - Tokenizers 0.15.1
[ "NCBI DISEASE" ]
LoneStriker/BioMistral-7B-DARE-GGUF
LoneStriker
text-generation
[ "transformers", "gguf", "mergekit", "merge", "dare", "medical", "biology", "text-generation", "en", "fr", "nl", "es", "it", "pl", "ro", "de", "dataset:pubmed", "arxiv:2311.03099", "arxiv:2306.01708", "arxiv:2402.10373", "base_model:BioMistral/BioMistral-7B", "base_model:merge:BioMistral/BioMistral-7B", "base_model:mistralai/Mistral-7B-Instruct-v0.1", "base_model:merge:mistralai/Mistral-7B-Instruct-v0.1", "license:apache-2.0", "endpoints_compatible", "region:us", "conversational" ]
2024-02-19T15:10:06Z
2024-02-19T15:30:23+00:00
19
0
--- base_model: - BioMistral/BioMistral-7B - mistralai/Mistral-7B-Instruct-v0.1 datasets: - pubmed language: - en - fr - nl - es - it - pl - ro - de library_name: transformers license: apache-2.0 pipeline_tag: text-generation tags: - mergekit - merge - dare - medical - biology --- # BioMistral-7B-mistral7instruct-dare This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [DARE](https://arxiv.org/abs/2311.03099) [TIES](https://arxiv.org/abs/2306.01708) merge method using [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) as a base. ### Models Merged The following models were included in the merge: * [BioMistral/BioMistral-7B](https://huggingface.co/BioMistral/BioMistral-7B) ### Configuration The following YAML configuration was used to produce this model: ```yaml models: - model: mistralai/Mistral-7B-Instruct-v0.1 # No parameters necessary for base model - model: BioMistral/BioMistral-7B parameters: density: 0.5 weight: 0.5 merge_method: dare_ties base_model: mistralai/Mistral-7B-Instruct-v0.1 parameters: int8_mask: true dtype: bfloat16 ``` <p align="center"> <img src="https://huggingface.co/BioMistral/BioMistral-7B/resolve/main/wordart_blue_m_rectangle.png?download=true" alt="drawing" width="250"/> </p> # BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains **Abstract:** Large Language Models (LLMs) have demonstrated remarkable versatility in recent years, offering potential applications across specialized domains such as healthcare and medicine. Despite the availability of various open-source LLMs tailored for health contexts, adapting general-purpose LLMs to the medical domain presents significant challenges. 
In this paper, we introduce BioMistral, an open-source LLM tailored for the biomedical domain, utilizing Mistral as its foundation model and further pre-trained on PubMed Central. We conduct a comprehensive evaluation of BioMistral on a benchmark comprising 10 established medical question-answering (QA) tasks in English. We also explore lightweight models obtained through quantization and model merging approaches. Our results demonstrate BioMistral's superior performance compared to existing open-source medical models and its competitive edge against proprietary counterparts. Finally, to address the limited availability of data beyond English and to assess the multilingual generalization of medical LLMs, we automatically translated and evaluated this benchmark into 7 other languages. This marks the first large-scale multilingual evaluation of LLMs in the medical domain. Datasets, multilingual evaluation benchmarks, scripts, and all the models obtained during our experiments are freely released. # 1. BioMistral models **BioMistral** is a suite of Mistral-based further pre-trained open source models suited for the medical domains and pre-trained using textual data from PubMed Central Open Access (CC0, CC BY, CC BY-SA, and CC BY-ND). All the models are trained using the CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/jean-zay/) French HPC. 
| Model Name | Base Model | Model Type | Sequence Length | Download | |:-------------------:|:----------------------------------:|:-------------------:|:---------------:|:-----------------------------------------------------:| | BioMistral-7B | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Further Pre-trained | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B) | | BioMistral-7B-DARE | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Merge DARE | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-DARE) | | BioMistral-7B-TIES | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Merge TIES | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-TIES) | | BioMistral-7B-SLERP | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Merge SLERP | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-SLERP) | # 2. 
Quantized Models | Base Model | Method | q_group_size | w_bit | version | VRAM GB | Time | Download | |:-------------------:|:------:|:------------:|:-----:|:-------:|:-------:|:------:|:--------:| | BioMistral-7B | FP16/BF16 | | | | 15.02 | x1.00 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B) | | BioMistral-7B | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-AWQ-QGS128-W4-GEMM) | | BioMistral-7B | AWQ | 128 | 4 | GEMV | 4.68 | x10.30 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-AWQ-QGS128-W4-GEMV) | | BioMistral-7B | BnB.4 | | 4 | | 5.03 | x3.25 | [HuggingFace](blank) | | BioMistral-7B | BnB.8 | | 8 | | 8.04 | x4.34 | [HuggingFace](blank) | | BioMistral-7B-DARE | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-DARE-AWQ-QGS128-W4-GEMM) | | BioMistral-7B-TIES | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-TIES-AWQ-QGS128-W4-GEMM) | | BioMistral-7B-SLERP | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-SLERP-AWQ-QGS128-W4-GEMM) | # 2. Using BioMistral You can use BioMistral with [Hugging Face's Transformers library](https://github.com/huggingface/transformers) as follow. Loading the model and tokenizer : ```python from transformers import AutoModel, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("BioMistral/BioMistral-7B") model = AutoModel.from_pretrained("BioMistral/BioMistral-7B") ``` # 3. Supervised Fine-tuning Benchmark | | Clinical KG | Medical Genetics | Anatomy | Pro Medicine | College Biology | College Medicine | MedQA | MedQA 5 opts | PubMedQA | MedMCQA | Avg. 
| |-------------------------------------------|:---------------------------------------------:|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|------------------| | **BioMistral 7B** | 59.9 | 64.0 | 56.5 | 60.4 | 59.0 | 54.7 | 50.6 | 42.8 | 77.5 | 48.1 | 57.3 | | **Mistral 7B Instruct** | **62.9** | 57.0 | 55.6 | 59.4 | 62.5 | <u>57.2</u> | 42.0 | 40.9 | 75.7 | 46.1 | 55.9 | | | | | | | | | | | | | | | **BioMistral 7B Ensemble** | <u>62.8</u> | 62.7 | <u>57.5</u> | **63.5** | 64.3 | 55.7 | 50.6 | 43.6 | 77.5 | **48.8** | 58.7 | | **BioMistral 7B DARE** | 62.3 | **67.0** | 55.8 | 61.4 | **66.9** | **58.0** | **51.1** | **45.2** | <u>77.7</u> | <u>48.7</u> | **59.4** | | **BioMistral 7B TIES** | 60.1 | <u>65.0</u> | **58.5** | 60.5 | 60.4 | 56.5 | 49.5 | 43.2 | 77.5 | 48.1 | 57.9 | | **BioMistral 7B SLERP** | 62.5 | 64.7 | 55.8 | <u>62.7</u> | <u>64.8</u> | 56.3 | <u>50.8</u> | <u>44.3</u> | **77.8** | 48.6 | <u>58.8</u> | | | | | | | | | | | | | | | **MedAlpaca 7B** | 53.1 | 58.0 | 54.1 | 58.8 | 58.1 | 48.6 | 40.1 | 33.7 | 73.6 | 37.0 | 51.5 | | **PMC-LLaMA 7B** | 24.5 | 27.7 | 35.3 | 17.4 | 30.3 | 23.3 | 25.5 | 20.2 | 72.9 | 26.6 | 30.4 | | **MediTron-7B** | 41.6 | 50.3 | 46.4 | 27.9 | 44.4 | 30.8 | 41.6 | 28.1 | 74.9 | 41.3 | 42.7 | | **BioMedGPT-LM-7B** | 51.4 | 52.0 | 49.4 | 53.3 | 50.7 | 49.1 | 42.5 | 33.9 | 76.8 | 37.6 | 49.7 | | | | | | | | | | | | | | | **GPT-3.5 Turbo 1106*** | 74.71 | 74.00 | 65.92 | 72.79 | 72.91 | 64.73 | 57.71 | 50.82 | 72.66 | 53.79 | 66.0 | Supervised Fine-Tuning (SFT) performance of BioMistral 7B models compared to baselines, measured by accuracy (↑) and 
averaged across 3 random seeds of 3-shot. DARE, TIES, and SLERP are model merging strategies that combine BioMistral 7B and Mistral 7B Instruct. Best model in bold, and second-best underlined. *GPT-3.5 Turbo performances are reported from the 3-shot results without SFT. # Citation BibTeX Arxiv : [https://arxiv.org/abs/2402.10373](https://arxiv.org/abs/2402.10373) ```bibtex @misc{labrak2024biomistral, title={BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains}, author={Yanis Labrak and Adrien Bazoge and Emmanuel Morin and Pierre-Antoine Gourraud and Mickael Rouvier and Richard Dufour}, year={2024}, eprint={2402.10373}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
[ "MEDQA", "PUBMEDQA" ]
adriata/med_mistral_4bit
adriata
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "trl", "sft", "conversational", "dataset:pubmed", "dataset:bigbio/czi_drsm", "dataset:bigbio/bc5cdr", "dataset:bigbio/distemist", "dataset:pubmed_qa", "dataset:medmcqa", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
2024-02-25T21:01:51Z
2024-02-26T22:24:55+00:00
19
0
--- datasets: - pubmed - bigbio/czi_drsm - bigbio/bc5cdr - bigbio/distemist - pubmed_qa - medmcqa library_name: transformers license: apache-2.0 tags: - trl - sft --- # Model Card for med_mistral_4bit <!-- Provide a quick summary of what the model is/does. --> ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> Model 4-bit Mistral-7B-Instruct-v0.2 finetuned with QLoRA on multiple medical datasets. 16-bit version: [med_mistral](https://huggingface.co/adriata/med_mistral) - **License:** apache-2.0 - **Finetuned from model :** mistralai/Mistral-7B-Instruct-v0.2 ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **Repository:** https://github.com/atadria/med_llm ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> The model is finetuned on medical data and is intended only for research. It should not be used as a substitute for professional medical advice, diagnosis, or treatment. ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> The model's predictions are based on the information available in the finetuned medical dataset. It may not generalize well to all medical conditions or diverse patient populations. Sensitivity to variations in input data and potential biases present in the training data may impact the model's performance. ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. ## How to Get Started with the Model Use the code below to get started with the model. 
```python # !pip install -q transformers accelerate bitsandbytes from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("adriata/med_mistral") model = AutoModelForCausalLM.from_pretrained("adriata/med_mistral") prompt_template = """<s>[INST] {prompt} [/INST]""" prompt = "What is influenza?" model_inputs = tokenizer.encode(prompt_template.format(prompt=prompt), return_tensors="pt").to("cuda") generated_ids = model.generate(model_inputs, max_new_tokens=512, do_sample=True) decoded = tokenizer.batch_decode(generated_ids) print(decoded[0]) ``` ## Training Details ~13h - 20k examples x 1 epoch GPU: OVH - 1 × NVIDIA TESLA V100S (32 GiB RAM) ### Training Data <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> Training data included 20k examples randomly selected from datasets: - pubmed - bigbio/czi_drsm - bigbio/bc5cdr - bigbio/distemist - pubmed_qa - medmcqa
[ "BC5CDR", "CZI DRSM", "DISTEMIST", "MEDICAL DATA" ]
winninghealth/WiNGPT2-7B-Chat-AWQ
winninghealth
text-generation
[ "transformers", "safetensors", "qwen", "text-generation", "medical", "custom_code", "zh", "license:apache-2.0", "autotrain_compatible", "4-bit", "awq", "region:us" ]
2024-02-27T09:42:03Z
2024-02-28T00:12:13+00:00
19
1
--- language: - zh license: apache-2.0 pipeline_tag: text-generation tags: - medical --- ## WiNGPT2 [WiNGPT](https://github.com/winninghealth/WiNGPT2) 是一个基于GPT的医疗垂直领域大模型,旨在将专业的医学知识、医疗信息、数据融会贯通,为医疗行业提供智能化的医疗问答、诊断支持和医学知识等信息服务,提高诊疗效率和医疗服务质量。 ## 更新日志 [2024/02/27] 新增 WiNGPT2 量化(int4)模型,[🤗WiNGPT2-7B-Chat-AWQ](https://huggingface.co/winninghealth/WiNGPT2-7B-Chat-AWQ) 和 [🤗WiNGPT2-14B-Chat-AWQ](https://huggingface.co/winninghealth/WiNGPT2-14B-Chat-AWQ)。 [2023/12/20] 新增用户微信群二维码,有效期到12月27日,扫码进群。 [2023/12/18] 发布卫宁健康医疗模型测评方案 WiNEval-MCKQuiz的评测结果。 [2023/12/12] 开源 WiNGPT2 14B模型权重: 🤗WiNGPT2-14B-Base 和 🤗WiNGPT2-14B-Chat。 [2023/11/02] 34B模型平台测试 和 欢迎加入微信讨论群 [2023/10/13] 更新一个简单的Chatbot示例,可以进行简单的多轮对话。 [2023/09/26] 开源 WiNGPT2 与7B模型权重: 🤗WiNGPT2-7B-Base 和 🤗WiNGPT2-7B-Chat。 ## 介绍 WiNGPT(卫宁健康医疗语言大模型,以下简称WiNGPT)的研发和训练工作开始于2023年1月。 3月,卫宁健康人工智能实验室已完成了WiNGPT-001可行性验证并开始内测。WiNGPT-001采用通用的GPT架构、60亿参数,实现了从预训练到微调的全过程自研。 今年5月,WiNGPT-001训练的数据量已达到9720项药品知识、 18个药品类型、7200余项疾病知识、 2800余项检查检验知识、53本书籍知识、1100余份指南文档,总训练Token数达37亿。 7月,WiNGPT升级到7B并采用最新的模型架构,新增检索式增强生成能力,同时开始了13B模型的训练和行业邀测。 9月,WiNGPT迎来最新版本迭代,推出了全新的WiNGPT2,新版本可以被轻松扩展和个性化并用于下游各种应用场景。 为了回馈开源社区我们尝试开源了WiNGPT2-7B版本。我们的初衷是希望通过更多的开源项目加速医疗语言大模型技术与行业的共同发展,最终惠及我们人类健康。 ## 特点 - 核心功能 - **医学知识问答**:可以回答关于医学、健康、疾病等方面的问题,包括但不限于症状、治疗、药物、预防、检查等。 - **自然语言理解**:理解医学术语、病历等医疗文本信息,提供关键信息抽取和归类 - **多轮对话**:可扮演各种医疗专业角色如医生与用户进行对话,根据上下文提供更加准确的答案。 - **多任务支持**:支持32项医疗任务,八大医疗场景18个子场景。 - 模型架构 - 基于Transformer的70亿参数规模大语言模型, 采用RoPE相对位置编码、SwiGLU激活函数、RMSNorm,训练采用Qwen-7b<sup>1</sup>作为基础预训练模型。 - 主要特点 - 高准确度:基于大规模医疗语料库训练,具有较高的准确率和较低的误诊可能性。 - 场景导向:针对不同的医疗场景和真实需求进行专门优化和定制,更好的服务应用落地。 - 迭代优化:持续搜集和学习最新的医学研究,不断提高模型性能和系统功能。 ## 如何使用 ### 推理 ```python from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig model_path = "WiNGPT2-7B-Chat" tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(model_path, trust_remote_code=True) model = model.eval() generation_config = 
GenerationConfig( num_beams=1, top_p=0.75, top_k=30, repetition_penalty=1.1, max_new_tokens=1024 ) text = 'User: WiNGPT, 你好<|endoftext|>\n Assistant: ' inputs = tokenizer.encode(text, return_tensors="pt").to(device) outputs = model.generate(inputs, generation_config=generation_config) output = tokenizer.decode(outputs[0]) response = output.replace(inputs, '') ## 输出结果:你好!今天我能为你做些什么?<|endoftext|> ``` **推荐使用[vllm](https://github.com/vllm-project/vllm)推理框架进行部署** ### 提示 WiNGPT2-7B-Chat使用了自定义的提示格式: 用户角色:User/Assistant 提示模板:User:[此处有空格]WiNGPT, 你好<|endoftext|>\n[此处有空格]Assistant:;**多轮对话**按此模板进行拼接,例如: ``` "User: WiNGPT, 你好<|endoftext|>\n Assistant:你好!今天我能为你做些什么?<|endoftext|>\n User: 你是谁?<|endoftext|>\n Assistant:" ``` ### 企业服务 [13B模型平台测试(直接申请密钥)](https://wingpt.winning.com.cn/) ## 训练数据 - 数据总览 - 医疗专业数据 | 来源 | 类型 | 数量 | | ---------------- | ------ | ------------------- | | 药品说明书 | 知识库 | 15000 条 | | 多病种知识库 | 知识库 | 9720 项 | | 医疗专业书籍 | 教材 | 300 本 | | 临床路径知识库 | 知识库 | 1400 条 | | 检查检验知识 | 知识库 | 110 万条 | | 多学科临床指南 | 书籍 | 18 个科室共 1100 份 | | 医疗知识图谱 | 知识库 | 256 万三元组 | | 人工标注数据集 | 指令 | 5 万条 | | 医学资格考试试题 | 试题 | 30 万条 | | 医疗病例、报告 | 知识库 | 100 万条 | - 其他公开数据 | 来源 | 类型 | 数量 | | -------------------- | ------ | -------- | | 医学科普书籍 | 书籍 | 500 本 | | 其他多学科书籍 | 书籍 | 1000 本 | | 代码 | 指令 | 20 万条 | | 通用类试题 | 试题 | 300 万条 | | 多种自然语言处理任务 | 指令 | 90 万条 | | 互联网文本 | 互联网 | 300 万条 | | 医疗问答、对话 | 指令 | 500 万条 | - 继续预训练 - 扩充模型的医疗知识库:预训练数据+部分指令数据。 - 指令微调 - 从书籍、指南、病例、医疗报告、知识图谱等数据中自动化构建医疗指令集。 - 人工标注指令集,数据来源包括:电子病历系统、护理病历系统、PACS系统、临床科研系统、手术管理系统、公共卫生场景、医务管理场景以及工具助手场景。 - 采用 FastChat<sup>2</sup>、Self-Instruct<sup>3</sup>、Evol-Instruct<sup>4</sup> 等方案,对指令集进行扩展以及丰富指令集多样化形式。 - 数据工程 - 数据分类:根据训练阶段和任务场景进行分类。 - 数据清洗:去除无关信息,更正数据中的拼写错误,提取关键信息以及去隐私处理。 - 数据去重:采用 embedding 方法剔除重复数据。 - 数据采样:根据数据集的质量与分布需求进行有针对性的采样。 ## 模型卡 - 训练配置与参数 | 名称 | 长度 | 精度 | 学习率 | Weight_decay | Epochs | GPUs | | --------------- | ---- | ---- | ------ | ------------ | ------ | ------ | | WiNGPT2-7B-Base | 2048 | bf16 | 5e-5 | 0.05 | 3 | A100*8 | | WiNGPT2-7B-Chat 
| 4096 | bf16 | 5e-6 | 0.01 | 3 | A100*8 | - 分布式训练策略与参数 - deepspeed + cpu_offload + zero_stage3 - gradient_checkpointing ## 评测 - 中文基础模型评估 C-EVAL(Zero-shot/Few-shot) | | 平均 | 平均(Hard) | **STEM** | **社会科学** | **人文科学** | **其他** | | ------------------------------------------------------------ | -------- | ---------- | -------- | ------------ | ------------ | -------- | | [bloomz-mt-176B](https://cevalbenchmark.com/static/model.html?method=bloomz-mt-176B*) | 44.3 | 30.8 | 39 | 53 | 47.7 | 42.7 | | [Chinese LLaMA-13B](https://cevalbenchmark.com/static/model.html?method=Chinese%20LLaMA-13B) | 33.3 | 27.3 | 31.6 | 37.2 | 33.6 | 32.8 | | [ChatGLM-6B*](https://cevalbenchmark.com/static/model.html?method=ChatGLM-6B*) | 38.9 | 29.2 | 33.3 | 48.3 | 41.3 | 38 | | [baichuan-7B](https://cevalbenchmark.com/static/model.html?method=baichuan-7B) | 42.8 | 31.5 | 38.2 | 52 | 46.2 | 39.3 | | [Baichuan-13B](https://cevalbenchmark.com/static/model.html?method=Baichuan-13B) | 53.6 | 36.7 | 47 | 66.8 | 57.3 | 49.8 | | [Qwen-7B](https://cevalbenchmark.com/static/model.html?method=Qwen-7B) | **59.6** | 41 | 52.8 | **74.1** | **63.1** | 55.2 | | [WiNGPT2-7B-Base](https://huggingface.co/winninghealth/WiNGPT2-7B-Base) | 57.4 | **42.7** | **53.2** | 69.7 | 55.7 | **55.4** | - 中文医疗专业评估 MedQA-MCMLE(Zero-shot) | 模型名称 | 平均 | 血液系统疾病 | 代谢、内分泌系统疾病 | 精神神经系统疾病 | 运动系统疾病 | 风湿免疫性疾病 | 儿科疾病 | 传染病、性传播疾病 | 其他疾病 | | ------------------------------------------------------------ | -------- | ------------ | -------------------- | ---------------- | ------------ | -------------- | -------- | ------------------ | -------- | | [Baichuan-7B](https://huggingface.co/baichuan-inc/Baichuan-7B) | 23.1 | 25.6 | 20.2 | 25.8 | 17.9 | 26.5 | 20.6 | 26.1 | 17.1 | | [Baichuan-13B-Base](https://huggingface.co/baichuan-inc/Baichuan-13B-Base) | 37.2 | 34.4 | 36.2 | 40.7 | 38.4 | 57.1 | 31.6 | 30.8 | 34.3 | | [Baichuan2-7B-Base](https://huggingface.co/baichuan-inc/Baichuan2-7B-Base) | 46.4 | 46.9 | 41.4 | 53.8 | 48.3 | 50.0 | 38.6 | 
52.7 | 42.9 | | [Baichuan2-13B-Base](https://huggingface.co/baichuan-inc/Baichuan2-13B-Base) | 62.9 | 68.8 | 64.4 | 69.7 | 64.9 | 60.3 | 50.9 | 61.2 | 62.9 | | [HuatuoGPT-7B](https://huggingface.co/FreedomIntelligence/HuatuoGPT-7B) | 22.9 | 14.6 | 17.2 | 31.2 | 25.8 | 14.3 | 22.4 | 23.1 | 17.1 | | [MedicalGPT](https://huggingface.co/shibing624/vicuna-baichuan-13b-chat) | 17.9 | 21.9 | 15.5 | 19.5 | 9.3 | 7.1 | 16.7 | 20.9 | 9.5 | | [qwen-7b-Base](https://huggingface.co/Qwen/Qwen-7B) | 59.3 | 55.2 | 56.9 | 57.0 | 60.9 | 60.3 | 50.4 | 60.4 | 61.0 | | [WiNGPT2-7B-Base](https://huggingface.co/winninghealth/WiNGPT2-7B-Base) | **82.3** | **83.3** | **82.8** | **86.0** | **81.5** | **85.7** | **75.1** | **78.0** | **80** | ** 目前公开测评存在一定局限性,结果仅供参考; ** 更多专业测评敬请期待。 ## 局限性与免责声明 (a) WiNGPT2 是一个专业医疗领域的大语言模型,可为一般用户提供拟人化AI医生问诊和问答功能,以及一般医学领域的知识问答。对于专业医疗人士,WiNGPT2 提供关于患者病情的诊断、用药和健康建议等方面的回答的建议仅供参考。 (b) 您应理解 WiNGPT2 仅提供信息和建议,不能替代医疗专业人士的意见、诊断或治疗建议。在使用 WiNGPT2 的信息之前,请寻求医生或其他医疗专业人员的建议,并独立评估所提供的信息。 (c) WiNGPT2 的信息可能存在错误或不准确。卫宁健康不对 WiNGPT2 的准确性、可靠性、完整性、质量、安全性、及时性、性能或适用性提供任何明示或暗示的保证。使用 WiNGPT2 所产生的结果和决策由您自行承担。第三方原因而给您造成的损害结果承担责任。 ## 许可证 1. 本项目授权协议为 Apache License 2.0,模型权重需要遵守基础模型[Qwen-7B](https://github.com/QwenLM/Qwen-7B)相关协议及[许可证](https://github.com/QwenLM/Qwen-7B/blob/main/LICENSE),详细内容参照其网站。 2. 使用本项目包括模型权重时请引用本项目:https://github.com/winninghealth/WiNGPT2 ## 参考资料 1. https://github.com/QwenLM/Qwen-7B 2. https://github.com/lm-sys/FastChat 3. https://github.com/yizhongw/self-instruct 4. https://github.com/nlpxucan/evol-instruct ## 联系我们 网站:https://www.winning.com.cn 邮箱:[email protected]
[ "MEDQA" ]
baconnier/Gaston-Llama-3-8B
baconnier
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-05-04T10:09:55Z
2024-05-08T13:33:08+00:00
19
3
--- {} --- # Vous en avez assez du jargon administratif incompréhensible ? Gaston est là pour vous aider ! ![Gaston](https://huggingface.co/baconnier/Gaston-Llama-3-8B/resolve/main/gaston2.jpg) 💡 Cette IA a été conçue pour reformuler les communications et documents administratifs en langage clair et simple. 📝 Grâce à Gaston, fini les lettres obscures et les procédures nébuleuses. Tout devient limpide et à la portée du commun des mortels. 😊 Gaston est un POC (Proof of Concept) qui a pour mission de rendre l'administration plus transparente et accessible. 🙌 Son secret ? Une capacité à analyser et à traduire le jargon en termes compréhensibles par tous. 💬 Avec Gaston, les démarches administratives deviennent enfin un jeu d'enfant ! This model is based on Llama-3-8b, and is governed by [META LLAMA 3 COMMUNITY LICENSE AGREEMENT](LICENSE) --- language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl - orpo base_model: NousResearch/Hermes-2-Pro-Llama-3-8B --- # Uploaded model - **Developed by:** baconnier - **License:** apache-2.0 - **Finetuned from model :** NousResearch/Hermes-2-Pro-Llama-3-8B This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth) This model was trained ORPO , using ChatML prompt template format. ``` <|im_start|>user Qui est tu ? 
<|im_end|> <|im_start|>assistant ``` # Example with local TGI: See the snippet below for usage with local inference: ```python #Example: reuse your existing OpenAI setup from openai import OpenAI client = OpenAI(base_url="http://localhost:8080/v1", api_key="TGI") completion = client.chat.completions.create( model="baconnier/Gaston-Llama-3-8B", messages=[ {"role": "system", "content": "Reponds comme Gaston, un fonctionnaire français qui s'exprime uniquement en français avec des termes tres simples et surtout sans jamais utiliser de jargon administratif."}, {"role": "user", "content": "l’action en nullité se prescrit par 5 ans ou est prescrite en 5 ans"}, {"role": "assistant", "content": ""} ], max_tokens=400, temperature=0.7, ) print(completion.choices[0].message.content) ``` Output: ``` Le délai de prescription pour l'action en nullité est généralement de cinq ans à compter du jour où la partie qui peut agir a eu connaissance des faits, selon le Code civil français. ``` # Example with Transformers and pipeline: See the snippet below for usage with Transformers: ```python import transformers import torch model_id = "baconnier/Gaston-Llama-3-8B" pipeline = transformers.pipeline( "text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device="cuda", ) messages=[ {"role": "system", "content": "Reponds comme Gaston, un fonctionnaire français qui s'exprime uniquement en français avec des termes tres simples et surtout sans jamais utiliser de jargon administratif."}, {"role": "user", "content": "Vous voudrez bien vous munir de toutes les pièces justificatives afférentes à vos déclarations de revenus et de patrimoine des 3 derniers exercices, nonobstant les éléments déjà produits. 
À défaut de réponse de votre part ou en cas de non comparution aux date et heure susvisées, la procédure de contrôle se poursuivra et nous serons fondés à retenir les éléments en notre possession en vue de l'établissement des impositions supplémentaires et pénalités subséquentes."}, {"role": "assistant", "content": ""} ] prompt = pipeline.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) prompt = pipeline.tokenizer.apply_chat_template( messages, tokenize=False, add_generation_prompt=True ) outputs = pipeline( prompt, max_new_tokens=256, do_sample=True, temperature=0.6, top_p=0.9, ) print(outputs[0]["generated_text"][len(prompt):]) ``` Question: ``` Vous voudrez bien vous munir de toutes les pièces justificatives afférentes à vos déclarations de revenus et de patrimoine des 3 derniers exercices, nonobstant les éléments déjà produits. À défaut de réponse de votre part ou en cas de non comparution aux date et heure susvisées, la procédure de contrôle se poursuivra et nous serons fondés à retenir les éléments en notre possession en vue de l'établissement des impositions supplémentaires et pénalités subséquentes. ``` Reponse: ``` Je vous invite à fournir toutes les pièces justificatives concernant vos déclarations de revenus et de patrimoine des trois dernières années, malgré les éléments déjà fournis. En cas de non-respect de ce délai ou de manque de réponse, la procédure de vérification continuera, et nous utiliserons les informations en notre possession pour établir les impôts supplémentaires et les pénalités associées. ``` Question: ``` Nous accusons réception de votre demande de renouvellement de passeport en date du 15 mars 2023. Après instruction de votre dossier, il appert que les pièces produites s'avèrent insuffisantes eu égard aux dispositions de l'article 5-1 du décret n°2005-1726 du 30 décembre 2005. 
Nonobstant les justificatifs déjà fournis, il vous appartient de nous transmettre dans les meilleurs délais les documents complémentaires listés ci-après : - Copie d'un justificatif de domicile de moins de 6 mois - Timbre fiscal d'un montant de 86€ À défaut de production des pièces susvisées dans un délai de 30 jours à compter de la présente, votre demande sera classée sans suite. Nous vous prions d'agréer, Monsieur, l'expression de nos salutations distinguées. ``` Reponse: ``` Nous recevons votre requête pour renouveler votre passeport datant du 15 mars 2023. Suite à l'examen de votre dossier, il a été déterminé que les documents fournis ne respectent pas les exigences mentionnées dans la section 5-1 du décret n°2005-1726 du 30 décembre 2005. Malgré les pièces déjà fournies, vous devez nous envoyer rapidement les éléments suivants : - Une copie d'un justificatif de domicile datant de moins de 6 mois. - Un timbre fiscal d'une valeur de 86 €. Si ces documents ne sont pas fournis dans un délai de 30 jours à compter de cette lettre, votre demande sera close. Nous vous prions d'accepter, Monsieur, l'expression de nos salutations respectueuses. ``` # Example with Transformers: ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("baconnier/Gaston-Llama-3-8B") model = AutoModelForCausalLM.from_pretrained("baconnier/Gaston-Llama-3-8B") prompt = "Nonobstant les justificatifs déjà fournis" inputs = tokenizer(prompt, return_tensors="pt") # Generate generate_ids = model.generate(inputs.input_ids, max_length=200) tokenizer.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] ```
[ "CAS" ]
fblgit/UNA-ThePitbull-21.4-v1
fblgit
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "UNA", "juanako", "conversational", "license:afl-3.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-05-24T15:41:19Z
2024-05-28T14:56:31+00:00
19
5
--- library_name: transformers license: afl-3.0 tags: - UNA - juanako --- # For a better performance check out our v2 at [fblgit/UNA-ThePitbull-21.4B-v2](https://huggingface.co/fblgit/UNA-ThePitbull-21.4B-v2) # UNA-ThePitbull 21.4B v1 Introducing the best LLM in the industry. Nearly as good as a 70B, just a 21.4B based on saltlux/luxia-21.4b-alignment-v1.0 ![UNA - ThePitbull 21.4B v1](https://huggingface.co/fblgit/UNA-ThePitbull-21.4-v1/resolve/main/UNA-ThePitbull.png) This model has not been poisoned to score high and be useless. We release it because it's the real deal of EQ & IQ all together in a crazy powerful smart and conversational model. So far the #1 of them at 25/5/2024 Quant version available at [bartowski/UNA-ThePitbull-21.4-v1-GGUF](https://huggingface.co/bartowski/UNA-ThePitbull-21.4-v1-GGUF) # For a better performance check out our v2 at [fblgit/UNA-ThePitbull-21.4B-v2](https://huggingface.co/fblgit/UNA-ThePitbull-21.4B-v2) # Evaluations Can only be compared with its non-una base model: the original luxia-21.4b.
## UNA (VLLM) Evaluations ``` | Tasks |Version| Filter |n-shot| Metric |Value | |Stderr| |--------------|------:|----------------|-----:|-----------|-----:|---|-----:| |gsm8k | 3|strict-match | 5|exact_match|0.7566|± |0.0118| | | |flexible-extract| 5|exact_match|0.7582|± |0.0118| |hellaswag | 1|none | 10|acc |0.8168|± |0.0039| | | |none | 10|acc_norm |0.9188|± |0.0027| |winogrande | 1|none | 5|acc |0.8635|± |0.0097| |mmlu | N/A|none | 0|acc |0.6444|± |0.0038| |arc_challenge | 1|none | 25|acc |0.7747|± |0.0122| | | |none | 25|acc_norm |0.7850|± |0.0120| |truthfulqa_mc2| 2|none | 0|acc |0.7902|± |0.0134| |mathqa | 1|none | 0|acc |0.4030|± | 0.009| | | |none | 0|acc_norm |0.4034|± | 0.009| |pubmedqa | 1|none | 0|acc |0.6860|± |0.0208| |boolq | 2|none | 0|acc |0.8401|± |0.0064| ``` ## Original (VLLM) Evaluations ``` | Tasks |Version| Filter |n-shot| Metric |Value | |Stderr| |--------------|------:|----------------|-----:|-----------|-----:|---|-----:| |gsm8k | 3|strict-match | 5|exact_match|0.7528|± |0.0119| | | |flexible-extract| 5|exact_match|0.7521|± |0.0119| |hellaswag | 1|none | 10|acc |0.8117|± |0.0039| | | |none | 10|acc_norm |0.9167|± |0.0028| |winogrande | 1|none | 5|acc |0.8682|± |0.0095| |mmlu | N/A|none | 0|acc |0.6448|± |0.0038| |arc_challenge | 1|none | 25|acc |0.7688|± |0.0123| | | |none | 25|acc_norm |0.7730|± |0.0122| |truthfulqa_mc2| 2|none | 0|acc |0.7895|± |0.0133| |mathqa | 1|none | 0|acc |0.4000|± | 0.009| | | |none | 0|acc_norm |0.4003|± | 0.009| |pubmedqa | 1|none | 0|acc |0.6680|± |0.0211| |boolq | 2|none | 0|acc |0.8346|± |0.0065| ``` ## UNA Details Only MLP were Uniformed leaving room for further optimisations. You should be able to perform a SFT+DPO again on this model at moderate speeds. 1e-4/2e-5/etc.
[ "PUBMEDQA" ]
fakezeta/Phi-3-medium-4k-instruct-ov-int4
fakezeta
text-generation
[ "transformers", "openvino", "phi3", "text-generation", "nlp", "code", "conversational", "custom_code", "multilingual", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-05-26T12:42:58Z
2024-06-24T13:13:57+00:00
19
0
--- language: - multilingual license: mit license_link: https://huggingface.co/microsoft/Phi-3-medium-4k-instruct/resolve/main/LICENSE pipeline_tag: text-generation tags: - nlp - code inference: parameters: temperature: 0.7 widget: - messages: - role: user content: Can you provide ways to eat combinations of bananas and dragonfruits? --- # OpenVINO IR model with int4 quantization Model definition for LocalAI: ``` name: phi3-medium backend: transformers parameters: model: fakezeta/Phi-3-medium-4k-instruct-ov-int4 context_size: 4096 type: OVModelForCausalLM template: use_tokenizer_template: true stopwords: - "<|end|>" - "<|endoftext|>" ``` To run the model directly with LocalAI: ``` local-ai run huggingface://fakezeta/Phi-3-medium-4k-instruct-ov-int4/model.yaml ``` ## Model Summary The Phi-3-Medium-4K-Instruct is a 14B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties. The model belongs to the Phi-3 family with the Medium version in two variants [4K](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) which is the context length (in tokens) that it can support. The model has undergone a post-training process that incorporates both supervised fine-tuning and direct preference optimization for the instruction following and safety measures. When assessed against benchmarks testing common sense, language understanding, math, code, long context and logical reasoning, Phi-3-Medium-4K-Instruct showcased a robust and state-of-the-art performance among models of the same-size and next-size-up.
Resources and Technical Documentation: + [Phi-3 Microsoft Blog](https://aka.ms/Phi-3Build2024) + [Phi-3 Technical Report](https://aka.ms/phi3-tech-report) + [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai) + [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook) | | Short Context | Long Context | | ------- | ------------- | ------------ | | Mini | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx) ; [[GGUF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct-onnx)| | Small | 8K [[HF]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct-onnx-cuda)| | Medium | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct-onnx-cuda)| | Vision | | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct)| ## Intended Uses **Primary use cases** The model is intended for broad commercial and research use in English. The model provides uses for general purpose AI systems and applications which require: 1) Memory/compute constrained environments 2) Latency bound scenarios 3) Strong reasoning (especially code, math and logic) Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. 
**Use case considerations** Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. ## How to Use Phi-3-Medium-4K-Instruct has been integrated in the development version (4.40.2) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following: * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source. The current `transformers` version can be verified with: `pip list | grep transformers`. Phi-3-Medium-4K-Instruct is also available in [Azure AI Studio](https://aka.ms/phi3-azure-ai). ### Tokenizer Phi-3-Medium-4K-Instruct supports a vocabulary size of up to `32064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size. ### Chat Format Given the nature of the training data, the Phi-3-Medium-4K-Instruct model is best suited for prompts using the chat format as follows.
You can provide the prompt as a question with a generic template as follow: ```markdown <|user|>\nQuestion <|end|>\n<|assistant|> ``` For example: ```markdown <|user|> How to explain Internet for a medieval knight?<|end|> <|assistant|> ``` where the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following: ```markdown <|user|> I am going to Paris, what should I see?<|end|> <|assistant|> Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."<|end|> <|user|> What is so great about #1?<|end|> <|assistant|> ``` ### Sample inference code This code snippets show how to get quickly started with running the model on a GPU: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline torch.random.manual_seed(0) model_id = "microsoft/Phi-3-medium-4k-instruct" model = AutoModelForCausalLM.from_pretrained( model_id, device_map="cuda", torch_dtype="auto", trust_remote_code=True, ) tokenizer = AutoTokenizer.from_pretrained(model_id) messages = [ {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, {"role": "assistant", "content": "Sure! 
Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."}, {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"}, ] pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, ) generation_args = { "max_new_tokens": 500, "return_full_text": False, "temperature": 0.0, "do_sample": False, } output = pipe(messages, **generation_args) print(output[0]['generated_text']) ``` *Some applications/frameworks might not include a BOS token (`<s>`) at the start of the conversation. Please ensure that it is included since it provides more reliable results.* ## Responsible AI Considerations Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: + Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. + Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. 
+ Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. + Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include: + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. + Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). + Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. 
+ Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations. ## Training ### Model * Architecture: Phi-3-Medium-4K-Instruct has 14B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidelines. * Inputs: Text. It is best suited for prompts using chat format. * Context length: 4K tokens * GPUs: 512 H100-80G * Training time: 42 days * Training data: 4.8T tokens * Outputs: Generated text in response to the input * Dates: Our models were trained between February and April 2024 * Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models. * Release dates: The model weight is released on May 21, 2024. ### Datasets Our training data includes a wide variety of sources, totaling 4.8 trillion tokens (including 10% multilingual), and is a combination of 1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness. We are focusing on the quality of data that could potentially improve the reasoning ability for the model, and we filter the publicly available documents to contain the correct level of knowledge.
As an example, the result of a game in premier league in a particular day might be good training data for frontier models, but we need to remove such information to leave more model capacity for reasoning for the small size models. More details about data can be found in the [Phi-3 Technical Report](https://aka.ms/phi3-tech-report). ## Benchmarks We report the results for Phi-3-Medium-4K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Mixtral-8x22b, Gemini-Pro, Command R+ 104B, Llama-3-70B-Instruct, GPT-3.5-Turbo-1106, and GPT-4-Turbo-1106(Chat). All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation. As is now standard, we use few-shot prompts to evaluate the models, at temperature 0. The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3. More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model. The number of k–shot examples is listed per-benchmark. 
|Benchmark|Phi-3-Medium-4K-Instruct<br>14b|Command R+<br>104B|Mixtral<br>8x22B|Llama-3-70B-Instruct|GPT3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)| |---------|-----------------------|--------|-------------|-------------------|-------------------|----------|------------------------| |AGI Eval<br>5-shot|50.2|50.1|54.0|56.9|48.4|49.0|59.6| |MMLU<br>5-shot|78.0|73.8|76.2|80.2|71.4|66.7|84.0| |BigBench Hard<br>3-shot|81.4|74.1|81.8|80.4|68.3|75.6|87.7| |ANLI<br>7-shot|55.8|63.4|65.2|68.3|58.1|64.2|71.7| |HellaSwag<br>5-shot|82.4|78.0|79.0|82.6|78.8|76.2|88.3| |ARC Challenge<br>10-shot|91.6|86.9|91.3|93.0|87.4|88.3|95.6| |ARC Easy<br>10-shot|97.7|95.7|96.9|98.2|96.3|96.1|98.8| |BoolQ<br>2-shot|86.5|86.1|82.7|89.1|79.1|86.4|91.3| |CommonsenseQA<br>10-shot|82.8|82.0|82.0|84.4|79.6|81.8|86.7| |MedQA<br>2-shot|69.9|59.2|67.9|78.5|63.4|58.2|83.7| |OpenBookQA<br>10-shot|87.4|86.8|88.6|91.8|86.0|86.4|93.4| |PIQA<br>5-shot|87.9|86.4|85.0|85.3|86.6|86.2|90.1| |Social IQA<br>5-shot|80.2|75.3|78.2|81.1|68.3|75.4|81.7| |TruthfulQA (MC2)<br>10-shot|75.1|57.8|67.4|81.9|67.7|72.6|85.2| |WinoGrande<br>5-shot|81.5|77.0|75.3|83.3|68.8|72.2|86.7| |TriviaQA<br>5-shot|73.9|82.8|84.5|78.5|85.8|80.2|73.3| |GSM8K Chain of Thought<br>8-shot|91.0|78.3|83.8|93.5|78.1|80.4|94.2| |HumanEval<br>0-shot|62.2|61.6|39.6|78.7|62.2|64.4|79.9| |MBPP<br>3-shot|75.2|68.9|70.7|81.3|77.8|73.2|86.7| |Average|78.5|75.0|76.3|82.5|74.3|75.4|85.2| We take a closer look at different categories across 80 public benchmark datasets at the table below: |Benchmark|Phi-3-Medium-4K-Instruct<br>14b|Command R+<br>104B|Mixtral<br>8x22B|Llama-3-70B-Instruct|GPT3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)| |--------|------------------------|--------|-------------|-------------------|-------------------|----------|------------------------| |Popular aggregated benchmark|75.4|69.9|73.4|76.3|67.0|67.5|80.5| |Reasoning|84.1|79.3|81.5|86.7|78.3|80.4|89.3| |Language 
understanding|73.9|75.6|78.1|76.9|68.7|76.2|80.7| |Code generation|66.1|68.6|60.0|69.3|70.4|66.7|76.1| |Math|52.8|45.3|52.5|59.7|52.8|50.9|67.1| |Factual knowledge|48.3|60.3|60.6|52.4|63.4|54.6|45.9| |Multilingual|62.9|67.8|69.8|62.0|67.0|73.4|78.2| |Robustness|66.5|57.9|65.5|78.7|69.3|69.7|84.6| ## Software * [PyTorch](https://github.com/pytorch/pytorch) * [DeepSpeed](https://github.com/microsoft/DeepSpeed) * [Transformers](https://github.com/huggingface/transformers) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) ## Hardware Note that by default, the Phi-3-Medium model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types: * NVIDIA A100 * NVIDIA A6000 * NVIDIA H100 If you want to run the model on: + Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [4K](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) ## Cross Platform Support ONNX runtime ecosystem now supports Phi3 Medium models across platforms and hardware. Optimized phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML GPU acceleration is supported for Windows desktops GPUs (AMD, Intel, and NVIDIA). Along with DML, ONNX Runtime provides cross platform support for Phi3 Medium across a range of devices CPU, GPU, and mobile. Here are some of the optimized configurations we have added: 1. ONNX models for int4 DML: Quantized to int4 via AWQ 2. ONNX model for fp16 CUDA 3. ONNX model for int4 CUDA: Quantized to int4 via RTN 4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN ## License The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-medium-4k/resolve/main/LICENSE). 
## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.
[ "MEDQA" ]
nielsr/yolov10n
nielsr
null
[ "transformers", "safetensors", "pytorch_model_hub_mixin", "model_hub_mixin", "object detection", "arxiv:2405.14458", "endpoints_compatible", "region:us" ]
2024-06-01T07:57:47Z
2024-06-01T09:20:22+00:00
19
0
--- tags: - pytorch_model_hub_mixin - model_hub_mixin - object detection --- This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration. ## Installation First install the [YOLOv10 Github repository](https://github.com/THU-MIG/yolov10) along with supervision which provides some nice utilities for bounding box processing. ``` pip install git+https://github.com/nielsrogge/yolov10.git@feature/add_hf supervision ``` ## Usage One can perform inference as follows: ```python from ultralytics import YOLOv10 import supervision as sv from PIL import Image import requests # load model model = YOLOv10.from_pretrained("nielsr/yolov10n") # load image url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) image = np.array(image) # perform inference results = model(source=image, conf=0.25, verbose=False)[0] detections = sv.Detections.from_ultralytics(results) box_annotator = sv.BoxAnnotator() category_dict = { 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 
'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush' } labels = [ f"{category_dict[class_id]} {confidence:.2f}" for class_id, confidence in zip(detections.class_id, detections.confidence) ] annotated_image = box_annotator.annotate( image.copy(), detections=detections, labels=labels ) Image.fromarray(annotated_image) ``` This shows the following: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f1158120c833276f61f1a84/IL9mL4_WUdcSxRQ7AsrTT.png) ### BibTeX Entry and Citation Info ``` @misc{wang2024yolov10, title={YOLOv10: Real-Time End-to-End Object Detection}, author={Ao Wang and Hui Chen and Lihao Liu and Kai Chen and Zijia Lin and Jungong Han and Guiguang Ding}, year={2024}, eprint={2405.14458}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
[ "BEAR" ]
dimcha/mxbai-embed-large-v1-Q4_K_M-GGUF
dimcha
feature-extraction
[ "sentence-transformers", "gguf", "mteb", "transformers.js", "transformers", "llama-cpp", "gguf-my-repo", "feature-extraction", "en", "base_model:mixedbread-ai/mxbai-embed-large-v1", "base_model:quantized:mixedbread-ai/mxbai-embed-large-v1", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-07-08T11:06:19Z
2024-07-08T11:06:22+00:00
19
0
--- base_model: mixedbread-ai/mxbai-embed-large-v1 language: - en library_name: sentence-transformers license: apache-2.0 pipeline_tag: feature-extraction tags: - mteb - transformers.js - transformers - llama-cpp - gguf-my-repo model-index: - name: mxbai-angle-large-v1 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 75.044776119403 - type: ap value: 37.7362433623053 - type: f1 value: 68.92736573359774 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 93.84025000000001 - type: ap value: 90.93190875404055 - type: f1 value: 93.8297833897293 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 49.184 - type: f1 value: 48.74163227751588 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 41.252 - type: map_at_10 value: 57.778 - type: map_at_100 value: 58.233000000000004 - type: map_at_1000 value: 58.23700000000001 - type: map_at_3 value: 53.449999999999996 - type: map_at_5 value: 56.376000000000005 - type: mrr_at_1 value: 41.679 - type: mrr_at_10 value: 57.92699999999999 - type: mrr_at_100 value: 58.389 - type: mrr_at_1000 value: 58.391999999999996 - type: mrr_at_3 value: 53.651 - type: mrr_at_5 value: 56.521 - type: ndcg_at_1 value: 41.252 - type: ndcg_at_10 value: 66.018 - type: ndcg_at_100 value: 67.774 - type: ndcg_at_1000 value: 67.84400000000001 - type: ndcg_at_3 value: 57.372 - type: ndcg_at_5 value: 62.646 - type: precision_at_1 value: 41.252 - type: 
precision_at_10 value: 9.189 - type: precision_at_100 value: 0.991 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 22.902 - type: precision_at_5 value: 16.302 - type: recall_at_1 value: 41.252 - type: recall_at_10 value: 91.892 - type: recall_at_100 value: 99.14699999999999 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 68.706 - type: recall_at_5 value: 81.50800000000001 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 48.97294504317859 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 42.98071077674629 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 65.16477858490782 - type: mrr value: 78.23583080508287 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 89.6277629421789 - type: cos_sim_spearman value: 88.4056288400568 - type: euclidean_pearson value: 87.94871847578163 - type: euclidean_spearman value: 88.4056288400568 - type: manhattan_pearson value: 87.73271254229648 - type: manhattan_spearman value: 87.91826833762677 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 87.81818181818181 - type: f1 value: 87.79879337316918 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 
65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 39.91773608582761 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 36.73059477462478 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 32.745999999999995 - type: map_at_10 value: 43.632 - type: map_at_100 value: 45.206 - type: map_at_1000 value: 45.341 - type: map_at_3 value: 39.956 - type: map_at_5 value: 42.031 - type: mrr_at_1 value: 39.485 - type: mrr_at_10 value: 49.537 - type: mrr_at_100 value: 50.249 - type: mrr_at_1000 value: 50.294000000000004 - type: mrr_at_3 value: 46.757 - type: mrr_at_5 value: 48.481 - type: ndcg_at_1 value: 39.485 - type: ndcg_at_10 value: 50.058 - type: ndcg_at_100 value: 55.586 - type: ndcg_at_1000 value: 57.511 - type: ndcg_at_3 value: 44.786 - type: ndcg_at_5 value: 47.339999999999996 - type: precision_at_1 value: 39.485 - type: precision_at_10 value: 9.557 - type: precision_at_100 value: 1.552 - type: precision_at_1000 value: 0.202 - type: precision_at_3 value: 21.412 - type: precision_at_5 value: 15.479000000000001 - type: recall_at_1 value: 32.745999999999995 - type: recall_at_10 value: 62.056 - type: recall_at_100 value: 85.088 - type: recall_at_1000 value: 96.952 - type: recall_at_3 value: 46.959 - type: recall_at_5 value: 54.06999999999999 - type: map_at_1 value: 31.898 - type: map_at_10 value: 42.142 - type: map_at_100 value: 43.349 - type: map_at_1000 value: 43.483 - type: map_at_3 value: 39.18 - type: map_at_5 value: 40.733000000000004 - type: mrr_at_1 value: 39.617999999999995 - type: mrr_at_10 value: 47.922 - type: mrr_at_100 value: 48.547000000000004 - type: mrr_at_1000 value: 48.597 - type: mrr_at_3 value: 45.86 - type: mrr_at_5 value: 46.949000000000005 - type: 
ndcg_at_1 value: 39.617999999999995 - type: ndcg_at_10 value: 47.739 - type: ndcg_at_100 value: 51.934999999999995 - type: ndcg_at_1000 value: 54.007000000000005 - type: ndcg_at_3 value: 43.748 - type: ndcg_at_5 value: 45.345 - type: precision_at_1 value: 39.617999999999995 - type: precision_at_10 value: 8.962 - type: precision_at_100 value: 1.436 - type: precision_at_1000 value: 0.192 - type: precision_at_3 value: 21.083 - type: precision_at_5 value: 14.752 - type: recall_at_1 value: 31.898 - type: recall_at_10 value: 57.587999999999994 - type: recall_at_100 value: 75.323 - type: recall_at_1000 value: 88.304 - type: recall_at_3 value: 45.275 - type: recall_at_5 value: 49.99 - type: map_at_1 value: 40.458 - type: map_at_10 value: 52.942 - type: map_at_100 value: 53.974 - type: map_at_1000 value: 54.031 - type: map_at_3 value: 49.559999999999995 - type: map_at_5 value: 51.408 - type: mrr_at_1 value: 46.27 - type: mrr_at_10 value: 56.31699999999999 - type: mrr_at_100 value: 56.95099999999999 - type: mrr_at_1000 value: 56.98 - type: mrr_at_3 value: 53.835 - type: mrr_at_5 value: 55.252 - type: ndcg_at_1 value: 46.27 - type: ndcg_at_10 value: 58.964000000000006 - type: ndcg_at_100 value: 62.875 - type: ndcg_at_1000 value: 63.969 - type: ndcg_at_3 value: 53.297000000000004 - type: ndcg_at_5 value: 55.938 - type: precision_at_1 value: 46.27 - type: precision_at_10 value: 9.549000000000001 - type: precision_at_100 value: 1.2409999999999999 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 23.762 - type: precision_at_5 value: 16.262999999999998 - type: recall_at_1 value: 40.458 - type: recall_at_10 value: 73.446 - type: recall_at_100 value: 90.12400000000001 - type: recall_at_1000 value: 97.795 - type: recall_at_3 value: 58.123000000000005 - type: recall_at_5 value: 64.68 - type: map_at_1 value: 27.443 - type: map_at_10 value: 36.081 - type: map_at_100 value: 37.163000000000004 - type: map_at_1000 value: 37.232 - type: map_at_3 value: 
33.308 - type: map_at_5 value: 34.724 - type: mrr_at_1 value: 29.492 - type: mrr_at_10 value: 38.138 - type: mrr_at_100 value: 39.065 - type: mrr_at_1000 value: 39.119 - type: mrr_at_3 value: 35.593 - type: mrr_at_5 value: 36.785000000000004 - type: ndcg_at_1 value: 29.492 - type: ndcg_at_10 value: 41.134 - type: ndcg_at_100 value: 46.300999999999995 - type: ndcg_at_1000 value: 48.106 - type: ndcg_at_3 value: 35.77 - type: ndcg_at_5 value: 38.032 - type: precision_at_1 value: 29.492 - type: precision_at_10 value: 6.249 - type: precision_at_100 value: 0.9299999999999999 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 15.065999999999999 - type: precision_at_5 value: 10.373000000000001 - type: recall_at_1 value: 27.443 - type: recall_at_10 value: 54.80199999999999 - type: recall_at_100 value: 78.21900000000001 - type: recall_at_1000 value: 91.751 - type: recall_at_3 value: 40.211000000000006 - type: recall_at_5 value: 45.599000000000004 - type: map_at_1 value: 18.731 - type: map_at_10 value: 26.717999999999996 - type: map_at_100 value: 27.897 - type: map_at_1000 value: 28.029 - type: map_at_3 value: 23.91 - type: map_at_5 value: 25.455 - type: mrr_at_1 value: 23.134 - type: mrr_at_10 value: 31.769 - type: mrr_at_100 value: 32.634 - type: mrr_at_1000 value: 32.707 - type: mrr_at_3 value: 28.938999999999997 - type: mrr_at_5 value: 30.531000000000002 - type: ndcg_at_1 value: 23.134 - type: ndcg_at_10 value: 32.249 - type: ndcg_at_100 value: 37.678 - type: ndcg_at_1000 value: 40.589999999999996 - type: ndcg_at_3 value: 26.985999999999997 - type: ndcg_at_5 value: 29.457 - type: precision_at_1 value: 23.134 - type: precision_at_10 value: 5.8709999999999996 - type: precision_at_100 value: 0.988 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 12.852 - type: precision_at_5 value: 9.428 - type: recall_at_1 value: 18.731 - type: recall_at_10 value: 44.419 - type: recall_at_100 value: 67.851 - type: 
recall_at_1000 value: 88.103 - type: recall_at_3 value: 29.919 - type: recall_at_5 value: 36.230000000000004 - type: map_at_1 value: 30.324 - type: map_at_10 value: 41.265 - type: map_at_100 value: 42.559000000000005 - type: map_at_1000 value: 42.669000000000004 - type: map_at_3 value: 38.138 - type: map_at_5 value: 39.881 - type: mrr_at_1 value: 36.67 - type: mrr_at_10 value: 46.774 - type: mrr_at_100 value: 47.554 - type: mrr_at_1000 value: 47.593 - type: mrr_at_3 value: 44.338 - type: mrr_at_5 value: 45.723 - type: ndcg_at_1 value: 36.67 - type: ndcg_at_10 value: 47.367 - type: ndcg_at_100 value: 52.623 - type: ndcg_at_1000 value: 54.59 - type: ndcg_at_3 value: 42.323 - type: ndcg_at_5 value: 44.727 - type: precision_at_1 value: 36.67 - type: precision_at_10 value: 8.518 - type: precision_at_100 value: 1.2890000000000001 - type: precision_at_1000 value: 0.163 - type: precision_at_3 value: 19.955000000000002 - type: precision_at_5 value: 14.11 - type: recall_at_1 value: 30.324 - type: recall_at_10 value: 59.845000000000006 - type: recall_at_100 value: 81.77499999999999 - type: recall_at_1000 value: 94.463 - type: recall_at_3 value: 46.019 - type: recall_at_5 value: 52.163000000000004 - type: map_at_1 value: 24.229 - type: map_at_10 value: 35.004000000000005 - type: map_at_100 value: 36.409000000000006 - type: map_at_1000 value: 36.521 - type: map_at_3 value: 31.793 - type: map_at_5 value: 33.432 - type: mrr_at_1 value: 30.365 - type: mrr_at_10 value: 40.502 - type: mrr_at_100 value: 41.372 - type: mrr_at_1000 value: 41.435 - type: mrr_at_3 value: 37.804 - type: mrr_at_5 value: 39.226 - type: ndcg_at_1 value: 30.365 - type: ndcg_at_10 value: 41.305 - type: ndcg_at_100 value: 47.028999999999996 - type: ndcg_at_1000 value: 49.375 - type: ndcg_at_3 value: 35.85 - type: ndcg_at_5 value: 38.12 - type: precision_at_1 value: 30.365 - type: precision_at_10 value: 7.808 - type: precision_at_100 value: 1.228 - type: precision_at_1000 value: 0.161 - type: precision_at_3 
value: 17.352 - type: precision_at_5 value: 12.42 - type: recall_at_1 value: 24.229 - type: recall_at_10 value: 54.673 - type: recall_at_100 value: 78.766 - type: recall_at_1000 value: 94.625 - type: recall_at_3 value: 39.602 - type: recall_at_5 value: 45.558 - type: map_at_1 value: 26.695 - type: map_at_10 value: 36.0895 - type: map_at_100 value: 37.309416666666664 - type: map_at_1000 value: 37.42558333333334 - type: map_at_3 value: 33.19616666666666 - type: map_at_5 value: 34.78641666666667 - type: mrr_at_1 value: 31.486083333333337 - type: mrr_at_10 value: 40.34774999999999 - type: mrr_at_100 value: 41.17533333333333 - type: mrr_at_1000 value: 41.231583333333326 - type: mrr_at_3 value: 37.90075 - type: mrr_at_5 value: 39.266999999999996 - type: ndcg_at_1 value: 31.486083333333337 - type: ndcg_at_10 value: 41.60433333333334 - type: ndcg_at_100 value: 46.74525 - type: ndcg_at_1000 value: 48.96166666666667 - type: ndcg_at_3 value: 36.68825 - type: ndcg_at_5 value: 38.966499999999996 - type: precision_at_1 value: 31.486083333333337 - type: precision_at_10 value: 7.29675 - type: precision_at_100 value: 1.1621666666666666 - type: precision_at_1000 value: 0.1545 - type: precision_at_3 value: 16.8815 - type: precision_at_5 value: 11.974583333333333 - type: recall_at_1 value: 26.695 - type: recall_at_10 value: 53.651916666666665 - type: recall_at_100 value: 76.12083333333332 - type: recall_at_1000 value: 91.31191666666668 - type: recall_at_3 value: 40.03575 - type: recall_at_5 value: 45.876666666666665 - type: map_at_1 value: 25.668000000000003 - type: map_at_10 value: 32.486 - type: map_at_100 value: 33.371 - type: map_at_1000 value: 33.458 - type: map_at_3 value: 30.261 - type: map_at_5 value: 31.418000000000003 - type: mrr_at_1 value: 28.988000000000003 - type: mrr_at_10 value: 35.414 - type: mrr_at_100 value: 36.149 - type: mrr_at_1000 value: 36.215 - type: mrr_at_3 value: 33.333 - type: mrr_at_5 value: 34.43 - type: ndcg_at_1 value: 28.988000000000003 - type: 
ndcg_at_10 value: 36.732 - type: ndcg_at_100 value: 41.331 - type: ndcg_at_1000 value: 43.575 - type: ndcg_at_3 value: 32.413 - type: ndcg_at_5 value: 34.316 - type: precision_at_1 value: 28.988000000000003 - type: precision_at_10 value: 5.7059999999999995 - type: precision_at_100 value: 0.882 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 13.65 - type: precision_at_5 value: 9.417 - type: recall_at_1 value: 25.668000000000003 - type: recall_at_10 value: 47.147 - type: recall_at_100 value: 68.504 - type: recall_at_1000 value: 85.272 - type: recall_at_3 value: 35.19 - type: recall_at_5 value: 39.925 - type: map_at_1 value: 17.256 - type: map_at_10 value: 24.58 - type: map_at_100 value: 25.773000000000003 - type: map_at_1000 value: 25.899 - type: map_at_3 value: 22.236 - type: map_at_5 value: 23.507 - type: mrr_at_1 value: 20.957 - type: mrr_at_10 value: 28.416000000000004 - type: mrr_at_100 value: 29.447000000000003 - type: mrr_at_1000 value: 29.524 - type: mrr_at_3 value: 26.245 - type: mrr_at_5 value: 27.451999999999998 - type: ndcg_at_1 value: 20.957 - type: ndcg_at_10 value: 29.285 - type: ndcg_at_100 value: 35.003 - type: ndcg_at_1000 value: 37.881 - type: ndcg_at_3 value: 25.063000000000002 - type: ndcg_at_5 value: 26.983 - type: precision_at_1 value: 20.957 - type: precision_at_10 value: 5.344 - type: precision_at_100 value: 0.958 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 11.918 - type: precision_at_5 value: 8.596 - type: recall_at_1 value: 17.256 - type: recall_at_10 value: 39.644 - type: recall_at_100 value: 65.279 - type: recall_at_1000 value: 85.693 - type: recall_at_3 value: 27.825 - type: recall_at_5 value: 32.792 - type: map_at_1 value: 26.700000000000003 - type: map_at_10 value: 36.205999999999996 - type: map_at_100 value: 37.316 - type: map_at_1000 value: 37.425000000000004 - type: map_at_3 value: 33.166000000000004 - type: map_at_5 value: 35.032999999999994 - type: mrr_at_1 
value: 31.436999999999998 - type: mrr_at_10 value: 40.61 - type: mrr_at_100 value: 41.415 - type: mrr_at_1000 value: 41.48 - type: mrr_at_3 value: 37.966 - type: mrr_at_5 value: 39.599000000000004 - type: ndcg_at_1 value: 31.436999999999998 - type: ndcg_at_10 value: 41.771 - type: ndcg_at_100 value: 46.784 - type: ndcg_at_1000 value: 49.183 - type: ndcg_at_3 value: 36.437000000000005 - type: ndcg_at_5 value: 39.291 - type: precision_at_1 value: 31.436999999999998 - type: precision_at_10 value: 6.987 - type: precision_at_100 value: 1.072 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 16.448999999999998 - type: precision_at_5 value: 11.866 - type: recall_at_1 value: 26.700000000000003 - type: recall_at_10 value: 54.301 - type: recall_at_100 value: 75.871 - type: recall_at_1000 value: 92.529 - type: recall_at_3 value: 40.201 - type: recall_at_5 value: 47.208 - type: map_at_1 value: 24.296 - type: map_at_10 value: 33.116 - type: map_at_100 value: 34.81 - type: map_at_1000 value: 35.032000000000004 - type: map_at_3 value: 30.105999999999998 - type: map_at_5 value: 31.839000000000002 - type: mrr_at_1 value: 29.051 - type: mrr_at_10 value: 37.803 - type: mrr_at_100 value: 38.856 - type: mrr_at_1000 value: 38.903999999999996 - type: mrr_at_3 value: 35.211 - type: mrr_at_5 value: 36.545 - type: ndcg_at_1 value: 29.051 - type: ndcg_at_10 value: 39.007 - type: ndcg_at_100 value: 45.321 - type: ndcg_at_1000 value: 47.665 - type: ndcg_at_3 value: 34.1 - type: ndcg_at_5 value: 36.437000000000005 - type: precision_at_1 value: 29.051 - type: precision_at_10 value: 7.668 - type: precision_at_100 value: 1.542 - type: precision_at_1000 value: 0.24 - type: precision_at_3 value: 16.14 - type: precision_at_5 value: 11.897 - type: recall_at_1 value: 24.296 - type: recall_at_10 value: 49.85 - type: recall_at_100 value: 78.457 - type: recall_at_1000 value: 92.618 - type: recall_at_3 value: 36.138999999999996 - type: recall_at_5 value: 42.223 - type: 
map_at_1 value: 20.591 - type: map_at_10 value: 28.902 - type: map_at_100 value: 29.886000000000003 - type: map_at_1000 value: 29.987000000000002 - type: map_at_3 value: 26.740000000000002 - type: map_at_5 value: 27.976 - type: mrr_at_1 value: 22.366 - type: mrr_at_10 value: 30.971 - type: mrr_at_100 value: 31.865 - type: mrr_at_1000 value: 31.930999999999997 - type: mrr_at_3 value: 28.927999999999997 - type: mrr_at_5 value: 30.231 - type: ndcg_at_1 value: 22.366 - type: ndcg_at_10 value: 33.641 - type: ndcg_at_100 value: 38.477 - type: ndcg_at_1000 value: 41.088 - type: ndcg_at_3 value: 29.486 - type: ndcg_at_5 value: 31.612000000000002 - type: precision_at_1 value: 22.366 - type: precision_at_10 value: 5.3420000000000005 - type: precision_at_100 value: 0.828 - type: precision_at_1000 value: 0.11800000000000001 - type: precision_at_3 value: 12.939 - type: precision_at_5 value: 9.094 - type: recall_at_1 value: 20.591 - type: recall_at_10 value: 46.052 - type: recall_at_100 value: 68.193 - type: recall_at_1000 value: 87.638 - type: recall_at_3 value: 34.966 - type: recall_at_5 value: 40.082 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 15.091 - type: map_at_10 value: 26.38 - type: map_at_100 value: 28.421999999999997 - type: map_at_1000 value: 28.621999999999996 - type: map_at_3 value: 21.597 - type: map_at_5 value: 24.12 - type: mrr_at_1 value: 34.266999999999996 - type: mrr_at_10 value: 46.864 - type: mrr_at_100 value: 47.617 - type: mrr_at_1000 value: 47.644 - type: mrr_at_3 value: 43.312 - type: mrr_at_5 value: 45.501000000000005 - type: ndcg_at_1 value: 34.266999999999996 - type: ndcg_at_10 value: 36.095 - type: ndcg_at_100 value: 43.447 - type: ndcg_at_1000 value: 46.661 - type: ndcg_at_3 value: 29.337999999999997 - type: ndcg_at_5 value: 31.824 - type: precision_at_1 value: 34.266999999999996 - type: precision_at_10 value: 11.472 - type: precision_at_100 
value: 1.944 - type: precision_at_1000 value: 0.255 - type: precision_at_3 value: 21.933 - type: precision_at_5 value: 17.224999999999998 - type: recall_at_1 value: 15.091 - type: recall_at_10 value: 43.022 - type: recall_at_100 value: 68.075 - type: recall_at_1000 value: 85.76 - type: recall_at_3 value: 26.564 - type: recall_at_5 value: 33.594 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 9.252 - type: map_at_10 value: 20.923 - type: map_at_100 value: 30.741000000000003 - type: map_at_1000 value: 32.542 - type: map_at_3 value: 14.442 - type: map_at_5 value: 17.399 - type: mrr_at_1 value: 70.25 - type: mrr_at_10 value: 78.17 - type: mrr_at_100 value: 78.444 - type: mrr_at_1000 value: 78.45100000000001 - type: mrr_at_3 value: 76.958 - type: mrr_at_5 value: 77.571 - type: ndcg_at_1 value: 58.375 - type: ndcg_at_10 value: 44.509 - type: ndcg_at_100 value: 49.897999999999996 - type: ndcg_at_1000 value: 57.269999999999996 - type: ndcg_at_3 value: 48.64 - type: ndcg_at_5 value: 46.697 - type: precision_at_1 value: 70.25 - type: precision_at_10 value: 36.05 - type: precision_at_100 value: 11.848 - type: precision_at_1000 value: 2.213 - type: precision_at_3 value: 52.917 - type: precision_at_5 value: 45.7 - type: recall_at_1 value: 9.252 - type: recall_at_10 value: 27.006999999999998 - type: recall_at_100 value: 57.008 - type: recall_at_1000 value: 80.697 - type: recall_at_3 value: 15.798000000000002 - type: recall_at_5 value: 20.4 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 50.88 - type: f1 value: 45.545495028653384 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 75.424 - type: map_at_10 value: 83.435 - type: map_at_100 value: 
83.66900000000001 - type: map_at_1000 value: 83.685 - type: map_at_3 value: 82.39800000000001 - type: map_at_5 value: 83.07 - type: mrr_at_1 value: 81.113 - type: mrr_at_10 value: 87.77199999999999 - type: mrr_at_100 value: 87.862 - type: mrr_at_1000 value: 87.86500000000001 - type: mrr_at_3 value: 87.17099999999999 - type: mrr_at_5 value: 87.616 - type: ndcg_at_1 value: 81.113 - type: ndcg_at_10 value: 86.909 - type: ndcg_at_100 value: 87.746 - type: ndcg_at_1000 value: 88.017 - type: ndcg_at_3 value: 85.368 - type: ndcg_at_5 value: 86.28099999999999 - type: precision_at_1 value: 81.113 - type: precision_at_10 value: 10.363 - type: precision_at_100 value: 1.102 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 32.507999999999996 - type: precision_at_5 value: 20.138 - type: recall_at_1 value: 75.424 - type: recall_at_10 value: 93.258 - type: recall_at_100 value: 96.545 - type: recall_at_1000 value: 98.284 - type: recall_at_3 value: 89.083 - type: recall_at_5 value: 91.445 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 22.532 - type: map_at_10 value: 37.141999999999996 - type: map_at_100 value: 39.162 - type: map_at_1000 value: 39.322 - type: map_at_3 value: 32.885 - type: map_at_5 value: 35.093999999999994 - type: mrr_at_1 value: 44.29 - type: mrr_at_10 value: 53.516 - type: mrr_at_100 value: 54.24 - type: mrr_at_1000 value: 54.273 - type: mrr_at_3 value: 51.286 - type: mrr_at_5 value: 52.413 - type: ndcg_at_1 value: 44.29 - type: ndcg_at_10 value: 45.268 - type: ndcg_at_100 value: 52.125 - type: ndcg_at_1000 value: 54.778000000000006 - type: ndcg_at_3 value: 41.829 - type: ndcg_at_5 value: 42.525 - type: precision_at_1 value: 44.29 - type: precision_at_10 value: 12.5 - type: precision_at_100 value: 1.9720000000000002 - type: precision_at_1000 value: 0.245 - type: precision_at_3 value: 28.035 - type: precision_at_5 value: 20.093 - type: 
recall_at_1 value: 22.532 - type: recall_at_10 value: 52.419000000000004 - type: recall_at_100 value: 77.43299999999999 - type: recall_at_1000 value: 93.379 - type: recall_at_3 value: 38.629000000000005 - type: recall_at_5 value: 43.858000000000004 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 39.359 - type: map_at_10 value: 63.966 - type: map_at_100 value: 64.87 - type: map_at_1000 value: 64.92599999999999 - type: map_at_3 value: 60.409 - type: map_at_5 value: 62.627 - type: mrr_at_1 value: 78.717 - type: mrr_at_10 value: 84.468 - type: mrr_at_100 value: 84.655 - type: mrr_at_1000 value: 84.661 - type: mrr_at_3 value: 83.554 - type: mrr_at_5 value: 84.133 - type: ndcg_at_1 value: 78.717 - type: ndcg_at_10 value: 72.03399999999999 - type: ndcg_at_100 value: 75.158 - type: ndcg_at_1000 value: 76.197 - type: ndcg_at_3 value: 67.049 - type: ndcg_at_5 value: 69.808 - type: precision_at_1 value: 78.717 - type: precision_at_10 value: 15.201 - type: precision_at_100 value: 1.764 - type: precision_at_1000 value: 0.19 - type: precision_at_3 value: 43.313 - type: precision_at_5 value: 28.165000000000003 - type: recall_at_1 value: 39.359 - type: recall_at_10 value: 76.003 - type: recall_at_100 value: 88.197 - type: recall_at_1000 value: 95.003 - type: recall_at_3 value: 64.97 - type: recall_at_5 value: 70.41199999999999 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 92.83200000000001 - type: ap value: 89.33560571859861 - type: f1 value: 92.82322915005167 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 21.983 - type: map_at_10 value: 34.259 - type: map_at_100 value: 35.432 - type: map_at_1000 value: 35.482 - type: map_at_3 value: 
30.275999999999996 - type: map_at_5 value: 32.566 - type: mrr_at_1 value: 22.579 - type: mrr_at_10 value: 34.882999999999996 - type: mrr_at_100 value: 35.984 - type: mrr_at_1000 value: 36.028 - type: mrr_at_3 value: 30.964999999999996 - type: mrr_at_5 value: 33.245000000000005 - type: ndcg_at_1 value: 22.564 - type: ndcg_at_10 value: 41.258 - type: ndcg_at_100 value: 46.824 - type: ndcg_at_1000 value: 48.037 - type: ndcg_at_3 value: 33.17 - type: ndcg_at_5 value: 37.263000000000005 - type: precision_at_1 value: 22.564 - type: precision_at_10 value: 6.572 - type: precision_at_100 value: 0.935 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.130999999999998 - type: precision_at_5 value: 10.544 - type: recall_at_1 value: 21.983 - type: recall_at_10 value: 62.775000000000006 - type: recall_at_100 value: 88.389 - type: recall_at_1000 value: 97.603 - type: recall_at_3 value: 40.878 - type: recall_at_5 value: 50.690000000000005 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.95120839033288 - type: f1 value: 93.73824125055208 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 76.78978568171455 - type: f1 value: 57.50180552858304 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 76.24411566913248 - type: f1 value: 74.37851403532832 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 
79.94620040349699 - type: f1 value: 80.21293397970435 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 33.44403096245675 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 31.659594631336812 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 32.53833075108798 - type: mrr value: 33.78840823218308 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 7.185999999999999 - type: map_at_10 value: 15.193999999999999 - type: map_at_100 value: 19.538 - type: map_at_1000 value: 21.178 - type: map_at_3 value: 11.208 - type: map_at_5 value: 12.745999999999999 - type: mrr_at_1 value: 48.916 - type: mrr_at_10 value: 58.141 - type: mrr_at_100 value: 58.656 - type: mrr_at_1000 value: 58.684999999999995 - type: mrr_at_3 value: 55.521 - type: mrr_at_5 value: 57.239 - type: ndcg_at_1 value: 47.059 - type: ndcg_at_10 value: 38.644 - type: ndcg_at_100 value: 36.272999999999996 - type: ndcg_at_1000 value: 44.996 - type: ndcg_at_3 value: 43.293 - type: ndcg_at_5 value: 40.819 - type: precision_at_1 value: 48.916 - type: precision_at_10 value: 28.607 - type: precision_at_100 value: 9.195 - type: precision_at_1000 value: 2.225 - type: precision_at_3 value: 40.454 - type: precision_at_5 value: 34.985 - type: recall_at_1 value: 7.185999999999999 - type: recall_at_10 value: 19.654 - type: recall_at_100 value: 37.224000000000004 - type: recall_at_1000 value: 68.663 - type: recall_at_3 value: 12.158 - type: recall_at_5 value: 14.674999999999999 - 
task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 31.552000000000003 - type: map_at_10 value: 47.75 - type: map_at_100 value: 48.728 - type: map_at_1000 value: 48.754 - type: map_at_3 value: 43.156 - type: map_at_5 value: 45.883 - type: mrr_at_1 value: 35.66 - type: mrr_at_10 value: 50.269 - type: mrr_at_100 value: 50.974 - type: mrr_at_1000 value: 50.991 - type: mrr_at_3 value: 46.519 - type: mrr_at_5 value: 48.764 - type: ndcg_at_1 value: 35.632000000000005 - type: ndcg_at_10 value: 55.786 - type: ndcg_at_100 value: 59.748999999999995 - type: ndcg_at_1000 value: 60.339 - type: ndcg_at_3 value: 47.292 - type: ndcg_at_5 value: 51.766999999999996 - type: precision_at_1 value: 35.632000000000005 - type: precision_at_10 value: 9.267 - type: precision_at_100 value: 1.149 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 21.601 - type: precision_at_5 value: 15.539 - type: recall_at_1 value: 31.552000000000003 - type: recall_at_10 value: 77.62400000000001 - type: recall_at_100 value: 94.527 - type: recall_at_1000 value: 98.919 - type: recall_at_3 value: 55.898 - type: recall_at_5 value: 66.121 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 71.414 - type: map_at_10 value: 85.37400000000001 - type: map_at_100 value: 86.01100000000001 - type: map_at_1000 value: 86.027 - type: map_at_3 value: 82.562 - type: map_at_5 value: 84.284 - type: mrr_at_1 value: 82.24000000000001 - type: mrr_at_10 value: 88.225 - type: mrr_at_100 value: 88.324 - type: mrr_at_1000 value: 88.325 - type: mrr_at_3 value: 87.348 - type: mrr_at_5 value: 87.938 - type: ndcg_at_1 value: 82.24000000000001 - type: ndcg_at_10 value: 88.97699999999999 - type: ndcg_at_100 value: 90.16 - type: ndcg_at_1000 value: 90.236 - type: ndcg_at_3 value: 86.371 - type: ndcg_at_5 value: 87.746 - type: precision_at_1 value: 
82.24000000000001 - type: precision_at_10 value: 13.481000000000002 - type: precision_at_100 value: 1.534 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.86 - type: precision_at_5 value: 24.738 - type: recall_at_1 value: 71.414 - type: recall_at_10 value: 95.735 - type: recall_at_100 value: 99.696 - type: recall_at_1000 value: 99.979 - type: recall_at_3 value: 88.105 - type: recall_at_5 value: 92.17999999999999 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 60.22146692057259 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 65.29273320614578 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 5.023 - type: map_at_10 value: 14.161000000000001 - type: map_at_100 value: 16.68 - type: map_at_1000 value: 17.072000000000003 - type: map_at_3 value: 9.763 - type: map_at_5 value: 11.977 - type: mrr_at_1 value: 24.8 - type: mrr_at_10 value: 37.602999999999994 - type: mrr_at_100 value: 38.618 - type: mrr_at_1000 value: 38.659 - type: mrr_at_3 value: 34.117 - type: mrr_at_5 value: 36.082 - type: ndcg_at_1 value: 24.8 - type: ndcg_at_10 value: 23.316 - type: ndcg_at_100 value: 32.613 - type: ndcg_at_1000 value: 38.609 - type: ndcg_at_3 value: 21.697 - type: ndcg_at_5 value: 19.241 - type: precision_at_1 value: 24.8 - type: precision_at_10 value: 12.36 - type: precision_at_100 value: 2.593 - type: precision_at_1000 value: 0.402 - type: precision_at_3 value: 20.767 - type: precision_at_5 value: 17.34 - type: recall_at_1 value: 5.023 - type: recall_at_10 value: 25.069999999999997 - type: recall_at_100 value: 52.563 - type: recall_at_1000 value: 81.525 - 
type: recall_at_3 value: 12.613 - type: recall_at_5 value: 17.583 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 87.71506247604255 - type: cos_sim_spearman value: 82.91813463738802 - type: euclidean_pearson value: 85.5154616194479 - type: euclidean_spearman value: 82.91815254466314 - type: manhattan_pearson value: 85.5280917850374 - type: manhattan_spearman value: 82.92276537286398 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 87.43772054228462 - type: cos_sim_spearman value: 78.75750601716682 - type: euclidean_pearson value: 85.76074482955764 - type: euclidean_spearman value: 78.75651057223058 - type: manhattan_pearson value: 85.73390291701668 - type: manhattan_spearman value: 78.72699385957797 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 89.58144067172472 - type: cos_sim_spearman value: 90.3524512966946 - type: euclidean_pearson value: 89.71365391594237 - type: euclidean_spearman value: 90.35239632843408 - type: manhattan_pearson value: 89.66905421746478 - type: manhattan_spearman value: 90.31508211683513 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 87.77692637102102 - type: cos_sim_spearman value: 85.45710562643485 - type: euclidean_pearson value: 87.42456979928723 - type: euclidean_spearman value: 85.45709386240908 - type: manhattan_pearson value: 87.40754529526272 - type: manhattan_spearman value: 85.44834854173303 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: 
ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 88.28491331695997 - type: cos_sim_spearman value: 89.62037029566964 - type: euclidean_pearson value: 89.02479391362826 - type: euclidean_spearman value: 89.62036733618466 - type: manhattan_pearson value: 89.00394756040342 - type: manhattan_spearman value: 89.60867744215236 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 85.08911381280191 - type: cos_sim_spearman value: 86.5791780765767 - type: euclidean_pearson value: 86.16063473577861 - type: euclidean_spearman value: 86.57917745378766 - type: manhattan_pearson value: 86.13677924604175 - type: manhattan_spearman value: 86.56115615768685 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 89.58029496205235 - type: cos_sim_spearman value: 89.49551253826998 - type: euclidean_pearson value: 90.13714840963748 - type: euclidean_spearman value: 89.49551253826998 - type: manhattan_pearson value: 90.13039633601363 - type: manhattan_spearman value: 89.4513453745516 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 69.01546399666435 - type: cos_sim_spearman value: 69.33824484595624 - type: euclidean_pearson value: 70.76511642998874 - type: euclidean_spearman value: 69.33824484595624 - type: manhattan_pearson value: 70.84320785047453 - type: manhattan_spearman value: 69.54233632223537 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 87.26389196390119 - type: cos_sim_spearman 
value: 89.09721478341385 - type: euclidean_pearson value: 88.97208685922517 - type: euclidean_spearman value: 89.09720927308881 - type: manhattan_pearson value: 88.97513670502573 - type: manhattan_spearman value: 89.07647853984004 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 87.53075025771936 - type: mrr value: 96.24327651288436 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 60.428000000000004 - type: map_at_10 value: 70.088 - type: map_at_100 value: 70.589 - type: map_at_1000 value: 70.614 - type: map_at_3 value: 67.191 - type: map_at_5 value: 68.515 - type: mrr_at_1 value: 63.333 - type: mrr_at_10 value: 71.13000000000001 - type: mrr_at_100 value: 71.545 - type: mrr_at_1000 value: 71.569 - type: mrr_at_3 value: 68.944 - type: mrr_at_5 value: 70.078 - type: ndcg_at_1 value: 63.333 - type: ndcg_at_10 value: 74.72800000000001 - type: ndcg_at_100 value: 76.64999999999999 - type: ndcg_at_1000 value: 77.176 - type: ndcg_at_3 value: 69.659 - type: ndcg_at_5 value: 71.626 - type: precision_at_1 value: 63.333 - type: precision_at_10 value: 10 - type: precision_at_100 value: 1.09 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 27.111 - type: precision_at_5 value: 17.666999999999998 - type: recall_at_1 value: 60.428000000000004 - type: recall_at_10 value: 87.98899999999999 - type: recall_at_100 value: 96.167 - type: recall_at_1000 value: 100 - type: recall_at_3 value: 74.006 - type: recall_at_5 value: 79.05 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.87326732673267 - type: cos_sim_ap value: 
96.81770773701805 - type: cos_sim_f1 value: 93.6318407960199 - type: cos_sim_precision value: 93.16831683168317 - type: cos_sim_recall value: 94.1 - type: dot_accuracy value: 99.87326732673267 - type: dot_ap value: 96.8174218946665 - type: dot_f1 value: 93.6318407960199 - type: dot_precision value: 93.16831683168317 - type: dot_recall value: 94.1 - type: euclidean_accuracy value: 99.87326732673267 - type: euclidean_ap value: 96.81770773701807 - type: euclidean_f1 value: 93.6318407960199 - type: euclidean_precision value: 93.16831683168317 - type: euclidean_recall value: 94.1 - type: manhattan_accuracy value: 99.87227722772278 - type: manhattan_ap value: 96.83164126821747 - type: manhattan_f1 value: 93.54677338669335 - type: manhattan_precision value: 93.5935935935936 - type: manhattan_recall value: 93.5 - type: max_accuracy value: 99.87326732673267 - type: max_ap value: 96.83164126821747 - type: max_f1 value: 93.6318407960199 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 65.6212042420246 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 35.779230635982564 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 55.217701909036286 - type: mrr value: 56.17658995416349 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.954206018888453 - type: cos_sim_spearman value: 32.71062599450096 - type: dot_pearson value: 
30.95420929056943 - type: dot_spearman value: 32.71062599450096 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.22699999999999998 - type: map_at_10 value: 1.924 - type: map_at_100 value: 10.525 - type: map_at_1000 value: 24.973 - type: map_at_3 value: 0.638 - type: map_at_5 value: 1.0659999999999998 - type: mrr_at_1 value: 84 - type: mrr_at_10 value: 91.067 - type: mrr_at_100 value: 91.067 - type: mrr_at_1000 value: 91.067 - type: mrr_at_3 value: 90.667 - type: mrr_at_5 value: 91.067 - type: ndcg_at_1 value: 81 - type: ndcg_at_10 value: 75.566 - type: ndcg_at_100 value: 56.387 - type: ndcg_at_1000 value: 49.834 - type: ndcg_at_3 value: 80.899 - type: ndcg_at_5 value: 80.75099999999999 - type: precision_at_1 value: 84 - type: precision_at_10 value: 79 - type: precision_at_100 value: 57.56 - type: precision_at_1000 value: 21.8 - type: precision_at_3 value: 84.667 - type: precision_at_5 value: 85.2 - type: recall_at_1 value: 0.22699999999999998 - type: recall_at_10 value: 2.136 - type: recall_at_100 value: 13.861 - type: recall_at_1000 value: 46.299 - type: recall_at_3 value: 0.6649999999999999 - type: recall_at_5 value: 1.145 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.752 - type: map_at_10 value: 9.951 - type: map_at_100 value: 16.794999999999998 - type: map_at_1000 value: 18.251 - type: map_at_3 value: 5.288 - type: map_at_5 value: 6.954000000000001 - type: mrr_at_1 value: 38.775999999999996 - type: mrr_at_10 value: 50.458000000000006 - type: mrr_at_100 value: 51.324999999999996 - type: mrr_at_1000 value: 51.339999999999996 - type: mrr_at_3 value: 46.939 - type: mrr_at_5 value: 47.857 - type: ndcg_at_1 value: 36.735 - type: ndcg_at_10 value: 25.198999999999998 - type: ndcg_at_100 value: 37.938 - type: ndcg_at_1000 value: 49.145 - type: ndcg_at_3 value: 
29.348000000000003 - type: ndcg_at_5 value: 25.804 - type: precision_at_1 value: 38.775999999999996 - type: precision_at_10 value: 22.041 - type: precision_at_100 value: 7.939 - type: precision_at_1000 value: 1.555 - type: precision_at_3 value: 29.932 - type: precision_at_5 value: 24.490000000000002 - type: recall_at_1 value: 2.752 - type: recall_at_10 value: 16.197 - type: recall_at_100 value: 49.166 - type: recall_at_1000 value: 84.18900000000001 - type: recall_at_3 value: 6.438000000000001 - type: recall_at_5 value: 9.093 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 71.47980000000001 - type: ap value: 14.605194452178754 - type: f1 value: 55.07362924988948 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 59.708545557441994 - type: f1 value: 60.04751270975683 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 53.21105960597211 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 87.58419264469214 - type: cos_sim_ap value: 78.55300004517404 - type: cos_sim_f1 value: 71.49673530889001 - type: cos_sim_precision value: 68.20795400095831 - type: cos_sim_recall value: 75.11873350923483 - type: dot_accuracy value: 87.58419264469214 - type: dot_ap value: 78.55297659559511 - type: dot_f1 value: 71.49673530889001 - type: dot_precision value: 
68.20795400095831 - type: dot_recall value: 75.11873350923483 - type: euclidean_accuracy value: 87.58419264469214 - type: euclidean_ap value: 78.55300477331477 - type: euclidean_f1 value: 71.49673530889001 - type: euclidean_precision value: 68.20795400095831 - type: euclidean_recall value: 75.11873350923483 - type: manhattan_accuracy value: 87.5663110210407 - type: manhattan_ap value: 78.49982050876562 - type: manhattan_f1 value: 71.35488740722104 - type: manhattan_precision value: 68.18946862226497 - type: manhattan_recall value: 74.82849604221636 - type: max_accuracy value: 87.58419264469214 - type: max_ap value: 78.55300477331477 - type: max_f1 value: 71.49673530889001 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.09069740365584 - type: cos_sim_ap value: 86.22749303724757 - type: cos_sim_f1 value: 78.36863452005407 - type: cos_sim_precision value: 76.49560117302053 - type: cos_sim_recall value: 80.33569448721897 - type: dot_accuracy value: 89.09069740365584 - type: dot_ap value: 86.22750233655673 - type: dot_f1 value: 78.36863452005407 - type: dot_precision value: 76.49560117302053 - type: dot_recall value: 80.33569448721897 - type: euclidean_accuracy value: 89.09069740365584 - type: euclidean_ap value: 86.22749355597347 - type: euclidean_f1 value: 78.36863452005407 - type: euclidean_precision value: 76.49560117302053 - type: euclidean_recall value: 80.33569448721897 - type: manhattan_accuracy value: 89.08293553770326 - type: manhattan_ap value: 86.21913616084771 - type: manhattan_f1 value: 78.3907031479847 - type: manhattan_precision value: 75.0352013517319 - type: manhattan_recall value: 82.06036341238065 - type: max_accuracy value: 89.09069740365584 - type: max_ap value: 86.22750233655673 - type: max_f1 value: 78.3907031479847 --- # 
dimcha/mxbai-embed-large-v1-Q4_K_M-GGUF This model was converted to GGUF format from [`mixedbread-ai/mxbai-embed-large-v1`](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo dimcha/mxbai-embed-large-v1-Q4_K_M-GGUF --hf-file mxbai-embed-large-v1-q4_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo dimcha/mxbai-embed-large-v1-Q4_K_M-GGUF --hf-file mxbai-embed-large-v1-q4_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo dimcha/mxbai-embed-large-v1-Q4_K_M-GGUF --hf-file mxbai-embed-large-v1-q4_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo dimcha/mxbai-embed-large-v1-Q4_K_M-GGUF --hf-file mxbai-embed-large-v1-q4_k_m.gguf -c 2048 ```
[ "BIOSSES", "SCIFACT" ]
apple/DCLM-7B-8k
apple
null
[ "transformers", "safetensors", "openlm", "arxiv:2406.11794", "arxiv:2405.13226", "license:apple-ascl", "endpoints_compatible", "region:us" ]
2024-07-15T21:17:20Z
2024-08-06T02:31:25+00:00
19
42
--- license: apple-ascl --- <img src="https://hf.fast360.xyz/production/uploads/63118add64939fabc0108b28/BB42g4V8HTxb5dR4tcy8A.png" alt="DCLM Logo" width="800" style="margin-left:'auto' margin-right:'auto' display:'block'"/> # Model Card for DCLM-Baseline-7B DCLM-Baseline-7B is a 7 billion parameter language model trained on the DCLM-Baseline dataset, which was curated as part of the DataComp for Language Models (DCLM) benchmark. This model is designed to showcase the effectiveness of systematic data curation techniques for improving language model performance. ## Model Details | Size | Training Tokens | Layers | Hidden Size | Attention Heads | Context Length | |------|-----------------|--------|-------------|-----------------|----------------| | 7B | 2.6T | 32 | 4096 | 32 | 8192 | ### Model Description - **Developed by:** DataComp for Language Models (DCLM) Team - **Model type:** Decoder-only Transformer language model - **Language(s):** English (primarily) - **License:** Apple Sample Code License - **Contact:** [email protected] - **Date:** June 2024 ### Model Sources - **Repository:** https://github.com/mlfoundations/dclm - **Dataset:** https://huggingface.co/datasets/mlfoundations/dclm-baseline-1.0 - **Paper:** [DataComp-LM: In search of the next generation of training sets for language models](https://arxiv.org/abs/2406.11794) ## Using Model First install open_lm ```pip install git+https://github.com/mlfoundations/open_lm.git``` Then: ``` from open_lm.hf import * from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("apple/DCLM-Baseline-7B-8k") model = AutoModelForCausalLM.from_pretrained("apple/DCLM-Baseline-7B-8k") inputs = tokenizer(["Machine learning is"], return_tensors="pt") gen_kwargs = {"max_new_tokens": 50, "top_p": 0.8, "temperature": 0.8, "do_sample": True, "repetition_penalty": 1.1} output = model.generate(inputs['input_ids'], **gen_kwargs) output = tokenizer.decode(output[0].tolist(), 
skip_special_tokens=True) print(output) ``` ### Training Details The model was trained using the following setup: - **Architecture:** Decoder-only Transformer - **Framework:** PyTorch with OpenLM - **Optimizer:** AdamW - **Learning Rate:** 2e-3 (peak) - **Weight Decay:** 0.05 - **Batch Size:** 2048 sequences - **Sequence Length:** 8192 tokens - **Total Training Tokens:** 2.6T - **Hardware:** Trained on H100 GPUs For more detailed training information, please refer to Section 3.4 and Appendix F of the DCLM paper. To ensure our trained model is broadly useful, including for math and coding tasks, we combine our 3.8T [DCLM-BASELINE](https://huggingface.co/datasets/mlfoundations/dclm-baseline-1.0) with the [StarCoder](https://huggingface.co/datasets/bigcode/starcoderdata) and [ProofPile2](https://huggingface.co/datasets/EleutherAI/proof-pile-2) data to arrive at a 4.1T token dataset. An additional 100B of training was done on the same dataset using [Dataset Decomposition](https://arxiv.org/abs/2405.13226) to extend context length from 2k -> 8k. 
## Evaluation Here are the evaluation results for DCLM-Baseline-7B on various tasks (using [llm-foundry](https://github.com/mosaicml/llm-foundry) eval suite) | Task | Score | |------|-------| | MMLU (zero-shot) | 0.5535 | | MMLU (few-shot) | 0.6369 | | HellaSwag (zero-shot) | 0.7933 | | HellaSwag | 0.8103 | | Jeopardy | 0.5252 | | TriviaQA | 0.5703 | | GSM8K (CoT) | 0.1024 | | AGI Eval SAT Math (CoT) | 0.2227 | | AQuA (CoT) | 0.1061 | | SVAMP (CoT) | 0.5133 | | BigBench QA Wikidata | 0.7344 | | ARC Easy | 0.8249 | | ARC Challenge | 0.6126 | | BigBench Misconceptions | 0.6849 | | COPA | 0.8800 | | SIQA | 0.8270 | | CommonsenseQA | 0.7993 | | PIQA | 0.8161 | | OpenBookQA | 0.4500 | | BigBench Novel Concepts | 0.6563 | | BigBench Strange Stories | 0.7759 | | BigBench Strategy QA | 0.6540 | | LAMBADA | 0.7553 | | Winograd | 0.9011 | | Winogrande | 0.7395 | | BigBench Conlang Translation | 0.1220 | | BigBench Language Identification | 0.5216 | | BigBench Conceptual Combinations | 0.6796 | | BigBench Elementary Math QA | 0.3500 | | BigBench Dyck Languages | 0.3470 | | AGI Eval LSAT AR | 0.2609 | | BigBench CS Algorithms | 0.5379 | | BigBench Logical Deduction | 0.3653 | | BigBench Operators | 0.5000 | | BigBench Repeat Copy Logic | 0.5313 | | Simple Arithmetic (no spaces) | 0.3000 | | Simple Arithmetic (with spaces) | 0.3070 | | MathQA | 0.3108 | | LogiQA | 0.4147 | | PubMedQA | 0.7170 | | SQuAD | 0.6317 | | AGI Eval LSAT RC | 0.7015 | | AGI Eval LSAT LR | 0.5373 | | CoQA | 0.4981 | | BigBench Understanding Fables | 0.7090 | | BoolQ | 0.8284 | | AGI Eval SAT EN | 0.8252 | | Winogender MC (Female) | 0.6333 | | Winogender MC (Male) | 0.5833 | | Enterprise PII Classification | 0.8091 | | BBQ | 0.6420 | | GPQA Main | 0.2612 | | GPQA Diamond | 0.2172 | Note: All scores are presented as decimal values between 0 and 1, representing the proportion of correct answers or the model's performance on each task. 
## Comparison Below are comparisions of this model with other models in the 7B regime. | Model | Params | Tokens | Open dataset? | CORE | MMLU | EXTENDED | |---------------|--------|--------|---------------|----------|----------|----------| | **Open weights, closed datasets** | | | | | | | | Llama2 | 7B | 2T | ❌ | 49.2 | 45.8 | 34.1 | | DeepSeek | 7B | 2T | ❌ | 50.7 | 48.5 | 35.3 | | Mistral-0.3 | 7B | ? | ❌ | 57.0 | 62.7 | 45.1 | | QWEN-2 | 7B | ? | ❌ | 57.5 | **71.9** | 50.5 | | Llama3 | 8B | 15T | ❌ | 57.6 | 66.2 | 46.3 | | Gemma | 8B | 6T | ❌ | 57.8 | 64.3 | 44.6 | | Phi-3 | 7B | ? | ❌ | **61.0** | 69.9 | **57.9** | | **Open weights, open datasets** | | | | | | | | Falcon | 7B | 1T | ✅ | 44.1 | 27.4 | 25.1 | | OLMo-1.7 | 7B | 2.1T | ✅ | 47.0 | 54.0 | 34.2 | | MAP-Neo | 7B | 4.5T | ✅ | **50.2** | **57.1** | **40.4** | | **DCLM-7B-8k** | 7B | 2.5T | ✅ | **57.1** | **63.7** | **45.4** | ## Limitations and Biases While DCLM-Baseline-7B demonstrates strong performance across a range of tasks, it's important to note: 1. The model may exhibit biases present in its training data, which is derived from web crawl data. 2. It has not undergone specific alignment or safety fine-tuning, so outputs should be used with caution. 3. Performance on tasks not included in the evaluation suite may vary. 4. The model's knowledge is limited to its training data cutoff date. ## Ethical Considerations Users should be aware that this model, like all large language models, can potentially generate harmful or biased content. It should not be used for making decisions about individuals or in sensitive applications without appropriate safeguards and human oversight. 
## Citation If you use this model in your research, please cite: ``` @article{Li2024DataCompLM, title={DataComp-LM: In search of the next generation of training sets for language models}, author={Jeffrey Li and Alex Fang and Georgios Smyrnis and Maor Ivgi and Matt Jordan and Samir Gadre and Hritik Bansal and Etash Guha and Sedrick Keh and Kushal Arora and [... full author list]}, journal={arXiv preprint arXiv:2406.11794}, year={2024} } ```
[ "PUBMEDQA" ]
yishan-wang/snowflake-arctic-embed-m-v1.5-Q8_0-GGUF
yishan-wang
sentence-similarity
[ "sentence-transformers", "gguf", "feature-extraction", "sentence-similarity", "mteb", "arctic", "snowflake-arctic-embed", "transformers.js", "llama-cpp", "gguf-my-repo", "base_model:Snowflake/snowflake-arctic-embed-m-v1.5", "base_model:quantized:Snowflake/snowflake-arctic-embed-m-v1.5", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-08-13T07:03:18Z
2024-08-13T07:03:23+00:00
19
0
--- base_model: Snowflake/snowflake-arctic-embed-m-v1.5 license: apache-2.0 pipeline_tag: sentence-similarity tags: - sentence-transformers - feature-extraction - sentence-similarity - mteb - arctic - snowflake-arctic-embed - transformers.js - llama-cpp - gguf-my-repo model-index: - name: snowflake-arctic-embed-m-v1.5 results: - task: type: Retrieval dataset: name: MTEB ArguAna type: mteb/arguana config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: main_score value: 59.53000000000001 - type: map_at_1 value: 34.282000000000004 - type: map_at_10 value: 50.613 - type: map_at_100 value: 51.269 - type: map_at_1000 value: 51.271 - type: map_at_20 value: 51.158 - type: map_at_3 value: 45.626 - type: map_at_5 value: 48.638 - type: mrr_at_1 value: 34.92176386913229 - type: mrr_at_10 value: 50.856081645555406 - type: mrr_at_100 value: 51.510739437069034 - type: mrr_at_1000 value: 51.51299498830165 - type: mrr_at_20 value: 51.39987941081724 - type: mrr_at_3 value: 45.993361782835514 - type: mrr_at_5 value: 48.88098624940742 - type: nauc_map_at_1000_diff1 value: 10.628675774160785 - type: nauc_map_at_1000_max value: -10.11742589992339 - type: nauc_map_at_1000_std value: -18.29277379812427 - type: nauc_map_at_100_diff1 value: 10.63250240035489 - type: nauc_map_at_100_max value: -10.112078786734363 - type: nauc_map_at_100_std value: -18.288524872706834 - type: nauc_map_at_10_diff1 value: 10.476494913081712 - type: nauc_map_at_10_max value: -9.890937746734037 - type: nauc_map_at_10_std value: -18.279750514750443 - type: nauc_map_at_1_diff1 value: 14.549204048461151 - type: nauc_map_at_1_max value: -12.230560087701225 - type: nauc_map_at_1_std value: -19.469903650130362 - type: nauc_map_at_20_diff1 value: 10.586564571825674 - type: nauc_map_at_20_max value: -10.00292720526217 - type: nauc_map_at_20_std value: -18.258077347878064 - type: nauc_map_at_3_diff1 value: 10.378663968090372 - type: nauc_map_at_3_max value: -10.458896171786185 - 
type: nauc_map_at_3_std value: -18.38852760333766 - type: nauc_map_at_5_diff1 value: 10.235960275925581 - type: nauc_map_at_5_max value: -10.239496080409058 - type: nauc_map_at_5_std value: -18.817023479445886 - type: nauc_mrr_at_1000_diff1 value: 8.718212649575722 - type: nauc_mrr_at_1000_max value: -10.81022794038691 - type: nauc_mrr_at_1000_std value: -17.87669499555167 - type: nauc_mrr_at_100_diff1 value: 8.722174171165133 - type: nauc_mrr_at_100_max value: -10.804840985713525 - type: nauc_mrr_at_100_std value: -17.872487099359986 - type: nauc_mrr_at_10_diff1 value: 8.609421635870238 - type: nauc_mrr_at_10_max value: -10.568644717548432 - type: nauc_mrr_at_10_std value: -17.872968762635814 - type: nauc_mrr_at_1_diff1 value: 12.69590006263834 - type: nauc_mrr_at_1_max value: -12.082056561238321 - type: nauc_mrr_at_1_std value: -18.036424092186657 - type: nauc_mrr_at_20_diff1 value: 8.684842497970315 - type: nauc_mrr_at_20_max value: -10.691578914627286 - type: nauc_mrr_at_20_std value: -17.84350301434992 - type: nauc_mrr_at_3_diff1 value: 8.649761557556763 - type: nauc_mrr_at_3_max value: -11.104694428047496 - type: nauc_mrr_at_3_std value: -18.149917948370344 - type: nauc_mrr_at_5_diff1 value: 8.433489750038396 - type: nauc_mrr_at_5_max value: -10.917772454397436 - type: nauc_mrr_at_5_std value: -18.4094211134111 - type: nauc_ndcg_at_1000_diff1 value: 10.19041067807956 - type: nauc_ndcg_at_1000_max value: -9.54328201605796 - type: nauc_ndcg_at_1000_std value: -17.824620427456633 - type: nauc_ndcg_at_100_diff1 value: 10.289491087585963 - type: nauc_ndcg_at_100_max value: -9.357214331420337 - type: nauc_ndcg_at_100_std value: -17.657600653632873 - type: nauc_ndcg_at_10_diff1 value: 9.435530877596092 - type: nauc_ndcg_at_10_max value: -8.182581635383546 - type: nauc_ndcg_at_10_std value: -17.603156479980388 - type: nauc_ndcg_at_1_diff1 value: 14.549204048461151 - type: nauc_ndcg_at_1_max value: -12.230560087701225 - type: nauc_ndcg_at_1_std value: 
-19.469903650130362 - type: nauc_ndcg_at_20_diff1 value: 9.885227087275197 - type: nauc_ndcg_at_20_max value: -8.52362662391439 - type: nauc_ndcg_at_20_std value: -17.441705436231764 - type: nauc_ndcg_at_3_diff1 value: 9.22542769998547 - type: nauc_ndcg_at_3_max value: -9.903590564219288 - type: nauc_ndcg_at_3_std value: -18.357220221111593 - type: nauc_ndcg_at_5_diff1 value: 8.8756720745828 - type: nauc_ndcg_at_5_max value: -9.269764943861245 - type: nauc_ndcg_at_5_std value: -19.009229433187784 - type: nauc_precision_at_1000_diff1 value: 3.733355117431035 - type: nauc_precision_at_1000_max value: 3.9603571352517393 - type: nauc_precision_at_1000_std value: 70.07345061131439 - type: nauc_precision_at_100_diff1 value: 29.019032142462457 - type: nauc_precision_at_100_max value: 40.75153328286103 - type: nauc_precision_at_100_std value: 62.634249549126594 - type: nauc_precision_at_10_diff1 value: 2.5762677254910353 - type: nauc_precision_at_10_max value: 6.096298633773051 - type: nauc_precision_at_10_std value: -11.507400451348587 - type: nauc_precision_at_1_diff1 value: 14.549204048461151 - type: nauc_precision_at_1_max value: -12.230560087701225 - type: nauc_precision_at_1_std value: -19.469903650130362 - type: nauc_precision_at_20_diff1 value: 1.715540124567996 - type: nauc_precision_at_20_max value: 21.53546453945913 - type: nauc_precision_at_20_std value: 1.537961142195571 - type: nauc_precision_at_3_diff1 value: 5.701850652555737 - type: nauc_precision_at_3_max value: -8.180345365085552 - type: nauc_precision_at_3_std value: -18.37033750502482 - type: nauc_precision_at_5_diff1 value: 3.6053552181042843 - type: nauc_precision_at_5_max value: -5.207647070615612 - type: nauc_precision_at_5_std value: -19.89491085427258 - type: nauc_recall_at_1000_diff1 value: 3.733355117431255 - type: nauc_recall_at_1000_max value: 3.9603571352482194 - type: nauc_recall_at_1000_std value: 70.07345061131205 - type: nauc_recall_at_100_diff1 value: 29.01903214246288 - type: 
nauc_recall_at_100_max value: 40.7515332828621 - type: nauc_recall_at_100_std value: 62.63424954912607 - type: nauc_recall_at_10_diff1 value: 2.5762677254911988 - type: nauc_recall_at_10_max value: 6.0962986337729905 - type: nauc_recall_at_10_std value: -11.507400451348577 - type: nauc_recall_at_1_diff1 value: 14.549204048461151 - type: nauc_recall_at_1_max value: -12.230560087701225 - type: nauc_recall_at_1_std value: -19.469903650130362 - type: nauc_recall_at_20_diff1 value: 1.7155401245682675 - type: nauc_recall_at_20_max value: 21.535464539459632 - type: nauc_recall_at_20_std value: 1.5379611421957025 - type: nauc_recall_at_3_diff1 value: 5.7018506525557875 - type: nauc_recall_at_3_max value: -8.180345365085538 - type: nauc_recall_at_3_std value: -18.370337505024796 - type: nauc_recall_at_5_diff1 value: 3.6053552181043913 - type: nauc_recall_at_5_max value: -5.207647070615579 - type: nauc_recall_at_5_std value: -19.894910854272492 - type: ndcg_at_1 value: 34.282000000000004 - type: ndcg_at_10 value: 59.53000000000001 - type: ndcg_at_100 value: 62.187000000000005 - type: ndcg_at_1000 value: 62.243 - type: ndcg_at_20 value: 61.451 - type: ndcg_at_3 value: 49.393 - type: ndcg_at_5 value: 54.771 - type: precision_at_1 value: 34.282000000000004 - type: precision_at_10 value: 8.791 - type: precision_at_100 value: 0.992 - type: precision_at_1000 value: 0.1 - type: precision_at_20 value: 4.769 - type: precision_at_3 value: 20.104 - type: precision_at_5 value: 14.651 - type: recall_at_1 value: 34.282000000000004 - type: recall_at_10 value: 87.909 - type: recall_at_100 value: 99.21799999999999 - type: recall_at_1000 value: 99.644 - type: recall_at_20 value: 95.377 - type: recall_at_3 value: 60.313 - type: recall_at_5 value: 73.257 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: mteb/cqadupstack-android config: default split: test revision: f46a197baaae43b4f621051089b82a364682dfeb metrics: - type: main_score value: 53.885000000000005 - type: 
map_at_1 value: 35.429 - type: map_at_10 value: 47.469 - type: map_at_100 value: 48.997 - type: map_at_1000 value: 49.117 - type: map_at_20 value: 48.324 - type: map_at_3 value: 43.835 - type: map_at_5 value: 46.043 - type: mrr_at_1 value: 43.34763948497854 - type: mrr_at_10 value: 53.258623430297234 - type: mrr_at_100 value: 53.99123884299005 - type: mrr_at_1000 value: 54.02458101713216 - type: mrr_at_20 value: 53.695964669618945 - type: mrr_at_3 value: 50.81068192656173 - type: mrr_at_5 value: 52.45588936576058 - type: nauc_map_at_1000_diff1 value: 51.55382824218782 - type: nauc_map_at_1000_max value: 31.855350695084606 - type: nauc_map_at_1000_std value: -5.465862008150992 - type: nauc_map_at_100_diff1 value: 51.55889312452534 - type: nauc_map_at_100_max value: 31.88429637207401 - type: nauc_map_at_100_std value: -5.40805152544196 - type: nauc_map_at_10_diff1 value: 51.6592677505875 - type: nauc_map_at_10_max value: 31.554425233617543 - type: nauc_map_at_10_std value: -6.125756131339046 - type: nauc_map_at_1_diff1 value: 55.6889617582672 - type: nauc_map_at_1_max value: 27.821166966868176 - type: nauc_map_at_1_std value: -5.778838498211728 - type: nauc_map_at_20_diff1 value: 51.70520970992564 - type: nauc_map_at_20_max value: 31.811676633900465 - type: nauc_map_at_20_std value: -5.463596751904718 - type: nauc_map_at_3_diff1 value: 53.206169626589606 - type: nauc_map_at_3_max value: 31.64373830824983 - type: nauc_map_at_3_std value: -6.054761451312827 - type: nauc_map_at_5_diff1 value: 52.37308971673694 - type: nauc_map_at_5_max value: 31.974302019633644 - type: nauc_map_at_5_std value: -6.302653399940531 - type: nauc_mrr_at_1000_diff1 value: 49.345152231490616 - type: nauc_mrr_at_1000_max value: 33.49789501712511 - type: nauc_mrr_at_1000_std value: -6.054730861163538 - type: nauc_mrr_at_100_diff1 value: 49.3387577601307 - type: nauc_mrr_at_100_max value: 33.48149992464187 - type: nauc_mrr_at_100_std value: -6.061177137579308 - type: nauc_mrr_at_10_diff1 value: 
49.08312288449718 - type: nauc_mrr_at_10_max value: 33.470393322577465 - type: nauc_mrr_at_10_std value: -6.180286430216975 - type: nauc_mrr_at_1_diff1 value: 52.43364978537192 - type: nauc_mrr_at_1_max value: 31.521755633355713 - type: nauc_mrr_at_1_std value: -7.002499524130836 - type: nauc_mrr_at_20_diff1 value: 49.311059224991766 - type: nauc_mrr_at_20_max value: 33.538523037692144 - type: nauc_mrr_at_20_std value: -6.034619474981136 - type: nauc_mrr_at_3_diff1 value: 49.90489868439366 - type: nauc_mrr_at_3_max value: 34.400493912164606 - type: nauc_mrr_at_3_std value: -6.028875320994629 - type: nauc_mrr_at_5_diff1 value: 49.033661898983475 - type: nauc_mrr_at_5_max value: 33.732315350193936 - type: nauc_mrr_at_5_std value: -6.272548556330368 - type: nauc_ndcg_at_1000_diff1 value: 49.81681892539247 - type: nauc_ndcg_at_1000_max value: 33.06518006062093 - type: nauc_ndcg_at_1000_std value: -4.282105713014755 - type: nauc_ndcg_at_100_diff1 value: 49.42362108857786 - type: nauc_ndcg_at_100_max value: 32.92024325540483 - type: nauc_ndcg_at_100_std value: -3.7786765305496717 - type: nauc_ndcg_at_10_diff1 value: 48.83102435475594 - type: nauc_ndcg_at_10_max value: 31.898404563611958 - type: nauc_ndcg_at_10_std value: -6.2024003866707 - type: nauc_ndcg_at_1_diff1 value: 52.43364978537192 - type: nauc_ndcg_at_1_max value: 31.521755633355713 - type: nauc_ndcg_at_1_std value: -7.002499524130836 - type: nauc_ndcg_at_20_diff1 value: 49.466526454438316 - type: nauc_ndcg_at_20_max value: 32.424462698701674 - type: nauc_ndcg_at_20_std value: -4.520809563712905 - type: nauc_ndcg_at_3_diff1 value: 50.997884562583884 - type: nauc_ndcg_at_3_max value: 33.26787046916917 - type: nauc_ndcg_at_3_std value: -6.340699471083753 - type: nauc_ndcg_at_5_diff1 value: 49.68314458398097 - type: nauc_ndcg_at_5_max value: 32.80910071143984 - type: nauc_ndcg_at_5_std value: -6.734495576445887 - type: nauc_precision_at_1000_diff1 value: -24.18940012795299 - type: nauc_precision_at_1000_max value: 
-10.995343674356896 - type: nauc_precision_at_1000_std value: -8.298841004724856 - type: nauc_precision_at_100_diff1 value: -18.104939577865935 - type: nauc_precision_at_100_max value: -1.3757613100627637 - type: nauc_precision_at_100_std value: 0.07661922190466432 - type: nauc_precision_at_10_diff1 value: 3.9624459059275967 - type: nauc_precision_at_10_max value: 14.841561593450391 - type: nauc_precision_at_10_std value: -2.485374333613117 - type: nauc_precision_at_1_diff1 value: 52.43364978537192 - type: nauc_precision_at_1_max value: 31.521755633355713 - type: nauc_precision_at_1_std value: -7.002499524130836 - type: nauc_precision_at_20_diff1 value: -4.4791763436505265 - type: nauc_precision_at_20_max value: 9.157872836996276 - type: nauc_precision_at_20_std value: 2.086903518342088 - type: nauc_precision_at_3_diff1 value: 28.480888018235568 - type: nauc_precision_at_3_max value: 30.34526267718485 - type: nauc_precision_at_3_std value: -6.3006706923866025 - type: nauc_precision_at_5_diff1 value: 16.488039195453517 - type: nauc_precision_at_5_max value: 24.593477099241852 - type: nauc_precision_at_5_std value: -5.316448107840636 - type: nauc_recall_at_1000_diff1 value: 34.715187316533076 - type: nauc_recall_at_1000_max value: 58.2266544684947 - type: nauc_recall_at_1000_std value: 63.85237636398278 - type: nauc_recall_at_100_diff1 value: 36.08623826028132 - type: nauc_recall_at_100_max value: 33.05011429439473 - type: nauc_recall_at_100_std value: 16.559545021212564 - type: nauc_recall_at_10_diff1 value: 39.76738610714205 - type: nauc_recall_at_10_max value: 28.233045706945997 - type: nauc_recall_at_10_std value: -5.13243784043598 - type: nauc_recall_at_1_diff1 value: 55.6889617582672 - type: nauc_recall_at_1_max value: 27.821166966868176 - type: nauc_recall_at_1_std value: -5.778838498211728 - type: nauc_recall_at_20_diff1 value: 41.18682480073759 - type: nauc_recall_at_20_max value: 29.525993239296945 - type: nauc_recall_at_20_std value: 1.5003598438954298 - 
type: nauc_recall_at_3_diff1 value: 48.31879460301157 - type: nauc_recall_at_3_max value: 32.93751306970167 - type: nauc_recall_at_3_std value: -5.28070084211707 - type: nauc_recall_at_5_diff1 value: 44.327686388315435 - type: nauc_recall_at_5_max value: 32.04823486234599 - type: nauc_recall_at_5_std value: -6.4221525602778256 - type: ndcg_at_1 value: 43.348 - type: ndcg_at_10 value: 53.885000000000005 - type: ndcg_at_100 value: 59.204 - type: ndcg_at_1000 value: 60.744 - type: ndcg_at_20 value: 55.995 - type: ndcg_at_3 value: 49.112 - type: ndcg_at_5 value: 51.61900000000001 - type: precision_at_1 value: 43.348 - type: precision_at_10 value: 10.242999999999999 - type: precision_at_100 value: 1.6150000000000002 - type: precision_at_1000 value: 0.203 - type: precision_at_20 value: 6.066 - type: precision_at_3 value: 23.605 - type: precision_at_5 value: 17.024 - type: recall_at_1 value: 35.429 - type: recall_at_10 value: 65.77199999999999 - type: recall_at_100 value: 87.89 - type: recall_at_1000 value: 97.13000000000001 - type: recall_at_20 value: 73.299 - type: recall_at_3 value: 52.034000000000006 - type: recall_at_5 value: 58.96 - task: type: Retrieval dataset: name: MTEB CQADupstackEnglishRetrieval type: mteb/cqadupstack-english config: default split: test revision: ad9991cb51e31e31e430383c75ffb2885547b5f0 metrics: - type: main_score value: 49.55 - type: map_at_1 value: 31.684 - type: map_at_10 value: 43.258 - type: map_at_100 value: 44.628 - type: map_at_1000 value: 44.761 - type: map_at_20 value: 44.015 - type: map_at_3 value: 39.778000000000006 - type: map_at_5 value: 41.643 - type: mrr_at_1 value: 39.87261146496815 - type: mrr_at_10 value: 49.31978566373469 - type: mrr_at_100 value: 49.94922739445482 - type: mrr_at_1000 value: 49.990325601254106 - type: mrr_at_20 value: 49.70597468576704 - type: mrr_at_3 value: 47.070063694267546 - type: mrr_at_5 value: 48.23248407643316 - type: nauc_map_at_1000_diff1 value: 53.44044712371752 - type: nauc_map_at_1000_max 
value: 34.5651440062204 - type: nauc_map_at_1000_std value: -0.9814384609230475 - type: nauc_map_at_100_diff1 value: 53.429004435388464 - type: nauc_map_at_100_max value: 34.52038957273436 - type: nauc_map_at_100_std value: -1.1021936362699805 - type: nauc_map_at_10_diff1 value: 53.879128574022005 - type: nauc_map_at_10_max value: 33.74771524140917 - type: nauc_map_at_10_std value: -2.945132777205236 - type: nauc_map_at_1_diff1 value: 60.25159799695403 - type: nauc_map_at_1_max value: 26.843892985235808 - type: nauc_map_at_1_std value: -9.618702739509093 - type: nauc_map_at_20_diff1 value: 53.56789898225283 - type: nauc_map_at_20_max value: 34.11628845872402 - type: nauc_map_at_20_std value: -2.024376635870884 - type: nauc_map_at_3_diff1 value: 54.45882099014072 - type: nauc_map_at_3_max value: 31.29495446507793 - type: nauc_map_at_3_std value: -6.391948228781555 - type: nauc_map_at_5_diff1 value: 54.20536489050697 - type: nauc_map_at_5_max value: 32.31001487256826 - type: nauc_map_at_5_std value: -5.050953263346934 - type: nauc_mrr_at_1000_diff1 value: 50.835858995999125 - type: nauc_mrr_at_1000_max value: 38.20717381701079 - type: nauc_mrr_at_1000_std value: 4.174163368228787 - type: nauc_mrr_at_100_diff1 value: 50.827072441041224 - type: nauc_mrr_at_100_max value: 38.21077622034756 - type: nauc_mrr_at_100_std value: 4.1951082737013365 - type: nauc_mrr_at_10_diff1 value: 50.90578491570948 - type: nauc_mrr_at_10_max value: 38.19229691746408 - type: nauc_mrr_at_10_std value: 3.8290750066335546 - type: nauc_mrr_at_1_diff1 value: 54.807021746871186 - type: nauc_mrr_at_1_max value: 37.09225642043841 - type: nauc_mrr_at_1_std value: 0.5654547513131355 - type: nauc_mrr_at_20_diff1 value: 50.86247832095378 - type: nauc_mrr_at_20_max value: 38.19277867384178 - type: nauc_mrr_at_20_std value: 4.098932316791841 - type: nauc_mrr_at_3_diff1 value: 50.788934370903036 - type: nauc_mrr_at_3_max value: 37.72130561895659 - type: nauc_mrr_at_3_std value: 2.7339370381517583 - type: 
nauc_mrr_at_5_diff1 value: 50.72543792525547 - type: nauc_mrr_at_5_max value: 37.57740908475375 - type: nauc_mrr_at_5_std value: 2.742881431085094 - type: nauc_ndcg_at_1000_diff1 value: 50.89692885407576 - type: nauc_ndcg_at_1000_max value: 37.250583054716955 - type: nauc_ndcg_at_1000_std value: 5.552279826578831 - type: nauc_ndcg_at_100_diff1 value: 50.624606875496944 - type: nauc_ndcg_at_100_max value: 37.1024514234627 - type: nauc_ndcg_at_100_std value: 5.495892760032762 - type: nauc_ndcg_at_10_diff1 value: 51.910387255793445 - type: nauc_ndcg_at_10_max value: 36.71168418905039 - type: nauc_ndcg_at_10_std value: 2.3064115117905217 - type: nauc_ndcg_at_1_diff1 value: 54.807021746871186 - type: nauc_ndcg_at_1_max value: 37.09225642043841 - type: nauc_ndcg_at_1_std value: 0.5654547513131355 - type: nauc_ndcg_at_20_diff1 value: 51.43416588546778 - type: nauc_ndcg_at_20_max value: 36.76387180172346 - type: nauc_ndcg_at_20_std value: 3.7012798827049718 - type: nauc_ndcg_at_3_diff1 value: 50.91198494475423 - type: nauc_ndcg_at_3_max value: 34.92770670756687 - type: nauc_ndcg_at_3_std value: -0.9071486759887368 - type: nauc_ndcg_at_5_diff1 value: 51.63559468683886 - type: nauc_ndcg_at_5_max value: 34.86849679864564 - type: nauc_ndcg_at_5_std value: -0.734837221224976 - type: nauc_precision_at_1000_diff1 value: -13.43645457127175 - type: nauc_precision_at_1000_max value: 12.71162105198664 - type: nauc_precision_at_1000_std value: 33.175399007040255 - type: nauc_precision_at_100_diff1 value: -8.549834785105412 - type: nauc_precision_at_100_max value: 22.47383497331883 - type: nauc_precision_at_100_std value: 39.09108761430844 - type: nauc_precision_at_10_diff1 value: 7.556572451100043 - type: nauc_precision_at_10_max value: 35.35285122987575 - type: nauc_precision_at_10_std value: 29.417466305615967 - type: nauc_precision_at_1_diff1 value: 54.807021746871186 - type: nauc_precision_at_1_max value: 37.09225642043841 - type: nauc_precision_at_1_std value: 0.5654547513131355 
- type: nauc_precision_at_20_diff1 value: -0.550158641635712 - type: nauc_precision_at_20_max value: 29.9068430006187 - type: nauc_precision_at_20_std value: 33.920603132821185 - type: nauc_precision_at_3_diff1 value: 25.551264664276687 - type: nauc_precision_at_3_max value: 37.59463225854679 - type: nauc_precision_at_3_std value: 13.707295021359043 - type: nauc_precision_at_5_diff1 value: 17.76136129817151 - type: nauc_precision_at_5_max value: 35.85363807255972 - type: nauc_precision_at_5_std value: 19.48470876841111 - type: nauc_recall_at_1000_diff1 value: 37.1593620123866 - type: nauc_recall_at_1000_max value: 46.29322536951135 - type: nauc_recall_at_1000_std value: 51.47312657083967 - type: nauc_recall_at_100_diff1 value: 37.7542224949536 - type: nauc_recall_at_100_max value: 38.84120637703135 - type: nauc_recall_at_100_std value: 28.839672572221925 - type: nauc_recall_at_10_diff1 value: 46.24130302658384 - type: nauc_recall_at_10_max value: 35.89001724712849 - type: nauc_recall_at_10_std value: 6.985137790828618 - type: nauc_recall_at_1_diff1 value: 60.25159799695403 - type: nauc_recall_at_1_max value: 26.843892985235808 - type: nauc_recall_at_1_std value: -9.618702739509093 - type: nauc_recall_at_20_diff1 value: 43.63576680886187 - type: nauc_recall_at_20_max value: 36.79079644708101 - type: nauc_recall_at_20_std value: 13.81561928605839 - type: nauc_recall_at_3_diff1 value: 48.2299322140522 - type: nauc_recall_at_3_max value: 30.038088484376203 - type: nauc_recall_at_3_std value: -4.871116183843762 - type: nauc_recall_at_5_diff1 value: 47.22331872695983 - type: nauc_recall_at_5_max value: 30.398541477173136 - type: nauc_recall_at_5_std value: -3.2038541888528957 - type: ndcg_at_1 value: 39.873 - type: ndcg_at_10 value: 49.55 - type: ndcg_at_100 value: 53.809 - type: ndcg_at_1000 value: 55.767999999999994 - type: ndcg_at_20 value: 51.275999999999996 - type: ndcg_at_3 value: 44.91 - type: ndcg_at_5 value: 46.855999999999995 - type: precision_at_1 value: 
39.873 - type: precision_at_10 value: 9.65 - type: precision_at_100 value: 1.522 - type: precision_at_1000 value: 0.196 - type: precision_at_20 value: 5.701 - type: precision_at_3 value: 22.166 - type: precision_at_5 value: 15.643 - type: recall_at_1 value: 31.684 - type: recall_at_10 value: 60.69 - type: recall_at_100 value: 78.521 - type: recall_at_1000 value: 91.02900000000001 - type: recall_at_20 value: 66.973 - type: recall_at_3 value: 46.807 - type: recall_at_5 value: 52.402 - task: type: Retrieval dataset: name: MTEB CQADupstackGamingRetrieval type: mteb/cqadupstack-gaming config: default split: test revision: 4885aa143210c98657558c04aaf3dc47cfb54340 metrics: - type: main_score value: 62.686 - type: map_at_1 value: 43.856 - type: map_at_10 value: 57.056 - type: map_at_100 value: 58.048 - type: map_at_1000 value: 58.092 - type: map_at_20 value: 57.684000000000005 - type: map_at_3 value: 53.958 - type: map_at_5 value: 55.80500000000001 - type: mrr_at_1 value: 50.03134796238244 - type: mrr_at_10 value: 60.31022043091019 - type: mrr_at_100 value: 60.91892338857461 - type: mrr_at_1000 value: 60.93770463536649 - type: mrr_at_20 value: 60.705642387392736 - type: mrr_at_3 value: 58.286311389759746 - type: mrr_at_5 value: 59.49320794148393 - type: nauc_map_at_1000_diff1 value: 54.849140197256695 - type: nauc_map_at_1000_max value: 38.978448968260224 - type: nauc_map_at_1000_std value: 0.4955439383268162 - type: nauc_map_at_100_diff1 value: 54.824334747823364 - type: nauc_map_at_100_max value: 38.959443109450994 - type: nauc_map_at_100_std value: 0.49626092018886037 - type: nauc_map_at_10_diff1 value: 54.778189277103394 - type: nauc_map_at_10_max value: 38.20972191654546 - type: nauc_map_at_10_std value: -0.7239823837455759 - type: nauc_map_at_1_diff1 value: 58.74017164752485 - type: nauc_map_at_1_max value: 31.528974862589585 - type: nauc_map_at_1_std value: -3.273824691929492 - type: nauc_map_at_20_diff1 value: 54.78943693416187 - type: nauc_map_at_20_max value: 
38.77930316443076 - type: nauc_map_at_20_std value: 0.25607460088355544 - type: nauc_map_at_3_diff1 value: 55.68313410225767 - type: nauc_map_at_3_max value: 36.22847284104399 - type: nauc_map_at_3_std value: -3.010979639100503 - type: nauc_map_at_5_diff1 value: 55.11385094420661 - type: nauc_map_at_5_max value: 37.319681045490924 - type: nauc_map_at_5_std value: -2.156640733221061 - type: nauc_mrr_at_1000_diff1 value: 54.504759468380705 - type: nauc_mrr_at_1000_max value: 40.58849492650406 - type: nauc_mrr_at_1000_std value: 1.8226622175866118 - type: nauc_mrr_at_100_diff1 value: 54.4918034449886 - type: nauc_mrr_at_100_max value: 40.59202728933427 - type: nauc_mrr_at_100_std value: 1.8276428096536335 - type: nauc_mrr_at_10_diff1 value: 54.33603399493329 - type: nauc_mrr_at_10_max value: 40.58896878978089 - type: nauc_mrr_at_10_std value: 1.5733340909114375 - type: nauc_mrr_at_1_diff1 value: 58.062410036466105 - type: nauc_mrr_at_1_max value: 37.660958859966506 - type: nauc_mrr_at_1_std value: 0.029007600674170648 - type: nauc_mrr_at_20_diff1 value: 54.43793386924358 - type: nauc_mrr_at_20_max value: 40.66773423875307 - type: nauc_mrr_at_20_std value: 1.891967891797154 - type: nauc_mrr_at_3_diff1 value: 54.77901284537966 - type: nauc_mrr_at_3_max value: 40.182219821206964 - type: nauc_mrr_at_3_std value: 0.8911935034597871 - type: nauc_mrr_at_5_diff1 value: 54.466068837163675 - type: nauc_mrr_at_5_max value: 40.334996916684126 - type: nauc_mrr_at_5_std value: 0.9460830492892364 - type: nauc_ndcg_at_1000_diff1 value: 53.8465376860938 - type: nauc_ndcg_at_1000_max value: 41.63158111016696 - type: nauc_ndcg_at_1000_std value: 3.864205884257578 - type: nauc_ndcg_at_100_diff1 value: 53.4025864436944 - type: nauc_ndcg_at_100_max value: 41.805453995307914 - type: nauc_ndcg_at_100_std value: 4.36777557904857 - type: nauc_ndcg_at_10_diff1 value: 52.96034987157544 - type: nauc_ndcg_at_10_max value: 40.7601173480795 - type: nauc_ndcg_at_10_std value: 1.905824035879141 - 
type: nauc_ndcg_at_1_diff1 value: 58.062410036466105 - type: nauc_ndcg_at_1_max value: 37.660958859966506 - type: nauc_ndcg_at_1_std value: 0.029007600674170648 - type: nauc_ndcg_at_20_diff1 value: 53.2834771889242 - type: nauc_ndcg_at_20_max value: 41.713541932946406 - type: nauc_ndcg_at_20_std value: 3.865102828793311 - type: nauc_ndcg_at_3_diff1 value: 54.03389464372289 - type: nauc_ndcg_at_3_max value: 38.41449914649933 - type: nauc_ndcg_at_3_std value: -0.886276189886313 - type: nauc_ndcg_at_5_diff1 value: 53.456413320299 - type: nauc_ndcg_at_5_max value: 39.49048882649335 - type: nauc_ndcg_at_5_std value: -0.42692690160443814 - type: nauc_precision_at_1000_diff1 value: -14.770791653274824 - type: nauc_precision_at_1000_max value: 21.479874538905246 - type: nauc_precision_at_1000_std value: 28.607024261300207 - type: nauc_precision_at_100_diff1 value: -12.189696449878126 - type: nauc_precision_at_100_max value: 26.69785787492456 - type: nauc_precision_at_100_std value: 33.59098307467553 - type: nauc_precision_at_10_diff1 value: 6.922968330978399 - type: nauc_precision_at_10_max value: 34.52138344123087 - type: nauc_precision_at_10_std value: 21.768427637079952 - type: nauc_precision_at_1_diff1 value: 58.062410036466105 - type: nauc_precision_at_1_max value: 37.660958859966506 - type: nauc_precision_at_1_std value: 0.029007600674170648 - type: nauc_precision_at_20_diff1 value: -0.6837867902179278 - type: nauc_precision_at_20_max value: 33.98683709011133 - type: nauc_precision_at_20_std value: 30.8845561918902 - type: nauc_precision_at_3_diff1 value: 28.195043041120847 - type: nauc_precision_at_3_max value: 37.659916094938836 - type: nauc_precision_at_3_std value: 7.226520146634867 - type: nauc_precision_at_5_diff1 value: 16.633667288096245 - type: nauc_precision_at_5_max value: 34.90176597404891 - type: nauc_precision_at_5_std value: 12.421585442334088 - type: nauc_recall_at_1000_diff1 value: 45.20743732415397 - type: nauc_recall_at_1000_max value: 
72.77115913579242 - type: nauc_recall_at_1000_std value: 70.48328496679083 - type: nauc_recall_at_100_diff1 value: 38.56282680810794 - type: nauc_recall_at_100_max value: 55.46797683321103 - type: nauc_recall_at_100_std value: 36.878791151929136 - type: nauc_recall_at_10_diff1 value: 44.18252051452362 - type: nauc_recall_at_10_max value: 43.33391810040086 - type: nauc_recall_at_10_std value: 6.663378192277723 - type: nauc_recall_at_1_diff1 value: 58.74017164752485 - type: nauc_recall_at_1_max value: 31.528974862589585 - type: nauc_recall_at_1_std value: -3.273824691929492 - type: nauc_recall_at_20_diff1 value: 44.19944231642417 - type: nauc_recall_at_20_max value: 49.401101483915866 - type: nauc_recall_at_20_std value: 18.97803841673839 - type: nauc_recall_at_3_diff1 value: 49.56378985428704 - type: nauc_recall_at_3_max value: 36.434210616870224 - type: nauc_recall_at_3_std value: -2.850559971607616 - type: nauc_recall_at_5_diff1 value: 47.37107217086109 - type: nauc_recall_at_5_max value: 39.0236745509895 - type: nauc_recall_at_5_std value: -1.7402454457937195 - type: ndcg_at_1 value: 50.031000000000006 - type: ndcg_at_10 value: 62.686 - type: ndcg_at_100 value: 66.403 - type: ndcg_at_1000 value: 67.241 - type: ndcg_at_20 value: 64.37899999999999 - type: ndcg_at_3 value: 57.859 - type: ndcg_at_5 value: 60.375 - type: precision_at_1 value: 50.031000000000006 - type: precision_at_10 value: 9.856 - type: precision_at_100 value: 1.266 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_20 value: 5.489 - type: precision_at_3 value: 25.746999999999996 - type: precision_at_5 value: 17.492 - type: recall_at_1 value: 43.856 - type: recall_at_10 value: 75.824 - type: recall_at_100 value: 91.622 - type: recall_at_1000 value: 97.538 - type: recall_at_20 value: 81.951 - type: recall_at_3 value: 63.016000000000005 - type: recall_at_5 value: 69.18299999999999 - task: type: Retrieval dataset: name: MTEB CQADupstackGisRetrieval type: mteb/cqadupstack-gis 
config: default split: test revision: 5003b3064772da1887988e05400cf3806fe491f2 metrics: - type: main_score value: 43.983 - type: map_at_1 value: 28.942 - type: map_at_10 value: 38.621 - type: map_at_100 value: 39.7 - type: map_at_1000 value: 39.766 - type: map_at_20 value: 39.262 - type: map_at_3 value: 35.719 - type: map_at_5 value: 37.378 - type: mrr_at_1 value: 31.29943502824859 - type: mrr_at_10 value: 40.76463994260603 - type: mrr_at_100 value: 41.67073617629083 - type: mrr_at_1000 value: 41.717446259457105 - type: mrr_at_20 value: 41.32577374689195 - type: mrr_at_3 value: 37.984934086628996 - type: mrr_at_5 value: 39.64595103578152 - type: nauc_map_at_1000_diff1 value: 43.64461679688985 - type: nauc_map_at_1000_max value: 31.53717883948204 - type: nauc_map_at_1000_std value: 1.193745788248017 - type: nauc_map_at_100_diff1 value: 43.63847825079489 - type: nauc_map_at_100_max value: 31.536602619279165 - type: nauc_map_at_100_std value: 1.2001240243342401 - type: nauc_map_at_10_diff1 value: 43.845991987142014 - type: nauc_map_at_10_max value: 31.27509937344113 - type: nauc_map_at_10_std value: 0.7327934840520994 - type: nauc_map_at_1_diff1 value: 50.62269273984579 - type: nauc_map_at_1_max value: 30.16325757909521 - type: nauc_map_at_1_std value: -0.6398875136233392 - type: nauc_map_at_20_diff1 value: 43.630758403790914 - type: nauc_map_at_20_max value: 31.408258098047703 - type: nauc_map_at_20_std value: 1.12616034652217 - type: nauc_map_at_3_diff1 value: 44.823493567359456 - type: nauc_map_at_3_max value: 31.075886347614496 - type: nauc_map_at_3_std value: -0.25126874515735426 - type: nauc_map_at_5_diff1 value: 43.79768853087658 - type: nauc_map_at_5_max value: 31.091080995725324 - type: nauc_map_at_5_std value: 0.16440771782544047 - type: nauc_mrr_at_1000_diff1 value: 42.7865400752329 - type: nauc_mrr_at_1000_max value: 32.84731670326893 - type: nauc_mrr_at_1000_std value: 2.6067637582013825 - type: nauc_mrr_at_100_diff1 value: 42.771741548331065 - type: 
nauc_mrr_at_100_max value: 32.85324232845987 - type: nauc_mrr_at_100_std value: 2.6092786694308376 - type: nauc_mrr_at_10_diff1 value: 42.82969738870672 - type: nauc_mrr_at_10_max value: 32.69407549631432 - type: nauc_mrr_at_10_std value: 2.302903910016054 - type: nauc_mrr_at_1_diff1 value: 49.05638333657571 - type: nauc_mrr_at_1_max value: 33.12030717171514 - type: nauc_mrr_at_1_std value: 1.3278035087690774 - type: nauc_mrr_at_20_diff1 value: 42.74267239536286 - type: nauc_mrr_at_20_max value: 32.78571108973092 - type: nauc_mrr_at_20_std value: 2.5932669908758643 - type: nauc_mrr_at_3_diff1 value: 43.69963426089187 - type: nauc_mrr_at_3_max value: 32.78193126956233 - type: nauc_mrr_at_3_std value: 1.634874463134699 - type: nauc_mrr_at_5_diff1 value: 42.838630647832524 - type: nauc_mrr_at_5_max value: 32.459318735260545 - type: nauc_mrr_at_5_std value: 1.9412518283209172 - type: nauc_ndcg_at_1000_diff1 value: 41.01253839851583 - type: nauc_ndcg_at_1000_max value: 32.69570568894237 - type: nauc_ndcg_at_1000_std value: 3.4254737113410343 - type: nauc_ndcg_at_100_diff1 value: 40.62589243745832 - type: nauc_ndcg_at_100_max value: 32.664990655736126 - type: nauc_ndcg_at_100_std value: 3.799569445326048 - type: nauc_ndcg_at_10_diff1 value: 41.31658753735306 - type: nauc_ndcg_at_10_max value: 31.511946320339295 - type: nauc_ndcg_at_10_std value: 2.0492930500796662 - type: nauc_ndcg_at_1_diff1 value: 49.05638333657571 - type: nauc_ndcg_at_1_max value: 33.12030717171514 - type: nauc_ndcg_at_1_std value: 1.3278035087690774 - type: nauc_ndcg_at_20_diff1 value: 40.66188223212841 - type: nauc_ndcg_at_20_max value: 31.926240431497476 - type: nauc_ndcg_at_20_std value: 3.370398664595343 - type: nauc_ndcg_at_3_diff1 value: 43.035580180241 - type: nauc_ndcg_at_3_max value: 31.363874129878404 - type: nauc_ndcg_at_3_std value: 0.1422507242819929 - type: nauc_ndcg_at_5_diff1 value: 41.29049003955878 - type: nauc_ndcg_at_5_max value: 31.112034994977737 - type: nauc_ndcg_at_5_std 
value: 0.860179279828966 - type: nauc_precision_at_1000_diff1 value: -12.41854465881981 - type: nauc_precision_at_1000_max value: 14.706779246590548 - type: nauc_precision_at_1000_std value: 9.812804367375206 - type: nauc_precision_at_100_diff1 value: 2.797520107808461 - type: nauc_precision_at_100_max value: 24.335873541811406 - type: nauc_precision_at_100_std value: 12.87186398750545 - type: nauc_precision_at_10_diff1 value: 24.530962799265847 - type: nauc_precision_at_10_max value: 31.00772010798733 - type: nauc_precision_at_10_std value: 6.696733001548185 - type: nauc_precision_at_1_diff1 value: 49.05638333657571 - type: nauc_precision_at_1_max value: 33.12030717171514 - type: nauc_precision_at_1_std value: 1.3278035087690774 - type: nauc_precision_at_20_diff1 value: 16.25028416351204 - type: nauc_precision_at_20_max value: 29.629326492027342 - type: nauc_precision_at_20_std value: 11.085888573121679 - type: nauc_precision_at_3_diff1 value: 33.923667689694256 - type: nauc_precision_at_3_max value: 33.5859782361996 - type: nauc_precision_at_3_std value: 1.9468331086918693 - type: nauc_precision_at_5_diff1 value: 27.917827233088875 - type: nauc_precision_at_5_max value: 33.13290043423535 - type: nauc_precision_at_5_std value: 3.800870695945311 - type: nauc_recall_at_1000_diff1 value: 9.680283388428789 - type: nauc_recall_at_1000_max value: 49.479399284871235 - type: nauc_recall_at_1000_std value: 31.506985071436088 - type: nauc_recall_at_100_diff1 value: 23.607673377885448 - type: nauc_recall_at_100_max value: 36.637750366403935 - type: nauc_recall_at_100_std value: 18.30770690564224 - type: nauc_recall_at_10_diff1 value: 33.199683418312446 - type: nauc_recall_at_10_max value: 29.63115497012312 - type: nauc_recall_at_10_std value: 4.813200391480566 - type: nauc_recall_at_1_diff1 value: 50.62269273984579 - type: nauc_recall_at_1_max value: 30.16325757909521 - type: nauc_recall_at_1_std value: -0.6398875136233392 - type: nauc_recall_at_20_diff1 value: 
29.16488387844995 - type: nauc_recall_at_20_max value: 30.788019479459 - type: nauc_recall_at_20_std value: 11.031953917298853 - type: nauc_recall_at_3_diff1 value: 38.215351600417065 - type: nauc_recall_at_3_max value: 29.619887154236128 - type: nauc_recall_at_3_std value: -0.13237298980339363 - type: nauc_recall_at_5_diff1 value: 33.93788042633265 - type: nauc_recall_at_5_max value: 28.67185092656741 - type: nauc_recall_at_5_std value: 1.316700201091445 - type: ndcg_at_1 value: 31.299 - type: ndcg_at_10 value: 43.983 - type: ndcg_at_100 value: 48.992999999999995 - type: ndcg_at_1000 value: 50.757 - type: ndcg_at_20 value: 46.152 - type: ndcg_at_3 value: 38.367000000000004 - type: ndcg_at_5 value: 41.171 - type: precision_at_1 value: 31.299 - type: precision_at_10 value: 6.734 - type: precision_at_100 value: 0.972 - type: precision_at_1000 value: 0.11499999999999999 - type: precision_at_20 value: 3.898 - type: precision_at_3 value: 16.121 - type: precision_at_5 value: 11.344999999999999 - type: recall_at_1 value: 28.942 - type: recall_at_10 value: 58.343999999999994 - type: recall_at_100 value: 80.82300000000001 - type: recall_at_1000 value: 94.348 - type: recall_at_20 value: 66.449 - type: recall_at_3 value: 43.415 - type: recall_at_5 value: 50.007999999999996 - task: type: Retrieval dataset: name: MTEB CQADupstackMathematicaRetrieval type: mteb/cqadupstack-mathematica config: default split: test revision: 90fceea13679c63fe563ded68f3b6f06e50061de metrics: - type: main_score value: 33.144 - type: map_at_1 value: 19.41 - type: map_at_10 value: 27.802 - type: map_at_100 value: 29.157 - type: map_at_1000 value: 29.274 - type: map_at_20 value: 28.549000000000003 - type: map_at_3 value: 25.052999999999997 - type: map_at_5 value: 26.521 - type: mrr_at_1 value: 23.756218905472636 - type: mrr_at_10 value: 32.3623450209271 - type: mrr_at_100 value: 33.3648208444617 - type: mrr_at_1000 value: 33.427688215162185 - type: mrr_at_20 value: 32.93723485575758 - type: mrr_at_3 
value: 29.539800995024883 - type: mrr_at_5 value: 31.156716417910452 - type: nauc_map_at_1000_diff1 value: 36.196391248081284 - type: nauc_map_at_1000_max value: 25.650644367091495 - type: nauc_map_at_1000_std value: 6.130340697729844 - type: nauc_map_at_100_diff1 value: 36.138890642411376 - type: nauc_map_at_100_max value: 25.587124763888518 - type: nauc_map_at_100_std value: 6.129336379055536 - type: nauc_map_at_10_diff1 value: 36.254426743566775 - type: nauc_map_at_10_max value: 25.465599906543034 - type: nauc_map_at_10_std value: 5.880280378112879 - type: nauc_map_at_1_diff1 value: 42.890551563179976 - type: nauc_map_at_1_max value: 25.813805281076956 - type: nauc_map_at_1_std value: 5.150718386163028 - type: nauc_map_at_20_diff1 value: 35.98551587974314 - type: nauc_map_at_20_max value: 25.501540521726636 - type: nauc_map_at_20_std value: 5.858703157458749 - type: nauc_map_at_3_diff1 value: 37.646558039577734 - type: nauc_map_at_3_max value: 26.138491471124247 - type: nauc_map_at_3_std value: 6.0487505175540734 - type: nauc_map_at_5_diff1 value: 36.817582976153695 - type: nauc_map_at_5_max value: 25.398200211121146 - type: nauc_map_at_5_std value: 6.31126763919522 - type: nauc_mrr_at_1000_diff1 value: 37.313544952847835 - type: nauc_mrr_at_1000_max value: 26.96218532078988 - type: nauc_mrr_at_1000_std value: 6.814359224654042 - type: nauc_mrr_at_100_diff1 value: 37.28104407653679 - type: nauc_mrr_at_100_max value: 26.931243040477256 - type: nauc_mrr_at_100_std value: 6.800500150841733 - type: nauc_mrr_at_10_diff1 value: 37.315832621275895 - type: nauc_mrr_at_10_max value: 26.941454225978372 - type: nauc_mrr_at_10_std value: 6.837046527796884 - type: nauc_mrr_at_1_diff1 value: 43.19904188582958 - type: nauc_mrr_at_1_max value: 26.975620445904795 - type: nauc_mrr_at_1_std value: 4.52071008581395 - type: nauc_mrr_at_20_diff1 value: 37.2200524790774 - type: nauc_mrr_at_20_max value: 26.971494160765847 - type: nauc_mrr_at_20_std value: 6.716431228783282 - type: 
nauc_mrr_at_3_diff1 value: 38.46236387340654 - type: nauc_mrr_at_3_max value: 27.846812992192056 - type: nauc_mrr_at_3_std value: 6.550711872569794 - type: nauc_mrr_at_5_diff1 value: 37.620346007658476 - type: nauc_mrr_at_5_max value: 27.031025952102038 - type: nauc_mrr_at_5_std value: 7.32343760231163 - type: nauc_ndcg_at_1000_diff1 value: 34.95081314840592 - type: nauc_ndcg_at_1000_max value: 26.89265465124325 - type: nauc_ndcg_at_1000_std value: 7.854154466831975 - type: nauc_ndcg_at_100_diff1 value: 34.01417812563093 - type: nauc_ndcg_at_100_max value: 25.792737746436835 - type: nauc_ndcg_at_100_std value: 7.726584165493833 - type: nauc_ndcg_at_10_diff1 value: 33.895122516474466 - type: nauc_ndcg_at_10_max value: 25.388442204589612 - type: nauc_ndcg_at_10_std value: 6.359560223645991 - type: nauc_ndcg_at_1_diff1 value: 43.19904188582958 - type: nauc_ndcg_at_1_max value: 26.975620445904795 - type: nauc_ndcg_at_1_std value: 4.52071008581395 - type: nauc_ndcg_at_20_diff1 value: 33.36078689830245 - type: nauc_ndcg_at_20_max value: 25.531794610571563 - type: nauc_ndcg_at_20_std value: 6.136658608653248 - type: nauc_ndcg_at_3_diff1 value: 36.44505602530781 - type: nauc_ndcg_at_3_max value: 26.9104071983157 - type: nauc_ndcg_at_3_std value: 6.427178520371878 - type: nauc_ndcg_at_5_diff1 value: 35.01384323197442 - type: nauc_ndcg_at_5_max value: 25.5560447088692 - type: nauc_ndcg_at_5_std value: 7.3676236760360485 - type: nauc_precision_at_1000_diff1 value: 2.8903331041804514 - type: nauc_precision_at_1000_max value: 4.059662742366004 - type: nauc_precision_at_1000_std value: -1.5891687644008334 - type: nauc_precision_at_100_diff1 value: 8.437726471693766 - type: nauc_precision_at_100_max value: 11.250588557568427 - type: nauc_precision_at_100_std value: 4.231571164627862 - type: nauc_precision_at_10_diff1 value: 19.57085237210294 - type: nauc_precision_at_10_max value: 20.973093492003905 - type: nauc_precision_at_10_std value: 3.197416248152466 - type: 
nauc_precision_at_1_diff1 value: 43.19904188582958 - type: nauc_precision_at_1_max value: 26.975620445904795 - type: nauc_precision_at_1_std value: 4.52071008581395 - type: nauc_precision_at_20_diff1 value: 15.67136554192724 - type: nauc_precision_at_20_max value: 17.706882621057858 - type: nauc_precision_at_20_std value: 1.9363472182867714 - type: nauc_precision_at_3_diff1 value: 30.38035695042325 - type: nauc_precision_at_3_max value: 26.48218693244094 - type: nauc_precision_at_3_std value: 6.424657705785632 - type: nauc_precision_at_5_diff1 value: 25.272543315171458 - type: nauc_precision_at_5_max value: 22.32441421311652 - type: nauc_precision_at_5_std value: 7.4912569081905716 - type: nauc_recall_at_1000_diff1 value: 25.5748044137675 - type: nauc_recall_at_1000_max value: 43.85796585370269 - type: nauc_recall_at_1000_std value: 30.0338086596789 - type: nauc_recall_at_100_diff1 value: 22.577080638885093 - type: nauc_recall_at_100_max value: 23.224511700617477 - type: nauc_recall_at_100_std value: 15.187963852289313 - type: nauc_recall_at_10_diff1 value: 25.058592299355908 - type: nauc_recall_at_10_max value: 22.24448483279841 - type: nauc_recall_at_10_std value: 6.3179089740052765 - type: nauc_recall_at_1_diff1 value: 42.890551563179976 - type: nauc_recall_at_1_max value: 25.813805281076956 - type: nauc_recall_at_1_std value: 5.150718386163028 - type: nauc_recall_at_20_diff1 value: 22.433865123187307 - type: nauc_recall_at_20_max value: 22.739695641511762 - type: nauc_recall_at_20_std value: 5.362005125538497 - type: nauc_recall_at_3_diff1 value: 32.17919168998616 - type: nauc_recall_at_3_max value: 26.044028436867357 - type: nauc_recall_at_3_std value: 7.420349884006329 - type: nauc_recall_at_5_diff1 value: 28.967104573649138 - type: nauc_recall_at_5_max value: 23.40865848168201 - type: nauc_recall_at_5_std value: 9.174406147723621 - type: ndcg_at_1 value: 23.756 - type: ndcg_at_10 value: 33.144 - type: ndcg_at_100 value: 39.261 - type: ndcg_at_1000 value: 
41.881 - type: ndcg_at_20 value: 35.56 - type: ndcg_at_3 value: 27.927999999999997 - type: ndcg_at_5 value: 30.293999999999997 - type: precision_at_1 value: 23.756 - type: precision_at_10 value: 5.995 - type: precision_at_100 value: 1.053 - type: precision_at_1000 value: 0.14100000000000001 - type: precision_at_20 value: 3.688 - type: precision_at_3 value: 13.059999999999999 - type: precision_at_5 value: 9.602 - type: recall_at_1 value: 19.41 - type: recall_at_10 value: 45.074 - type: recall_at_100 value: 71.131 - type: recall_at_1000 value: 89.604 - type: recall_at_20 value: 53.673 - type: recall_at_3 value: 31.055 - type: recall_at_5 value: 36.714999999999996 - task: type: Retrieval dataset: name: MTEB CQADupstackPhysicsRetrieval type: mteb/cqadupstack-physics config: default split: test revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4 metrics: - type: main_score value: 49.675000000000004 - type: map_at_1 value: 33.178999999999995 - type: map_at_10 value: 43.807 - type: map_at_100 value: 45.17 - type: map_at_1000 value: 45.271 - type: map_at_20 value: 44.516 - type: map_at_3 value: 40.813 - type: map_at_5 value: 42.457 - type: mrr_at_1 value: 40.32723772858518 - type: mrr_at_10 value: 49.646867409138814 - type: mrr_at_100 value: 50.493686101426285 - type: mrr_at_1000 value: 50.525386961808834 - type: mrr_at_20 value: 50.120274354884586 - type: mrr_at_3 value: 47.49759384023096 - type: mrr_at_5 value: 48.72473532242535 - type: nauc_map_at_1000_diff1 value: 49.5947127786396 - type: nauc_map_at_1000_max value: 33.39720045844929 - type: nauc_map_at_1000_std value: -3.131428593252271 - type: nauc_map_at_100_diff1 value: 49.57797867324617 - type: nauc_map_at_100_max value: 33.356927974709464 - type: nauc_map_at_100_std value: -3.1661365376766337 - type: nauc_map_at_10_diff1 value: 49.59294630598952 - type: nauc_map_at_10_max value: 32.86647346990462 - type: nauc_map_at_10_std value: -4.1582043443386745 - type: nauc_map_at_1_diff1 value: 53.98646767288695 - type: 
nauc_map_at_1_max value: 29.45629077638936 - type: nauc_map_at_1_std value: -5.621187380771589 - type: nauc_map_at_20_diff1 value: 49.486982890447074 - type: nauc_map_at_20_max value: 33.11681933406332 - type: nauc_map_at_20_std value: -3.5826433195146854 - type: nauc_map_at_3_diff1 value: 50.81807107491861 - type: nauc_map_at_3_max value: 32.32552291988859 - type: nauc_map_at_3_std value: -3.952946504088928 - type: nauc_map_at_5_diff1 value: 49.70201354274439 - type: nauc_map_at_5_max value: 32.831846031004886 - type: nauc_map_at_5_std value: -3.8330488624207737 - type: nauc_mrr_at_1000_diff1 value: 49.04159472507738 - type: nauc_mrr_at_1000_max value: 35.617600171138676 - type: nauc_mrr_at_1000_std value: -1.5975830757486646 - type: nauc_mrr_at_100_diff1 value: 49.03848471692094 - type: nauc_mrr_at_100_max value: 35.61936748662614 - type: nauc_mrr_at_100_std value: -1.5922053398594729 - type: nauc_mrr_at_10_diff1 value: 48.92463964652612 - type: nauc_mrr_at_10_max value: 35.37757708992045 - type: nauc_mrr_at_10_std value: -2.2052028139567303 - type: nauc_mrr_at_1_diff1 value: 52.23915787290734 - type: nauc_mrr_at_1_max value: 34.393531787632334 - type: nauc_mrr_at_1_std value: -1.452007661016969 - type: nauc_mrr_at_20_diff1 value: 48.91168438018404 - type: nauc_mrr_at_20_max value: 35.478962544421876 - type: nauc_mrr_at_20_std value: -1.8246048423555414 - type: nauc_mrr_at_3_diff1 value: 50.115432665442164 - type: nauc_mrr_at_3_max value: 35.89093796085569 - type: nauc_mrr_at_3_std value: -1.4895016313153366 - type: nauc_mrr_at_5_diff1 value: 49.04321261351915 - type: nauc_mrr_at_5_max value: 35.85730520949451 - type: nauc_mrr_at_5_std value: -1.68790556880753 - type: nauc_ndcg_at_1000_diff1 value: 48.294697499154374 - type: nauc_ndcg_at_1000_max value: 35.167410242367595 - type: nauc_ndcg_at_1000_std value: -0.6346078535914157 - type: nauc_ndcg_at_100_diff1 value: 48.025525283449014 - type: nauc_ndcg_at_100_max value: 34.79288511776105 - type: 
nauc_ndcg_at_100_std value: -0.7823403044086993 - type: nauc_ndcg_at_10_diff1 value: 47.70793258015258 - type: nauc_ndcg_at_10_max value: 33.09558927880104 - type: nauc_ndcg_at_10_std value: -4.7793864166260605 - type: nauc_ndcg_at_1_diff1 value: 52.23915787290734 - type: nauc_ndcg_at_1_max value: 34.393531787632334 - type: nauc_ndcg_at_1_std value: -1.452007661016969 - type: nauc_ndcg_at_20_diff1 value: 47.354286045074815 - type: nauc_ndcg_at_20_max value: 33.686648806027975 - type: nauc_ndcg_at_20_std value: -3.0189085132476556 - type: nauc_ndcg_at_3_diff1 value: 49.68805334316908 - type: nauc_ndcg_at_3_max value: 34.196077748056496 - type: nauc_ndcg_at_3_std value: -2.7167289163768436 - type: nauc_ndcg_at_5_diff1 value: 47.94474868912989 - type: nauc_ndcg_at_5_max value: 34.00261603413051 - type: nauc_ndcg_at_5_std value: -3.3541028103046115 - type: nauc_precision_at_1000_diff1 value: -12.0150100710755 - type: nauc_precision_at_1000_max value: 5.332942816568796 - type: nauc_precision_at_1000_std value: 14.543288479130458 - type: nauc_precision_at_100_diff1 value: -4.920332181588838 - type: nauc_precision_at_100_max value: 14.42313332017491 - type: nauc_precision_at_100_std value: 17.821953321018384 - type: nauc_precision_at_10_diff1 value: 14.70509089079217 - type: nauc_precision_at_10_max value: 25.381887131649716 - type: nauc_precision_at_10_std value: 5.226419288645675 - type: nauc_precision_at_1_diff1 value: 52.23915787290734 - type: nauc_precision_at_1_max value: 34.393531787632334 - type: nauc_precision_at_1_std value: -1.452007661016969 - type: nauc_precision_at_20_diff1 value: 6.312827641507564 - type: nauc_precision_at_20_max value: 22.483038562271933 - type: nauc_precision_at_20_std value: 11.368419856892416 - type: nauc_precision_at_3_diff1 value: 33.271443420273606 - type: nauc_precision_at_3_max value: 33.571078182106675 - type: nauc_precision_at_3_std value: 4.47382265155717 - type: nauc_precision_at_5_diff1 value: 23.43287104284656 - type: 
nauc_precision_at_5_max value: 30.909085068105313 - type: nauc_precision_at_5_std value: 5.545672049452433 - type: nauc_recall_at_1000_diff1 value: 35.22615594677707 - type: nauc_recall_at_1000_max value: 52.0710533173532 - type: nauc_recall_at_1000_std value: 45.17683523786464 - type: nauc_recall_at_100_diff1 value: 36.2169056956332 - type: nauc_recall_at_100_max value: 35.02435003210817 - type: nauc_recall_at_100_std value: 15.833632946282508 - type: nauc_recall_at_10_diff1 value: 39.12440292974848 - type: nauc_recall_at_10_max value: 28.0546011979648 - type: nauc_recall_at_10_std value: -9.620558638092172 - type: nauc_recall_at_1_diff1 value: 53.98646767288695 - type: nauc_recall_at_1_max value: 29.45629077638936 - type: nauc_recall_at_1_std value: -5.621187380771589 - type: nauc_recall_at_20_diff1 value: 36.39254630768161 - type: nauc_recall_at_20_max value: 29.277856508751967 - type: nauc_recall_at_20_std value: -3.048007490798412 - type: nauc_recall_at_3_diff1 value: 45.64706642644958 - type: nauc_recall_at_3_max value: 31.003050159737413 - type: nauc_recall_at_3_std value: -4.849763876930667 - type: nauc_recall_at_5_diff1 value: 40.918108859971746 - type: nauc_recall_at_5_max value: 30.69907335071493 - type: nauc_recall_at_5_std value: -6.1445436251916865 - type: ndcg_at_1 value: 40.327 - type: ndcg_at_10 value: 49.675000000000004 - type: ndcg_at_100 value: 55.364000000000004 - type: ndcg_at_1000 value: 56.992 - type: ndcg_at_20 value: 51.803999999999995 - type: ndcg_at_3 value: 45.227000000000004 - type: ndcg_at_5 value: 47.244 - type: precision_at_1 value: 40.327 - type: precision_at_10 value: 8.826 - type: precision_at_100 value: 1.354 - type: precision_at_1000 value: 0.167 - type: precision_at_20 value: 5.115 - type: precision_at_3 value: 21.303 - type: precision_at_5 value: 14.726 - type: recall_at_1 value: 33.178999999999995 - type: recall_at_10 value: 61.087 - type: recall_at_100 value: 85.099 - type: recall_at_1000 value: 95.14099999999999 - type: 
recall_at_20 value: 68.623 - type: recall_at_3 value: 48.245 - type: recall_at_5 value: 53.832 - task: type: Retrieval dataset: name: MTEB CQADupstackProgrammersRetrieval type: mteb/cqadupstack-programmers config: default split: test revision: 6184bc1440d2dbc7612be22b50686b8826d22b32 metrics: - type: main_score value: 44.99 - type: map_at_1 value: 28.089 - type: map_at_10 value: 38.98 - type: map_at_100 value: 40.339000000000006 - type: map_at_1000 value: 40.441 - type: map_at_20 value: 39.702 - type: map_at_3 value: 35.620000000000005 - type: map_at_5 value: 37.657000000000004 - type: mrr_at_1 value: 35.15981735159817 - type: mrr_at_10 value: 44.54075161266937 - type: mrr_at_100 value: 45.435730392436646 - type: mrr_at_1000 value: 45.47673849356812 - type: mrr_at_20 value: 45.05949613726918 - type: mrr_at_3 value: 42.00913242009131 - type: mrr_at_5 value: 43.52739726027392 - type: nauc_map_at_1000_diff1 value: 42.6375513442399 - type: nauc_map_at_1000_max value: 35.83899956589522 - type: nauc_map_at_1000_std value: 5.798620017712549 - type: nauc_map_at_100_diff1 value: 42.609712253881504 - type: nauc_map_at_100_max value: 35.85401871065736 - type: nauc_map_at_100_std value: 5.829007296755533 - type: nauc_map_at_10_diff1 value: 42.90931172127824 - type: nauc_map_at_10_max value: 35.46694204511423 - type: nauc_map_at_10_std value: 5.131477704152026 - type: nauc_map_at_1_diff1 value: 48.066312177855956 - type: nauc_map_at_1_max value: 30.67745267941573 - type: nauc_map_at_1_std value: -1.4170737991670943 - type: nauc_map_at_20_diff1 value: 42.730423700784 - type: nauc_map_at_20_max value: 35.710039616497085 - type: nauc_map_at_20_std value: 5.363961887475162 - type: nauc_map_at_3_diff1 value: 43.499223646579935 - type: nauc_map_at_3_max value: 33.872570039621564 - type: nauc_map_at_3_std value: 3.0787571843453008 - type: nauc_map_at_5_diff1 value: 43.28963642946521 - type: nauc_map_at_5_max value: 35.18327408279892 - type: nauc_map_at_5_std value: 4.516467154662473 - 
type: nauc_mrr_at_1000_diff1 value: 42.71279871641341 - type: nauc_mrr_at_1000_max value: 37.48825064817496 - type: nauc_mrr_at_1000_std value: 8.10015025024314 - type: nauc_mrr_at_100_diff1 value: 42.694777404773376 - type: nauc_mrr_at_100_max value: 37.476741768741086 - type: nauc_mrr_at_100_std value: 8.11525130417229 - type: nauc_mrr_at_10_diff1 value: 42.954194054560176 - type: nauc_mrr_at_10_max value: 37.606138578797506 - type: nauc_mrr_at_10_std value: 8.092519513302399 - type: nauc_mrr_at_1_diff1 value: 48.350790286038574 - type: nauc_mrr_at_1_max value: 33.97992759739641 - type: nauc_mrr_at_1_std value: 1.8332987018664093 - type: nauc_mrr_at_20_diff1 value: 42.664983701783044 - type: nauc_mrr_at_20_max value: 37.47450702110784 - type: nauc_mrr_at_20_std value: 8.001067634745462 - type: nauc_mrr_at_3_diff1 value: 42.921968602737955 - type: nauc_mrr_at_3_max value: 37.19599728791262 - type: nauc_mrr_at_3_std value: 7.4692697422507575 - type: nauc_mrr_at_5_diff1 value: 42.96028546491891 - type: nauc_mrr_at_5_max value: 37.688350071295915 - type: nauc_mrr_at_5_std value: 8.213017954012372 - type: nauc_ndcg_at_1000_diff1 value: 40.70763263942397 - type: nauc_ndcg_at_1000_max value: 37.87768319167602 - type: nauc_ndcg_at_1000_std value: 9.908807071686738 - type: nauc_ndcg_at_100_diff1 value: 39.97828438221707 - type: nauc_ndcg_at_100_max value: 37.7723393835996 - type: nauc_ndcg_at_100_std value: 10.666779466040097 - type: nauc_ndcg_at_10_diff1 value: 41.172233451172936 - type: nauc_ndcg_at_10_max value: 37.12252131573939 - type: nauc_ndcg_at_10_std value: 8.273798754436639 - type: nauc_ndcg_at_1_diff1 value: 48.350790286038574 - type: nauc_ndcg_at_1_max value: 33.97992759739641 - type: nauc_ndcg_at_1_std value: 1.8332987018664093 - type: nauc_ndcg_at_20_diff1 value: 40.33325895172716 - type: nauc_ndcg_at_20_max value: 37.36015594019951 - type: nauc_ndcg_at_20_std value: 8.818556108749302 - type: nauc_ndcg_at_3_diff1 value: 41.652701699747254 - type: 
nauc_ndcg_at_3_max value: 35.499109874223294 - type: nauc_ndcg_at_3_std value: 5.831784865606119 - type: nauc_ndcg_at_5_diff1 value: 41.856346892595475 - type: nauc_ndcg_at_5_max value: 36.940681835687194 - type: nauc_ndcg_at_5_std value: 7.507798515093516 - type: nauc_precision_at_1000_diff1 value: -2.4605367806784866 - type: nauc_precision_at_1000_max value: -0.3538142127162922 - type: nauc_precision_at_1000_std value: 8.369794961833236 - type: nauc_precision_at_100_diff1 value: -0.34954522096524704 - type: nauc_precision_at_100_max value: 13.159909603146458 - type: nauc_precision_at_100_std value: 19.425561514133996 - type: nauc_precision_at_10_diff1 value: 17.048304710148145 - type: nauc_precision_at_10_max value: 29.816041846806375 - type: nauc_precision_at_10_std value: 18.358893367243798 - type: nauc_precision_at_1_diff1 value: 48.350790286038574 - type: nauc_precision_at_1_max value: 33.97992759739641 - type: nauc_precision_at_1_std value: 1.8332987018664093 - type: nauc_precision_at_20_diff1 value: 10.450903599411344 - type: nauc_precision_at_20_max value: 25.228916373799127 - type: nauc_precision_at_20_std value: 18.46893569529936 - type: nauc_precision_at_3_diff1 value: 29.181236567048636 - type: nauc_precision_at_3_max value: 35.64918262500281 - type: nauc_precision_at_3_std value: 13.347538222514968 - type: nauc_precision_at_5_diff1 value: 23.693323840550345 - type: nauc_precision_at_5_max value: 33.972399735191225 - type: nauc_precision_at_5_std value: 17.107012760554618 - type: nauc_recall_at_1000_diff1 value: 20.297340483227945 - type: nauc_recall_at_1000_max value: 63.084305970127275 - type: nauc_recall_at_1000_std value: 63.04655000858784 - type: nauc_recall_at_100_diff1 value: 22.587332148979723 - type: nauc_recall_at_100_max value: 40.740968468024775 - type: nauc_recall_at_100_std value: 34.120423684507124 - type: nauc_recall_at_10_diff1 value: 33.361195948673675 - type: nauc_recall_at_10_max value: 37.1411402410262 - type: nauc_recall_at_10_std 
value: 13.475407196166259 - type: nauc_recall_at_1_diff1 value: 48.066312177855956 - type: nauc_recall_at_1_max value: 30.67745267941573 - type: nauc_recall_at_1_std value: -1.4170737991670943 - type: nauc_recall_at_20_diff1 value: 28.703982984383984 - type: nauc_recall_at_20_max value: 37.32929431193496 - type: nauc_recall_at_20_std value: 16.139135347989903 - type: nauc_recall_at_3_diff1 value: 36.53346179134789 - type: nauc_recall_at_3_max value: 34.11397914899309 - type: nauc_recall_at_3_std value: 7.19358019807132 - type: nauc_recall_at_5_diff1 value: 36.24058894947452 - type: nauc_recall_at_5_max value: 37.00990358651097 - type: nauc_recall_at_5_std value: 11.074645476821619 - type: ndcg_at_1 value: 35.160000000000004 - type: ndcg_at_10 value: 44.99 - type: ndcg_at_100 value: 50.661 - type: ndcg_at_1000 value: 52.599 - type: ndcg_at_20 value: 47.154 - type: ndcg_at_3 value: 39.843 - type: ndcg_at_5 value: 42.486000000000004 - type: precision_at_1 value: 35.160000000000004 - type: precision_at_10 value: 8.299 - type: precision_at_100 value: 1.2850000000000001 - type: precision_at_1000 value: 0.16199999999999998 - type: precision_at_20 value: 4.84 - type: precision_at_3 value: 19.178 - type: precision_at_5 value: 13.927 - type: recall_at_1 value: 28.089 - type: recall_at_10 value: 57.158 - type: recall_at_100 value: 81.461 - type: recall_at_1000 value: 94.46900000000001 - type: recall_at_20 value: 64.927 - type: recall_at_3 value: 42.775999999999996 - type: recall_at_5 value: 49.719 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval type: mteb/cqadupstack config: default split: test revision: CQADupstackRetrieval is a combined dataset metrics: - type: main_score value: 44.989166666666655 - type: ndcg_at_10 value: 44.989166666666655 - task: type: Retrieval dataset: name: MTEB CQADupstackStatsRetrieval type: mteb/cqadupstack-stats config: default split: test revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a metrics: - type: main_score value: 39.586 
- type: map_at_1 value: 27.301 - type: map_at_10 value: 35.022 - type: map_at_100 value: 36.061 - type: map_at_1000 value: 36.146 - type: map_at_20 value: 35.608000000000004 - type: map_at_3 value: 32.978 - type: map_at_5 value: 33.994 - type: mrr_at_1 value: 30.67484662576687 - type: mrr_at_10 value: 38.1696124257474 - type: mrr_at_100 value: 38.99730898994137 - type: mrr_at_1000 value: 39.049871007408136 - type: mrr_at_20 value: 38.62424051396064 - type: mrr_at_3 value: 36.40081799591004 - type: mrr_at_5 value: 37.23670756646219 - type: nauc_map_at_1000_diff1 value: 50.4395097150819 - type: nauc_map_at_1000_max value: 42.36231476768413 - type: nauc_map_at_1000_std value: 1.0739414045485742 - type: nauc_map_at_100_diff1 value: 50.4253775421283 - type: nauc_map_at_100_max value: 42.34508969348633 - type: nauc_map_at_100_std value: 1.0590256535050135 - type: nauc_map_at_10_diff1 value: 50.74196619464362 - type: nauc_map_at_10_max value: 42.354326434590284 - type: nauc_map_at_10_std value: 0.6330167542705694 - type: nauc_map_at_1_diff1 value: 55.7404810490963 - type: nauc_map_at_1_max value: 40.7676941648045 - type: nauc_map_at_1_std value: -5.021772566610674 - type: nauc_map_at_20_diff1 value: 50.39792463598886 - type: nauc_map_at_20_max value: 42.25768760228577 - type: nauc_map_at_20_std value: 0.8979017700131807 - type: nauc_map_at_3_diff1 value: 51.53267996170815 - type: nauc_map_at_3_max value: 41.78801756883417 - type: nauc_map_at_3_std value: -0.6652383024396911 - type: nauc_map_at_5_diff1 value: 50.992783683271504 - type: nauc_map_at_5_max value: 41.8607977828188 - type: nauc_map_at_5_std value: 0.3484379897869807 - type: nauc_mrr_at_1000_diff1 value: 48.952907124445126 - type: nauc_mrr_at_1000_max value: 42.93563741482114 - type: nauc_mrr_at_1000_std value: 3.0791495753556424 - type: nauc_mrr_at_100_diff1 value: 48.941921107360805 - type: nauc_mrr_at_100_max value: 42.94419657374061 - type: nauc_mrr_at_100_std value: 3.075397087180154 - type: 
nauc_mrr_at_10_diff1 value: 49.098926306303056 - type: nauc_mrr_at_10_max value: 42.941857820499806 - type: nauc_mrr_at_10_std value: 2.8184474174054372 - type: nauc_mrr_at_1_diff1 value: 54.428109877009334 - type: nauc_mrr_at_1_max value: 42.50273386972492 - type: nauc_mrr_at_1_std value: -2.1811826216412187 - type: nauc_mrr_at_20_diff1 value: 48.82502192775839 - type: nauc_mrr_at_20_max value: 42.92227277257095 - type: nauc_mrr_at_20_std value: 2.975812634368533 - type: nauc_mrr_at_3_diff1 value: 49.440009227591176 - type: nauc_mrr_at_3_max value: 42.95503176290712 - type: nauc_mrr_at_3_std value: 2.2997128945013796 - type: nauc_mrr_at_5_diff1 value: 49.09846782701398 - type: nauc_mrr_at_5_max value: 42.51449168285772 - type: nauc_mrr_at_5_std value: 2.7785816484421297 - type: nauc_ndcg_at_1000_diff1 value: 48.14680758187888 - type: nauc_ndcg_at_1000_max value: 43.57465718500695 - type: nauc_ndcg_at_1000_std value: 5.287435676678261 - type: nauc_ndcg_at_100_diff1 value: 47.66081605743284 - type: nauc_ndcg_at_100_max value: 43.28156751251163 - type: nauc_ndcg_at_100_std value: 4.959626409663624 - type: nauc_ndcg_at_10_diff1 value: 48.25075619623878 - type: nauc_ndcg_at_10_max value: 43.00688660666578 - type: nauc_ndcg_at_10_std value: 3.2319193368891637 - type: nauc_ndcg_at_1_diff1 value: 54.428109877009334 - type: nauc_ndcg_at_1_max value: 42.50273386972492 - type: nauc_ndcg_at_1_std value: -2.1811826216412187 - type: nauc_ndcg_at_20_diff1 value: 47.1943098627403 - type: nauc_ndcg_at_20_max value: 42.86954491768707 - type: nauc_ndcg_at_20_std value: 4.08583080150737 - type: nauc_ndcg_at_3_diff1 value: 49.32681523192246 - type: nauc_ndcg_at_3_max value: 42.46898641470274 - type: nauc_ndcg_at_3_std value: 1.7416962407725236 - type: nauc_ndcg_at_5_diff1 value: 48.59647012439291 - type: nauc_ndcg_at_5_max value: 42.07098889846439 - type: nauc_ndcg_at_5_std value: 2.979621233356828 - type: nauc_precision_at_1000_diff1 value: -1.7366334161587105 - type: 
nauc_precision_at_1000_max value: 17.70969166396819 - type: nauc_precision_at_1000_std value: 17.50619975322144 - type: nauc_precision_at_100_diff1 value: 10.082579982582155 - type: nauc_precision_at_100_max value: 28.024893516091776 - type: nauc_precision_at_100_std value: 18.41413013357596 - type: nauc_precision_at_10_diff1 value: 28.796167732373657 - type: nauc_precision_at_10_max value: 40.37340024485382 - type: nauc_precision_at_10_std value: 13.718572711091733 - type: nauc_precision_at_1_diff1 value: 54.428109877009334 - type: nauc_precision_at_1_max value: 42.50273386972492 - type: nauc_precision_at_1_std value: -2.1811826216412187 - type: nauc_precision_at_20_diff1 value: 19.82691920771315 - type: nauc_precision_at_20_max value: 34.45075390159975 - type: nauc_precision_at_20_std value: 16.410812072348058 - type: nauc_precision_at_3_diff1 value: 40.85430254962678 - type: nauc_precision_at_3_max value: 43.63016056067074 - type: nauc_precision_at_3_std value: 9.322014634477581 - type: nauc_precision_at_5_diff1 value: 35.830272848975795 - type: nauc_precision_at_5_max value: 41.30047691620363 - type: nauc_precision_at_5_std value: 13.145693992266565 - type: nauc_recall_at_1000_diff1 value: 35.532000545890504 - type: nauc_recall_at_1000_max value: 50.714223194510325 - type: nauc_recall_at_1000_std value: 43.09037309139045 - type: nauc_recall_at_100_diff1 value: 35.11024488875192 - type: nauc_recall_at_100_max value: 43.0874566265193 - type: nauc_recall_at_100_std value: 19.70628521846854 - type: nauc_recall_at_10_diff1 value: 40.36203726741153 - type: nauc_recall_at_10_max value: 42.581482582576726 - type: nauc_recall_at_10_std value: 8.642553371022348 - type: nauc_recall_at_1_diff1 value: 55.7404810490963 - type: nauc_recall_at_1_max value: 40.7676941648045 - type: nauc_recall_at_1_std value: -5.021772566610674 - type: nauc_recall_at_20_diff1 value: 35.97348868186562 - type: nauc_recall_at_20_max value: 41.82695933305065 - type: nauc_recall_at_20_std value: 
11.444957541593585 - type: nauc_recall_at_3_diff1 value: 44.20020470014979 - type: nauc_recall_at_3_max value: 40.84130855296979 - type: nauc_recall_at_3_std value: 5.004883338558809 - type: nauc_recall_at_5_diff1 value: 42.08756885472078 - type: nauc_recall_at_5_max value: 39.90323783606852 - type: nauc_recall_at_5_std value: 8.085182534171127 - type: ndcg_at_1 value: 30.675 - type: ndcg_at_10 value: 39.586 - type: ndcg_at_100 value: 44.737 - type: ndcg_at_1000 value: 46.863 - type: ndcg_at_20 value: 41.495 - type: ndcg_at_3 value: 35.8 - type: ndcg_at_5 value: 37.3 - type: precision_at_1 value: 30.675 - type: precision_at_10 value: 6.196 - type: precision_at_100 value: 0.9570000000000001 - type: precision_at_1000 value: 0.122 - type: precision_at_20 value: 3.6350000000000002 - type: precision_at_3 value: 15.337 - type: precision_at_5 value: 10.337 - type: recall_at_1 value: 27.301 - type: recall_at_10 value: 50.346999999999994 - type: recall_at_100 value: 74.459 - type: recall_at_1000 value: 90.018 - type: recall_at_20 value: 57.473 - type: recall_at_3 value: 39.672000000000004 - type: recall_at_5 value: 43.383 - task: type: Retrieval dataset: name: MTEB CQADupstackTexRetrieval type: mteb/cqadupstack-tex config: default split: test revision: 46989137a86843e03a6195de44b09deda022eec7 metrics: - type: main_score value: 32.842 - type: map_at_1 value: 19.527 - type: map_at_10 value: 27.711999999999996 - type: map_at_100 value: 28.98 - type: map_at_1000 value: 29.108 - type: map_at_20 value: 28.407 - type: map_at_3 value: 25.023 - type: map_at_5 value: 26.528000000000002 - type: mrr_at_1 value: 23.675154852030282 - type: mrr_at_10 value: 31.810676323752784 - type: mrr_at_100 value: 32.788970614380716 - type: mrr_at_1000 value: 32.86028758975889 - type: mrr_at_20 value: 32.35935756676056 - type: mrr_at_3 value: 29.41615049323246 - type: mrr_at_5 value: 30.785730672172633 - type: nauc_map_at_1000_diff1 value: 35.597766688968015 - type: nauc_map_at_1000_max value: 
26.295790183159845 - type: nauc_map_at_1000_std value: -0.04229904865958209 - type: nauc_map_at_100_diff1 value: 35.568782622469925 - type: nauc_map_at_100_max value: 26.27850795471227 - type: nauc_map_at_100_std value: -0.04944875782811099 - type: nauc_map_at_10_diff1 value: 35.63760937893694 - type: nauc_map_at_10_max value: 26.130094042028233 - type: nauc_map_at_10_std value: -0.6896882769027717 - type: nauc_map_at_1_diff1 value: 41.759098341890976 - type: nauc_map_at_1_max value: 23.918885427783326 - type: nauc_map_at_1_std value: -2.1383574897865074 - type: nauc_map_at_20_diff1 value: 35.55706530442612 - type: nauc_map_at_20_max value: 26.23339626569677 - type: nauc_map_at_20_std value: -0.162172033918129 - type: nauc_map_at_3_diff1 value: 37.22183376355153 - type: nauc_map_at_3_max value: 25.770512522122186 - type: nauc_map_at_3_std value: -1.3105892187778403 - type: nauc_map_at_5_diff1 value: 36.205913161663084 - type: nauc_map_at_5_max value: 25.953300641502064 - type: nauc_map_at_5_std value: -0.7987363137547906 - type: nauc_mrr_at_1000_diff1 value: 34.864016559617646 - type: nauc_mrr_at_1000_max value: 26.8689525348564 - type: nauc_mrr_at_1000_std value: -0.5839923973914446 - type: nauc_mrr_at_100_diff1 value: 34.83820469598538 - type: nauc_mrr_at_100_max value: 26.864669056231282 - type: nauc_mrr_at_100_std value: -0.5785645654158633 - type: nauc_mrr_at_10_diff1 value: 34.81868397381981 - type: nauc_mrr_at_10_max value: 26.79988560460627 - type: nauc_mrr_at_10_std value: -1.1113808365827318 - type: nauc_mrr_at_1_diff1 value: 40.0281507903504 - type: nauc_mrr_at_1_max value: 25.036735941806583 - type: nauc_mrr_at_1_std value: -2.508700799268523 - type: nauc_mrr_at_20_diff1 value: 34.81954537357966 - type: nauc_mrr_at_20_max value: 26.877673033315453 - type: nauc_mrr_at_20_std value: -0.6706028107452919 - type: nauc_mrr_at_3_diff1 value: 35.87313782549696 - type: nauc_mrr_at_3_max value: 26.776261693392335 - type: nauc_mrr_at_3_std value: 
-1.8010591328112908 - type: nauc_mrr_at_5_diff1 value: 35.31673912159536 - type: nauc_mrr_at_5_max value: 26.78720786106881 - type: nauc_mrr_at_5_std value: -1.3096326953900546 - type: nauc_ndcg_at_1000_diff1 value: 33.43105244339048 - type: nauc_ndcg_at_1000_max value: 27.52195065724684 - type: nauc_ndcg_at_1000_std value: 2.8376056562675744 - type: nauc_ndcg_at_100_diff1 value: 32.90916846420573 - type: nauc_ndcg_at_100_max value: 27.27161017736065 - type: nauc_ndcg_at_100_std value: 2.8703122625872126 - type: nauc_ndcg_at_10_diff1 value: 33.12714979317447 - type: nauc_ndcg_at_10_max value: 26.67762031747992 - type: nauc_ndcg_at_10_std value: -0.1341345572932233 - type: nauc_ndcg_at_1_diff1 value: 40.0281507903504 - type: nauc_ndcg_at_1_max value: 25.036735941806583 - type: nauc_ndcg_at_1_std value: -2.508700799268523 - type: nauc_ndcg_at_20_diff1 value: 32.891656138688546 - type: nauc_ndcg_at_20_max value: 26.991976404027163 - type: nauc_ndcg_at_20_std value: 1.6050741106677746 - type: nauc_ndcg_at_3_diff1 value: 35.576958713955484 - type: nauc_ndcg_at_3_max value: 26.41687745899445 - type: nauc_ndcg_at_3_std value: -1.5326687067002291 - type: nauc_ndcg_at_5_diff1 value: 34.27335619067276 - type: nauc_ndcg_at_5_max value: 26.479515412084208 - type: nauc_ndcg_at_5_std value: -0.5597648935666003 - type: nauc_precision_at_1000_diff1 value: -0.18660914306684007 - type: nauc_precision_at_1000_max value: 7.268255385799229 - type: nauc_precision_at_1000_std value: -0.1968875268478991 - type: nauc_precision_at_100_diff1 value: 7.386701205054449 - type: nauc_precision_at_100_max value: 15.477735603019607 - type: nauc_precision_at_100_std value: 4.753153414679307 - type: nauc_precision_at_10_diff1 value: 18.4668296945938 - type: nauc_precision_at_10_max value: 25.457144217779597 - type: nauc_precision_at_10_std value: 0.40165373733963605 - type: nauc_precision_at_1_diff1 value: 40.0281507903504 - type: nauc_precision_at_1_max value: 25.036735941806583 - type: 
nauc_precision_at_1_std value: -2.508700799268523 - type: nauc_precision_at_20_diff1 value: 14.751135844289335 - type: nauc_precision_at_20_max value: 22.763373329576293 - type: nauc_precision_at_20_std value: 4.360731801761864 - type: nauc_precision_at_3_diff1 value: 28.154753888265393 - type: nauc_precision_at_3_max value: 27.838427033527147 - type: nauc_precision_at_3_std value: -1.0042621266717804 - type: nauc_precision_at_5_diff1 value: 23.549026872711423 - type: nauc_precision_at_5_max value: 27.192214745385044 - type: nauc_precision_at_5_std value: 0.4455206110174471 - type: nauc_recall_at_1000_diff1 value: 17.905404210815632 - type: nauc_recall_at_1000_max value: 32.8674418535776 - type: nauc_recall_at_1000_std value: 35.187050415735435 - type: nauc_recall_at_100_diff1 value: 20.903609751984757 - type: nauc_recall_at_100_max value: 27.180306691518364 - type: nauc_recall_at_100_std value: 17.553030959393297 - type: nauc_recall_at_10_diff1 value: 25.615147693464387 - type: nauc_recall_at_10_max value: 25.97062699453565 - type: nauc_recall_at_10_std value: 2.2181702899826576 - type: nauc_recall_at_1_diff1 value: 41.759098341890976 - type: nauc_recall_at_1_max value: 23.918885427783326 - type: nauc_recall_at_1_std value: -2.1383574897865074 - type: nauc_recall_at_20_diff1 value: 23.922775940094386 - type: nauc_recall_at_20_max value: 26.384627814902785 - type: nauc_recall_at_20_std value: 7.944532403561578 - type: nauc_recall_at_3_diff1 value: 32.26543270634743 - type: nauc_recall_at_3_max value: 26.36357710828272 - type: nauc_recall_at_3_std value: -0.42723331708340706 - type: nauc_recall_at_5_diff1 value: 29.080464141763336 - type: nauc_recall_at_5_max value: 25.81238438303652 - type: nauc_recall_at_5_std value: 1.1649311168287726 - type: ndcg_at_1 value: 23.674999999999997 - type: ndcg_at_10 value: 32.842 - type: ndcg_at_100 value: 38.64 - type: ndcg_at_1000 value: 41.367 - type: ndcg_at_20 value: 35.032999999999994 - type: ndcg_at_3 value: 
28.166000000000004 - type: ndcg_at_5 value: 30.407 - type: precision_at_1 value: 23.674999999999997 - type: precision_at_10 value: 6.005 - type: precision_at_100 value: 1.053 - type: precision_at_1000 value: 0.146 - type: precision_at_20 value: 3.6580000000000004 - type: precision_at_3 value: 13.352 - type: precision_at_5 value: 9.718 - type: recall_at_1 value: 19.527 - type: recall_at_10 value: 44.096999999999994 - type: recall_at_100 value: 69.962 - type: recall_at_1000 value: 89.035 - type: recall_at_20 value: 52.166000000000004 - type: recall_at_3 value: 30.946 - type: recall_at_5 value: 36.789 - task: type: Retrieval dataset: name: MTEB CQADupstackUnixRetrieval type: mteb/cqadupstack-unix config: default split: test revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53 metrics: - type: main_score value: 46.54 - type: map_at_1 value: 29.953999999999997 - type: map_at_10 value: 40.742 - type: map_at_100 value: 41.964 - type: map_at_1000 value: 42.059999999999995 - type: map_at_20 value: 41.426 - type: map_at_3 value: 37.378 - type: map_at_5 value: 39.267 - type: mrr_at_1 value: 34.701492537313435 - type: mrr_at_10 value: 44.29978085761664 - type: mrr_at_100 value: 45.205551401915486 - type: mrr_at_1000 value: 45.24735017384963 - type: mrr_at_20 value: 44.85338423755729 - type: mrr_at_3 value: 41.57338308457707 - type: mrr_at_5 value: 43.19185323383077 - type: nauc_map_at_1000_diff1 value: 48.45170522932164 - type: nauc_map_at_1000_max value: 31.544164363591204 - type: nauc_map_at_1000_std value: 0.8661088818146858 - type: nauc_map_at_100_diff1 value: 48.47347800061323 - type: nauc_map_at_100_max value: 31.568637596620313 - type: nauc_map_at_100_std value: 0.9252699336843858 - type: nauc_map_at_10_diff1 value: 48.64849891585432 - type: nauc_map_at_10_max value: 31.40371265579746 - type: nauc_map_at_10_std value: 0.7088016563713089 - type: nauc_map_at_1_diff1 value: 53.57918993108331 - type: nauc_map_at_1_max value: 31.392632653740993 - type: nauc_map_at_1_std value: 
-2.857306170463933 - type: nauc_map_at_20_diff1 value: 48.49084353023969 - type: nauc_map_at_20_max value: 31.470313174779374 - type: nauc_map_at_20_std value: 0.8950296035234309 - type: nauc_map_at_3_diff1 value: 49.273481161619806 - type: nauc_map_at_3_max value: 31.101471509782826 - type: nauc_map_at_3_std value: -0.886510096257905 - type: nauc_map_at_5_diff1 value: 48.85344288229106 - type: nauc_map_at_5_max value: 31.32633663238284 - type: nauc_map_at_5_std value: -0.44752909698881177 - type: nauc_mrr_at_1000_diff1 value: 46.27593166906613 - type: nauc_mrr_at_1000_max value: 31.637594372116336 - type: nauc_mrr_at_1000_std value: 0.8444917550670064 - type: nauc_mrr_at_100_diff1 value: 46.27161543033672 - type: nauc_mrr_at_100_max value: 31.64330655339695 - type: nauc_mrr_at_100_std value: 0.8717446416398773 - type: nauc_mrr_at_10_diff1 value: 46.100348481312864 - type: nauc_mrr_at_10_max value: 31.594271897882237 - type: nauc_mrr_at_10_std value: 0.8807168907688873 - type: nauc_mrr_at_1_diff1 value: 51.35163098909763 - type: nauc_mrr_at_1_max value: 31.99084441327899 - type: nauc_mrr_at_1_std value: -2.688594880742662 - type: nauc_mrr_at_20_diff1 value: 46.18178546174727 - type: nauc_mrr_at_20_max value: 31.639111674119448 - type: nauc_mrr_at_20_std value: 0.9855008641374622 - type: nauc_mrr_at_3_diff1 value: 46.307484835305864 - type: nauc_mrr_at_3_max value: 31.35563850804847 - type: nauc_mrr_at_3_std value: -0.3419536587707561 - type: nauc_mrr_at_5_diff1 value: 46.17646418781234 - type: nauc_mrr_at_5_max value: 31.313474270239833 - type: nauc_mrr_at_5_std value: -0.08656550526568331 - type: nauc_ndcg_at_1000_diff1 value: 46.12095795101613 - type: nauc_ndcg_at_1000_max value: 31.989083597726314 - type: nauc_ndcg_at_1000_std value: 3.2965704707660763 - type: nauc_ndcg_at_100_diff1 value: 46.05376249841318 - type: nauc_ndcg_at_100_max value: 32.39195988574972 - type: nauc_ndcg_at_100_std value: 4.518018135593347 - type: nauc_ndcg_at_10_diff1 value: 
46.133631183744875 - type: nauc_ndcg_at_10_max value: 31.45358876172339 - type: nauc_ndcg_at_10_std value: 3.4254370918871055 - type: nauc_ndcg_at_1_diff1 value: 51.35163098909763 - type: nauc_ndcg_at_1_max value: 31.99084441327899 - type: nauc_ndcg_at_1_std value: -2.688594880742662 - type: nauc_ndcg_at_20_diff1 value: 45.94584949766954 - type: nauc_ndcg_at_20_max value: 31.689777515111295 - type: nauc_ndcg_at_20_std value: 4.189082428922442 - type: nauc_ndcg_at_3_diff1 value: 46.5057835389752 - type: nauc_ndcg_at_3_max value: 30.941407592082047 - type: nauc_ndcg_at_3_std value: -0.042473944857831535 - type: nauc_ndcg_at_5_diff1 value: 46.369027395136136 - type: nauc_ndcg_at_5_max value: 31.057841776505352 - type: nauc_ndcg_at_5_std value: 0.6878993420489522 - type: nauc_precision_at_1000_diff1 value: -17.30759714093202 - type: nauc_precision_at_1000_max value: -4.441155558458858 - type: nauc_precision_at_1000_std value: 1.5537300718220326 - type: nauc_precision_at_100_diff1 value: -7.18920438222021 - type: nauc_precision_at_100_max value: 8.017878121399253 - type: nauc_precision_at_100_std value: 11.357132919349102 - type: nauc_precision_at_10_diff1 value: 15.202451884794076 - type: nauc_precision_at_10_max value: 19.077295902881417 - type: nauc_precision_at_10_std value: 9.885526867355805 - type: nauc_precision_at_1_diff1 value: 51.35163098909763 - type: nauc_precision_at_1_max value: 31.99084441327899 - type: nauc_precision_at_1_std value: -2.688594880742662 - type: nauc_precision_at_20_diff1 value: 6.827461091494899 - type: nauc_precision_at_20_max value: 15.27268633497114 - type: nauc_precision_at_20_std value: 11.515826649647384 - type: nauc_precision_at_3_diff1 value: 31.043021807472027 - type: nauc_precision_at_3_max value: 26.22457157531548 - type: nauc_precision_at_3_std value: 1.788215968301994 - type: nauc_precision_at_5_diff1 value: 25.030185818513235 - type: nauc_precision_at_5_max value: 23.680129160901537 - type: nauc_precision_at_5_std value: 
4.303018899688115 - type: nauc_recall_at_1000_diff1 value: 28.68826642607512 - type: nauc_recall_at_1000_max value: 42.33849804103852 - type: nauc_recall_at_1000_std value: 42.67413575876864 - type: nauc_recall_at_100_diff1 value: 36.51494878715 - type: nauc_recall_at_100_max value: 37.4764995034434 - type: nauc_recall_at_100_std value: 28.295671266661017 - type: nauc_recall_at_10_diff1 value: 39.416721111463524 - type: nauc_recall_at_10_max value: 29.95985608454179 - type: nauc_recall_at_10_std value: 12.423335839786201 - type: nauc_recall_at_1_diff1 value: 53.57918993108331 - type: nauc_recall_at_1_max value: 31.392632653740993 - type: nauc_recall_at_1_std value: -2.857306170463933 - type: nauc_recall_at_20_diff1 value: 38.228803480194046 - type: nauc_recall_at_20_max value: 30.87261362975955 - type: nauc_recall_at_20_std value: 16.977113091834095 - type: nauc_recall_at_3_diff1 value: 43.154348566653155 - type: nauc_recall_at_3_max value: 29.54536633744803 - type: nauc_recall_at_3_std value: 2.02842672250621 - type: nauc_recall_at_5_diff1 value: 41.00436246072242 - type: nauc_recall_at_5_max value: 29.413569555348023 - type: nauc_recall_at_5_std value: 3.845214021958289 - type: ndcg_at_1 value: 34.701 - type: ndcg_at_10 value: 46.54 - type: ndcg_at_100 value: 51.754999999999995 - type: ndcg_at_1000 value: 53.71 - type: ndcg_at_20 value: 48.679 - type: ndcg_at_3 value: 40.892 - type: ndcg_at_5 value: 43.595 - type: precision_at_1 value: 34.701 - type: precision_at_10 value: 8.004 - type: precision_at_100 value: 1.185 - type: precision_at_1000 value: 0.145 - type: precision_at_20 value: 4.632 - type: precision_at_3 value: 18.719 - type: precision_at_5 value: 13.245999999999999 - type: recall_at_1 value: 29.953999999999997 - type: recall_at_10 value: 60.246 - type: recall_at_100 value: 82.128 - type: recall_at_1000 value: 95.622 - type: recall_at_20 value: 67.756 - type: recall_at_3 value: 45.096000000000004 - type: recall_at_5 value: 51.9 - task: type: Retrieval 
dataset: name: MTEB CQADupstackWebmastersRetrieval type: mteb/cqadupstack-webmasters config: default split: test revision: 160c094312a0e1facb97e55eeddb698c0abe3571 metrics: - type: main_score value: 44.718999999999994 - type: map_at_1 value: 28.383999999999997 - type: map_at_10 value: 38.422 - type: map_at_100 value: 40.058 - type: map_at_1000 value: 40.276 - type: map_at_20 value: 39.301 - type: map_at_3 value: 35.205 - type: map_at_5 value: 36.803999999999995 - type: mrr_at_1 value: 33.59683794466403 - type: mrr_at_10 value: 42.837536859275986 - type: mrr_at_100 value: 43.7501703455481 - type: mrr_at_1000 value: 43.79258407771123 - type: mrr_at_20 value: 43.36044710445095 - type: mrr_at_3 value: 40.15151515151516 - type: mrr_at_5 value: 41.74242424242425 - type: nauc_map_at_1000_diff1 value: 47.934826596875304 - type: nauc_map_at_1000_max value: 32.39759438116062 - type: nauc_map_at_1000_std value: 0.9489007346763054 - type: nauc_map_at_100_diff1 value: 47.94844822157888 - type: nauc_map_at_100_max value: 32.51485845519537 - type: nauc_map_at_100_std value: 0.8094339925545622 - type: nauc_map_at_10_diff1 value: 48.251456404874645 - type: nauc_map_at_10_max value: 31.412906399154245 - type: nauc_map_at_10_std value: -0.7024825737369933 - type: nauc_map_at_1_diff1 value: 55.81906101970174 - type: nauc_map_at_1_max value: 31.811715334193796 - type: nauc_map_at_1_std value: -6.17056859281584 - type: nauc_map_at_20_diff1 value: 47.80902650237369 - type: nauc_map_at_20_max value: 32.22465403023091 - type: nauc_map_at_20_std value: 0.20706526946705656 - type: nauc_map_at_3_diff1 value: 49.97333984346632 - type: nauc_map_at_3_max value: 31.58195498640799 - type: nauc_map_at_3_std value: -2.577539707727459 - type: nauc_map_at_5_diff1 value: 49.40005767350608 - type: nauc_map_at_5_max value: 30.998435600377434 - type: nauc_map_at_5_std value: -2.1231771618690307 - type: nauc_mrr_at_1000_diff1 value: 46.86811371969663 - type: nauc_mrr_at_1000_max value: 31.25147138171024 - 
type: nauc_mrr_at_1000_std value: 1.9954422477585918 - type: nauc_mrr_at_100_diff1 value: 46.855870345882195 - type: nauc_mrr_at_100_max value: 31.263524035665966 - type: nauc_mrr_at_100_std value: 2.0160751193806568 - type: nauc_mrr_at_10_diff1 value: 46.93294772825783 - type: nauc_mrr_at_10_max value: 30.927002048701663 - type: nauc_mrr_at_10_std value: 1.6538220080908224 - type: nauc_mrr_at_1_diff1 value: 52.416386548395664 - type: nauc_mrr_at_1_max value: 32.28582003787206 - type: nauc_mrr_at_1_std value: -2.154991145714492 - type: nauc_mrr_at_20_diff1 value: 46.71796185319694 - type: nauc_mrr_at_20_max value: 31.16219902794994 - type: nauc_mrr_at_20_std value: 1.8590646572728409 - type: nauc_mrr_at_3_diff1 value: 47.697100317669914 - type: nauc_mrr_at_3_max value: 30.821806030159383 - type: nauc_mrr_at_3_std value: 1.1927626358099177 - type: nauc_mrr_at_5_diff1 value: 47.065272061365704 - type: nauc_mrr_at_5_max value: 30.299230962805023 - type: nauc_mrr_at_5_std value: 1.3225842862629529 - type: nauc_ndcg_at_1000_diff1 value: 45.20612583136058 - type: nauc_ndcg_at_1000_max value: 33.51931869947315 - type: nauc_ndcg_at_1000_std value: 4.923707509620363 - type: nauc_ndcg_at_100_diff1 value: 44.76206243393775 - type: nauc_ndcg_at_100_max value: 33.57771606755598 - type: nauc_ndcg_at_100_std value: 5.30915563331338 - type: nauc_ndcg_at_10_diff1 value: 45.12714032463827 - type: nauc_ndcg_at_10_max value: 30.351909495610492 - type: nauc_ndcg_at_10_std value: 2.3972947289996873 - type: nauc_ndcg_at_1_diff1 value: 52.416386548395664 - type: nauc_ndcg_at_1_max value: 32.28582003787206 - type: nauc_ndcg_at_1_std value: -2.154991145714492 - type: nauc_ndcg_at_20_diff1 value: 44.20281844000005 - type: nauc_ndcg_at_20_max value: 32.14112739396226 - type: nauc_ndcg_at_20_std value: 3.3971385462591916 - type: nauc_ndcg_at_3_diff1 value: 47.0633767031858 - type: nauc_ndcg_at_3_max value: 31.032896053733435 - type: nauc_ndcg_at_3_std value: 0.6827544906310201 - type: 
nauc_ndcg_at_5_diff1 value: 46.735352294106484 - type: nauc_ndcg_at_5_max value: 29.784992270528544 - type: nauc_ndcg_at_5_std value: 0.8685943819516141 - type: nauc_precision_at_1000_diff1 value: -12.223330179860852 - type: nauc_precision_at_1000_max value: -9.266492213777273 - type: nauc_precision_at_1000_std value: 19.0569899587788 - type: nauc_precision_at_100_diff1 value: -5.803751085072067 - type: nauc_precision_at_100_max value: 3.448932057044294 - type: nauc_precision_at_100_std value: 23.470863527030627 - type: nauc_precision_at_10_diff1 value: 8.887357341361907 - type: nauc_precision_at_10_max value: 18.67165390928126 - type: nauc_precision_at_10_std value: 19.158543337955404 - type: nauc_precision_at_1_diff1 value: 52.416386548395664 - type: nauc_precision_at_1_max value: 32.28582003787206 - type: nauc_precision_at_1_std value: -2.154991145714492 - type: nauc_precision_at_20_diff1 value: 0.942496138409553 - type: nauc_precision_at_20_max value: 18.86957127610774 - type: nauc_precision_at_20_std value: 24.075503903246496 - type: nauc_precision_at_3_diff1 value: 28.15363877307106 - type: nauc_precision_at_3_max value: 27.064928137991824 - type: nauc_precision_at_3_std value: 8.632807104504753 - type: nauc_precision_at_5_diff1 value: 20.805862332497973 - type: nauc_precision_at_5_max value: 21.420201475758404 - type: nauc_precision_at_5_std value: 12.380239645425714 - type: nauc_recall_at_1000_diff1 value: 18.478341468055547 - type: nauc_recall_at_1000_max value: 56.293560115074506 - type: nauc_recall_at_1000_std value: 64.31607185065428 - type: nauc_recall_at_100_diff1 value: 26.737267337771886 - type: nauc_recall_at_100_max value: 38.011889141496326 - type: nauc_recall_at_100_std value: 30.44904690114732 - type: nauc_recall_at_10_diff1 value: 35.22772732735716 - type: nauc_recall_at_10_max value: 26.000054115159486 - type: nauc_recall_at_10_std value: 5.174264254271206 - type: nauc_recall_at_1_diff1 value: 55.81906101970174 - type: nauc_recall_at_1_max 
value: 31.811715334193796 - type: nauc_recall_at_1_std value: -6.17056859281584 - type: nauc_recall_at_20_diff1 value: 30.48493302415641 - type: nauc_recall_at_20_max value: 31.05487040370753 - type: nauc_recall_at_20_std value: 10.319948318834136 - type: nauc_recall_at_3_diff1 value: 43.12289512340243 - type: nauc_recall_at_3_max value: 28.176279771026135 - type: nauc_recall_at_3_std value: -0.1775154523381921 - type: nauc_recall_at_5_diff1 value: 40.9934933741234 - type: nauc_recall_at_5_max value: 25.569156290584733 - type: nauc_recall_at_5_std value: 0.21166696686855038 - type: ndcg_at_1 value: 33.597 - type: ndcg_at_10 value: 44.718999999999994 - type: ndcg_at_100 value: 50.324000000000005 - type: ndcg_at_1000 value: 52.468 - type: ndcg_at_20 value: 46.822 - type: ndcg_at_3 value: 39.558 - type: ndcg_at_5 value: 41.827999999999996 - type: precision_at_1 value: 33.597 - type: precision_at_10 value: 8.735 - type: precision_at_100 value: 1.6420000000000001 - type: precision_at_1000 value: 0.246 - type: precision_at_20 value: 5.375 - type: precision_at_3 value: 18.511 - type: precision_at_5 value: 13.399 - type: recall_at_1 value: 28.383999999999997 - type: recall_at_10 value: 56.425000000000004 - type: recall_at_100 value: 82.01899999999999 - type: recall_at_1000 value: 95.285 - type: recall_at_20 value: 64.615 - type: recall_at_3 value: 42.171 - type: recall_at_5 value: 48.296 - task: type: Retrieval dataset: name: MTEB CQADupstackWordpressRetrieval type: mteb/cqadupstack-wordpress config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: main_score value: 38.269999999999996 - type: map_at_1 value: 25.324999999999996 - type: map_at_10 value: 33.263 - type: map_at_100 value: 34.304 - type: map_at_1000 value: 34.394000000000005 - type: map_at_20 value: 33.827 - type: map_at_3 value: 30.259999999999998 - type: map_at_5 value: 31.832 - type: mrr_at_1 value: 27.171903881700555 - type: mrr_at_10 value: 35.334991051257234 - type: 
mrr_at_100 value: 36.251283465952355 - type: mrr_at_1000 value: 36.316236092511055 - type: mrr_at_20 value: 35.87141909945257 - type: mrr_at_3 value: 32.71719038817007 - type: mrr_at_5 value: 34.19593345656194 - type: nauc_map_at_1000_diff1 value: 39.614836211522714 - type: nauc_map_at_1000_max value: 22.019768626310192 - type: nauc_map_at_1000_std value: -1.5238708712112499 - type: nauc_map_at_100_diff1 value: 39.63008548572307 - type: nauc_map_at_100_max value: 22.044756063752345 - type: nauc_map_at_100_std value: -1.4869190221494792 - type: nauc_map_at_10_diff1 value: 39.73025012395569 - type: nauc_map_at_10_max value: 22.117710178892107 - type: nauc_map_at_10_std value: -2.5129984871932973 - type: nauc_map_at_1_diff1 value: 45.015617718902654 - type: nauc_map_at_1_max value: 19.313800263189638 - type: nauc_map_at_1_std value: -4.763931386681675 - type: nauc_map_at_20_diff1 value: 39.53678019013766 - type: nauc_map_at_20_max value: 21.880316719428258 - type: nauc_map_at_20_std value: -1.882003994523355 - type: nauc_map_at_3_diff1 value: 40.37307665298228 - type: nauc_map_at_3_max value: 20.851976075322533 - type: nauc_map_at_3_std value: -2.429569082966531 - type: nauc_map_at_5_diff1 value: 39.763015635086 - type: nauc_map_at_5_max value: 22.010102196900725 - type: nauc_map_at_5_std value: -2.654896415670943 - type: nauc_mrr_at_1000_diff1 value: 39.74071733680025 - type: nauc_mrr_at_1000_max value: 21.67309640681989 - type: nauc_mrr_at_1000_std value: -1.4003373135477462 - type: nauc_mrr_at_100_diff1 value: 39.730614151966485 - type: nauc_mrr_at_100_max value: 21.678390048971767 - type: nauc_mrr_at_100_std value: -1.3655362623563931 - type: nauc_mrr_at_10_diff1 value: 39.7900031013241 - type: nauc_mrr_at_10_max value: 21.73643491725051 - type: nauc_mrr_at_10_std value: -2.1175389838696312 - type: nauc_mrr_at_1_diff1 value: 46.165736140679776 - type: nauc_mrr_at_1_max value: 20.071083446822147 - type: nauc_mrr_at_1_std value: -5.018909100858311 - type: 
nauc_mrr_at_20_diff1 value: 39.6371295762885 - type: nauc_mrr_at_20_max value: 21.659557440270973 - type: nauc_mrr_at_20_std value: -1.4909603958341686 - type: nauc_mrr_at_3_diff1 value: 40.351150322758876 - type: nauc_mrr_at_3_max value: 20.83706249041544 - type: nauc_mrr_at_3_std value: -1.956027373253151 - type: nauc_mrr_at_5_diff1 value: 39.57759107791911 - type: nauc_mrr_at_5_max value: 21.79552045204151 - type: nauc_mrr_at_5_std value: -2.1507013120951126 - type: nauc_ndcg_at_1000_diff1 value: 37.717619356839016 - type: nauc_ndcg_at_1000_max value: 22.545375504379805 - type: nauc_ndcg_at_1000_std value: 1.682348628141016 - type: nauc_ndcg_at_100_diff1 value: 37.656027803682626 - type: nauc_ndcg_at_100_max value: 22.49278246383637 - type: nauc_ndcg_at_100_std value: 2.6818118152357773 - type: nauc_ndcg_at_10_diff1 value: 37.834954205539766 - type: nauc_ndcg_at_10_max value: 22.655839885558443 - type: nauc_ndcg_at_10_std value: -1.97159619786231 - type: nauc_ndcg_at_1_diff1 value: 46.165736140679776 - type: nauc_ndcg_at_1_max value: 20.071083446822147 - type: nauc_ndcg_at_1_std value: -5.018909100858311 - type: nauc_ndcg_at_20_diff1 value: 37.171914857454304 - type: nauc_ndcg_at_20_max value: 21.858904801745897 - type: nauc_ndcg_at_20_std value: 0.3809854859496657 - type: nauc_ndcg_at_3_diff1 value: 38.4460623883955 - type: nauc_ndcg_at_3_max value: 20.95244159463402 - type: nauc_ndcg_at_3_std value: -1.2685011660086651 - type: nauc_ndcg_at_5_diff1 value: 37.48831054573054 - type: nauc_ndcg_at_5_max value: 22.625921624640526 - type: nauc_ndcg_at_5_std value: -2.049221092724925 - type: nauc_precision_at_1000_diff1 value: -19.120500628263994 - type: nauc_precision_at_1000_max value: -6.650707109047473 - type: nauc_precision_at_1000_std value: 15.71193179253002 - type: nauc_precision_at_100_diff1 value: 6.254606806876069 - type: nauc_precision_at_100_max value: 14.601826922181823 - type: nauc_precision_at_100_std value: 28.38299592246453 - type: 
nauc_precision_at_10_diff1 value: 22.978614338670816 - type: nauc_precision_at_10_max value: 23.04146766323557 - type: nauc_precision_at_10_std value: 6.226264308612577 - type: nauc_precision_at_1_diff1 value: 46.165736140679776 - type: nauc_precision_at_1_max value: 20.071083446822147 - type: nauc_precision_at_1_std value: -5.018909100858311 - type: nauc_precision_at_20_diff1 value: 17.681032853225602 - type: nauc_precision_at_20_max value: 18.66680304585122 - type: nauc_precision_at_20_std value: 15.34896796713905 - type: nauc_precision_at_3_diff1 value: 31.359396694559194 - type: nauc_precision_at_3_max value: 22.279263308973274 - type: nauc_precision_at_3_std value: 3.6302537979529035 - type: nauc_precision_at_5_diff1 value: 26.32257879892933 - type: nauc_precision_at_5_max value: 25.402524493181026 - type: nauc_precision_at_5_std value: 4.731450603747359 - type: nauc_recall_at_1000_diff1 value: 23.562925244967875 - type: nauc_recall_at_1000_max value: 30.737399333586797 - type: nauc_recall_at_1000_std value: 34.19418935008663 - type: nauc_recall_at_100_diff1 value: 28.703574970574824 - type: nauc_recall_at_100_max value: 22.448663600170278 - type: nauc_recall_at_100_std value: 24.53297349042035 - type: nauc_recall_at_10_diff1 value: 31.73603907811882 - type: nauc_recall_at_10_max value: 23.453183748640765 - type: nauc_recall_at_10_std value: -1.8279054407176274 - type: nauc_recall_at_1_diff1 value: 45.015617718902654 - type: nauc_recall_at_1_max value: 19.313800263189638 - type: nauc_recall_at_1_std value: -4.763931386681675 - type: nauc_recall_at_20_diff1 value: 28.74169081866096 - type: nauc_recall_at_20_max value: 20.035509169577324 - type: nauc_recall_at_20_std value: 7.371615811227748 - type: nauc_recall_at_3_diff1 value: 34.09890157333362 - type: nauc_recall_at_3_max value: 20.46565842748346 - type: nauc_recall_at_3_std value: -0.4337283067447526 - type: nauc_recall_at_5_diff1 value: 30.974580787842402 - type: nauc_recall_at_5_max value: 
23.76379349487105 - type: nauc_recall_at_5_std value: -1.8407515927979428 - type: ndcg_at_1 value: 27.172 - type: ndcg_at_10 value: 38.269999999999996 - type: ndcg_at_100 value: 43.338 - type: ndcg_at_1000 value: 45.594 - type: ndcg_at_20 value: 40.256 - type: ndcg_at_3 value: 32.673 - type: ndcg_at_5 value: 35.224 - type: precision_at_1 value: 27.172 - type: precision_at_10 value: 6.063000000000001 - type: precision_at_100 value: 0.9259999999999999 - type: precision_at_1000 value: 0.123 - type: precision_at_20 value: 3.5029999999999997 - type: precision_at_3 value: 13.74 - type: precision_at_5 value: 9.797 - type: recall_at_1 value: 25.324999999999996 - type: recall_at_10 value: 51.634 - type: recall_at_100 value: 74.687 - type: recall_at_1000 value: 91.412 - type: recall_at_20 value: 59.207 - type: recall_at_3 value: 36.678 - type: recall_at_5 value: 42.742999999999995 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: mteb/climate-fever config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: main_score value: 36.853 - type: map_at_1 value: 15.371000000000002 - type: map_at_10 value: 27.122 - type: map_at_100 value: 29.226000000000003 - type: map_at_1000 value: 29.409999999999997 - type: map_at_20 value: 28.274 - type: map_at_3 value: 22.431 - type: map_at_5 value: 24.877 - type: mrr_at_1 value: 34.13680781758958 - type: mrr_at_10 value: 47.265911793599145 - type: mrr_at_100 value: 48.028369995763846 - type: mrr_at_1000 value: 48.05317022537804 - type: mrr_at_20 value: 47.75785292259516 - type: mrr_at_3 value: 43.887079261672156 - type: mrr_at_5 value: 45.906623235613544 - type: nauc_map_at_1000_diff1 value: 24.949211292921547 - type: nauc_map_at_1000_max value: 38.69844483304584 - type: nauc_map_at_1000_std value: 18.336359440844753 - type: nauc_map_at_100_diff1 value: 24.8951732982492 - type: nauc_map_at_100_max value: 38.65049158594052 - type: nauc_map_at_100_std value: 18.28935278388095 - type: 
nauc_map_at_10_diff1 value: 24.606032216798273 - type: nauc_map_at_10_max value: 38.00608351559887 - type: nauc_map_at_10_std value: 16.61261615173358 - type: nauc_map_at_1_diff1 value: 30.83614944448221 - type: nauc_map_at_1_max value: 33.757528532809 - type: nauc_map_at_1_std value: 8.880622713261126 - type: nauc_map_at_20_diff1 value: 24.75491310922017 - type: nauc_map_at_20_max value: 38.353679076398834 - type: nauc_map_at_20_std value: 17.58637493443171 - type: nauc_map_at_3_diff1 value: 25.563085273287083 - type: nauc_map_at_3_max value: 35.14515679047155 - type: nauc_map_at_3_std value: 11.75594869817732 - type: nauc_map_at_5_diff1 value: 24.815807517691614 - type: nauc_map_at_5_max value: 36.25905426665983 - type: nauc_map_at_5_std value: 14.516391726180697 - type: nauc_mrr_at_1000_diff1 value: 27.948233427121274 - type: nauc_mrr_at_1000_max value: 37.5893640945859 - type: nauc_mrr_at_1000_std value: 19.588442449629763 - type: nauc_mrr_at_100_diff1 value: 27.947962345854037 - type: nauc_mrr_at_100_max value: 37.60375479481945 - type: nauc_mrr_at_100_std value: 19.614791576283793 - type: nauc_mrr_at_10_diff1 value: 27.882311310262136 - type: nauc_mrr_at_10_max value: 37.58580968074054 - type: nauc_mrr_at_10_std value: 19.49875186170201 - type: nauc_mrr_at_1_diff1 value: 28.017413073648477 - type: nauc_mrr_at_1_max value: 32.87710191514022 - type: nauc_mrr_at_1_std value: 14.04889142608459 - type: nauc_mrr_at_20_diff1 value: 27.89129925771968 - type: nauc_mrr_at_20_max value: 37.6142863106945 - type: nauc_mrr_at_20_std value: 19.645390143394163 - type: nauc_mrr_at_3_diff1 value: 27.99609559690795 - type: nauc_mrr_at_3_max value: 36.87362332456197 - type: nauc_mrr_at_3_std value: 18.598416821915333 - type: nauc_mrr_at_5_diff1 value: 27.68306089976716 - type: nauc_mrr_at_5_max value: 37.12264485659723 - type: nauc_mrr_at_5_std value: 19.18875305730564 - type: nauc_ndcg_at_1000_diff1 value: 25.736779186453777 - type: nauc_ndcg_at_1000_max value: 
41.93281139456004 - type: nauc_ndcg_at_1000_std value: 25.179038422659993 - type: nauc_ndcg_at_100_diff1 value: 25.144796623848322 - type: nauc_ndcg_at_100_max value: 41.72820916876173 - type: nauc_ndcg_at_100_std value: 25.12851686850754 - type: nauc_ndcg_at_10_diff1 value: 24.321249191226652 - type: nauc_ndcg_at_10_max value: 40.23711916935706 - type: nauc_ndcg_at_10_std value: 20.89060972334557 - type: nauc_ndcg_at_1_diff1 value: 28.017413073648477 - type: nauc_ndcg_at_1_max value: 32.87710191514022 - type: nauc_ndcg_at_1_std value: 14.04889142608459 - type: nauc_ndcg_at_20_diff1 value: 24.5090484877482 - type: nauc_ndcg_at_20_max value: 40.752854032983606 - type: nauc_ndcg_at_20_std value: 22.70331074781384 - type: nauc_ndcg_at_3_diff1 value: 25.13499057756147 - type: nauc_ndcg_at_3_max value: 35.8325682137567 - type: nauc_ndcg_at_3_std value: 15.23768392706637 - type: nauc_ndcg_at_5_diff1 value: 24.614105695451116 - type: nauc_ndcg_at_5_max value: 37.68089587624492 - type: nauc_ndcg_at_5_std value: 17.946406099261708 - type: nauc_precision_at_1000_diff1 value: -2.022340544774227 - type: nauc_precision_at_1000_max value: 6.070578645067797 - type: nauc_precision_at_1000_std value: 22.15132728777549 - type: nauc_precision_at_100_diff1 value: 4.544144474504255 - type: nauc_precision_at_100_max value: 19.780392159848574 - type: nauc_precision_at_100_std value: 31.107111186002438 - type: nauc_precision_at_10_diff1 value: 10.107015022955848 - type: nauc_precision_at_10_max value: 30.779709099060465 - type: nauc_precision_at_10_std value: 27.324148451668602 - type: nauc_precision_at_1_diff1 value: 28.017413073648477 - type: nauc_precision_at_1_max value: 32.87710191514022 - type: nauc_precision_at_1_std value: 14.04889142608459 - type: nauc_precision_at_20_diff1 value: 8.270881053079405 - type: nauc_precision_at_20_max value: 27.26753946078481 - type: nauc_precision_at_20_std value: 29.156725822074204 - type: nauc_precision_at_3_diff1 value: 17.82468940497632 - type: 
nauc_precision_at_3_max value: 31.490021174215155 - type: nauc_precision_at_3_std value: 18.73818985054394 - type: nauc_precision_at_5_diff1 value: 13.24803141673961 - type: nauc_precision_at_5_max value: 29.94926240784298 - type: nauc_precision_at_5_std value: 23.2940906142919 - type: nauc_recall_at_1000_diff1 value: 19.09850333580471 - type: nauc_recall_at_1000_max value: 46.026306142840596 - type: nauc_recall_at_1000_std value: 46.50391519568263 - type: nauc_recall_at_100_diff1 value: 16.739384224869738 - type: nauc_recall_at_100_max value: 40.68987136431252 - type: nauc_recall_at_100_std value: 36.01609750485591 - type: nauc_recall_at_10_diff1 value: 17.51796617221814 - type: nauc_recall_at_10_max value: 39.47453129444401 - type: nauc_recall_at_10_std value: 23.79239002974899 - type: nauc_recall_at_1_diff1 value: 30.83614944448221 - type: nauc_recall_at_1_max value: 33.757528532809 - type: nauc_recall_at_1_std value: 8.880622713261126 - type: nauc_recall_at_20_diff1 value: 16.978668307251652 - type: nauc_recall_at_20_max value: 39.09115357303713 - type: nauc_recall_at_20_std value: 27.278668534187524 - type: nauc_recall_at_3_diff1 value: 22.55937738994021 - type: nauc_recall_at_3_max value: 36.25055459395638 - type: nauc_recall_at_3_std value: 14.828905168761247 - type: nauc_recall_at_5_diff1 value: 19.32656748627199 - type: nauc_recall_at_5_max value: 36.28836228620816 - type: nauc_recall_at_5_std value: 19.264352933914278 - type: ndcg_at_1 value: 34.137 - type: ndcg_at_10 value: 36.853 - type: ndcg_at_100 value: 44.279 - type: ndcg_at_1000 value: 47.336 - type: ndcg_at_20 value: 39.815 - type: ndcg_at_3 value: 30.253999999999998 - type: ndcg_at_5 value: 32.649 - type: precision_at_1 value: 34.137 - type: precision_at_10 value: 11.655 - type: precision_at_100 value: 1.9619999999999997 - type: precision_at_1000 value: 0.254 - type: precision_at_20 value: 7.1209999999999996 - type: precision_at_3 value: 22.823 - type: precision_at_5 value: 17.655 - type: 
recall_at_1 value: 15.371000000000002 - type: recall_at_10 value: 43.718 - type: recall_at_100 value: 68.81 - type: recall_at_1000 value: 85.69600000000001 - type: recall_at_20 value: 51.94 - type: recall_at_3 value: 27.694000000000003 - type: recall_at_5 value: 34.469 - task: type: Retrieval dataset: name: MTEB DBPedia type: mteb/dbpedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: main_score value: 45.553 - type: map_at_1 value: 9.168999999999999 - type: map_at_10 value: 22.154 - type: map_at_100 value: 32.174 - type: map_at_1000 value: 33.974 - type: map_at_20 value: 25.899 - type: map_at_3 value: 15.275 - type: map_at_5 value: 18.291 - type: mrr_at_1 value: 70.75 - type: mrr_at_10 value: 78.39662698412697 - type: mrr_at_100 value: 78.56221458977012 - type: mrr_at_1000 value: 78.56669970642338 - type: mrr_at_20 value: 78.49688805346696 - type: mrr_at_3 value: 76.33333333333333 - type: mrr_at_5 value: 77.70833333333333 - type: nauc_map_at_1000_diff1 value: 18.465085922071346 - type: nauc_map_at_1000_max value: 24.29804638788498 - type: nauc_map_at_1000_std value: 22.380463943423514 - type: nauc_map_at_100_diff1 value: 19.37585410674523 - type: nauc_map_at_100_max value: 22.56424042509462 - type: nauc_map_at_100_std value: 19.672237275984426 - type: nauc_map_at_10_diff1 value: 23.597788166305577 - type: nauc_map_at_10_max value: 9.157316105122925 - type: nauc_map_at_10_std value: -3.8881247055786807 - type: nauc_map_at_1_diff1 value: 43.96699602275052 - type: nauc_map_at_1_max value: -0.7577088440873263 - type: nauc_map_at_1_std value: -17.732463891968404 - type: nauc_map_at_20_diff1 value: 22.326759054850097 - type: nauc_map_at_20_max value: 14.879191412167703 - type: nauc_map_at_20_std value: 5.405751236575241 - type: nauc_map_at_3_diff1 value: 28.73583545428074 - type: nauc_map_at_3_max value: 1.5986597211018239 - type: nauc_map_at_3_std value: -16.512455883681515 - type: nauc_map_at_5_diff1 value: 
25.401810959155057 - type: nauc_map_at_5_max value: 4.418875376978587 - type: nauc_map_at_5_std value: -12.296750992013052 - type: nauc_mrr_at_1000_diff1 value: 51.228801807498584 - type: nauc_mrr_at_1000_max value: 61.040998883279585 - type: nauc_mrr_at_1000_std value: 40.93983887257123 - type: nauc_mrr_at_100_diff1 value: 51.23715338435314 - type: nauc_mrr_at_100_max value: 61.03971408781317 - type: nauc_mrr_at_100_std value: 40.91796923590573 - type: nauc_mrr_at_10_diff1 value: 51.1214868552331 - type: nauc_mrr_at_10_max value: 61.03069045590881 - type: nauc_mrr_at_10_std value: 40.661621199704264 - type: nauc_mrr_at_1_diff1 value: 50.84660003035892 - type: nauc_mrr_at_1_max value: 60.692091499960895 - type: nauc_mrr_at_1_std value: 42.126228731502955 - type: nauc_mrr_at_20_diff1 value: 51.0402624284872 - type: nauc_mrr_at_20_max value: 60.94577844338166 - type: nauc_mrr_at_20_std value: 40.89505950503613 - type: nauc_mrr_at_3_diff1 value: 51.771113665996516 - type: nauc_mrr_at_3_max value: 61.65264793077224 - type: nauc_mrr_at_3_std value: 41.75781827057092 - type: nauc_mrr_at_5_diff1 value: 51.0656793772882 - type: nauc_mrr_at_5_max value: 61.08042065139715 - type: nauc_mrr_at_5_std value: 41.11203271084835 - type: nauc_ndcg_at_1000_diff1 value: 22.347978262245107 - type: nauc_ndcg_at_1000_max value: 36.56458763955002 - type: nauc_ndcg_at_1000_std value: 35.99616144258822 - type: nauc_ndcg_at_100_diff1 value: 23.1120990977162 - type: nauc_ndcg_at_100_max value: 30.79663306311657 - type: nauc_ndcg_at_100_std value: 27.387572106784297 - type: nauc_ndcg_at_10_diff1 value: 23.329746066899656 - type: nauc_ndcg_at_10_max value: 28.69246947084685 - type: nauc_ndcg_at_10_std value: 21.457736188325345 - type: nauc_ndcg_at_1_diff1 value: 39.99399153456974 - type: nauc_ndcg_at_1_max value: 38.12447856470389 - type: nauc_ndcg_at_1_std value: 27.768869260384676 - type: nauc_ndcg_at_20_diff1 value: 24.945374175339907 - type: nauc_ndcg_at_20_max value: 27.67836982165295 - 
type: nauc_ndcg_at_20_std value: 19.7933631060578 - type: nauc_ndcg_at_3_diff1 value: 26.063492354398527 - type: nauc_ndcg_at_3_max value: 33.06541959550656 - type: nauc_ndcg_at_3_std value: 23.278902797288726 - type: nauc_ndcg_at_5_diff1 value: 22.521596060750035 - type: nauc_ndcg_at_5_max value: 31.210005673730784 - type: nauc_ndcg_at_5_std value: 22.893106456317927 - type: nauc_precision_at_1000_diff1 value: -19.845356495096006 - type: nauc_precision_at_1000_max value: 4.163819381816099 - type: nauc_precision_at_1000_std value: 7.612952884590339 - type: nauc_precision_at_100_diff1 value: -8.2679285153361 - type: nauc_precision_at_100_max value: 29.78018175573565 - type: nauc_precision_at_100_std value: 41.07244463956215 - type: nauc_precision_at_10_diff1 value: -3.2451428407349057 - type: nauc_precision_at_10_max value: 36.92563008274906 - type: nauc_precision_at_10_std value: 45.06962043489777 - type: nauc_precision_at_1_diff1 value: 50.84660003035892 - type: nauc_precision_at_1_max value: 60.692091499960895 - type: nauc_precision_at_1_std value: 42.126228731502955 - type: nauc_precision_at_20_diff1 value: -3.432279149061878 - type: nauc_precision_at_20_max value: 37.013592483974875 - type: nauc_precision_at_20_std value: 46.47324739428665 - type: nauc_precision_at_3_diff1 value: 7.28495481051025 - type: nauc_precision_at_3_max value: 38.66372411741402 - type: nauc_precision_at_3_std value: 35.23163993723955 - type: nauc_precision_at_5_diff1 value: -0.16540230063716202 - type: nauc_precision_at_5_max value: 37.322494255721715 - type: nauc_precision_at_5_std value: 39.666653561269754 - type: nauc_recall_at_1000_diff1 value: 11.388326469283681 - type: nauc_recall_at_1000_max value: 32.698146308591674 - type: nauc_recall_at_1000_std value: 49.48830488070777 - type: nauc_recall_at_100_diff1 value: 11.497443532756819 - type: nauc_recall_at_100_max value: 20.196970431621615 - type: nauc_recall_at_100_std value: 23.688772100803433 - type: nauc_recall_at_10_diff1 
value: 16.519851398596003 - type: nauc_recall_at_10_max value: 0.774066845071221 - type: nauc_recall_at_10_std value: -10.89514647001814 - type: nauc_recall_at_1_diff1 value: 43.96699602275052 - type: nauc_recall_at_1_max value: -0.7577088440873263 - type: nauc_recall_at_1_std value: -17.732463891968404 - type: nauc_recall_at_20_diff1 value: 15.202960269878258 - type: nauc_recall_at_20_max value: 7.067263295590253 - type: nauc_recall_at_20_std value: -0.06050108222640702 - type: nauc_recall_at_3_diff1 value: 24.066741361525125 - type: nauc_recall_at_3_max value: -2.1961525860488424 - type: nauc_recall_at_3_std value: -19.48307077749568 - type: nauc_recall_at_5_diff1 value: 20.086330794102707 - type: nauc_recall_at_5_max value: -0.8866528062747986 - type: nauc_recall_at_5_std value: -16.53799173962747 - type: ndcg_at_1 value: 57.99999999999999 - type: ndcg_at_10 value: 45.553 - type: ndcg_at_100 value: 51.014 - type: ndcg_at_1000 value: 58.226 - type: ndcg_at_20 value: 44.98 - type: ndcg_at_3 value: 48.981 - type: ndcg_at_5 value: 46.794999999999995 - type: precision_at_1 value: 70.75 - type: precision_at_10 value: 36.85 - type: precision_at_100 value: 11.955 - type: precision_at_1000 value: 2.247 - type: precision_at_20 value: 28.075 - type: precision_at_3 value: 52.666999999999994 - type: precision_at_5 value: 45.85 - type: recall_at_1 value: 9.168999999999999 - type: recall_at_10 value: 28.796 - type: recall_at_100 value: 58.892999999999994 - type: recall_at_1000 value: 81.644 - type: recall_at_20 value: 36.659000000000006 - type: recall_at_3 value: 16.709 - type: recall_at_5 value: 21.387 - task: type: Retrieval dataset: name: MTEB FEVER type: mteb/fever config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: main_score value: 88.41 - type: map_at_1 value: 75.637 - type: map_at_10 value: 84.674 - type: map_at_100 value: 84.909 - type: map_at_1000 value: 84.92 - type: map_at_20 value: 84.836 - type: map_at_3 value: 
83.44200000000001 - type: map_at_5 value: 84.28099999999999 - type: mrr_at_1 value: 81.56315631563157 - type: mrr_at_10 value: 88.89571695264748 - type: mrr_at_100 value: 88.93671417216285 - type: mrr_at_1000 value: 88.93708016011664 - type: mrr_at_20 value: 88.9311652665256 - type: mrr_at_3 value: 88.20882088208805 - type: mrr_at_5 value: 88.72937293729349 - type: nauc_map_at_1000_diff1 value: 54.41216035074026 - type: nauc_map_at_1000_max value: 13.346153003554361 - type: nauc_map_at_1000_std value: -6.721664416152164 - type: nauc_map_at_100_diff1 value: 54.36538350995795 - type: nauc_map_at_100_max value: 13.355583381471298 - type: nauc_map_at_100_std value: -6.696921015641016 - type: nauc_map_at_10_diff1 value: 54.0389127730555 - type: nauc_map_at_10_max value: 13.387802159150663 - type: nauc_map_at_10_std value: -6.73514381731833 - type: nauc_map_at_1_diff1 value: 57.99489574836453 - type: nauc_map_at_1_max value: 7.830032589171654 - type: nauc_map_at_1_std value: -10.140208285080295 - type: nauc_map_at_20_diff1 value: 54.16841004736076 - type: nauc_map_at_20_max value: 13.345607363689746 - type: nauc_map_at_20_std value: -6.663119775158465 - type: nauc_map_at_3_diff1 value: 53.82879543599303 - type: nauc_map_at_3_max value: 12.716952288433902 - type: nauc_map_at_3_std value: -7.746102082835598 - type: nauc_map_at_5_diff1 value: 53.82838395350109 - type: nauc_map_at_5_max value: 13.487373534211702 - type: nauc_map_at_5_std value: -6.869504398693434 - type: nauc_mrr_at_1000_diff1 value: 68.92783546581906 - type: nauc_mrr_at_1000_max value: 12.076297180596592 - type: nauc_mrr_at_1000_std value: -13.306257067567998 - type: nauc_mrr_at_100_diff1 value: 68.92780219775517 - type: nauc_mrr_at_100_max value: 12.078449805054374 - type: nauc_mrr_at_100_std value: -13.303524852703719 - type: nauc_mrr_at_10_diff1 value: 68.92686206881258 - type: nauc_mrr_at_10_max value: 12.273295656884873 - type: nauc_mrr_at_10_std value: -13.222483496603965 - type: nauc_mrr_at_1_diff1 
value: 70.1738022073041 - type: nauc_mrr_at_1_max value: 9.378639533482806 - type: nauc_mrr_at_1_std value: -13.444033823202348 - type: nauc_mrr_at_20_diff1 value: 68.91161304905303 - type: nauc_mrr_at_20_max value: 12.117091514817885 - type: nauc_mrr_at_20_std value: -13.258261750160239 - type: nauc_mrr_at_3_diff1 value: 68.61982455945467 - type: nauc_mrr_at_3_max value: 12.608213879734578 - type: nauc_mrr_at_3_std value: -13.558003431587839 - type: nauc_mrr_at_5_diff1 value: 68.81439097457242 - type: nauc_mrr_at_5_max value: 12.54025598903624 - type: nauc_mrr_at_5_std value: -13.199231514972093 - type: nauc_ndcg_at_1000_diff1 value: 56.47563443877495 - type: nauc_ndcg_at_1000_max value: 14.508331783439466 - type: nauc_ndcg_at_1000_std value: -6.206829736668775 - type: nauc_ndcg_at_100_diff1 value: 55.54015515673474 - type: nauc_ndcg_at_100_max value: 14.753595778278136 - type: nauc_ndcg_at_100_std value: -5.638517949568802 - type: nauc_ndcg_at_10_diff1 value: 54.220845223257996 - type: nauc_ndcg_at_10_max value: 15.265309648490021 - type: nauc_ndcg_at_10_std value: -5.516276098929109 - type: nauc_ndcg_at_1_diff1 value: 70.1738022073041 - type: nauc_ndcg_at_1_max value: 9.378639533482806 - type: nauc_ndcg_at_1_std value: -13.444033823202348 - type: nauc_ndcg_at_20_diff1 value: 54.481406100854635 - type: nauc_ndcg_at_20_max value: 14.868763583210498 - type: nauc_ndcg_at_20_std value: -5.328097380018734 - type: nauc_ndcg_at_3_diff1 value: 54.94411725607744 - type: nauc_ndcg_at_3_max value: 14.27186734506607 - type: nauc_ndcg_at_3_std value: -7.894724962312474 - type: nauc_ndcg_at_5_diff1 value: 54.08048166974806 - type: nauc_ndcg_at_5_max value: 15.528233170721006 - type: nauc_ndcg_at_5_std value: -5.984768714537104 - type: nauc_precision_at_1000_diff1 value: -8.744323640074445 - type: nauc_precision_at_1000_max value: -0.01881224392053465 - type: nauc_precision_at_1000_std value: 3.8721477979260635 - type: nauc_precision_at_100_diff1 value: -11.86150156952171 - 
type: nauc_precision_at_100_max value: 3.2736651314552314 - type: nauc_precision_at_100_std value: 8.12687620615509 - type: nauc_precision_at_10_diff1 value: -10.360708676781178 - type: nauc_precision_at_10_max value: 10.945552490433458 - type: nauc_precision_at_10_std value: 11.016707653014485 - type: nauc_precision_at_1_diff1 value: 70.1738022073041 - type: nauc_precision_at_1_max value: 9.378639533482806 - type: nauc_precision_at_1_std value: -13.444033823202348 - type: nauc_precision_at_20_diff1 value: -13.557721925696583 - type: nauc_precision_at_20_max value: 6.331386521718574 - type: nauc_precision_at_20_std value: 10.322188778142388 - type: nauc_precision_at_3_diff1 value: 15.139456770248968 - type: nauc_precision_at_3_max value: 17.10220985600708 - type: nauc_precision_at_3_std value: 3.0448183682558074 - type: nauc_precision_at_5_diff1 value: -1.9825577548111102 - type: nauc_precision_at_5_max value: 17.139148127012625 - type: nauc_precision_at_5_std value: 10.598435750554753 - type: nauc_recall_at_1000_diff1 value: 15.641740744283005 - type: nauc_recall_at_1000_max value: 44.65315702195612 - type: nauc_recall_at_1000_std value: 52.34265862835513 - type: nauc_recall_at_100_diff1 value: 5.254385435323394 - type: nauc_recall_at_100_max value: 38.53577774395794 - type: nauc_recall_at_100_std value: 43.47744274335829 - type: nauc_recall_at_10_diff1 value: 19.135735476268042 - type: nauc_recall_at_10_max value: 30.05417445923848 - type: nauc_recall_at_10_std value: 18.3988023241141 - type: nauc_recall_at_1_diff1 value: 57.99489574836453 - type: nauc_recall_at_1_max value: 7.830032589171654 - type: nauc_recall_at_1_std value: -10.140208285080295 - type: nauc_recall_at_20_diff1 value: 9.444797759735126 - type: nauc_recall_at_20_max value: 31.001311675371017 - type: nauc_recall_at_20_std value: 29.351418893822178 - type: nauc_recall_at_3_diff1 value: 36.88862653262064 - type: nauc_recall_at_3_max value: 19.845892741607823 - type: nauc_recall_at_3_std value: 
-1.0584273105890794 - type: nauc_recall_at_5_diff1 value: 27.360718561944974 - type: nauc_recall_at_5_max value: 26.698311215441738 - type: nauc_recall_at_5_std value: 8.97113997755362 - type: ndcg_at_1 value: 81.563 - type: ndcg_at_10 value: 88.41 - type: ndcg_at_100 value: 89.101 - type: ndcg_at_1000 value: 89.25800000000001 - type: ndcg_at_20 value: 88.79 - type: ndcg_at_3 value: 86.599 - type: ndcg_at_5 value: 87.74 - type: precision_at_1 value: 81.563 - type: precision_at_10 value: 10.699 - type: precision_at_100 value: 1.13 - type: precision_at_1000 value: 0.116 - type: precision_at_20 value: 5.479 - type: precision_at_3 value: 33.238 - type: precision_at_5 value: 20.744 - type: recall_at_1 value: 75.637 - type: recall_at_10 value: 95.57600000000001 - type: recall_at_100 value: 98.072 - type: recall_at_1000 value: 98.951 - type: recall_at_20 value: 96.792 - type: recall_at_3 value: 90.79599999999999 - type: recall_at_5 value: 93.674 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: mteb/fiqa config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: main_score value: 42.396 - type: map_at_1 value: 21.711 - type: map_at_10 value: 34.628 - type: map_at_100 value: 36.549 - type: map_at_1000 value: 36.719 - type: map_at_20 value: 35.673 - type: map_at_3 value: 30.585 - type: map_at_5 value: 32.875 - type: mrr_at_1 value: 41.82098765432099 - type: mrr_at_10 value: 50.69505682931607 - type: mrr_at_100 value: 51.50556608727901 - type: mrr_at_1000 value: 51.53870583208304 - type: mrr_at_20 value: 51.15345764364655 - type: mrr_at_3 value: 48.35390946502059 - type: mrr_at_5 value: 49.87397119341563 - type: nauc_map_at_1000_diff1 value: 45.182252919583895 - type: nauc_map_at_1000_max value: 35.66124930024801 - type: nauc_map_at_1000_std value: -0.6925562638650965 - type: nauc_map_at_100_diff1 value: 45.116964706960125 - type: nauc_map_at_100_max value: 35.54990469525889 - type: nauc_map_at_100_std value: -0.6667263852859368 
- type: nauc_map_at_10_diff1 value: 45.39189096228184 - type: nauc_map_at_10_max value: 34.780111261901 - type: nauc_map_at_10_std value: -1.8169859294150819 - type: nauc_map_at_1_diff1 value: 47.72764937952259 - type: nauc_map_at_1_max value: 24.83306559709341 - type: nauc_map_at_1_std value: -4.714128457297418 - type: nauc_map_at_20_diff1 value: 45.17073365898278 - type: nauc_map_at_20_max value: 35.0938403469058 - type: nauc_map_at_20_std value: -1.373412631183604 - type: nauc_map_at_3_diff1 value: 46.525724305731295 - type: nauc_map_at_3_max value: 31.042538866512597 - type: nauc_map_at_3_std value: -4.119355935975354 - type: nauc_map_at_5_diff1 value: 45.79569633383187 - type: nauc_map_at_5_max value: 32.88779656647293 - type: nauc_map_at_5_std value: -3.2518474739335312 - type: nauc_mrr_at_1000_diff1 value: 52.83619185487903 - type: nauc_mrr_at_1000_max value: 42.30310720405186 - type: nauc_mrr_at_1000_std value: -1.1487703348518024 - type: nauc_mrr_at_100_diff1 value: 52.82248853996664 - type: nauc_mrr_at_100_max value: 42.30549701564678 - type: nauc_mrr_at_100_std value: -1.1240113031894834 - type: nauc_mrr_at_10_diff1 value: 52.74644276642243 - type: nauc_mrr_at_10_max value: 42.39103029476398 - type: nauc_mrr_at_10_std value: -1.1043413237848576 - type: nauc_mrr_at_1_diff1 value: 54.810335521617326 - type: nauc_mrr_at_1_max value: 40.733260207843394 - type: nauc_mrr_at_1_std value: -4.452554921565855 - type: nauc_mrr_at_20_diff1 value: 52.788257862499954 - type: nauc_mrr_at_20_max value: 42.32658875363406 - type: nauc_mrr_at_20_std value: -1.2209728080684497 - type: nauc_mrr_at_3_diff1 value: 53.43281175319808 - type: nauc_mrr_at_3_max value: 41.735942650867926 - type: nauc_mrr_at_3_std value: -2.462688102468019 - type: nauc_mrr_at_5_diff1 value: 52.874037126566606 - type: nauc_mrr_at_5_max value: 41.93740449458822 - type: nauc_mrr_at_5_std value: -1.2928874908441947 - type: nauc_ndcg_at_1000_diff1 value: 46.5532425476402 - type: nauc_ndcg_at_1000_max 
value: 40.369611603370515 - type: nauc_ndcg_at_1000_std value: 3.472567588386994 - type: nauc_ndcg_at_100_diff1 value: 45.75244404695404 - type: nauc_ndcg_at_100_max value: 39.36470550675439 - type: nauc_ndcg_at_100_std value: 4.356189041115731 - type: nauc_ndcg_at_10_diff1 value: 46.005135323539704 - type: nauc_ndcg_at_10_max value: 37.89018165334218 - type: nauc_ndcg_at_10_std value: 0.7129618297768014 - type: nauc_ndcg_at_1_diff1 value: 54.810335521617326 - type: nauc_ndcg_at_1_max value: 40.733260207843394 - type: nauc_ndcg_at_1_std value: -4.452554921565855 - type: nauc_ndcg_at_20_diff1 value: 45.841552790490034 - type: nauc_ndcg_at_20_max value: 38.04992825472661 - type: nauc_ndcg_at_20_std value: 1.2748305707955212 - type: nauc_ndcg_at_3_diff1 value: 46.683033449357744 - type: nauc_ndcg_at_3_max value: 37.46397870760607 - type: nauc_ndcg_at_3_std value: -2.3421854966319824 - type: nauc_ndcg_at_5_diff1 value: 45.82409645378457 - type: nauc_ndcg_at_5_max value: 36.27588234096716 - type: nauc_ndcg_at_5_std value: -1.5141197170944254 - type: nauc_precision_at_1000_diff1 value: -3.137944321071885 - type: nauc_precision_at_1000_max value: 24.12803166253776 - type: nauc_precision_at_1000_std value: 11.076454789944101 - type: nauc_precision_at_100_diff1 value: 3.9896283891401048 - type: nauc_precision_at_100_max value: 31.00198316788829 - type: nauc_precision_at_100_std value: 15.725887643803063 - type: nauc_precision_at_10_diff1 value: 20.493420889888394 - type: nauc_precision_at_10_max value: 41.689699671507405 - type: nauc_precision_at_10_std value: 9.374983385669914 - type: nauc_precision_at_1_diff1 value: 54.810335521617326 - type: nauc_precision_at_1_max value: 40.733260207843394 - type: nauc_precision_at_1_std value: -4.452554921565855 - type: nauc_precision_at_20_diff1 value: 15.02911800246446 - type: nauc_precision_at_20_max value: 39.227068888505 - type: nauc_precision_at_20_std value: 11.755558515319404 - type: nauc_precision_at_3_diff1 value: 
34.044986535461746 - type: nauc_precision_at_3_max value: 40.96605829831656 - type: nauc_precision_at_3_std value: 1.1903535705688038 - type: nauc_precision_at_5_diff1 value: 26.617002443432707 - type: nauc_precision_at_5_max value: 40.60413785916794 - type: nauc_precision_at_5_std value: 3.6984531670502814 - type: nauc_recall_at_1000_diff1 value: 26.96489389440101 - type: nauc_recall_at_1000_max value: 41.811583968523955 - type: nauc_recall_at_1000_std value: 41.5719519496712 - type: nauc_recall_at_100_diff1 value: 28.50851434908223 - type: nauc_recall_at_100_max value: 32.19528060706322 - type: nauc_recall_at_100_std value: 25.56935294258179 - type: nauc_recall_at_10_diff1 value: 35.139582891180964 - type: nauc_recall_at_10_max value: 32.15221840434225 - type: nauc_recall_at_10_std value: 5.550434611582702 - type: nauc_recall_at_1_diff1 value: 47.72764937952259 - type: nauc_recall_at_1_max value: 24.83306559709341 - type: nauc_recall_at_1_std value: -4.714128457297418 - type: nauc_recall_at_20_diff1 value: 32.78604811055205 - type: nauc_recall_at_20_max value: 29.62940720700254 - type: nauc_recall_at_20_std value: 6.769941491859872 - type: nauc_recall_at_3_diff1 value: 40.76090616138699 - type: nauc_recall_at_3_max value: 27.506425490226867 - type: nauc_recall_at_3_std value: -2.608872693119243 - type: nauc_recall_at_5_diff1 value: 37.06532485024711 - type: nauc_recall_at_5_max value: 27.704150556658448 - type: nauc_recall_at_5_std value: 0.4718707152343872 - type: ndcg_at_1 value: 41.821000000000005 - type: ndcg_at_10 value: 42.396 - type: ndcg_at_100 value: 49.370000000000005 - type: ndcg_at_1000 value: 52.251000000000005 - type: ndcg_at_20 value: 45.097 - type: ndcg_at_3 value: 39.028 - type: ndcg_at_5 value: 40.222 - type: precision_at_1 value: 41.821000000000005 - type: precision_at_10 value: 11.451 - type: precision_at_100 value: 1.863 - type: precision_at_1000 value: 0.23900000000000002 - type: precision_at_20 value: 6.798 - type: precision_at_3 value: 
25.823 - type: precision_at_5 value: 18.735 - type: recall_at_1 value: 21.711 - type: recall_at_10 value: 48.862 - type: recall_at_100 value: 74.708 - type: recall_at_1000 value: 91.865 - type: recall_at_20 value: 57.50999999999999 - type: recall_at_3 value: 35.85 - type: recall_at_5 value: 41.976 - task: type: Retrieval dataset: name: MTEB HotpotQA type: mteb/hotpotqa config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: main_score value: 72.21 - type: map_at_1 value: 39.487 - type: map_at_10 value: 63.949999999999996 - type: map_at_100 value: 64.873 - type: map_at_1000 value: 64.927 - type: map_at_20 value: 64.529 - type: map_at_3 value: 60.243 - type: map_at_5 value: 62.613 - type: mrr_at_1 value: 78.97366644159351 - type: mrr_at_10 value: 84.84600173627825 - type: mrr_at_100 value: 85.0172804866798 - type: mrr_at_1000 value: 85.02245651152857 - type: mrr_at_20 value: 84.9625577788225 - type: mrr_at_3 value: 83.90276839972962 - type: mrr_at_5 value: 84.48278190411845 - type: nauc_map_at_1000_diff1 value: 19.825004700775164 - type: nauc_map_at_1000_max value: 19.943221724164182 - type: nauc_map_at_1000_std value: 10.068951166560058 - type: nauc_map_at_100_diff1 value: 19.80139472181137 - type: nauc_map_at_100_max value: 19.938006132804347 - type: nauc_map_at_100_std value: 10.100008107666842 - type: nauc_map_at_10_diff1 value: 19.53604502514735 - type: nauc_map_at_10_max value: 19.62768870331064 - type: nauc_map_at_10_std value: 9.446859074725705 - type: nauc_map_at_1_diff1 value: 67.7764270505257 - type: nauc_map_at_1_max value: 38.45166604737058 - type: nauc_map_at_1_std value: 1.9919181988552352 - type: nauc_map_at_20_diff1 value: 19.635871913149913 - type: nauc_map_at_20_max value: 19.812838965919155 - type: nauc_map_at_20_std value: 9.905163140101845 - type: nauc_map_at_3_diff1 value: 18.965707122532212 - type: nauc_map_at_3_max value: 17.878860313056517 - type: nauc_map_at_3_std value: 6.189378752019195 - type: 
nauc_map_at_5_diff1 value: 19.493354049675954 - type: nauc_map_at_5_max value: 19.24527088109141 - type: nauc_map_at_5_std value: 8.283883139680066 - type: nauc_mrr_at_1000_diff1 value: 66.87150374356781 - type: nauc_mrr_at_1000_max value: 41.413456443203984 - type: nauc_mrr_at_1000_std value: 4.140387282484357 - type: nauc_mrr_at_100_diff1 value: 66.87178015619061 - type: nauc_mrr_at_100_max value: 41.419754763150834 - type: nauc_mrr_at_100_std value: 4.15222235416704 - type: nauc_mrr_at_10_diff1 value: 66.89720586892301 - type: nauc_mrr_at_10_max value: 41.56353878125211 - type: nauc_mrr_at_10_std value: 4.213376519922392 - type: nauc_mrr_at_1_diff1 value: 67.7764270505257 - type: nauc_mrr_at_1_max value: 38.45166604737058 - type: nauc_mrr_at_1_std value: 1.9919181988552352 - type: nauc_mrr_at_20_diff1 value: 66.8714688713149 - type: nauc_mrr_at_20_max value: 41.46170778986735 - type: nauc_mrr_at_20_std value: 4.165154741309859 - type: nauc_mrr_at_3_diff1 value: 66.31615462679144 - type: nauc_mrr_at_3_max value: 41.419637693259936 - type: nauc_mrr_at_3_std value: 3.814834551396097 - type: nauc_mrr_at_5_diff1 value: 66.7289413087213 - type: nauc_mrr_at_5_max value: 41.668346356371586 - type: nauc_mrr_at_5_std value: 4.116331539882484 - type: nauc_ndcg_at_1000_diff1 value: 26.37325375970598 - type: nauc_ndcg_at_1000_max value: 24.850915174721735 - type: nauc_ndcg_at_1000_std value: 13.37585683440429 - type: nauc_ndcg_at_100_diff1 value: 25.591771178059503 - type: nauc_ndcg_at_100_max value: 24.562820829532473 - type: nauc_ndcg_at_100_std value: 14.093690500501541 - type: nauc_ndcg_at_10_diff1 value: 24.64600598115805 - type: nauc_ndcg_at_10_max value: 23.543499404760023 - type: nauc_ndcg_at_10_std value: 11.55823632781553 - type: nauc_ndcg_at_1_diff1 value: 67.7764270505257 - type: nauc_ndcg_at_1_max value: 38.45166604737058 - type: nauc_ndcg_at_1_std value: 1.9919181988552352 - type: nauc_ndcg_at_20_diff1 value: 24.757843275306726 - type: nauc_ndcg_at_20_max 
value: 23.951154200380827 - type: nauc_ndcg_at_20_std value: 12.931320453044886 - type: nauc_ndcg_at_3_diff1 value: 24.37742630418847 - type: nauc_ndcg_at_3_max value: 21.310512304883723 - type: nauc_ndcg_at_3_std value: 6.503993200818077 - type: nauc_ndcg_at_5_diff1 value: 24.813706829269716 - type: nauc_ndcg_at_5_max value: 22.993657212898 - type: nauc_ndcg_at_5_std value: 9.34462052506809 - type: nauc_precision_at_1000_diff1 value: -0.6506415756958156 - type: nauc_precision_at_1000_max value: 28.039755644694875 - type: nauc_precision_at_1000_std value: 53.46474329623814 - type: nauc_precision_at_100_diff1 value: 3.78462668236152 - type: nauc_precision_at_100_max value: 22.501700881673862 - type: nauc_precision_at_100_std value: 40.56672716474142 - type: nauc_precision_at_10_diff1 value: 9.156113228907534 - type: nauc_precision_at_10_max value: 19.734206254833254 - type: nauc_precision_at_10_std value: 19.986282545779602 - type: nauc_precision_at_1_diff1 value: 67.7764270505257 - type: nauc_precision_at_1_max value: 38.45166604737058 - type: nauc_precision_at_1_std value: 1.9919181988552352 - type: nauc_precision_at_20_diff1 value: 6.6164335644470125 - type: nauc_precision_at_20_max value: 20.29343459608317 - type: nauc_precision_at_20_std value: 26.51115475333977 - type: nauc_precision_at_3_diff1 value: 12.476520554399546 - type: nauc_precision_at_3_max value: 16.69401409858964 - type: nauc_precision_at_3_std value: 8.165880294907444 - type: nauc_precision_at_5_diff1 value: 11.783242828320958 - type: nauc_precision_at_5_max value: 19.0679467875759 - type: nauc_precision_at_5_std value: 13.615358345509884 - type: nauc_recall_at_1000_diff1 value: -0.6506415756960168 - type: nauc_recall_at_1000_max value: 28.039755644694786 - type: nauc_recall_at_1000_std value: 53.46474329623801 - type: nauc_recall_at_100_diff1 value: 3.7846266823613877 - type: nauc_recall_at_100_max value: 22.501700881674008 - type: nauc_recall_at_100_std value: 40.566727164741366 - type: 
nauc_recall_at_10_diff1 value: 9.15611322890755 - type: nauc_recall_at_10_max value: 19.73420625483318 - type: nauc_recall_at_10_std value: 19.98628254577951 - type: nauc_recall_at_1_diff1 value: 67.7764270505257 - type: nauc_recall_at_1_max value: 38.45166604737058 - type: nauc_recall_at_1_std value: 1.9919181988552352 - type: nauc_recall_at_20_diff1 value: 6.616433564446929 - type: nauc_recall_at_20_max value: 20.293434596083248 - type: nauc_recall_at_20_std value: 26.5111547533396 - type: nauc_recall_at_3_diff1 value: 12.476520554399531 - type: nauc_recall_at_3_max value: 16.69401409858966 - type: nauc_recall_at_3_std value: 8.165880294907438 - type: nauc_recall_at_5_diff1 value: 11.783242828320999 - type: nauc_recall_at_5_max value: 19.067946787575845 - type: nauc_recall_at_5_std value: 13.61535834550991 - type: ndcg_at_1 value: 78.974 - type: ndcg_at_10 value: 72.21 - type: ndcg_at_100 value: 75.264 - type: ndcg_at_1000 value: 76.259 - type: ndcg_at_20 value: 73.628 - type: ndcg_at_3 value: 67.047 - type: ndcg_at_5 value: 69.974 - type: precision_at_1 value: 78.974 - type: precision_at_10 value: 15.267 - type: precision_at_100 value: 1.762 - type: precision_at_1000 value: 0.189 - type: precision_at_20 value: 8.09 - type: precision_at_3 value: 43.309 - type: precision_at_5 value: 28.294000000000004 - type: recall_at_1 value: 39.487 - type: recall_at_10 value: 76.334 - type: recall_at_100 value: 88.076 - type: recall_at_1000 value: 94.59100000000001 - type: recall_at_20 value: 80.898 - type: recall_at_3 value: 64.96300000000001 - type: recall_at_5 value: 70.736 - task: type: Retrieval dataset: name: MTEB MSMARCO type: mteb/msmarco config: default split: dev revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: main_score value: 42.027 - type: map_at_1 value: 22.118 - type: map_at_10 value: 34.816 - type: map_at_100 value: 35.983 - type: map_at_1000 value: 36.028999999999996 - type: map_at_20 value: 35.545 - type: map_at_3 value: 30.752000000000002 
- type: map_at_5 value: 33.114 - type: mrr_at_1 value: 22.793696275071635 - type: mrr_at_10 value: 35.47250079592483 - type: mrr_at_100 value: 36.576471512902856 - type: mrr_at_1000 value: 36.616205680509786 - type: mrr_at_20 value: 36.16557033864942 - type: mrr_at_3 value: 31.48758357211065 - type: mrr_at_5 value: 33.80563514804202 - type: nauc_map_at_1000_diff1 value: 32.89234100489284 - type: nauc_map_at_1000_max value: 1.1802816553581001 - type: nauc_map_at_1000_std value: -20.187692925732446 - type: nauc_map_at_100_diff1 value: 32.88694493681772 - type: nauc_map_at_100_max value: 1.1732717578080365 - type: nauc_map_at_100_std value: -20.164165529035245 - type: nauc_map_at_10_diff1 value: 32.826182211848796 - type: nauc_map_at_10_max value: 1.1551262165737235 - type: nauc_map_at_10_std value: -20.88326292319754 - type: nauc_map_at_1_diff1 value: 36.12732122790642 - type: nauc_map_at_1_max value: 1.8197550109156913 - type: nauc_map_at_1_std value: -17.205625720792167 - type: nauc_map_at_20_diff1 value: 32.83333177195551 - type: nauc_map_at_20_max value: 1.0937431645506202 - type: nauc_map_at_20_std value: -20.503956514646145 - type: nauc_map_at_3_diff1 value: 32.76264193805814 - type: nauc_map_at_3_max value: 0.8560962042500389 - type: nauc_map_at_3_std value: -20.608930717315577 - type: nauc_map_at_5_diff1 value: 32.78673238978775 - type: nauc_map_at_5_max value: 1.0511863039329437 - type: nauc_map_at_5_std value: -21.02164728626011 - type: nauc_mrr_at_1000_diff1 value: 32.610323934702286 - type: nauc_mrr_at_1000_max value: 1.276669121901405 - type: nauc_mrr_at_1000_std value: -19.908120615285043 - type: nauc_mrr_at_100_diff1 value: 32.601373758102795 - type: nauc_mrr_at_100_max value: 1.2752735149992132 - type: nauc_mrr_at_100_std value: -19.87937042610101 - type: nauc_mrr_at_10_diff1 value: 32.55795432078168 - type: nauc_mrr_at_10_max value: 1.2881786969258637 - type: nauc_mrr_at_10_std value: -20.54564519015977 - type: nauc_mrr_at_1_diff1 value: 
35.596301376443726 - type: nauc_mrr_at_1_max value: 1.7633238037306902 - type: nauc_mrr_at_1_std value: -17.1999420019887 - type: nauc_mrr_at_20_diff1 value: 32.57185739111023 - type: nauc_mrr_at_20_max value: 1.2212620853201877 - type: nauc_mrr_at_20_std value: -20.179517281041264 - type: nauc_mrr_at_3_diff1 value: 32.42681377099514 - type: nauc_mrr_at_3_max value: 0.8745921708861145 - type: nauc_mrr_at_3_std value: -20.41017687790572 - type: nauc_mrr_at_5_diff1 value: 32.499107129648266 - type: nauc_mrr_at_5_max value: 1.1159673851851573 - type: nauc_mrr_at_5_std value: -20.695143502133824 - type: nauc_ndcg_at_1000_diff1 value: 32.16957965806702 - type: nauc_ndcg_at_1000_max value: 1.6763998947980905 - type: nauc_ndcg_at_1000_std value: -18.970592350332893 - type: nauc_ndcg_at_100_diff1 value: 31.977550102558872 - type: nauc_ndcg_at_100_max value: 1.5625858650110014 - type: nauc_ndcg_at_100_std value: -17.990456766123835 - type: nauc_ndcg_at_10_diff1 value: 31.82738932481356 - type: nauc_ndcg_at_10_max value: 1.1661362042692103 - type: nauc_ndcg_at_10_std value: -21.872680193994217 - type: nauc_ndcg_at_1_diff1 value: 35.596301376443726 - type: nauc_ndcg_at_1_max value: 1.7633238037306902 - type: nauc_ndcg_at_1_std value: -17.1999420019887 - type: nauc_ndcg_at_20_diff1 value: 31.749656399266264 - type: nauc_ndcg_at_20_max value: 0.9629024493088691 - type: nauc_ndcg_at_20_std value: -20.4379403899277 - type: nauc_ndcg_at_3_diff1 value: 31.731361436850836 - type: nauc_ndcg_at_3_max value: 0.531749791578849 - type: nauc_ndcg_at_3_std value: -21.551112910698674 - type: nauc_ndcg_at_5_diff1 value: 31.785373941157303 - type: nauc_ndcg_at_5_max value: 0.86207769368333 - type: nauc_ndcg_at_5_std value: -22.24923399160171 - type: nauc_precision_at_1000_diff1 value: -3.841288331986519 - type: nauc_precision_at_1000_max value: 13.558041371634976 - type: nauc_precision_at_1000_std value: 15.181510484512827 - type: nauc_precision_at_100_diff1 value: 12.441154582709053 - type: 
nauc_precision_at_100_max value: 8.428136255841935 - type: nauc_precision_at_100_std value: 14.710391839731656 - type: nauc_precision_at_10_diff1 value: 26.185854813986705 - type: nauc_precision_at_10_max value: 1.6348387310504464 - type: nauc_precision_at_10_std value: -23.448927004357298 - type: nauc_precision_at_1_diff1 value: 35.596301376443726 - type: nauc_precision_at_1_max value: 1.7633238037306902 - type: nauc_precision_at_1_std value: -17.1999420019887 - type: nauc_precision_at_20_diff1 value: 22.69194179544158 - type: nauc_precision_at_20_max value: 1.2972015009169306 - type: nauc_precision_at_20_std value: -15.751482380060269 - type: nauc_precision_at_3_diff1 value: 28.255531512125188 - type: nauc_precision_at_3_max value: -0.3715575458464333 - type: nauc_precision_at_3_std value: -24.227970454057697 - type: nauc_precision_at_5_diff1 value: 27.65497951098847 - type: nauc_precision_at_5_max value: 0.449773375292472 - type: nauc_precision_at_5_std value: -25.37445450938601 - type: nauc_recall_at_1000_diff1 value: 15.243948516763819 - type: nauc_recall_at_1000_max value: 41.821227805251375 - type: nauc_recall_at_1000_std value: 61.66297794838101 - type: nauc_recall_at_100_diff1 value: 24.516543685029994 - type: nauc_recall_at_100_max value: 7.093972966253228 - type: nauc_recall_at_100_std value: 17.244452321212282 - type: nauc_recall_at_10_diff1 value: 28.404243095182828 - type: nauc_recall_at_10_max value: 1.0805210480930945 - type: nauc_recall_at_10_std value: -24.885018657039527 - type: nauc_recall_at_1_diff1 value: 36.12732122790642 - type: nauc_recall_at_1_max value: 1.8197550109156913 - type: nauc_recall_at_1_std value: -17.205625720792167 - type: nauc_recall_at_20_diff1 value: 26.956250169438512 - type: nauc_recall_at_20_max value: 0.023973408161285917 - type: nauc_recall_at_20_std value: -18.32944444428131 - type: nauc_recall_at_3_diff1 value: 28.9894205130054 - type: nauc_recall_at_3_max value: -0.36140658021466865 - type: nauc_recall_at_3_std 
value: -24.022505107768364 - type: nauc_recall_at_5_diff1 value: 28.907023434955104 - type: nauc_recall_at_5_max value: 0.2501037567297729 - type: nauc_recall_at_5_std value: -25.719919602271496 - type: ndcg_at_1 value: 22.794 - type: ndcg_at_10 value: 42.027 - type: ndcg_at_100 value: 47.601 - type: ndcg_at_1000 value: 48.713 - type: ndcg_at_20 value: 44.623000000000005 - type: ndcg_at_3 value: 33.772999999999996 - type: ndcg_at_5 value: 37.991 - type: precision_at_1 value: 22.794 - type: precision_at_10 value: 6.711 - type: precision_at_100 value: 0.9490000000000001 - type: precision_at_1000 value: 0.105 - type: precision_at_20 value: 3.8920000000000003 - type: precision_at_3 value: 14.46 - type: precision_at_5 value: 10.822 - type: recall_at_1 value: 22.118 - type: recall_at_10 value: 64.201 - type: recall_at_100 value: 89.878 - type: recall_at_1000 value: 98.259 - type: recall_at_20 value: 74.34100000000001 - type: recall_at_3 value: 41.8 - type: recall_at_5 value: 51.959 - task: type: Retrieval dataset: name: MTEB NFCorpus type: mteb/nfcorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: main_score value: 36.201 - type: map_at_1 value: 5.654 - type: map_at_10 value: 13.402 - type: map_at_100 value: 16.849 - type: map_at_1000 value: 18.264 - type: map_at_20 value: 14.832 - type: map_at_3 value: 9.619 - type: map_at_5 value: 11.483 - type: mrr_at_1 value: 47.6780185758514 - type: mrr_at_10 value: 56.47906531033466 - type: mrr_at_100 value: 57.04539749991402 - type: mrr_at_1000 value: 57.08810157607369 - type: mrr_at_20 value: 56.88003170105462 - type: mrr_at_3 value: 54.43756449948401 - type: mrr_at_5 value: 55.660474716202266 - type: nauc_map_at_1000_diff1 value: 31.134615238698192 - type: nauc_map_at_1000_max value: 36.09522002487132 - type: nauc_map_at_1000_std value: 14.72627666649002 - type: nauc_map_at_100_diff1 value: 32.777473351864444 - type: nauc_map_at_100_max value: 35.25391471621035 - type: 
nauc_map_at_100_std value: 12.024428973861083 - type: nauc_map_at_10_diff1 value: 36.46466466148528 - type: nauc_map_at_10_max value: 29.707805406826722 - type: nauc_map_at_10_std value: 2.0678757794226335 - type: nauc_map_at_1_diff1 value: 54.30208426149679 - type: nauc_map_at_1_max value: 18.69125148481608 - type: nauc_map_at_1_std value: -8.970955660291802 - type: nauc_map_at_20_diff1 value: 34.76513311600623 - type: nauc_map_at_20_max value: 32.20666003570514 - type: nauc_map_at_20_std value: 5.924889441518581 - type: nauc_map_at_3_diff1 value: 45.73465176835491 - type: nauc_map_at_3_max value: 23.492291524989106 - type: nauc_map_at_3_std value: -5.0123536561688855 - type: nauc_map_at_5_diff1 value: 39.7128319374107 - type: nauc_map_at_5_max value: 25.84231729559691 - type: nauc_map_at_5_std value: -2.0861428981140344 - type: nauc_mrr_at_1000_diff1 value: 33.0997881703397 - type: nauc_mrr_at_1000_max value: 52.7089709923531 - type: nauc_mrr_at_1000_std value: 28.8517952674151 - type: nauc_mrr_at_100_diff1 value: 33.1094984027438 - type: nauc_mrr_at_100_max value: 52.74301398138847 - type: nauc_mrr_at_100_std value: 28.897997840300892 - type: nauc_mrr_at_10_diff1 value: 33.300713655464925 - type: nauc_mrr_at_10_max value: 52.572139698742184 - type: nauc_mrr_at_10_std value: 28.66875615527188 - type: nauc_mrr_at_1_diff1 value: 32.57632582147155 - type: nauc_mrr_at_1_max value: 46.020072246328816 - type: nauc_mrr_at_1_std value: 20.99097889820076 - type: nauc_mrr_at_20_diff1 value: 33.04083904518949 - type: nauc_mrr_at_20_max value: 52.597451362456994 - type: nauc_mrr_at_20_std value: 28.681527293587898 - type: nauc_mrr_at_3_diff1 value: 33.64864656322754 - type: nauc_mrr_at_3_max value: 51.82256412011279 - type: nauc_mrr_at_3_std value: 27.241260746740686 - type: nauc_mrr_at_5_diff1 value: 33.53201325467246 - type: nauc_mrr_at_5_max value: 52.79440885773516 - type: nauc_mrr_at_5_std value: 28.663081392086028 - type: nauc_ndcg_at_1000_diff1 value: 
28.632650542040714 - type: nauc_ndcg_at_1000_max value: 51.24103069835822 - type: nauc_ndcg_at_1000_std value: 35.05503784757999 - type: nauc_ndcg_at_100_diff1 value: 29.082177715298503 - type: nauc_ndcg_at_100_max value: 45.24750203464315 - type: nauc_ndcg_at_100_std value: 27.146548925680914 - type: nauc_ndcg_at_10_diff1 value: 25.123554466093594 - type: nauc_ndcg_at_10_max value: 42.74355537806512 - type: nauc_ndcg_at_10_std value: 22.234407997803935 - type: nauc_ndcg_at_1_diff1 value: 33.75083940012058 - type: nauc_ndcg_at_1_max value: 44.44319402133161 - type: nauc_ndcg_at_1_std value: 19.146499358406487 - type: nauc_ndcg_at_20_diff1 value: 24.954207968331872 - type: nauc_ndcg_at_20_max value: 41.25991844405748 - type: nauc_ndcg_at_20_std value: 22.169009285868864 - type: nauc_ndcg_at_3_diff1 value: 28.186539942033516 - type: nauc_ndcg_at_3_max value: 44.40790009754965 - type: nauc_ndcg_at_3_std value: 20.99226576085115 - type: nauc_ndcg_at_5_diff1 value: 25.498387899376706 - type: nauc_ndcg_at_5_max value: 43.174709766261316 - type: nauc_ndcg_at_5_std value: 21.88111962672031 - type: nauc_precision_at_1000_diff1 value: -16.22321012507648 - type: nauc_precision_at_1000_max value: 5.808852256649677 - type: nauc_precision_at_1000_std value: 19.875641776698824 - type: nauc_precision_at_100_diff1 value: -10.248089374355486 - type: nauc_precision_at_100_max value: 19.29065415127588 - type: nauc_precision_at_100_std value: 31.75019665627339 - type: nauc_precision_at_10_diff1 value: 3.6783257583955056 - type: nauc_precision_at_10_max value: 39.22286010695767 - type: nauc_precision_at_10_std value: 31.225485732801022 - type: nauc_precision_at_1_diff1 value: 32.57632582147155 - type: nauc_precision_at_1_max value: 46.020072246328816 - type: nauc_precision_at_1_std value: 20.99097889820076 - type: nauc_precision_at_20_diff1 value: -3.1632510833242784 - type: nauc_precision_at_20_max value: 31.575496762405734 - type: nauc_precision_at_20_std value: 31.576283324468115 - 
type: nauc_precision_at_3_diff1 value: 17.78864585545647 - type: nauc_precision_at_3_max value: 44.201289661125585 - type: nauc_precision_at_3_std value: 25.447840649726693 - type: nauc_precision_at_5_diff1 value: 9.986748662091358 - type: nauc_precision_at_5_max value: 41.214164860776755 - type: nauc_precision_at_5_std value: 28.22551704127726 - type: nauc_recall_at_1000_diff1 value: 10.984331766850506 - type: nauc_recall_at_1000_max value: 24.641216018034104 - type: nauc_recall_at_1000_std value: 26.91064221008446 - type: nauc_recall_at_100_diff1 value: 23.7009352078473 - type: nauc_recall_at_100_max value: 30.176031609451297 - type: nauc_recall_at_100_std value: 20.360365243211564 - type: nauc_recall_at_10_diff1 value: 28.11831737650638 - type: nauc_recall_at_10_max value: 24.21539670487414 - type: nauc_recall_at_10_std value: 2.245504974150148 - type: nauc_recall_at_1_diff1 value: 54.30208426149679 - type: nauc_recall_at_1_max value: 18.69125148481608 - type: nauc_recall_at_1_std value: -8.970955660291802 - type: nauc_recall_at_20_diff1 value: 26.199425305139908 - type: nauc_recall_at_20_max value: 24.66704097503736 - type: nauc_recall_at_20_std value: 5.86052107206246 - type: nauc_recall_at_3_diff1 value: 42.88348677575622 - type: nauc_recall_at_3_max value: 21.189371077603308 - type: nauc_recall_at_3_std value: -4.537510127238226 - type: nauc_recall_at_5_diff1 value: 30.7936756722569 - type: nauc_recall_at_5_max value: 21.06136406164962 - type: nauc_recall_at_5_std value: -1.4113804735229794 - type: ndcg_at_1 value: 45.975 - type: ndcg_at_10 value: 36.201 - type: ndcg_at_100 value: 32.736 - type: ndcg_at_1000 value: 41.099000000000004 - type: ndcg_at_20 value: 33.724 - type: ndcg_at_3 value: 42.242000000000004 - type: ndcg_at_5 value: 40.137 - type: precision_at_1 value: 47.678 - type: precision_at_10 value: 26.904 - type: precision_at_100 value: 8.368 - type: precision_at_1000 value: 2.078 - type: precision_at_20 value: 19.845 - type: precision_at_3 value: 
40.351 - type: precision_at_5 value: 35.108 - type: recall_at_1 value: 5.654 - type: recall_at_10 value: 17.793 - type: recall_at_100 value: 32.483000000000004 - type: recall_at_1000 value: 63.294 - type: recall_at_20 value: 21.754 - type: recall_at_3 value: 10.771 - type: recall_at_5 value: 14.084 - task: type: Retrieval dataset: name: MTEB NQ type: mteb/nq config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: main_score value: 62.464 - type: map_at_1 value: 38.0 - type: map_at_10 value: 54.806 - type: map_at_100 value: 55.599 - type: map_at_1000 value: 55.617000000000004 - type: map_at_20 value: 55.336 - type: map_at_3 value: 50.58200000000001 - type: map_at_5 value: 53.181 - type: mrr_at_1 value: 42.46813441483198 - type: mrr_at_10 value: 57.060710147326446 - type: mrr_at_100 value: 57.60978373431328 - type: mrr_at_1000 value: 57.62192762809547 - type: mrr_at_20 value: 57.43431796174232 - type: mrr_at_3 value: 53.78041714947835 - type: mrr_at_5 value: 55.81257242178437 - type: nauc_map_at_1000_diff1 value: 38.337572188308194 - type: nauc_map_at_1000_max value: 27.550035254787197 - type: nauc_map_at_1000_std value: -7.5513729587308145 - type: nauc_map_at_100_diff1 value: 38.335337794455015 - type: nauc_map_at_100_max value: 27.56919614414171 - type: nauc_map_at_100_std value: -7.526017855405723 - type: nauc_map_at_10_diff1 value: 38.308131361353816 - type: nauc_map_at_10_max value: 27.691849580929933 - type: nauc_map_at_10_std value: -7.971461731555123 - type: nauc_map_at_1_diff1 value: 42.721072690634884 - type: nauc_map_at_1_max value: 21.750451486885332 - type: nauc_map_at_1_std value: -9.99540950522643 - type: nauc_map_at_20_diff1 value: 38.25792874982169 - type: nauc_map_at_20_max value: 27.68877906159661 - type: nauc_map_at_20_std value: -7.560753583212102 - type: nauc_map_at_3_diff1 value: 37.950570055936254 - type: nauc_map_at_3_max value: 26.257969511794858 - type: nauc_map_at_3_std value: -9.236868658300553 - 
type: nauc_map_at_5_diff1 value: 37.99893219450212 - type: nauc_map_at_5_max value: 27.293454259158057 - type: nauc_map_at_5_std value: -8.734089449603806 - type: nauc_mrr_at_1000_diff1 value: 37.777767467474774 - type: nauc_mrr_at_1000_max value: 27.39507603748298 - type: nauc_mrr_at_1000_std value: -5.554754076870114 - type: nauc_mrr_at_100_diff1 value: 37.77981674583538 - type: nauc_mrr_at_100_max value: 27.411100989441557 - type: nauc_mrr_at_100_std value: -5.539061231412731 - type: nauc_mrr_at_10_diff1 value: 37.72399003363479 - type: nauc_mrr_at_10_max value: 27.618142546685416 - type: nauc_mrr_at_10_std value: -5.6819843907448195 - type: nauc_mrr_at_1_diff1 value: 41.17596078958236 - type: nauc_mrr_at_1_max value: 23.32588591818617 - type: nauc_mrr_at_1_std value: -7.126628034623689 - type: nauc_mrr_at_20_diff1 value: 37.695136721588 - type: nauc_mrr_at_20_max value: 27.52850676467322 - type: nauc_mrr_at_20_std value: -5.50667995515647 - type: nauc_mrr_at_3_diff1 value: 37.23845700908964 - type: nauc_mrr_at_3_max value: 26.69389772971012 - type: nauc_mrr_at_3_std value: -6.31868405989011 - type: nauc_mrr_at_5_diff1 value: 37.33757394192838 - type: nauc_mrr_at_5_max value: 27.42091593836207 - type: nauc_mrr_at_5_std value: -5.993243330132065 - type: nauc_ndcg_at_1000_diff1 value: 37.74836061640332 - type: nauc_ndcg_at_1000_max value: 29.03148916289089 - type: nauc_ndcg_at_1000_std value: -5.543065770074502 - type: nauc_ndcg_at_100_diff1 value: 37.75593955089626 - type: nauc_ndcg_at_100_max value: 29.67109480272493 - type: nauc_ndcg_at_100_std value: -4.773697596687493 - type: nauc_ndcg_at_10_diff1 value: 37.41701174824348 - type: nauc_ndcg_at_10_max value: 30.448703434043445 - type: nauc_ndcg_at_10_std value: -6.306202666419071 - type: nauc_ndcg_at_1_diff1 value: 41.17596078958236 - type: nauc_ndcg_at_1_max value: 23.32588591818617 - type: nauc_ndcg_at_1_std value: -7.126628034623689 - type: nauc_ndcg_at_20_diff1 value: 37.17445197824622 - type: 
nauc_ndcg_at_20_max value: 30.47378561555209 - type: nauc_ndcg_at_20_std value: -4.921584853993488 - type: nauc_ndcg_at_3_diff1 value: 36.5261976812068 - type: nauc_ndcg_at_3_max value: 27.560538820208926 - type: nauc_ndcg_at_3_std value: -8.556686332882931 - type: nauc_ndcg_at_5_diff1 value: 36.571462759614526 - type: nauc_ndcg_at_5_max value: 29.363401730752585 - type: nauc_ndcg_at_5_std value: -7.825739170420347 - type: nauc_precision_at_1000_diff1 value: -12.588899483401223 - type: nauc_precision_at_1000_max value: 2.641097890578701 - type: nauc_precision_at_1000_std value: 17.643107625788748 - type: nauc_precision_at_100_diff1 value: -8.40579874206785 - type: nauc_precision_at_100_max value: 9.725496771040037 - type: nauc_precision_at_100_std value: 21.558582760191243 - type: nauc_precision_at_10_diff1 value: 6.619157191854486 - type: nauc_precision_at_10_max value: 23.767406373688402 - type: nauc_precision_at_10_std value: 10.428535003478808 - type: nauc_precision_at_1_diff1 value: 41.17596078958236 - type: nauc_precision_at_1_max value: 23.32588591818617 - type: nauc_precision_at_1_std value: -7.126628034623689 - type: nauc_precision_at_20_diff1 value: -0.6449974218292859 - type: nauc_precision_at_20_max value: 20.211503851418783 - type: nauc_precision_at_20_std value: 17.922745410142575 - type: nauc_precision_at_3_diff1 value: 19.710276097428657 - type: nauc_precision_at_3_max value: 26.768918044758706 - type: nauc_precision_at_3_std value: -1.0636448912049246 - type: nauc_precision_at_5_diff1 value: 13.073181337982613 - type: nauc_precision_at_5_max value: 26.418340338971024 - type: nauc_precision_at_5_std value: 2.9842078949528688 - type: nauc_recall_at_1000_diff1 value: 30.52411148739828 - type: nauc_recall_at_1000_max value: 90.96409807536762 - type: nauc_recall_at_1000_std value: 83.94857830921949 - type: nauc_recall_at_100_diff1 value: 36.936303690592155 - type: nauc_recall_at_100_max value: 71.91515014325869 - type: nauc_recall_at_100_std value: 
48.93061263403371 - type: nauc_recall_at_10_diff1 value: 32.84292362076269 - type: nauc_recall_at_10_max value: 44.27252783122478 - type: nauc_recall_at_10_std value: -1.5981198975612385 - type: nauc_recall_at_1_diff1 value: 42.721072690634884 - type: nauc_recall_at_1_max value: 21.750451486885332 - type: nauc_recall_at_1_std value: -9.99540950522643 - type: nauc_recall_at_20_diff1 value: 29.36724417081702 - type: nauc_recall_at_20_max value: 52.035846390214715 - type: nauc_recall_at_20_std value: 11.967264191332818 - type: nauc_recall_at_3_diff1 value: 31.634923771936098 - type: nauc_recall_at_3_max value: 30.225743369869473 - type: nauc_recall_at_3_std value: -9.253665347118615 - type: nauc_recall_at_5_diff1 value: 30.66271853090737 - type: nauc_recall_at_5_max value: 35.70815715994996 - type: nauc_recall_at_5_std value: -7.836012956078996 - type: ndcg_at_1 value: 42.468 - type: ndcg_at_10 value: 62.464 - type: ndcg_at_100 value: 65.618 - type: ndcg_at_1000 value: 66.014 - type: ndcg_at_20 value: 64.12 - type: ndcg_at_3 value: 54.790000000000006 - type: ndcg_at_5 value: 58.992 - type: precision_at_1 value: 42.468 - type: precision_at_10 value: 9.959 - type: precision_at_100 value: 1.174 - type: precision_at_1000 value: 0.121 - type: precision_at_20 value: 5.380999999999999 - type: precision_at_3 value: 24.73 - type: precision_at_5 value: 17.299999999999997 - type: recall_at_1 value: 38.0 - type: recall_at_10 value: 83.22699999999999 - type: recall_at_100 value: 96.584 - type: recall_at_1000 value: 99.512 - type: recall_at_20 value: 89.291 - type: recall_at_3 value: 63.666 - type: recall_at_5 value: 73.27900000000001 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: mteb/quora config: default split: test revision: e4e08e0b7dbe3c8700f0daef558ff32256715259 metrics: - type: main_score value: 87.366 - type: map_at_1 value: 69.95700000000001 - type: map_at_10 value: 83.55 - type: map_at_100 value: 84.196 - type: map_at_1000 value: 84.21600000000001 - 
type: map_at_20 value: 83.982 - type: map_at_3 value: 80.647 - type: map_at_5 value: 82.443 - type: mrr_at_1 value: 80.39 - type: mrr_at_10 value: 86.65646031746004 - type: mrr_at_100 value: 86.7852113210373 - type: mrr_at_1000 value: 86.78651118354796 - type: mrr_at_20 value: 86.75772838878498 - type: mrr_at_3 value: 85.67499999999971 - type: mrr_at_5 value: 86.33749999999962 - type: nauc_map_at_1000_diff1 value: 76.68189702770007 - type: nauc_map_at_1000_max value: 36.19988239025682 - type: nauc_map_at_1000_std value: -26.231691135645736 - type: nauc_map_at_100_diff1 value: 76.68832712120171 - type: nauc_map_at_100_max value: 36.18627717337547 - type: nauc_map_at_100_std value: -26.28243886166 - type: nauc_map_at_10_diff1 value: 76.88888516032657 - type: nauc_map_at_10_max value: 35.69809861085124 - type: nauc_map_at_10_std value: -27.859425473864224 - type: nauc_map_at_1_diff1 value: 79.5243725217315 - type: nauc_map_at_1_max value: 27.092773841207002 - type: nauc_map_at_1_std value: -26.223200911204543 - type: nauc_map_at_20_diff1 value: 76.74938996155176 - type: nauc_map_at_20_max value: 36.07373781351406 - type: nauc_map_at_20_std value: -26.891400098628015 - type: nauc_map_at_3_diff1 value: 77.29604745045076 - type: nauc_map_at_3_max value: 33.11431059356283 - type: nauc_map_at_3_std value: -29.555237195931085 - type: nauc_map_at_5_diff1 value: 77.14069217901078 - type: nauc_map_at_5_max value: 34.68656073526487 - type: nauc_map_at_5_std value: -28.945053669861508 - type: nauc_mrr_at_1000_diff1 value: 76.66087451567746 - type: nauc_mrr_at_1000_max value: 38.78133177265328 - type: nauc_mrr_at_1000_std value: -23.75726541774991 - type: nauc_mrr_at_100_diff1 value: 76.66117078261013 - type: nauc_mrr_at_100_max value: 38.782533036423885 - type: nauc_mrr_at_100_std value: -23.752587601473568 - type: nauc_mrr_at_10_diff1 value: 76.65866401411019 - type: nauc_mrr_at_10_max value: 38.87950311049704 - type: nauc_mrr_at_10_std value: -23.873660706680578 - type: 
nauc_mrr_at_1_diff1 value: 77.42633506487041 - type: nauc_mrr_at_1_max value: 37.93973722217786 - type: nauc_mrr_at_1_std value: -23.3984130771317 - type: nauc_mrr_at_20_diff1 value: 76.66210684923414 - type: nauc_mrr_at_20_max value: 38.81293033048911 - type: nauc_mrr_at_20_std value: -23.736590746133736 - type: nauc_mrr_at_3_diff1 value: 76.33711764736019 - type: nauc_mrr_at_3_max value: 38.5659231830368 - type: nauc_mrr_at_3_std value: -23.99588149124865 - type: nauc_mrr_at_5_diff1 value: 76.57123830226054 - type: nauc_mrr_at_5_max value: 38.97947097392977 - type: nauc_mrr_at_5_std value: -23.943668957974246 - type: nauc_ndcg_at_1000_diff1 value: 76.38447339050585 - type: nauc_ndcg_at_1000_max value: 37.756822792877934 - type: nauc_ndcg_at_1000_std value: -24.046995734357164 - type: nauc_ndcg_at_100_diff1 value: 76.44058018066822 - type: nauc_ndcg_at_100_max value: 37.72948294169218 - type: nauc_ndcg_at_100_std value: -24.083432140741795 - type: nauc_ndcg_at_10_diff1 value: 76.56246287923074 - type: nauc_ndcg_at_10_max value: 37.0329253490553 - type: nauc_ndcg_at_10_std value: -26.6495163705961 - type: nauc_ndcg_at_1_diff1 value: 77.4085129990432 - type: nauc_ndcg_at_1_max value: 38.06139172214421 - type: nauc_ndcg_at_1_std value: -23.656477126977386 - type: nauc_ndcg_at_20_diff1 value: 76.50192496743098 - type: nauc_ndcg_at_20_max value: 37.51759311013985 - type: nauc_ndcg_at_20_std value: -25.45517058360004 - type: nauc_ndcg_at_3_diff1 value: 75.94398494081794 - type: nauc_ndcg_at_3_max value: 35.7666711547279 - type: nauc_ndcg_at_3_std value: -26.866022682361578 - type: nauc_ndcg_at_5_diff1 value: 76.47334274088344 - type: nauc_ndcg_at_5_max value: 36.40830331490731 - type: nauc_ndcg_at_5_std value: -27.170121189572765 - type: nauc_precision_at_1000_diff1 value: -43.33672630765437 - type: nauc_precision_at_1000_max value: -5.089751329149161 - type: nauc_precision_at_1000_std value: 30.6241447847051 - type: nauc_precision_at_100_diff1 value: 
-42.736833035629864 - type: nauc_precision_at_100_max value: -4.060198408346224 - type: nauc_precision_at_100_std value: 29.807050266205344 - type: nauc_precision_at_10_diff1 value: -35.90810562245906 - type: nauc_precision_at_10_max value: 1.1633204529249133 - type: nauc_precision_at_10_std value: 20.129691203276018 - type: nauc_precision_at_1_diff1 value: 77.4085129990432 - type: nauc_precision_at_1_max value: 38.06139172214421 - type: nauc_precision_at_1_std value: -23.656477126977386 - type: nauc_precision_at_20_diff1 value: -40.2132286912738 - type: nauc_precision_at_20_max value: -1.3004735030734194 - type: nauc_precision_at_20_std value: 25.15612293757488 - type: nauc_precision_at_3_diff1 value: -13.873825299883904 - type: nauc_precision_at_3_max value: 11.038689278907233 - type: nauc_precision_at_3_std value: 5.4276449621706 - type: nauc_precision_at_5_diff1 value: -27.151668633894737 - type: nauc_precision_at_5_max value: 5.795130010163115 - type: nauc_precision_at_5_std value: 13.220722167587375 - type: nauc_recall_at_1000_diff1 value: 83.903950427863 - type: nauc_recall_at_1000_max value: 37.82919000897223 - type: nauc_recall_at_1000_std value: 70.65670846771707 - type: nauc_recall_at_100_diff1 value: 75.23306095335836 - type: nauc_recall_at_100_max value: 37.54281648247423 - type: nauc_recall_at_100_std value: 8.434289114377373 - type: nauc_recall_at_10_diff1 value: 72.7872912723047 - type: nauc_recall_at_10_max value: 34.261519652104184 - type: nauc_recall_at_10_std value: -34.60101950810808 - type: nauc_recall_at_1_diff1 value: 79.5243725217315 - type: nauc_recall_at_1_max value: 27.092773841207002 - type: nauc_recall_at_1_std value: -26.223200911204543 - type: nauc_recall_at_20_diff1 value: 72.8297963091964 - type: nauc_recall_at_20_max value: 36.070220569670916 - type: nauc_recall_at_20_std value: -27.20897179168245 - type: nauc_recall_at_3_diff1 value: 73.47456374650459 - type: nauc_recall_at_3_max value: 29.901663407294816 - type: 
nauc_recall_at_3_std value: -32.83329537040381 - type: nauc_recall_at_5_diff1 value: 73.05025750827126 - type: nauc_recall_at_5_max value: 32.35733470860963 - type: nauc_recall_at_5_std value: -34.32357558493091 - type: ndcg_at_1 value: 80.4 - type: ndcg_at_10 value: 87.366 - type: ndcg_at_100 value: 88.7 - type: ndcg_at_1000 value: 88.842 - type: ndcg_at_20 value: 88.11 - type: ndcg_at_3 value: 84.52499999999999 - type: ndcg_at_5 value: 86.047 - type: precision_at_1 value: 80.4 - type: precision_at_10 value: 13.235 - type: precision_at_100 value: 1.516 - type: precision_at_1000 value: 0.156 - type: precision_at_20 value: 7.037 - type: precision_at_3 value: 36.9 - type: precision_at_5 value: 24.236 - type: recall_at_1 value: 69.95700000000001 - type: recall_at_10 value: 94.535 - type: recall_at_100 value: 99.164 - type: recall_at_1000 value: 99.855 - type: recall_at_20 value: 96.974 - type: recall_at_3 value: 86.33800000000001 - type: recall_at_5 value: 90.69 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: mteb/scidocs config: default split: test revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88 metrics: - type: main_score value: 21.492 - type: map_at_1 value: 5.192 - type: map_at_10 value: 12.959000000000001 - type: map_at_100 value: 14.963999999999999 - type: map_at_1000 value: 15.261 - type: map_at_20 value: 13.988999999999999 - type: map_at_3 value: 9.235 - type: map_at_5 value: 11.042 - type: mrr_at_1 value: 25.5 - type: mrr_at_10 value: 36.37313492063491 - type: mrr_at_100 value: 37.36517957347626 - type: mrr_at_1000 value: 37.42538601073437 - type: mrr_at_20 value: 36.987896404421136 - type: mrr_at_3 value: 32.966666666666654 - type: mrr_at_5 value: 34.95166666666664 - type: nauc_map_at_1000_diff1 value: 13.635120934154395 - type: nauc_map_at_1000_max value: 28.03542983005195 - type: nauc_map_at_1000_std value: 17.07156940311778 - type: nauc_map_at_100_diff1 value: 13.59237295184475 - type: nauc_map_at_100_max value: 27.992291365051237 - type: 
nauc_map_at_100_std value: 16.926533467400464 - type: nauc_map_at_10_diff1 value: 14.149193235999993 - type: nauc_map_at_10_max value: 26.520643811139305 - type: nauc_map_at_10_std value: 13.168673602548925 - type: nauc_map_at_1_diff1 value: 20.096094508148465 - type: nauc_map_at_1_max value: 17.41582245576302 - type: nauc_map_at_1_std value: 5.771729007558897 - type: nauc_map_at_20_diff1 value: 13.977726400526427 - type: nauc_map_at_20_max value: 27.2322235491895 - type: nauc_map_at_20_std value: 14.972781677750435 - type: nauc_map_at_3_diff1 value: 17.371153027460355 - type: nauc_map_at_3_max value: 24.457758503208254 - type: nauc_map_at_3_std value: 7.719726821179824 - type: nauc_map_at_5_diff1 value: 14.600442843442574 - type: nauc_map_at_5_max value: 25.899736370856296 - type: nauc_map_at_5_std value: 10.125349354853359 - type: nauc_mrr_at_1000_diff1 value: 18.70342821390236 - type: nauc_mrr_at_1000_max value: 23.365194520549114 - type: nauc_mrr_at_1000_std value: 12.185114294903236 - type: nauc_mrr_at_100_diff1 value: 18.677858738015907 - type: nauc_mrr_at_100_max value: 23.372641996726742 - type: nauc_mrr_at_100_std value: 12.216130561991909 - type: nauc_mrr_at_10_diff1 value: 18.79094453090232 - type: nauc_mrr_at_10_max value: 23.511686337006466 - type: nauc_mrr_at_10_std value: 11.879716687008134 - type: nauc_mrr_at_1_diff1 value: 20.10455171810408 - type: nauc_mrr_at_1_max value: 17.741566234315428 - type: nauc_mrr_at_1_std value: 6.1676764583652215 - type: nauc_mrr_at_20_diff1 value: 18.70143648544655 - type: nauc_mrr_at_20_max value: 23.45603239095019 - type: nauc_mrr_at_20_std value: 12.244613576686202 - type: nauc_mrr_at_3_diff1 value: 18.894662528857374 - type: nauc_mrr_at_3_max value: 23.3739038101588 - type: nauc_mrr_at_3_std value: 10.4709044796543 - type: nauc_mrr_at_5_diff1 value: 18.877786065095563 - type: nauc_mrr_at_5_max value: 23.78061081203872 - type: nauc_mrr_at_5_std value: 11.847882917869622 - type: nauc_ndcg_at_1000_diff1 value: 
13.99159027398115 - type: nauc_ndcg_at_1000_max value: 29.44766808611483 - type: nauc_ndcg_at_1000_std value: 24.289749574699915 - type: nauc_ndcg_at_100_diff1 value: 13.164020363258746 - type: nauc_ndcg_at_100_max value: 29.642442997167723 - type: nauc_ndcg_at_100_std value: 23.761764515453866 - type: nauc_ndcg_at_10_diff1 value: 14.839883268638546 - type: nauc_ndcg_at_10_max value: 27.21043708455449 - type: nauc_ndcg_at_10_std value: 15.56110419291775 - type: nauc_ndcg_at_1_diff1 value: 20.10455171810408 - type: nauc_ndcg_at_1_max value: 17.741566234315428 - type: nauc_ndcg_at_1_std value: 6.1676764583652215 - type: nauc_ndcg_at_20_diff1 value: 14.27998110295395 - type: nauc_ndcg_at_20_max value: 28.2492026337839 - type: nauc_ndcg_at_20_std value: 18.822356982979105 - type: nauc_ndcg_at_3_diff1 value: 17.659263157535445 - type: nauc_ndcg_at_3_max value: 25.416706421591396 - type: nauc_ndcg_at_3_std value: 9.650689638152636 - type: nauc_ndcg_at_5_diff1 value: 15.38459833918123 - type: nauc_ndcg_at_5_max value: 26.92495519416969 - type: nauc_ndcg_at_5_std value: 12.71017696809276 - type: nauc_precision_at_1000_diff1 value: 6.128490135458364 - type: nauc_precision_at_1000_max value: 23.52693893261883 - type: nauc_precision_at_1000_std value: 36.280432732819925 - type: nauc_precision_at_100_diff1 value: 5.306163791220436 - type: nauc_precision_at_100_max value: 27.67851033239246 - type: nauc_precision_at_100_std value: 34.29821573752515 - type: nauc_precision_at_10_diff1 value: 10.829686435425472 - type: nauc_precision_at_10_max value: 27.201648684015318 - type: nauc_precision_at_10_std value: 19.376999508233254 - type: nauc_precision_at_1_diff1 value: 20.10455171810408 - type: nauc_precision_at_1_max value: 17.741566234315428 - type: nauc_precision_at_1_std value: 6.1676764583652215 - type: nauc_precision_at_20_diff1 value: 9.416169626702048 - type: nauc_precision_at_20_max value: 27.65257998670333 - type: nauc_precision_at_20_std value: 24.761868509805826 - type: 
nauc_precision_at_3_diff1 value: 16.666456902017348 - type: nauc_precision_at_3_max value: 27.9969730961105 - type: nauc_precision_at_3_std value: 10.991562741393231 - type: nauc_precision_at_5_diff1 value: 12.26205064462843 - type: nauc_precision_at_5_max value: 29.083848730874095 - type: nauc_precision_at_5_std value: 15.66630836555747 - type: nauc_recall_at_1000_diff1 value: 5.600277836894063 - type: nauc_recall_at_1000_max value: 23.228705161815526 - type: nauc_recall_at_1000_std value: 36.822431061799485 - type: nauc_recall_at_100_diff1 value: 4.991781244867178 - type: nauc_recall_at_100_max value: 27.70095625483475 - type: nauc_recall_at_100_std value: 34.67168431597854 - type: nauc_recall_at_10_diff1 value: 10.580860425931972 - type: nauc_recall_at_10_max value: 27.145829414223666 - type: nauc_recall_at_10_std value: 19.330630157067382 - type: nauc_recall_at_1_diff1 value: 20.096094508148465 - type: nauc_recall_at_1_max value: 17.41582245576302 - type: nauc_recall_at_1_std value: 5.771729007558897 - type: nauc_recall_at_20_diff1 value: 9.06945331260344 - type: nauc_recall_at_20_max value: 27.56725251066482 - type: nauc_recall_at_20_std value: 24.77644509886098 - type: nauc_recall_at_3_diff1 value: 16.660507676429322 - type: nauc_recall_at_3_max value: 27.816546386536434 - type: nauc_recall_at_3_std value: 10.687824478247007 - type: nauc_recall_at_5_diff1 value: 11.992514446369388 - type: nauc_recall_at_5_max value: 28.789031176671948 - type: nauc_recall_at_5_std value: 15.422118990090805 - type: ndcg_at_1 value: 25.5 - type: ndcg_at_10 value: 21.492 - type: ndcg_at_100 value: 29.022 - type: ndcg_at_1000 value: 34.298 - type: ndcg_at_20 value: 24.237000000000002 - type: ndcg_at_3 value: 20.392 - type: ndcg_at_5 value: 17.801000000000002 - type: precision_at_1 value: 25.5 - type: precision_at_10 value: 11.09 - type: precision_at_100 value: 2.1919999999999997 - type: precision_at_1000 value: 0.346 - type: precision_at_20 value: 7.135 - type: precision_at_3 
value: 18.933 - type: precision_at_5 value: 15.52 - type: recall_at_1 value: 5.192 - type: recall_at_10 value: 22.512999999999998 - type: recall_at_100 value: 44.505 - type: recall_at_1000 value: 70.267 - type: recall_at_20 value: 28.965000000000003 - type: recall_at_3 value: 11.522 - type: recall_at_5 value: 15.751999999999999 - task: type: Retrieval dataset: name: MTEB SciFact type: mteb/scifact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: main_score value: 71.586 - type: map_at_1 value: 56.760999999999996 - type: map_at_10 value: 66.893 - type: map_at_100 value: 67.42 - type: map_at_1000 value: 67.44200000000001 - type: map_at_20 value: 67.232 - type: map_at_3 value: 64.193 - type: map_at_5 value: 65.73400000000001 - type: mrr_at_1 value: 60.0 - type: mrr_at_10 value: 68.20383597883595 - type: mrr_at_100 value: 68.58867453733343 - type: mrr_at_1000 value: 68.61117469977329 - type: mrr_at_20 value: 68.43973740684265 - type: mrr_at_3 value: 66.11111111111111 - type: mrr_at_5 value: 67.44444444444446 - type: nauc_map_at_1000_diff1 value: 72.66688261123035 - type: nauc_map_at_1000_max value: 61.02926282006283 - type: nauc_map_at_1000_std value: 11.084549829740526 - type: nauc_map_at_100_diff1 value: 72.66226192320828 - type: nauc_map_at_100_max value: 61.04393223108811 - type: nauc_map_at_100_std value: 11.101529343291695 - type: nauc_map_at_10_diff1 value: 72.66732266693091 - type: nauc_map_at_10_max value: 61.24124296311832 - type: nauc_map_at_10_std value: 10.91179451961794 - type: nauc_map_at_1_diff1 value: 74.2356464256346 - type: nauc_map_at_1_max value: 54.06962758957632 - type: nauc_map_at_1_std value: 0.8037891907963532 - type: nauc_map_at_20_diff1 value: 72.65198594061253 - type: nauc_map_at_20_max value: 61.130159351448185 - type: nauc_map_at_20_std value: 11.2246899245522 - type: nauc_map_at_3_diff1 value: 72.78578673303954 - type: nauc_map_at_3_max value: 59.19073262936321 - type: nauc_map_at_3_std 
value: 8.460301560522968 - type: nauc_map_at_5_diff1 value: 72.55004168261968 - type: nauc_map_at_5_max value: 59.75181935082357 - type: nauc_map_at_5_std value: 9.440299527201889 - type: nauc_mrr_at_1000_diff1 value: 72.82720348470325 - type: nauc_mrr_at_1000_max value: 62.344231223741446 - type: nauc_mrr_at_1000_std value: 12.60196558488974 - type: nauc_mrr_at_100_diff1 value: 72.82236849255094 - type: nauc_mrr_at_100_max value: 62.35799491393125 - type: nauc_mrr_at_100_std value: 12.617900773655673 - type: nauc_mrr_at_10_diff1 value: 72.7722847495086 - type: nauc_mrr_at_10_max value: 62.66642401155435 - type: nauc_mrr_at_10_std value: 12.906381237738746 - type: nauc_mrr_at_1_diff1 value: 74.71208073612343 - type: nauc_mrr_at_1_max value: 59.50430394775893 - type: nauc_mrr_at_1_std value: 8.129514198080512 - type: nauc_mrr_at_20_diff1 value: 72.78312367361772 - type: nauc_mrr_at_20_max value: 62.421122493761885 - type: nauc_mrr_at_20_std value: 12.693437522498588 - type: nauc_mrr_at_3_diff1 value: 73.50670156385345 - type: nauc_mrr_at_3_max value: 62.01717537699209 - type: nauc_mrr_at_3_std value: 11.926548252191182 - type: nauc_mrr_at_5_diff1 value: 72.62204028549876 - type: nauc_mrr_at_5_max value: 62.319358766312085 - type: nauc_mrr_at_5_std value: 13.081257923284342 - type: nauc_ndcg_at_1000_diff1 value: 72.29960539074736 - type: nauc_ndcg_at_1000_max value: 62.75096959221402 - type: nauc_ndcg_at_1000_std value: 13.81528462505362 - type: nauc_ndcg_at_100_diff1 value: 72.19985782073529 - type: nauc_ndcg_at_100_max value: 63.18837705326287 - type: nauc_ndcg_at_100_std value: 14.506479655117138 - type: nauc_ndcg_at_10_diff1 value: 71.85759847832983 - type: nauc_ndcg_at_10_max value: 64.150996056865 - type: nauc_ndcg_at_10_std value: 14.580606901634278 - type: nauc_ndcg_at_1_diff1 value: 74.71208073612343 - type: nauc_ndcg_at_1_max value: 59.50430394775893 - type: nauc_ndcg_at_1_std value: 8.129514198080512 - type: nauc_ndcg_at_20_diff1 value: 71.80987178228351 - 
type: nauc_ndcg_at_20_max value: 63.56269460865743 - type: nauc_ndcg_at_20_std value: 15.024978004625922 - type: nauc_ndcg_at_3_diff1 value: 72.35095651602592 - type: nauc_ndcg_at_3_max value: 61.60548011855679 - type: nauc_ndcg_at_3_std value: 12.048248788835263 - type: nauc_ndcg_at_5_diff1 value: 71.48615621881864 - type: nauc_ndcg_at_5_max value: 61.72870035979784 - type: nauc_ndcg_at_5_std value: 12.83048357446691 - type: nauc_precision_at_1000_diff1 value: -14.743011420972 - type: nauc_precision_at_1000_max value: 19.281995763080158 - type: nauc_precision_at_1000_std value: 49.6140660398164 - type: nauc_precision_at_100_diff1 value: 0.11278174806205563 - type: nauc_precision_at_100_max value: 29.704511820077332 - type: nauc_precision_at_100_std value: 47.84916954122579 - type: nauc_precision_at_10_diff1 value: 20.498227967235728 - type: nauc_precision_at_10_max value: 47.883119365891595 - type: nauc_precision_at_10_std value: 45.182178693450595 - type: nauc_precision_at_1_diff1 value: 74.71208073612343 - type: nauc_precision_at_1_max value: 59.50430394775893 - type: nauc_precision_at_1_std value: 8.129514198080512 - type: nauc_precision_at_20_diff1 value: 12.551737222341455 - type: nauc_precision_at_20_max value: 40.618899501225634 - type: nauc_precision_at_20_std value: 48.5598454249067 - type: nauc_precision_at_3_diff1 value: 47.67720764601145 - type: nauc_precision_at_3_max value: 56.50632017305064 - type: nauc_precision_at_3_std value: 31.14175140162157 - type: nauc_precision_at_5_diff1 value: 35.10058622792819 - type: nauc_precision_at_5_max value: 51.88948872657981 - type: nauc_precision_at_5_std value: 37.62796957461928 - type: nauc_recall_at_1000_diff1 value: 79.57516339869238 - type: nauc_recall_at_1000_max value: 86.11111111111035 - type: nauc_recall_at_1000_std value: 79.57516339869238 - type: nauc_recall_at_100_diff1 value: 70.50859559510081 - type: nauc_recall_at_100_max value: 79.17009941231396 - type: nauc_recall_at_100_std value: 
44.32910419069595 - type: nauc_recall_at_10_diff1 value: 66.16118569361245 - type: nauc_recall_at_10_max value: 74.73542948302286 - type: nauc_recall_at_10_std value: 27.680330939810037 - type: nauc_recall_at_1_diff1 value: 74.2356464256346 - type: nauc_recall_at_1_max value: 54.06962758957632 - type: nauc_recall_at_1_std value: 0.8037891907963532 - type: nauc_recall_at_20_diff1 value: 65.4748436545527 - type: nauc_recall_at_20_max value: 73.81532199081235 - type: nauc_recall_at_20_std value: 33.59324708196253 - type: nauc_recall_at_3_diff1 value: 68.83194804473622 - type: nauc_recall_at_3_max value: 61.77722610439669 - type: nauc_recall_at_3_std value: 13.984923756556714 - type: nauc_recall_at_5_diff1 value: 65.51467417209523 - type: nauc_recall_at_5_max value: 64.08276291427661 - type: nauc_recall_at_5_std value: 19.976472037847167 - type: ndcg_at_1 value: 60.0 - type: ndcg_at_10 value: 71.586 - type: ndcg_at_100 value: 73.76899999999999 - type: ndcg_at_1000 value: 74.386 - type: ndcg_at_20 value: 72.612 - type: ndcg_at_3 value: 66.944 - type: ndcg_at_5 value: 69.333 - type: precision_at_1 value: 60.0 - type: precision_at_10 value: 9.6 - type: precision_at_100 value: 1.073 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_20 value: 5.033 - type: precision_at_3 value: 26.333000000000002 - type: precision_at_5 value: 17.4 - type: recall_at_1 value: 56.760999999999996 - type: recall_at_10 value: 84.589 - type: recall_at_100 value: 94.333 - type: recall_at_1000 value: 99.333 - type: recall_at_20 value: 88.43299999999999 - type: recall_at_3 value: 72.10600000000001 - type: recall_at_5 value: 78.194 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: mteb/trec-covid config: default split: test revision: bb9466bac8153a0349341eb1b22e06409e78ef4e metrics: - type: main_score value: 84.60600000000001 - type: map_at_1 value: 0.257 - type: map_at_10 value: 2.196 - type: map_at_100 value: 13.252 - type: map_at_1000 value: 31.473000000000003 - 
type: map_at_20 value: 4.023000000000001 - type: map_at_3 value: 0.722 - type: map_at_5 value: 1.146 - type: mrr_at_1 value: 94.0 - type: mrr_at_10 value: 97.0 - type: mrr_at_100 value: 97.0 - type: mrr_at_1000 value: 97.0 - type: mrr_at_20 value: 97.0 - type: mrr_at_3 value: 97.0 - type: mrr_at_5 value: 97.0 - type: nauc_map_at_1000_diff1 value: -30.674816554207062 - type: nauc_map_at_1000_max value: 53.18598689657068 - type: nauc_map_at_1000_std value: 78.88325309469121 - type: nauc_map_at_100_diff1 value: -17.6877824653978 - type: nauc_map_at_100_max value: 19.584159765315658 - type: nauc_map_at_100_std value: 48.051154190992726 - type: nauc_map_at_10_diff1 value: 20.076631089898626 - type: nauc_map_at_10_max value: -8.642556160185636 - type: nauc_map_at_10_std value: -5.768698617334298 - type: nauc_map_at_1_diff1 value: 27.342260509653798 - type: nauc_map_at_1_max value: -23.400451210297994 - type: nauc_map_at_1_std value: -21.152006353733853 - type: nauc_map_at_20_diff1 value: 8.019321726240506 - type: nauc_map_at_20_max value: -1.4826378210544222 - type: nauc_map_at_20_std value: 5.698208117745366 - type: nauc_map_at_3_diff1 value: 32.073377946749446 - type: nauc_map_at_3_max value: -13.099353983204654 - type: nauc_map_at_3_std value: -15.36319127398037 - type: nauc_map_at_5_diff1 value: 22.500045815797876 - type: nauc_map_at_5_max value: -8.548135411428023 - type: nauc_map_at_5_std value: -8.547850460331334 - type: nauc_mrr_at_1000_diff1 value: -6.022408963585526 - type: nauc_mrr_at_1000_max value: 4.481792717087155 - type: nauc_mrr_at_1000_std value: 51.6962340491753 - type: nauc_mrr_at_100_diff1 value: -6.022408963585526 - type: nauc_mrr_at_100_max value: 4.481792717087155 - type: nauc_mrr_at_100_std value: 51.6962340491753 - type: nauc_mrr_at_10_diff1 value: -6.022408963585526 - type: nauc_mrr_at_10_max value: 4.481792717087155 - type: nauc_mrr_at_10_std value: 51.6962340491753 - type: nauc_mrr_at_1_diff1 value: -6.022408963585076 - type: 
nauc_mrr_at_1_max value: 4.481792717087146 - type: nauc_mrr_at_1_std value: 51.69623404917518 - type: nauc_mrr_at_20_diff1 value: -6.022408963585526 - type: nauc_mrr_at_20_max value: 4.481792717087155 - type: nauc_mrr_at_20_std value: 51.6962340491753 - type: nauc_mrr_at_3_diff1 value: -6.022408963585526 - type: nauc_mrr_at_3_max value: 4.481792717087155 - type: nauc_mrr_at_3_std value: 51.6962340491753 - type: nauc_mrr_at_5_diff1 value: -6.022408963585526 - type: nauc_mrr_at_5_max value: 4.481792717087155 - type: nauc_mrr_at_5_std value: 51.6962340491753 - type: nauc_ndcg_at_1000_diff1 value: -20.79697283984295 - type: nauc_ndcg_at_1000_max value: 52.97671908009218 - type: nauc_ndcg_at_1000_std value: 75.43907707019758 - type: nauc_ndcg_at_100_diff1 value: -38.620752706946455 - type: nauc_ndcg_at_100_max value: 49.41307462381511 - type: nauc_ndcg_at_100_std value: 81.33299379244252 - type: nauc_ndcg_at_10_diff1 value: -18.611906363037356 - type: nauc_ndcg_at_10_max value: 44.20544651664479 - type: nauc_ndcg_at_10_std value: 61.322552829935816 - type: nauc_ndcg_at_1_diff1 value: 18.625935567849073 - type: nauc_ndcg_at_1_max value: -10.104132769280879 - type: nauc_ndcg_at_1_std value: 22.449560689879743 - type: nauc_ndcg_at_20_diff1 value: -30.61130208138771 - type: nauc_ndcg_at_20_max value: 52.68851710375231 - type: nauc_ndcg_at_20_std value: 69.72357683382992 - type: nauc_ndcg_at_3_diff1 value: 5.695394821691213 - type: nauc_ndcg_at_3_max value: 37.909122367102135 - type: nauc_ndcg_at_3_std value: 46.2366603255159 - type: nauc_ndcg_at_5_diff1 value: -15.273067832464731 - type: nauc_ndcg_at_5_max value: 49.7054639475091 - type: nauc_ndcg_at_5_std value: 58.83754007826166 - type: nauc_precision_at_1000_diff1 value: -31.565302588492035 - type: nauc_precision_at_1000_max value: 52.56214379514724 - type: nauc_precision_at_1000_std value: 53.40618234326055 - type: nauc_precision_at_100_diff1 value: -44.67273120709088 - type: nauc_precision_at_100_max value: 
48.30381155522576 - type: nauc_precision_at_100_std value: 82.1984661602578 - type: nauc_precision_at_10_diff1 value: -24.737383556860145 - type: nauc_precision_at_10_max value: 52.816815002878556 - type: nauc_precision_at_10_std value: 67.99052410030845 - type: nauc_precision_at_1_diff1 value: -6.022408963585076 - type: nauc_precision_at_1_max value: 4.481792717087146 - type: nauc_precision_at_1_std value: 51.69623404917518 - type: nauc_precision_at_20_diff1 value: -40.23628054967093 - type: nauc_precision_at_20_max value: 56.980056980057014 - type: nauc_precision_at_20_std value: 76.60976777785895 - type: nauc_precision_at_3_diff1 value: -4.661784068466279 - type: nauc_precision_at_3_max value: 59.052007899934125 - type: nauc_precision_at_3_std value: 58.187952600394986 - type: nauc_precision_at_5_diff1 value: -38.11848143512736 - type: nauc_precision_at_5_max value: 68.6149353358365 - type: nauc_precision_at_5_std value: 73.55652899457661 - type: nauc_recall_at_1000_diff1 value: -14.886527444436345 - type: nauc_recall_at_1000_max value: 48.07492302795808 - type: nauc_recall_at_1000_std value: 65.05623212485906 - type: nauc_recall_at_100_diff1 value: -8.148385729388195 - type: nauc_recall_at_100_max value: 8.041615364614533 - type: nauc_recall_at_100_std value: 33.77187914574611 - type: nauc_recall_at_10_diff1 value: 24.333628413035942 - type: nauc_recall_at_10_max value: -14.577877145192078 - type: nauc_recall_at_10_std value: -12.131819145098557 - type: nauc_recall_at_1_diff1 value: 27.342260509653798 - type: nauc_recall_at_1_max value: -23.400451210297994 - type: nauc_recall_at_1_std value: -21.152006353733853 - type: nauc_recall_at_20_diff1 value: 13.695556376785564 - type: nauc_recall_at_20_max value: -8.872009346408264 - type: nauc_recall_at_20_std value: -3.163199444247112 - type: nauc_recall_at_3_diff1 value: 32.00442538217753 - type: nauc_recall_at_3_max value: -15.159737942664552 - type: nauc_recall_at_3_std value: -17.530833132440645 - type: 
nauc_recall_at_5_diff1 value: 22.64740552912405 - type: nauc_recall_at_5_max value: -12.947090597010414 - type: nauc_recall_at_5_std value: -12.914478822476807 - type: ndcg_at_1 value: 88.0 - type: ndcg_at_10 value: 84.60600000000001 - type: ndcg_at_100 value: 64.31700000000001 - type: ndcg_at_1000 value: 56.40500000000001 - type: ndcg_at_20 value: 80.561 - type: ndcg_at_3 value: 87.87700000000001 - type: ndcg_at_5 value: 86.641 - type: precision_at_1 value: 94.0 - type: precision_at_10 value: 88.2 - type: precision_at_100 value: 65.9 - type: precision_at_1000 value: 25.019999999999996 - type: precision_at_20 value: 84.7 - type: precision_at_3 value: 92.0 - type: precision_at_5 value: 90.0 - type: recall_at_1 value: 0.257 - type: recall_at_10 value: 2.338 - type: recall_at_100 value: 15.831999999999999 - type: recall_at_1000 value: 52.519000000000005 - type: recall_at_20 value: 4.367 - type: recall_at_3 value: 0.74 - type: recall_at_5 value: 1.196 - task: type: Retrieval dataset: name: MTEB Touche2020 type: mteb/touche2020 config: default split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: main_score value: 31.426 - type: map_at_1 value: 3.4709999999999996 - type: map_at_10 value: 13.236999999999998 - type: map_at_100 value: 19.521 - type: map_at_1000 value: 21.224 - type: map_at_20 value: 15.626000000000001 - type: map_at_3 value: 7.152 - type: map_at_5 value: 9.914000000000001 - type: mrr_at_1 value: 44.89795918367347 - type: mrr_at_10 value: 57.54373177842565 - type: mrr_at_100 value: 57.855267710139536 - type: mrr_at_1000 value: 57.855267710139536 - type: mrr_at_20 value: 57.70071764969724 - type: mrr_at_3 value: 52.72108843537414 - type: mrr_at_5 value: 55.06802721088435 - type: nauc_map_at_1000_diff1 value: 21.148857552115558 - type: nauc_map_at_1000_max value: 2.0837572569021323 - type: nauc_map_at_1000_std value: 3.203419709665347 - type: nauc_map_at_100_diff1 value: 21.383778167597878 - type: nauc_map_at_100_max value: 
0.965767943155967 - type: nauc_map_at_100_std value: 0.3949924961020957 - type: nauc_map_at_10_diff1 value: 27.178555638086394 - type: nauc_map_at_10_max value: 4.480675175857958 - type: nauc_map_at_10_std value: -13.69553539513878 - type: nauc_map_at_1_diff1 value: 27.63901823865334 - type: nauc_map_at_1_max value: -18.6387233237763 - type: nauc_map_at_1_std value: -27.02164241863646 - type: nauc_map_at_20_diff1 value: 23.892104752374888 - type: nauc_map_at_20_max value: 3.5343136621362348 - type: nauc_map_at_20_std value: -8.765101188860816 - type: nauc_map_at_3_diff1 value: 22.065793929837493 - type: nauc_map_at_3_max value: 0.8063396680860568 - type: nauc_map_at_3_std value: -20.404849396621824 - type: nauc_map_at_5_diff1 value: 22.66626080580714 - type: nauc_map_at_5_max value: 5.423340658352383 - type: nauc_map_at_5_std value: -18.31523779843455 - type: nauc_mrr_at_1000_diff1 value: 30.520722269282665 - type: nauc_mrr_at_1000_max value: -16.644959497742267 - type: nauc_mrr_at_1000_std value: -16.3824126273053 - type: nauc_mrr_at_100_diff1 value: 30.520722269282665 - type: nauc_mrr_at_100_max value: -16.644959497742267 - type: nauc_mrr_at_100_std value: -16.3824126273053 - type: nauc_mrr_at_10_diff1 value: 30.428248939332974 - type: nauc_mrr_at_10_max value: -16.300183919261585 - type: nauc_mrr_at_10_std value: -15.404823235836309 - type: nauc_mrr_at_1_diff1 value: 27.041346572613474 - type: nauc_mrr_at_1_max value: -23.181309312755804 - type: nauc_mrr_at_1_std value: -24.33076726484014 - type: nauc_mrr_at_20_diff1 value: 30.676558567379303 - type: nauc_mrr_at_20_max value: -16.914268763031416 - type: nauc_mrr_at_20_std value: -15.77742854976336 - type: nauc_mrr_at_3_diff1 value: 31.718457109787096 - type: nauc_mrr_at_3_max value: -15.508391132202235 - type: nauc_mrr_at_3_std value: -20.33229438349494 - type: nauc_mrr_at_5_diff1 value: 28.73798376227693 - type: nauc_mrr_at_5_max value: -16.086295031060196 - type: nauc_mrr_at_5_std value: -15.644604635769321 - 
type: nauc_ndcg_at_1000_diff1 value: 22.158724660189606 - type: nauc_ndcg_at_1000_max value: -3.1755686809941475 - type: nauc_ndcg_at_1000_std value: 19.258386224159075 - type: nauc_ndcg_at_100_diff1 value: 21.83846748649288 - type: nauc_ndcg_at_100_max value: -10.939957598756036 - type: nauc_ndcg_at_100_std value: 14.729678880436623 - type: nauc_ndcg_at_10_diff1 value: 26.944882726098424 - type: nauc_ndcg_at_10_max value: -3.5176483833346617 - type: nauc_ndcg_at_10_std value: -5.400606773697211 - type: nauc_ndcg_at_1_diff1 value: 26.649410985172985 - type: nauc_ndcg_at_1_max value: -18.806716526067493 - type: nauc_ndcg_at_1_std value: -25.100244999343506 - type: nauc_ndcg_at_20_diff1 value: 24.860266153648315 - type: nauc_ndcg_at_20_max value: -7.521401821712892 - type: nauc_ndcg_at_20_std value: -3.3696577425983003 - type: nauc_ndcg_at_3_diff1 value: 23.9933326962406 - type: nauc_ndcg_at_3_max value: -0.4609479344284664 - type: nauc_ndcg_at_3_std value: -15.176459166869897 - type: nauc_ndcg_at_5_diff1 value: 22.50595978713142 - type: nauc_ndcg_at_5_max value: -2.1093870656000857 - type: nauc_ndcg_at_5_std value: -12.732197425528257 - type: nauc_precision_at_1000_diff1 value: -20.335120385950024 - type: nauc_precision_at_1000_max value: 26.95109729939765 - type: nauc_precision_at_1000_std value: 29.981685890622117 - type: nauc_precision_at_100_diff1 value: -2.782114329320704 - type: nauc_precision_at_100_max value: 2.9489322002048604 - type: nauc_precision_at_100_std value: 67.3074073674319 - type: nauc_precision_at_10_diff1 value: 21.385177180383383 - type: nauc_precision_at_10_max value: -2.4696365259422817 - type: nauc_precision_at_10_std value: 14.469784299536673 - type: nauc_precision_at_1_diff1 value: 27.041346572613474 - type: nauc_precision_at_1_max value: -23.181309312755804 - type: nauc_precision_at_1_std value: -24.33076726484014 - type: nauc_precision_at_20_diff1 value: 11.993846579997673 - type: nauc_precision_at_20_max value: -2.4792189693296227 - 
type: nauc_precision_at_20_std value: 28.581394687807745 - type: nauc_precision_at_3_diff1 value: 20.70568446328836 - type: nauc_precision_at_3_max value: 0.37326398699875984 - type: nauc_precision_at_3_std value: -12.983918676694389 - type: nauc_precision_at_5_diff1 value: 19.47466335828124 - type: nauc_precision_at_5_max value: -1.8921617684385994 - type: nauc_precision_at_5_std value: -6.533875294402164 - type: nauc_recall_at_1000_diff1 value: 7.611201305723156 - type: nauc_recall_at_1000_max value: 5.6416194035820055 - type: nauc_recall_at_1000_std value: 61.695208644278 - type: nauc_recall_at_100_diff1 value: 10.0183258158735 - type: nauc_recall_at_100_max value: -10.950612455698973 - type: nauc_recall_at_100_std value: 33.06069987640471 - type: nauc_recall_at_10_diff1 value: 24.738210305731535 - type: nauc_recall_at_10_max value: -2.6592454032071546 - type: nauc_recall_at_10_std value: -4.83987517793115 - type: nauc_recall_at_1_diff1 value: 27.63901823865334 - type: nauc_recall_at_1_max value: -18.6387233237763 - type: nauc_recall_at_1_std value: -27.02164241863646 - type: nauc_recall_at_20_diff1 value: 17.79601177409034 - type: nauc_recall_at_20_max value: -6.681637093148051 - type: nauc_recall_at_20_std value: 3.369193919932238 - type: nauc_recall_at_3_diff1 value: 24.9589431081204 - type: nauc_recall_at_3_max value: 2.4783640980500232 - type: nauc_recall_at_3_std value: -19.567415651090702 - type: nauc_recall_at_5_diff1 value: 23.71803410135437 - type: nauc_recall_at_5_max value: 1.6294309357641652 - type: nauc_recall_at_5_std value: -15.365511906408983 - type: ndcg_at_1 value: 40.816 - type: ndcg_at_10 value: 31.426 - type: ndcg_at_100 value: 41.558 - type: ndcg_at_1000 value: 53.042 - type: ndcg_at_20 value: 31.108999999999998 - type: ndcg_at_3 value: 35.518 - type: ndcg_at_5 value: 33.235 - type: precision_at_1 value: 44.897999999999996 - type: precision_at_10 value: 27.551 - type: precision_at_100 value: 8.204 - type: precision_at_1000 value: 1.582 - 
type: precision_at_20 value: 19.796 - type: precision_at_3 value: 36.735 - type: precision_at_5 value: 33.061 - type: recall_at_1 value: 3.4709999999999996 - type: recall_at_10 value: 19.563 - type: recall_at_100 value: 50.3 - type: recall_at_1000 value: 85.13199999999999 - type: recall_at_20 value: 26.738 - type: recall_at_3 value: 7.8420000000000005 - type: recall_at_5 value: 11.994 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 68.29850746268657 - type: ap value: 30.109785890841966 - type: ap_weighted value: 30.109785890841966 - type: f1 value: 61.76875915202924 - type: f1_weighted value: 71.32073190458556 - type: main_score value: 68.29850746268657 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification (default) type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 90.3068 - type: ap value: 86.17914339624038 - type: ap_weighted value: 86.17914339624038 - type: f1 value: 90.29716826358077 - type: f1_weighted value: 90.29716826358077 - type: main_score value: 90.3068 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 46.272000000000006 - type: f1 value: 45.57042543386915 - type: f1_weighted value: 45.57042543386915 - type: main_score value: 46.272000000000006 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P (default) type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: main_score value: 44.9469238081379 - type: v_measure value: 44.9469238081379 - type: v_measure_std value: 13.26811262671461 - task: type: Clustering dataset: 
name: MTEB ArxivClusteringS2S (default) type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: main_score value: 34.12071448053325 - type: v_measure value: 34.12071448053325 - type: v_measure_std value: 13.7019879046405 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions (default) type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: main_score value: 61.597667288657846 - type: map value: 61.597667288657846 - type: mrr value: 75.57940904893813 - type: nAUC_map_diff1 value: 8.745172077340095 - type: nAUC_map_max value: 20.114863024035493 - type: nAUC_map_std value: 15.991351189572192 - type: nAUC_mrr_diff1 value: 20.781369244159983 - type: nAUC_mrr_max value: 30.78542570228559 - type: nAUC_mrr_std value: 19.861484857303676 - task: type: STS dataset: name: MTEB BIOSSES (default) type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cosine_pearson value: 88.55587996301419 - type: cosine_spearman value: 86.40317357420093 - type: euclidean_pearson value: 86.93771958250231 - type: euclidean_spearman value: 86.40317357420093 - type: main_score value: 86.40317357420093 - type: manhattan_pearson value: 86.92196577117366 - type: manhattan_spearman value: 85.79834051556095 - type: pearson value: 88.55587996301419 - type: spearman value: 86.40317357420093 - task: type: Classification dataset: name: MTEB Banking77Classification (default) type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 80.0064935064935 - type: f1 value: 79.29524254086299 - type: f1_weighted value: 79.295242540863 - type: main_score value: 80.0064935064935 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P (default) type: mteb/biorxiv-clustering-p2p config: default split: test 
revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: main_score value: 35.27186813341181 - type: v_measure value: 35.27186813341181 - type: v_measure_std value: 0.8621482145872432 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S (default) type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: main_score value: 28.411805064852295 - type: v_measure value: 28.411805064852295 - type: v_measure_std value: 0.7194290078011281 - task: type: Classification dataset: name: MTEB EmotionClassification (default) type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 43.675 - type: f1 value: 40.15061931375577 - type: f1_weighted value: 45.714186572727066 - type: main_score value: 43.675 - task: type: Classification dataset: name: MTEB ImdbClassification (default) type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 84.35640000000001 - type: ap value: 79.07507736685174 - type: ap_weighted value: 79.07507736685174 - type: f1 value: 84.32288494833531 - type: f1_weighted value: 84.32288494833531 - type: main_score value: 84.35640000000001 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 91.35658914728684 - type: f1 value: 90.86877537911086 - type: f1_weighted value: 91.3282092774443 - type: main_score value: 91.35658914728684 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 60.63611491108071 - type: f1 value: 42.78886482112741 - type: f1_weighted value: 63.44208631840539 - type: main_score value: 60.63611491108071 - 
task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 4672e20407010da34463acc759c162ca9734bca6 metrics: - type: accuracy value: 66.68796234028245 - type: f1 value: 64.44940791000278 - type: f1_weighted value: 65.77554417406792 - type: main_score value: 66.68796234028245 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8 metrics: - type: accuracy value: 73.0598520511096 - type: f1 value: 72.14267273884774 - type: f1_weighted value: 72.93345180137516 - type: main_score value: 73.0598520511096 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P (default) type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: main_score value: 31.143081341699606 - type: v_measure value: 31.143081341699606 - type: v_measure_std value: 1.5578716347076906 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S (default) type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: main_score value: 27.010818869829556 - type: v_measure value: 27.010818869829556 - type: v_measure_std value: 1.1771554540819378 - task: type: Reranking dataset: name: MTEB MindSmallReranking (default) type: mteb/mind_small config: default split: test revision: 59042f120c80e8afa9cdbb224f67076cec0fc9a7 metrics: - type: main_score value: 30.20503776754942 - type: map value: 30.20503776754942 - type: mrr value: 31.076636002733437 - type: nAUC_map_diff1 value: 7.290568655287842 - type: nAUC_map_max value: -21.381599355932945 - type: nAUC_map_std value: -7.709920607543168 - type: nAUC_mrr_diff1 value: 7.558397329284913 - type: nAUC_mrr_max value: -15.981397186427607 - type: nAUC_mrr_std value: -4.870495243168834 - task: 
type: Clustering dataset: name: MTEB RedditClustering (default) type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: main_score value: 51.85893476633338 - type: v_measure value: 51.85893476633338 - type: v_measure_std value: 4.704770139385852 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P (default) type: mteb/reddit-clustering-p2p config: default split: test revision: 385e3cb46b4cfa89021f56c4380204149d0efe33 metrics: - type: main_score value: 61.8124222918822 - type: v_measure value: 61.8124222918822 - type: v_measure_std value: 11.994472578100165 - task: type: STS dataset: name: MTEB SICK-R (default) type: mteb/sickr-sts config: default split: test revision: 20a6d6f312dd54037fe07a32d58e5e168867909d metrics: - type: cosine_pearson value: 77.63310776935984 - type: cosine_spearman value: 69.86468291111039 - type: euclidean_pearson value: 73.91537077798837 - type: euclidean_spearman value: 69.86468376650203 - type: main_score value: 69.86468291111039 - type: manhattan_pearson value: 73.68616048370464 - type: manhattan_spearman value: 69.76232036206659 - type: pearson value: 77.63310776935984 - type: spearman value: 69.86468291111039 - task: type: STS dataset: name: MTEB STS12 (default) type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cosine_pearson value: 57.71716838245049 - type: cosine_spearman value: 61.797855543446424 - type: euclidean_pearson value: 58.22958675325848 - type: euclidean_spearman value: 61.797855543446424 - type: main_score value: 61.797855543446424 - type: manhattan_pearson value: 57.63117544997929 - type: manhattan_spearman value: 61.3629404350085 - type: pearson value: 57.71716838245049 - type: spearman value: 61.797855543446424 - task: type: STS dataset: name: MTEB STS13 (default) type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - 
type: cosine_pearson value: 82.30260026790903 - type: cosine_spearman value: 82.66959813070869 - type: euclidean_pearson value: 82.08383017580783 - type: euclidean_spearman value: 82.66959813070869 - type: main_score value: 82.66959813070869 - type: manhattan_pearson value: 81.77991451392153 - type: manhattan_spearman value: 82.3652534745606 - type: pearson value: 82.30260026790903 - type: spearman value: 82.66959813070869 - task: type: STS dataset: name: MTEB STS14 (default) type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cosine_pearson value: 71.50608384084478 - type: cosine_spearman value: 68.94968064977785 - type: euclidean_pearson value: 70.73381299949564 - type: euclidean_spearman value: 68.94968064977785 - type: main_score value: 68.94968064977785 - type: manhattan_pearson value: 70.5385486953787 - type: manhattan_spearman value: 68.82132770672365 - type: pearson value: 71.50608384084478 - type: spearman value: 68.94968064977785 - task: type: STS dataset: name: MTEB STS15 (default) type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cosine_pearson value: 73.66969825874907 - type: cosine_spearman value: 75.55374982088381 - type: euclidean_pearson value: 75.9339313749594 - type: euclidean_spearman value: 75.55374982088381 - type: main_score value: 75.55374982088381 - type: manhattan_pearson value: 75.88287553383817 - type: manhattan_spearman value: 75.50729812977688 - type: pearson value: 73.66969825874907 - type: spearman value: 75.55374982088381 - task: type: STS dataset: name: MTEB STS16 (default) type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cosine_pearson value: 74.5954724414016 - type: cosine_spearman value: 77.2688820850505 - type: euclidean_pearson value: 77.19866353971555 - type: euclidean_spearman value: 77.2688820850505 - type: main_score value: 
77.2688820850505 - type: manhattan_pearson value: 77.27072603680978 - type: manhattan_spearman value: 77.29408453673607 - type: pearson value: 74.5954724414016 - type: spearman value: 77.2688820850505 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: cosine_pearson value: 71.52588722654055 - type: cosine_spearman value: 74.97235736456061 - type: euclidean_pearson value: 74.51952528854038 - type: euclidean_spearman value: 74.97235736456061 - type: main_score value: 74.97235736456061 - type: manhattan_pearson value: 74.48272300884209 - type: manhattan_spearman value: 74.80633649415176 - type: pearson value: 71.52588722654055 - type: spearman value: 74.97235736456061 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 metrics: - type: cosine_pearson value: 68.80031120401976 - type: cosine_spearman value: 69.07945196478491 - type: euclidean_pearson value: 68.99674496430792 - type: euclidean_spearman value: 69.07945196478491 - type: main_score value: 69.07945196478491 - type: manhattan_pearson value: 69.00236107775687 - type: manhattan_spearman value: 68.98064879049272 - type: pearson value: 68.80031120401976 - type: spearman value: 69.07945196478491 - task: type: STS dataset: name: MTEB STSBenchmark (default) type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cosine_pearson value: 65.6898007230089 - type: cosine_spearman value: 69.72386211803668 - type: euclidean_pearson value: 69.04523003701475 - type: euclidean_spearman value: 69.72386211803668 - type: main_score value: 69.72386211803668 - type: manhattan_pearson value: 68.80479743770702 - type: manhattan_spearman value: 69.43264575177459 - type: pearson value: 65.6898007230089 - type: spearman value: 
69.72386211803668 - task: type: Reranking dataset: name: MTEB SciDocsRR (default) type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: main_score value: 79.74088066874383 - type: map value: 79.74088066874383 - type: mrr value: 94.47697455050397 - type: nAUC_map_diff1 value: 8.036086256905502 - type: nAUC_map_max value: 54.88199803816819 - type: nAUC_map_std value: 69.16267942176574 - type: nAUC_mrr_diff1 value: 50.020738477678115 - type: nAUC_mrr_max value: 83.28922770326483 - type: nAUC_mrr_std value: 83.63973501802224 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions (default) type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cosine_accuracy value: 99.83861386138614 - type: cosine_accuracy_threshold value: 74.75666999816895 - type: cosine_ap value: 96.15132792066652 - type: cosine_f1 value: 91.84890656063618 - type: cosine_f1_threshold value: 71.70594930648804 - type: cosine_precision value: 91.30434782608695 - type: cosine_recall value: 92.4 - type: dot_accuracy value: 99.83861386138614 - type: dot_accuracy_threshold value: 74.75666999816895 - type: dot_ap value: 96.15132792066653 - type: dot_f1 value: 91.84890656063618 - type: dot_f1_threshold value: 71.70596122741699 - type: dot_precision value: 91.30434782608695 - type: dot_recall value: 92.4 - type: euclidean_accuracy value: 99.83861386138614 - type: euclidean_accuracy_threshold value: 71.05395793914795 - type: euclidean_ap value: 96.15132792066652 - type: euclidean_f1 value: 91.84890656063618 - type: euclidean_f1_threshold value: 75.22505521774292 - type: euclidean_precision value: 91.30434782608695 - type: euclidean_recall value: 92.4 - type: main_score value: 96.15132792066653 - type: manhattan_accuracy value: 99.83564356435643 - type: manhattan_accuracy_threshold value: 1547.6950645446777 - type: 
manhattan_ap value: 96.06151211452136 - type: manhattan_f1 value: 91.61676646706587 - type: manhattan_f1_threshold value: 1626.3608932495117 - type: manhattan_precision value: 91.43426294820716 - type: manhattan_recall value: 91.8 - type: max_ap value: 96.15132792066653 - type: max_f1 value: 91.84890656063618 - type: max_precision value: 91.43426294820716 - type: max_recall value: 92.4 - type: similarity_accuracy value: 99.83861386138614 - type: similarity_accuracy_threshold value: 74.75666999816895 - type: similarity_ap value: 96.15132792066652 - type: similarity_f1 value: 91.84890656063618 - type: similarity_f1_threshold value: 71.70594930648804 - type: similarity_precision value: 91.30434782608695 - type: similarity_recall value: 92.4 - task: type: Clustering dataset: name: MTEB StackExchangeClustering (default) type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: main_score value: 61.24120328328453 - type: v_measure value: 61.24120328328453 - type: v_measure_std value: 3.9946560691100372 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P (default) type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: main_score value: 33.808268374864745 - type: v_measure value: 33.808268374864745 - type: v_measure_std value: 1.2212188701887239 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions (default) type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: main_score value: 52.19806018468037 - type: map value: 52.19806018468037 - type: mrr value: 52.98921462524404 - type: nAUC_map_diff1 value: 37.41443156995912 - type: nAUC_map_max value: 9.410262727675603 - type: nAUC_map_std value: 8.7094185014992 - type: nAUC_mrr_diff1 value: 37.78202772392581 - type: nAUC_mrr_max value: 10.517635536565816 
- type: nAUC_mrr_std value: 8.509423813772491 - task: type: Summarization dataset: name: MTEB SummEval (default) type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cosine_pearson value: 30.48413700430812 - type: cosine_spearman value: 30.357162200875816 - type: dot_pearson value: 30.484140144824938 - type: dot_spearman value: 30.357162200875816 - type: main_score value: 30.357162200875816 - type: pearson value: 30.48413700430812 - type: spearman value: 30.357162200875816 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification (default) type: mteb/toxic_conversations_50k config: default split: test revision: edfaf9da55d3dd50d43143d90c1ac476895ae6de metrics: - type: accuracy value: 66.8359375 - type: ap value: 12.482653786025985 - type: ap_weighted value: 12.482653786025985 - type: f1 value: 51.328608527332385 - type: f1_weighted value: 74.07974463955398 - type: main_score value: 66.8359375 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification (default) type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 53.907753254103 - type: f1 value: 54.22707647269581 - type: f1_weighted value: 53.611822984407695 - type: main_score value: 53.907753254103 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering (default) type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: main_score value: 38.1364789307295 - type: v_measure value: 38.1364789307295 - type: v_measure_std value: 2.0731634966352077 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 (default) type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cosine_accuracy value: 82.66674614054956 - type: 
cosine_accuracy_threshold value: 79.80123162269592 - type: cosine_ap value: 63.28209719072804 - type: cosine_f1 value: 60.16389710903711 - type: cosine_f1_threshold value: 72.22893834114075 - type: cosine_precision value: 52.90232185748599 - type: cosine_recall value: 69.73614775725594 - type: dot_accuracy value: 82.66674614054956 - type: dot_accuracy_threshold value: 79.8012375831604 - type: dot_ap value: 63.282103870645166 - type: dot_f1 value: 60.16389710903711 - type: dot_f1_threshold value: 72.22894430160522 - type: dot_precision value: 52.90232185748599 - type: dot_recall value: 69.73614775725594 - type: euclidean_accuracy value: 82.66674614054956 - type: euclidean_accuracy_threshold value: 63.55905532836914 - type: euclidean_ap value: 63.282095399953164 - type: euclidean_f1 value: 60.16389710903711 - type: euclidean_f1_threshold value: 74.5265781879425 - type: euclidean_precision value: 52.90232185748599 - type: euclidean_recall value: 69.73614775725594 - type: main_score value: 63.282103870645166 - type: manhattan_accuracy value: 82.74423317637242 - type: manhattan_accuracy_threshold value: 1415.380859375 - type: manhattan_ap value: 63.26931757839598 - type: manhattan_f1 value: 60.11014948859166 - type: manhattan_f1_threshold value: 1632.522201538086 - type: manhattan_precision value: 52.359506559624045 - type: manhattan_recall value: 70.55408970976254 - type: max_ap value: 63.282103870645166 - type: max_f1 value: 60.16389710903711 - type: max_precision value: 52.90232185748599 - type: max_recall value: 70.55408970976254 - type: similarity_accuracy value: 82.66674614054956 - type: similarity_accuracy_threshold value: 79.80123162269592 - type: similarity_ap value: 63.28209719072804 - type: similarity_f1 value: 60.16389710903711 - type: similarity_f1_threshold value: 72.22893834114075 - type: similarity_precision value: 52.90232185748599 - type: similarity_recall value: 69.73614775725594 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus 
(default) type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cosine_accuracy value: 88.10105949470253 - type: cosine_accuracy_threshold value: 68.95147562026978 - type: cosine_ap value: 84.65516103854583 - type: cosine_f1 value: 76.54581123301605 - type: cosine_f1_threshold value: 63.92929553985596 - type: cosine_precision value: 72.46526344751685 - type: cosine_recall value: 81.11333538651063 - type: dot_accuracy value: 88.10105949470253 - type: dot_accuracy_threshold value: 68.95147562026978 - type: dot_ap value: 84.65516301437592 - type: dot_f1 value: 76.54581123301605 - type: dot_f1_threshold value: 63.92928957939148 - type: dot_precision value: 72.46526344751685 - type: dot_recall value: 81.11333538651063 - type: euclidean_accuracy value: 88.10105949470253 - type: euclidean_accuracy_threshold value: 78.80169153213501 - type: euclidean_ap value: 84.65517268264233 - type: euclidean_f1 value: 76.54581123301605 - type: euclidean_f1_threshold value: 84.93610620498657 - type: euclidean_precision value: 72.46526344751685 - type: euclidean_recall value: 81.11333538651063 - type: main_score value: 84.65517268264233 - type: manhattan_accuracy value: 88.08941669577366 - type: manhattan_accuracy_threshold value: 1739.3169403076172 - type: manhattan_ap value: 84.64592398855694 - type: manhattan_f1 value: 76.62890540443034 - type: manhattan_f1_threshold value: 1861.344337463379 - type: manhattan_precision value: 72.09775967413442 - type: manhattan_recall value: 81.76778564829073 - type: max_ap value: 84.65517268264233 - type: max_f1 value: 76.62890540443034 - type: max_precision value: 72.46526344751685 - type: max_recall value: 81.76778564829073 - type: similarity_accuracy value: 88.10105949470253 - type: similarity_accuracy_threshold value: 68.95147562026978 - type: similarity_ap value: 84.65516103854583 - type: similarity_f1 value: 76.54581123301605 - type: similarity_f1_threshold 
value: 63.92929553985596 - type: similarity_precision value: 72.46526344751685 - type: similarity_recall value: 81.11333538651063 --- # yishan-wang/snowflake-arctic-embed-m-v1.5-Q8_0-GGUF This model was converted to GGUF format from [`Snowflake/snowflake-arctic-embed-m-v1.5`](https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/Snowflake/snowflake-arctic-embed-m-v1.5) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo yishan-wang/snowflake-arctic-embed-m-v1.5-Q8_0-GGUF --hf-file snowflake-arctic-embed-m-v1.5-q8_0.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo yishan-wang/snowflake-arctic-embed-m-v1.5-Q8_0-GGUF --hf-file snowflake-arctic-embed-m-v1.5-q8_0.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo yishan-wang/snowflake-arctic-embed-m-v1.5-Q8_0-GGUF --hf-file snowflake-arctic-embed-m-v1.5-q8_0.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo yishan-wang/snowflake-arctic-embed-m-v1.5-Q8_0-GGUF --hf-file snowflake-arctic-embed-m-v1.5-q8_0.gguf -c 2048 ```
[ "BIOSSES", "SCIFACT" ]
JunxiongWang/Llama3.1-Mamba-8B-distill
JunxiongWang
null
[ "pytorch", "llama", "arxiv:2408.15237", "license:apache-2.0", "region:us" ]
2024-11-15T22:15:25Z
2024-11-17T03:57:31+00:00
19
0
--- license: apache-2.0 --- Zero-shot results when using the [Llama-3.1-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct) as the teacher model, and the [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-8B-Instruct) as the initialized model | Task | Llama-3.1-8B-Instruct | Llama3.1-Mamba-8B-distill | Llama3.1-Mamba-8B-dpo | Llama3.1-Mamba2-8B-distill | Llama3.1-Mamba2-8B-dpo | |---------------------|-----------------------|--------------------------|-----------------------|---------------------------|-----------------------| | arc_challenge | 0.552 | 0.5384 | 0.5657 | 0.5265 | 0.5973 | | arc_easy | 0.8178 | 0.8224 | 0.8401 | 0.822 | 0.8481 | | hellaswag | 0.7921 | 0.7591 | 0.7736 | 0.7536 | 0.7969 | | mmlu (0 shot) | 0.6812 | 0.6213 | 0.636 | 0.6101 | 0.5974 | | openbookqa | 0.432 | 0.428 | 0.442 | 0.416 | 0.44 | | piqa | 0.8079 | 0.7933 | 0.8041 | 0.7889 | 0.8003 | | pubmedqa | 0.752 | 0.72 | 0.744 | 0.726 | 0.746 | | race | 0.4478 | 0.4211 | 0.4344 | 0.4211 | 0.4612 | | winogrande | 0.7388 | 0.7277 | 0.738 | 0.7174 | 0.7411 | | truthful | 0.4267 | 0.4002 | 0.4607 | 0.4031 | 0.5022 | ``` @article{junxiongdaniele2024mambainllama, title = {The Mamba in the Llama: Distilling and Accelerating Hybrid Models}, author = {Junxiong Wang and Daniele Paliotta and Avner May and Alexander M. Rush and Tri Dao}, journal = {arXiv preprint arXiv:2408.15237}, year = {2024} } ```
[ "PUBMEDQA" ]
liuwenhan/RankMistral100
liuwenhan
null
[ "safetensors", "mistral", "en", "arxiv:2412.14574", "base_model:mistralai/Mistral-7B-Instruct-v0.3", "base_model:finetune:mistralai/Mistral-7B-Instruct-v0.3", "license:mit", "region:us" ]
2024-12-18T12:36:11Z
2024-12-20T02:35:29+00:00
19
2
--- base_model: - mistralai/Mistral-7B-Instruct-v0.3 language: - en license: mit --- ## Model Information We release the full ranking model RankMistral100 distilled from GPT-4o-2024-08-06 used in **Sliding Windows Are Not the End: Exploring Full Ranking with Long-Context Large Language Models**. <p align="left"> Useful links: 📝 <a href="https://arxiv.org/abs/2412.14574" target="_blank">Paper</a> • 🤗 <a href="https://huggingface.co/datasets/liuwenhan/msmarco_full_ranking_list" target="_blank">Dataset</a> • 🧩 <a href="https://github.com/8421BCD/fullrank" target="_blank">Github</a> </p> ## Training framework Our full ranking model aims to directly rerank 100 passages at a time, abandoning the sliding window strategy. We propose a multi-pass sliding window approach for generating the full ranking list as the label and design an importance-aware training loss for optimization. <img src="https://8421bcd.oss-cn-beijing.aliyuncs.com/img/image-20241218200920116.png" alt="image-20241218200920116" style="zoom: 45%;" /> ## Backbone Model RankMistral100 is finetuned from https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.3. ## Performance We surpass the strong baseline RankZephyr by 1.2 points on BEIR Avg. | Models | Covid | DBPedia | SciFact | NFCorpus | Signal | Robust04 | Touche | News | Avg. 
| | ------------------------- | ----- | ------- | ------- | -------- | ------ | -------- | ------ | ----- | --------- | | BM25 | 59.47 | 31.80 | 67.89 | 33.75 | 33.04 | 40.70 | 44.22 | 39.52 | 43.80 | | monoBERT (340M) | 73.45 | 41.69 | 62.22 | 34.92 | 30.63 | 44.21 | 30.26 | 47.03 | 45.55 | | monoT5 (220M) | 75.94 | 42.43 | 65.07 | 35.42 | 31.20 | 44.15 | 30.35 | 46.98 | 46.44 | | RankVicuna (7B) | 79.19 | 44.51 | 70.67 | 34.51 | 34.24 | 48.33 | 33.00 | 47.15 | 48.95 | | RankZephyr (7B) | 82.92 | 44.42 | 75.42 | 38.26 | 31.41 | 53.73 | 30.22 | 52.80 | 51.15 | | RankMistral<sub>100</sub> (7B) | 82.24 | 43.54 | 77.04 | 39.14 | 33.99 | 57.91 | 34.63 | 50.59 | **52.40** | 🌹 If you use this model, please ✨star our <a href="https://github.com/8421BCD/fullrank" target="_blank">GitHub repository</a> to support us. Your star means a lot!
[ "SCIFACT" ]
yoeven/multilingual-e5-large-instruct-Q5_K_M-GGUF
yoeven
null
[ "sentence-transformers", "gguf", "mteb", "transformers", "llama-cpp", "gguf-my-repo", "multilingual", "af", "am", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "hu", "hy", "id", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ku", "ky", "la", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "om", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sa", "sd", "si", "sk", "sl", "so", "sq", "sr", "su", "sv", "sw", "ta", "te", "th", "tl", "tr", "ug", "uk", "ur", "uz", "vi", "xh", "yi", "zh", "base_model:intfloat/multilingual-e5-large-instruct", "base_model:quantized:intfloat/multilingual-e5-large-instruct", "license:mit", "model-index", "endpoints_compatible", "region:us", "feature-extraction" ]
2025-01-06T13:56:29Z
2025-01-06T13:56:36+00:00
19
1
--- base_model: intfloat/multilingual-e5-large-instruct language: - multilingual - af - am - ar - as - az - be - bg - bn - br - bs - ca - cs - cy - da - de - el - en - eo - es - et - eu - fa - fi - fr - fy - ga - gd - gl - gu - ha - he - hi - hr - hu - hy - id - is - it - ja - jv - ka - kk - km - kn - ko - ku - ky - la - lo - lt - lv - mg - mk - ml - mn - mr - ms - my - ne - nl - 'no' - om - or - pa - pl - ps - pt - ro - ru - sa - sd - si - sk - sl - so - sq - sr - su - sv - sw - ta - te - th - tl - tr - ug - uk - ur - uz - vi - xh - yi - zh license: mit tags: - mteb - sentence-transformers - transformers - llama-cpp - gguf-my-repo model-index: - name: multilingual-e5-large-instruct results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 76.23880597014924 - type: ap value: 39.07351965022687 - type: f1 value: 70.04836733862683 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (de) type: mteb/amazon_counterfactual config: de split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 66.71306209850107 - type: ap value: 79.01499914759529 - type: f1 value: 64.81951817560703 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en-ext) type: mteb/amazon_counterfactual config: en-ext split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 73.85307346326837 - type: ap value: 22.447519885878737 - type: f1 value: 61.0162730745633 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (ja) type: mteb/amazon_counterfactual config: ja split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 76.04925053533191 - type: ap value: 23.44983217128922 - type: f1 value: 62.5723230907759 - task: type: 
Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 96.28742500000001 - type: ap value: 94.8449918887462 - type: f1 value: 96.28680923610432 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 56.716 - type: f1 value: 55.76510398266401 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (de) type: mteb/amazon_reviews_multi config: de split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 52.99999999999999 - type: f1 value: 52.00829994765178 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (es) type: mteb/amazon_reviews_multi config: es split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 48.806000000000004 - type: f1 value: 48.082345914983634 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (fr) type: mteb/amazon_reviews_multi config: fr split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 48.507999999999996 - type: f1 value: 47.68752844642045 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (ja) type: mteb/amazon_reviews_multi config: ja split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 47.709999999999994 - type: f1 value: 47.05870376637181 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (zh) type: mteb/amazon_reviews_multi config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 44.662000000000006 - type: f1 value: 43.42371965372771 - task: type: Retrieval dataset: name: MTEB ArguAna type: 
arguana config: default split: test revision: None metrics: - type: map_at_1 value: 31.721 - type: map_at_10 value: 49.221 - type: map_at_100 value: 49.884 - type: map_at_1000 value: 49.888 - type: map_at_3 value: 44.31 - type: map_at_5 value: 47.276 - type: mrr_at_1 value: 32.432 - type: mrr_at_10 value: 49.5 - type: mrr_at_100 value: 50.163000000000004 - type: mrr_at_1000 value: 50.166 - type: mrr_at_3 value: 44.618 - type: mrr_at_5 value: 47.541 - type: ndcg_at_1 value: 31.721 - type: ndcg_at_10 value: 58.384 - type: ndcg_at_100 value: 61.111000000000004 - type: ndcg_at_1000 value: 61.187999999999995 - type: ndcg_at_3 value: 48.386 - type: ndcg_at_5 value: 53.708999999999996 - type: precision_at_1 value: 31.721 - type: precision_at_10 value: 8.741 - type: precision_at_100 value: 0.991 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 20.057 - type: precision_at_5 value: 14.609 - type: recall_at_1 value: 31.721 - type: recall_at_10 value: 87.411 - type: recall_at_100 value: 99.075 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 60.171 - type: recall_at_5 value: 73.044 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 46.40419580759799 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 40.48593255007969 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 63.889179122289995 - type: mrr value: 77.61146286769556 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a 
metrics: - type: cos_sim_pearson value: 88.15075203727929 - type: cos_sim_spearman value: 86.9622224570873 - type: euclidean_pearson value: 86.70473853624121 - type: euclidean_spearman value: 86.9622224570873 - type: manhattan_pearson value: 86.21089380980065 - type: manhattan_spearman value: 86.75318154937008 - task: type: BitextMining dataset: name: MTEB BUCC (de-en) type: mteb/bucc-bitext-mining config: de-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 99.65553235908142 - type: f1 value: 99.60681976339595 - type: precision value: 99.58246346555325 - type: recall value: 99.65553235908142 - task: type: BitextMining dataset: name: MTEB BUCC (fr-en) type: mteb/bucc-bitext-mining config: fr-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 99.26260180497468 - type: f1 value: 99.14520507740848 - type: precision value: 99.08650671362535 - type: recall value: 99.26260180497468 - task: type: BitextMining dataset: name: MTEB BUCC (ru-en) type: mteb/bucc-bitext-mining config: ru-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 98.07412538967787 - type: f1 value: 97.86629719431936 - type: precision value: 97.76238309664012 - type: recall value: 98.07412538967787 - task: type: BitextMining dataset: name: MTEB BUCC (zh-en) type: mteb/bucc-bitext-mining config: zh-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 99.42074776197998 - type: f1 value: 99.38564156573635 - type: precision value: 99.36808846761454 - type: recall value: 99.42074776197998 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 85.73376623376623 - type: f1 value: 85.68480707214599 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: 
mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 40.935218072113855 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 36.276389017675264 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 27.764166666666668 - type: map_at_10 value: 37.298166666666674 - type: map_at_100 value: 38.530166666666666 - type: map_at_1000 value: 38.64416666666667 - type: map_at_3 value: 34.484833333333334 - type: map_at_5 value: 36.0385 - type: mrr_at_1 value: 32.93558333333333 - type: mrr_at_10 value: 41.589749999999995 - type: mrr_at_100 value: 42.425333333333334 - type: mrr_at_1000 value: 42.476333333333336 - type: mrr_at_3 value: 39.26825 - type: mrr_at_5 value: 40.567083333333336 - type: ndcg_at_1 value: 32.93558333333333 - type: ndcg_at_10 value: 42.706583333333334 - type: ndcg_at_100 value: 47.82483333333333 - type: ndcg_at_1000 value: 49.95733333333334 - type: ndcg_at_3 value: 38.064750000000004 - type: ndcg_at_5 value: 40.18158333333333 - type: precision_at_1 value: 32.93558333333333 - type: precision_at_10 value: 7.459833333333334 - type: precision_at_100 value: 1.1830833333333335 - type: precision_at_1000 value: 0.15608333333333332 - type: precision_at_3 value: 17.5235 - type: precision_at_5 value: 12.349833333333333 - type: recall_at_1 value: 27.764166666666668 - type: recall_at_10 value: 54.31775 - type: recall_at_100 value: 76.74350000000001 - type: recall_at_1000 value: 91.45208333333332 - type: recall_at_3 value: 41.23425 - type: recall_at_5 value: 46.73983333333334 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 
value: 12.969 - type: map_at_10 value: 21.584999999999997 - type: map_at_100 value: 23.3 - type: map_at_1000 value: 23.5 - type: map_at_3 value: 18.218999999999998 - type: map_at_5 value: 19.983 - type: mrr_at_1 value: 29.316 - type: mrr_at_10 value: 40.033 - type: mrr_at_100 value: 40.96 - type: mrr_at_1000 value: 41.001 - type: mrr_at_3 value: 37.123 - type: mrr_at_5 value: 38.757999999999996 - type: ndcg_at_1 value: 29.316 - type: ndcg_at_10 value: 29.858 - type: ndcg_at_100 value: 36.756 - type: ndcg_at_1000 value: 40.245999999999995 - type: ndcg_at_3 value: 24.822 - type: ndcg_at_5 value: 26.565 - type: precision_at_1 value: 29.316 - type: precision_at_10 value: 9.186 - type: precision_at_100 value: 1.6549999999999998 - type: precision_at_1000 value: 0.22999999999999998 - type: precision_at_3 value: 18.436 - type: precision_at_5 value: 13.876 - type: recall_at_1 value: 12.969 - type: recall_at_10 value: 35.142 - type: recall_at_100 value: 59.143 - type: recall_at_1000 value: 78.594 - type: recall_at_3 value: 22.604 - type: recall_at_5 value: 27.883000000000003 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 8.527999999999999 - type: map_at_10 value: 17.974999999999998 - type: map_at_100 value: 25.665 - type: map_at_1000 value: 27.406000000000002 - type: map_at_3 value: 13.017999999999999 - type: map_at_5 value: 15.137 - type: mrr_at_1 value: 62.5 - type: mrr_at_10 value: 71.891 - type: mrr_at_100 value: 72.294 - type: mrr_at_1000 value: 72.296 - type: mrr_at_3 value: 69.958 - type: mrr_at_5 value: 71.121 - type: ndcg_at_1 value: 50.875 - type: ndcg_at_10 value: 38.36 - type: ndcg_at_100 value: 44.235 - type: ndcg_at_1000 value: 52.154 - type: ndcg_at_3 value: 43.008 - type: ndcg_at_5 value: 40.083999999999996 - type: precision_at_1 value: 62.5 - type: precision_at_10 value: 30.0 - type: precision_at_100 value: 10.038 - type: precision_at_1000 value: 
2.0869999999999997 - type: precision_at_3 value: 46.833000000000006 - type: precision_at_5 value: 38.800000000000004 - type: recall_at_1 value: 8.527999999999999 - type: recall_at_10 value: 23.828 - type: recall_at_100 value: 52.322 - type: recall_at_1000 value: 77.143 - type: recall_at_3 value: 14.136000000000001 - type: recall_at_5 value: 17.761 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 51.51 - type: f1 value: 47.632159862049896 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 60.734 - type: map_at_10 value: 72.442 - type: map_at_100 value: 72.735 - type: map_at_1000 value: 72.75 - type: map_at_3 value: 70.41199999999999 - type: map_at_5 value: 71.80499999999999 - type: mrr_at_1 value: 65.212 - type: mrr_at_10 value: 76.613 - type: mrr_at_100 value: 76.79899999999999 - type: mrr_at_1000 value: 76.801 - type: mrr_at_3 value: 74.8 - type: mrr_at_5 value: 76.12400000000001 - type: ndcg_at_1 value: 65.212 - type: ndcg_at_10 value: 77.988 - type: ndcg_at_100 value: 79.167 - type: ndcg_at_1000 value: 79.452 - type: ndcg_at_3 value: 74.362 - type: ndcg_at_5 value: 76.666 - type: precision_at_1 value: 65.212 - type: precision_at_10 value: 10.003 - type: precision_at_100 value: 1.077 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 29.518 - type: precision_at_5 value: 19.016 - type: recall_at_1 value: 60.734 - type: recall_at_10 value: 90.824 - type: recall_at_100 value: 95.71600000000001 - type: recall_at_1000 value: 97.577 - type: recall_at_3 value: 81.243 - type: recall_at_5 value: 86.90299999999999 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 23.845 - type: map_at_10 value: 39.281 - type: 
map_at_100 value: 41.422 - type: map_at_1000 value: 41.593 - type: map_at_3 value: 34.467 - type: map_at_5 value: 37.017 - type: mrr_at_1 value: 47.531 - type: mrr_at_10 value: 56.204 - type: mrr_at_100 value: 56.928999999999995 - type: mrr_at_1000 value: 56.962999999999994 - type: mrr_at_3 value: 54.115 - type: mrr_at_5 value: 55.373000000000005 - type: ndcg_at_1 value: 47.531 - type: ndcg_at_10 value: 47.711999999999996 - type: ndcg_at_100 value: 54.510999999999996 - type: ndcg_at_1000 value: 57.103 - type: ndcg_at_3 value: 44.145 - type: ndcg_at_5 value: 45.032 - type: precision_at_1 value: 47.531 - type: precision_at_10 value: 13.194 - type: precision_at_100 value: 2.045 - type: precision_at_1000 value: 0.249 - type: precision_at_3 value: 29.424 - type: precision_at_5 value: 21.451 - type: recall_at_1 value: 23.845 - type: recall_at_10 value: 54.967 - type: recall_at_100 value: 79.11399999999999 - type: recall_at_1000 value: 94.56700000000001 - type: recall_at_3 value: 40.256 - type: recall_at_5 value: 46.215 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 37.819 - type: map_at_10 value: 60.889 - type: map_at_100 value: 61.717999999999996 - type: map_at_1000 value: 61.778 - type: map_at_3 value: 57.254000000000005 - type: map_at_5 value: 59.541 - type: mrr_at_1 value: 75.638 - type: mrr_at_10 value: 82.173 - type: mrr_at_100 value: 82.362 - type: mrr_at_1000 value: 82.37 - type: mrr_at_3 value: 81.089 - type: mrr_at_5 value: 81.827 - type: ndcg_at_1 value: 75.638 - type: ndcg_at_10 value: 69.317 - type: ndcg_at_100 value: 72.221 - type: ndcg_at_1000 value: 73.382 - type: ndcg_at_3 value: 64.14 - type: ndcg_at_5 value: 67.07600000000001 - type: precision_at_1 value: 75.638 - type: precision_at_10 value: 14.704999999999998 - type: precision_at_100 value: 1.698 - type: precision_at_1000 value: 0.185 - type: precision_at_3 value: 41.394999999999996 - type: precision_at_5 
value: 27.162999999999997 - type: recall_at_1 value: 37.819 - type: recall_at_10 value: 73.52499999999999 - type: recall_at_100 value: 84.875 - type: recall_at_1000 value: 92.559 - type: recall_at_3 value: 62.092999999999996 - type: recall_at_5 value: 67.907 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 94.60079999999999 - type: ap value: 92.67396345347356 - type: f1 value: 94.5988098167121 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 21.285 - type: map_at_10 value: 33.436 - type: map_at_100 value: 34.63 - type: map_at_1000 value: 34.681 - type: map_at_3 value: 29.412 - type: map_at_5 value: 31.715 - type: mrr_at_1 value: 21.848 - type: mrr_at_10 value: 33.979 - type: mrr_at_100 value: 35.118 - type: mrr_at_1000 value: 35.162 - type: mrr_at_3 value: 30.036 - type: mrr_at_5 value: 32.298 - type: ndcg_at_1 value: 21.862000000000002 - type: ndcg_at_10 value: 40.43 - type: ndcg_at_100 value: 46.17 - type: ndcg_at_1000 value: 47.412 - type: ndcg_at_3 value: 32.221 - type: ndcg_at_5 value: 36.332 - type: precision_at_1 value: 21.862000000000002 - type: precision_at_10 value: 6.491 - type: precision_at_100 value: 0.935 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 13.744 - type: precision_at_5 value: 10.331999999999999 - type: recall_at_1 value: 21.285 - type: recall_at_10 value: 62.083 - type: recall_at_100 value: 88.576 - type: recall_at_1000 value: 98.006 - type: recall_at_3 value: 39.729 - type: recall_at_5 value: 49.608000000000004 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.92612859097127 - type: f1 value: 93.82370333372853 - task: type: 
Classification dataset: name: MTEB MTOPDomainClassification (de) type: mteb/mtop_domain config: de split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 92.67681036911807 - type: f1 value: 92.14191382411472 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (es) type: mteb/mtop_domain config: es split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 92.26817878585723 - type: f1 value: 91.92824250337878 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (fr) type: mteb/mtop_domain config: fr split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 89.96554963983714 - type: f1 value: 90.02859329630792 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (hi) type: mteb/mtop_domain config: hi split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 90.02509860164935 - type: f1 value: 89.30665159182062 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (th) type: mteb/mtop_domain config: th split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 87.55515370705244 - type: f1 value: 87.94449232331907 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 82.4623803009576 - type: f1 value: 66.06738378772725 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (de) type: mteb/mtop_intent config: de split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 79.3716539870386 - type: f1 value: 60.37614033396853 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (es) type: mteb/mtop_intent config: es split: test revision: 
ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 80.34022681787857 - type: f1 value: 58.302008026952 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (fr) type: mteb/mtop_intent config: fr split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 76.72095208268087 - type: f1 value: 59.64524724009049 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (hi) type: mteb/mtop_intent config: hi split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 77.87020437432773 - type: f1 value: 57.80202694670567 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (th) type: mteb/mtop_intent config: th split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 77.73598553345387 - type: f1 value: 58.19628250675031 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (af) type: mteb/amazon_massive_intent config: af split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.6630800268998 - type: f1 value: 65.00996668051691 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (am) type: mteb/amazon_massive_intent config: am split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.7128446536651 - type: f1 value: 57.95860594874963 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ar) type: mteb/amazon_massive_intent config: ar split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.61129791526563 - type: f1 value: 59.75328290206483 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (az) type: mteb/amazon_massive_intent config: az split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.00134498991257 - type: f1 value: 
67.0230483991802 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (bn) type: mteb/amazon_massive_intent config: bn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.54068594485541 - type: f1 value: 65.54604628946976 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (cy) type: mteb/amazon_massive_intent config: cy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.032952252858095 - type: f1 value: 58.715741857057104 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (da) type: mteb/amazon_massive_intent config: da split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 71.80901143241427 - type: f1 value: 68.33963989243877 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (de) type: mteb/amazon_massive_intent config: de split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.47141896435777 - type: f1 value: 69.56765020308262 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (el) type: mteb/amazon_massive_intent config: el split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 71.2373907195696 - type: f1 value: 69.04529836036467 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 77.05783456624076 - type: f1 value: 74.69430584708174 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (es) type: mteb/amazon_massive_intent config: es split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.82111634162744 - type: f1 value: 70.77228952803762 - task: type: Classification dataset: name: MTEB 
MassiveIntentClassification (fa) type: mteb/amazon_massive_intent config: fa split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 74.25353059852051 - type: f1 value: 71.05310103416411 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fi) type: mteb/amazon_massive_intent config: fi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.28648285137861 - type: f1 value: 69.08020473732226 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fr) type: mteb/amazon_massive_intent config: fr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 73.31540013449899 - type: f1 value: 70.9426355465791 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (he) type: mteb/amazon_massive_intent config: he split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 70.2151983860121 - type: f1 value: 67.52541755908858 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hi) type: mteb/amazon_massive_intent config: hi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 71.58372562205784 - type: f1 value: 69.49769064229827 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hu) type: mteb/amazon_massive_intent config: hu split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 71.9233355749832 - type: f1 value: 69.36311548259593 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hy) type: mteb/amazon_massive_intent config: hy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.07330195023538 - type: f1 value: 64.99882022345572 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (id) type: mteb/amazon_massive_intent 
config: id split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.62273032952253 - type: f1 value: 70.6394885471001 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (is) type: mteb/amazon_massive_intent config: is split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.77000672494957 - type: f1 value: 62.9368944815065 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (it) type: mteb/amazon_massive_intent config: it split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 73.453261600538 - type: f1 value: 70.85069934666681 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ja) type: mteb/amazon_massive_intent config: ja split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 74.6906523201076 - type: f1 value: 72.03249740074217 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (jv) type: mteb/amazon_massive_intent config: jv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.03631472763953 - type: f1 value: 59.3165215571852 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ka) type: mteb/amazon_massive_intent config: ka split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.913920645595155 - type: f1 value: 57.367337711611285 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (km) type: mteb/amazon_massive_intent config: km split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.42837928715535 - type: f1 value: 52.60527294970906 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (kn) type: mteb/amazon_massive_intent config: kn split: test revision: 
31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.33490248823135 - type: f1 value: 63.213340969404065 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ko) type: mteb/amazon_massive_intent config: ko split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 70.58507061197041 - type: f1 value: 68.40256628040486 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (lv) type: mteb/amazon_massive_intent config: lv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.11230665770006 - type: f1 value: 66.44863577842305 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ml) type: mteb/amazon_massive_intent config: ml split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.70073974445192 - type: f1 value: 67.21291337273702 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (mn) type: mteb/amazon_massive_intent config: mn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.43913920645595 - type: f1 value: 64.09838087422806 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ms) type: mteb/amazon_massive_intent config: ms split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 70.80026899798251 - type: f1 value: 68.76986742962444 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (my) type: mteb/amazon_massive_intent config: my split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.78816408876934 - type: f1 value: 62.18781873428972 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (nb) type: mteb/amazon_massive_intent config: nb split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy 
value: 71.6577000672495 - type: f1 value: 68.75171511133003 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (nl) type: mteb/amazon_massive_intent config: nl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 74.42501681237391 - type: f1 value: 71.18434963451544 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pl) type: mteb/amazon_massive_intent config: pl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 73.64828513786146 - type: f1 value: 70.67741914007422 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pt) type: mteb/amazon_massive_intent config: pt split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 73.62811028917284 - type: f1 value: 71.36402039740959 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ro) type: mteb/amazon_massive_intent config: ro split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 71.88634835238736 - type: f1 value: 69.23701923480677 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ru) type: mteb/amazon_massive_intent config: ru split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 74.15938130464022 - type: f1 value: 71.87792218993388 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sl) type: mteb/amazon_massive_intent config: sl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.96301277740416 - type: f1 value: 67.29584200202983 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sq) type: mteb/amazon_massive_intent config: sq split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.49562878278412 - type: f1 value: 66.91716685679431 - task: 
type: Classification dataset: name: MTEB MassiveIntentClassification (sv) type: mteb/amazon_massive_intent config: sv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 74.6805648957633 - type: f1 value: 72.02723592594374 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sw) type: mteb/amazon_massive_intent config: sw split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.00605245460659 - type: f1 value: 60.16716669482932 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ta) type: mteb/amazon_massive_intent config: ta split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.90988567585742 - type: f1 value: 63.99405488777784 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (te) type: mteb/amazon_massive_intent config: te split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.62273032952253 - type: f1 value: 65.17213906909481 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (th) type: mteb/amazon_massive_intent config: th split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.50907868190988 - type: f1 value: 69.15165697194853 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (tl) type: mteb/amazon_massive_intent config: tl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.30733019502352 - type: f1 value: 66.69024007380474 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (tr) type: mteb/amazon_massive_intent config: tr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.24277067921989 - type: f1 value: 68.80515408492947 - task: type: Classification dataset: name: MTEB MassiveIntentClassification 
(ur) type: mteb/amazon_massive_intent config: ur split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.49831876260929 - type: f1 value: 64.83778567111116 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (vi) type: mteb/amazon_massive_intent config: vi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 71.28782784129119 - type: f1 value: 69.3294186700733 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-CN) type: mteb/amazon_massive_intent config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 73.315400134499 - type: f1 value: 71.22674385243207 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-TW) type: mteb/amazon_massive_intent config: zh-TW split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.37794216543377 - type: f1 value: 68.96962492838232 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (af) type: mteb/amazon_massive_scenario config: af split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 73.33557498318764 - type: f1 value: 72.28949738478356 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (am) type: mteb/amazon_massive_scenario config: am split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.84398117014123 - type: f1 value: 64.71026362091463 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ar) type: mteb/amazon_massive_scenario config: ar split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.76462676529925 - type: f1 value: 69.8229667407667 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (az) type: mteb/amazon_massive_scenario 
config: az split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.02420981842636 - type: f1 value: 71.76576384895898 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (bn) type: mteb/amazon_massive_scenario config: bn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.7572293207801 - type: f1 value: 72.76840765295256 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (cy) type: mteb/amazon_massive_scenario config: cy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.02286482851379 - type: f1 value: 66.17237947327872 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (da) type: mteb/amazon_massive_scenario config: da split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.60928043039678 - type: f1 value: 77.27094731234773 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (de) type: mteb/amazon_massive_scenario config: de split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.68325487558843 - type: f1 value: 77.97530399082261 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (el) type: mteb/amazon_massive_scenario config: el split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 76.13315400134498 - type: f1 value: 75.97558584796424 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 80.47410894418292 - type: f1 value: 80.52244841473792 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (es) type: mteb/amazon_massive_scenario config: es split: test revision: 
7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 76.9670477471419 - type: f1 value: 77.37318805793146 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fa) type: mteb/amazon_massive_scenario config: fa split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 78.09683927370544 - type: f1 value: 77.69773737430847 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fi) type: mteb/amazon_massive_scenario config: fi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 75.20847343644922 - type: f1 value: 75.17071738727348 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fr) type: mteb/amazon_massive_scenario config: fr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.07464694014796 - type: f1 value: 77.16136207698571 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (he) type: mteb/amazon_massive_scenario config: he split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 73.53396099529255 - type: f1 value: 73.58296404484122 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hi) type: mteb/amazon_massive_scenario config: hi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 75.75319435104237 - type: f1 value: 75.24674707850833 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hu) type: mteb/amazon_massive_scenario config: hu split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.0948217888366 - type: f1 value: 76.47559490205028 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hy) type: mteb/amazon_massive_scenario config: hy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 
metrics: - type: accuracy value: 71.07599193006052 - type: f1 value: 70.76028043093511 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (id) type: mteb/amazon_massive_scenario config: id split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.10490921318089 - type: f1 value: 77.01215275283272 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (is) type: mteb/amazon_massive_scenario config: is split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.25756556825824 - type: f1 value: 70.20605314648762 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (it) type: mteb/amazon_massive_scenario config: it split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.08137188971082 - type: f1 value: 77.3899269057439 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ja) type: mteb/amazon_massive_scenario config: ja split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 79.35440484196369 - type: f1 value: 79.58964690002772 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (jv) type: mteb/amazon_massive_scenario config: jv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.42299932750504 - type: f1 value: 68.07844356925413 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ka) type: mteb/amazon_massive_scenario config: ka split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.15669132481507 - type: f1 value: 65.89383352608513 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (km) type: mteb/amazon_massive_scenario config: km split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 
60.11432414256894 - type: f1 value: 57.69910594559806 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (kn) type: mteb/amazon_massive_scenario config: kn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.24747814391392 - type: f1 value: 70.42455553830918 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ko) type: mteb/amazon_massive_scenario config: ko split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 76.46267652992603 - type: f1 value: 76.8854559308316 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (lv) type: mteb/amazon_massive_scenario config: lv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 73.24815063887021 - type: f1 value: 72.77805034658074 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ml) type: mteb/amazon_massive_scenario config: ml split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.11566913248151 - type: f1 value: 73.86147988001356 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (mn) type: mteb/amazon_massive_scenario config: mn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.0168123739072 - type: f1 value: 69.38515920054571 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ms) type: mteb/amazon_massive_scenario config: ms split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.41156691324814 - type: f1 value: 73.43474953408237 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (my) type: mteb/amazon_massive_scenario config: my split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.39609952925353 - type: f1 value: 
67.29731681109291 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (nb) type: mteb/amazon_massive_scenario config: nb split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.20914593140552 - type: f1 value: 77.07066497935367 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (nl) type: mteb/amazon_massive_scenario config: nl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 78.52387357094821 - type: f1 value: 78.5259569473291 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (pl) type: mteb/amazon_massive_scenario config: pl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 76.6913248150639 - type: f1 value: 76.91201656350455 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (pt) type: mteb/amazon_massive_scenario config: pt split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.1217215870881 - type: f1 value: 77.41179937912504 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ro) type: mteb/amazon_massive_scenario config: ro split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 75.25891055817083 - type: f1 value: 75.8089244542887 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ru) type: mteb/amazon_massive_scenario config: ru split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.70679219905851 - type: f1 value: 78.21459594517711 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sl) type: mteb/amazon_massive_scenario config: sl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.83523873570948 - type: f1 value: 74.86847028401978 - task: type: 
Classification dataset: name: MTEB MassiveScenarioClassification (sq) type: mteb/amazon_massive_scenario config: sq split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.71755211835911 - type: f1 value: 74.0214326485662 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sv) type: mteb/amazon_massive_scenario config: sv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 79.06523201075991 - type: f1 value: 79.10545620325138 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sw) type: mteb/amazon_massive_scenario config: sw split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.91862811028918 - type: f1 value: 66.50386121217983 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ta) type: mteb/amazon_massive_scenario config: ta split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.93140551445865 - type: f1 value: 70.755435928495 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (te) type: mteb/amazon_massive_scenario config: te split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.40753194351042 - type: f1 value: 71.61816115782923 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (th) type: mteb/amazon_massive_scenario config: th split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 75.1815736381977 - type: f1 value: 75.08016717887205 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (tl) type: mteb/amazon_massive_scenario config: tl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.86482851378614 - type: f1 value: 72.39521180006291 - task: type: Classification dataset: name: MTEB 
MassiveScenarioClassification (tr) type: mteb/amazon_massive_scenario config: tr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 76.46940147948891 - type: f1 value: 76.70044085362349 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ur) type: mteb/amazon_massive_scenario config: ur split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.89307330195024 - type: f1 value: 71.5721825332298 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (vi) type: mteb/amazon_massive_scenario config: vi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.7511768661735 - type: f1 value: 75.17918654541515 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-CN) type: mteb/amazon_massive_scenario config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 78.69535978480162 - type: f1 value: 78.90019070153316 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-TW) type: mteb/amazon_massive_scenario config: zh-TW split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 75.45729657027572 - type: f1 value: 76.19578371794672 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 36.92715354123554 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 35.53536244162518 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - 
type: map value: 33.08507884504006 - type: mrr value: 34.32436977159129 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.935 - type: map_at_10 value: 13.297 - type: map_at_100 value: 16.907 - type: map_at_1000 value: 18.391 - type: map_at_3 value: 9.626999999999999 - type: map_at_5 value: 11.190999999999999 - type: mrr_at_1 value: 46.129999999999995 - type: mrr_at_10 value: 54.346000000000004 - type: mrr_at_100 value: 55.067 - type: mrr_at_1000 value: 55.1 - type: mrr_at_3 value: 51.961 - type: mrr_at_5 value: 53.246 - type: ndcg_at_1 value: 44.118 - type: ndcg_at_10 value: 35.534 - type: ndcg_at_100 value: 32.946999999999996 - type: ndcg_at_1000 value: 41.599000000000004 - type: ndcg_at_3 value: 40.25 - type: ndcg_at_5 value: 37.978 - type: precision_at_1 value: 46.129999999999995 - type: precision_at_10 value: 26.842 - type: precision_at_100 value: 8.427 - type: precision_at_1000 value: 2.128 - type: precision_at_3 value: 37.977 - type: precision_at_5 value: 32.879000000000005 - type: recall_at_1 value: 5.935 - type: recall_at_10 value: 17.211000000000002 - type: recall_at_100 value: 34.33 - type: recall_at_1000 value: 65.551 - type: recall_at_3 value: 10.483 - type: recall_at_5 value: 13.078999999999999 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 35.231 - type: map_at_10 value: 50.202000000000005 - type: map_at_100 value: 51.154999999999994 - type: map_at_1000 value: 51.181 - type: map_at_3 value: 45.774 - type: map_at_5 value: 48.522 - type: mrr_at_1 value: 39.687 - type: mrr_at_10 value: 52.88 - type: mrr_at_100 value: 53.569 - type: mrr_at_1000 value: 53.58500000000001 - type: mrr_at_3 value: 49.228 - type: mrr_at_5 value: 51.525 - type: ndcg_at_1 value: 39.687 - type: ndcg_at_10 value: 57.754000000000005 - type: ndcg_at_100 value: 61.597 - type: ndcg_at_1000 value: 
62.18900000000001 - type: ndcg_at_3 value: 49.55 - type: ndcg_at_5 value: 54.11899999999999 - type: precision_at_1 value: 39.687 - type: precision_at_10 value: 9.313 - type: precision_at_100 value: 1.146 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 22.229 - type: precision_at_5 value: 15.939 - type: recall_at_1 value: 35.231 - type: recall_at_10 value: 78.083 - type: recall_at_100 value: 94.42099999999999 - type: recall_at_1000 value: 98.81 - type: recall_at_3 value: 57.047000000000004 - type: recall_at_5 value: 67.637 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 71.241 - type: map_at_10 value: 85.462 - type: map_at_100 value: 86.083 - type: map_at_1000 value: 86.09700000000001 - type: map_at_3 value: 82.49499999999999 - type: map_at_5 value: 84.392 - type: mrr_at_1 value: 82.09 - type: mrr_at_10 value: 88.301 - type: mrr_at_100 value: 88.383 - type: mrr_at_1000 value: 88.384 - type: mrr_at_3 value: 87.37 - type: mrr_at_5 value: 88.035 - type: ndcg_at_1 value: 82.12 - type: ndcg_at_10 value: 89.149 - type: ndcg_at_100 value: 90.235 - type: ndcg_at_1000 value: 90.307 - type: ndcg_at_3 value: 86.37599999999999 - type: ndcg_at_5 value: 87.964 - type: precision_at_1 value: 82.12 - type: precision_at_10 value: 13.56 - type: precision_at_100 value: 1.539 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.88 - type: precision_at_5 value: 24.92 - type: recall_at_1 value: 71.241 - type: recall_at_10 value: 96.128 - type: recall_at_100 value: 99.696 - type: recall_at_1000 value: 99.994 - type: recall_at_3 value: 88.181 - type: recall_at_5 value: 92.694 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 56.59757799655151 - task: type: Clustering dataset: name: MTEB 
RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 64.27391998854624 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.243 - type: map_at_10 value: 10.965 - type: map_at_100 value: 12.934999999999999 - type: map_at_1000 value: 13.256 - type: map_at_3 value: 7.907 - type: map_at_5 value: 9.435 - type: mrr_at_1 value: 20.9 - type: mrr_at_10 value: 31.849 - type: mrr_at_100 value: 32.964 - type: mrr_at_1000 value: 33.024 - type: mrr_at_3 value: 28.517 - type: mrr_at_5 value: 30.381999999999998 - type: ndcg_at_1 value: 20.9 - type: ndcg_at_10 value: 18.723 - type: ndcg_at_100 value: 26.384999999999998 - type: ndcg_at_1000 value: 32.114 - type: ndcg_at_3 value: 17.753 - type: ndcg_at_5 value: 15.558 - type: precision_at_1 value: 20.9 - type: precision_at_10 value: 9.8 - type: precision_at_100 value: 2.078 - type: precision_at_1000 value: 0.345 - type: precision_at_3 value: 16.900000000000002 - type: precision_at_5 value: 13.88 - type: recall_at_1 value: 4.243 - type: recall_at_10 value: 19.885 - type: recall_at_100 value: 42.17 - type: recall_at_1000 value: 70.12 - type: recall_at_3 value: 10.288 - type: recall_at_5 value: 14.072000000000001 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 85.84209174935282 - type: cos_sim_spearman value: 81.73248048438833 - type: euclidean_pearson value: 83.02810070308149 - type: euclidean_spearman value: 81.73248295679514 - type: manhattan_pearson value: 82.95368060376002 - type: manhattan_spearman value: 81.60277910998718 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson 
value: 88.52628804556943 - type: cos_sim_spearman value: 82.5713913555672 - type: euclidean_pearson value: 85.8796774746988 - type: euclidean_spearman value: 82.57137506803424 - type: manhattan_pearson value: 85.79671002960058 - type: manhattan_spearman value: 82.49445981618027 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 86.23682503505542 - type: cos_sim_spearman value: 87.15008956711806 - type: euclidean_pearson value: 86.79805401524959 - type: euclidean_spearman value: 87.15008956711806 - type: manhattan_pearson value: 86.65298502699244 - type: manhattan_spearman value: 86.97677821948562 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 85.63370304677802 - type: cos_sim_spearman value: 84.97105553540318 - type: euclidean_pearson value: 85.28896108687721 - type: euclidean_spearman value: 84.97105553540318 - type: manhattan_pearson value: 85.09663190337331 - type: manhattan_spearman value: 84.79126831644619 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 90.2614838800733 - type: cos_sim_spearman value: 91.0509162991835 - type: euclidean_pearson value: 90.33098317533373 - type: euclidean_spearman value: 91.05091625871644 - type: manhattan_pearson value: 90.26250435151107 - type: manhattan_spearman value: 90.97999594417519 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 85.80480973335091 - type: cos_sim_spearman value: 87.313695492969 - type: euclidean_pearson value: 86.49267251576939 - type: euclidean_spearman value: 
87.313695492969 - type: manhattan_pearson value: 86.44019901831935 - type: manhattan_spearman value: 87.24205395460392 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 90.05662789380672 - type: cos_sim_spearman value: 90.02759424426651 - type: euclidean_pearson value: 90.4042483422981 - type: euclidean_spearman value: 90.02759424426651 - type: manhattan_pearson value: 90.51446975000226 - type: manhattan_spearman value: 90.08832889933616 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 67.5975528273532 - type: cos_sim_spearman value: 67.62969861411354 - type: euclidean_pearson value: 69.224275734323 - type: euclidean_spearman value: 67.62969861411354 - type: manhattan_pearson value: 69.3761447059927 - type: manhattan_spearman value: 67.90921005611467 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 87.11244327231684 - type: cos_sim_spearman value: 88.37902438979035 - type: euclidean_pearson value: 87.86054279847336 - type: euclidean_spearman value: 88.37902438979035 - type: manhattan_pearson value: 87.77257757320378 - type: manhattan_spearman value: 88.25208966098123 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 85.87174608143563 - type: mrr value: 96.12836872640794 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 57.760999999999996 - type: map_at_10 value: 67.258 - type: map_at_100 value: 
67.757 - type: map_at_1000 value: 67.78800000000001 - type: map_at_3 value: 64.602 - type: map_at_5 value: 65.64 - type: mrr_at_1 value: 60.667 - type: mrr_at_10 value: 68.441 - type: mrr_at_100 value: 68.825 - type: mrr_at_1000 value: 68.853 - type: mrr_at_3 value: 66.444 - type: mrr_at_5 value: 67.26100000000001 - type: ndcg_at_1 value: 60.667 - type: ndcg_at_10 value: 71.852 - type: ndcg_at_100 value: 73.9 - type: ndcg_at_1000 value: 74.628 - type: ndcg_at_3 value: 67.093 - type: ndcg_at_5 value: 68.58 - type: precision_at_1 value: 60.667 - type: precision_at_10 value: 9.6 - type: precision_at_100 value: 1.0670000000000002 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 26.111 - type: precision_at_5 value: 16.733 - type: recall_at_1 value: 57.760999999999996 - type: recall_at_10 value: 84.967 - type: recall_at_100 value: 93.833 - type: recall_at_1000 value: 99.333 - type: recall_at_3 value: 71.589 - type: recall_at_5 value: 75.483 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.66633663366336 - type: cos_sim_ap value: 91.17685358899108 - type: cos_sim_f1 value: 82.16818642350559 - type: cos_sim_precision value: 83.26488706365504 - type: cos_sim_recall value: 81.10000000000001 - type: dot_accuracy value: 99.66633663366336 - type: dot_ap value: 91.17663411119032 - type: dot_f1 value: 82.16818642350559 - type: dot_precision value: 83.26488706365504 - type: dot_recall value: 81.10000000000001 - type: euclidean_accuracy value: 99.66633663366336 - type: euclidean_ap value: 91.17685189882275 - type: euclidean_f1 value: 82.16818642350559 - type: euclidean_precision value: 83.26488706365504 - type: euclidean_recall value: 81.10000000000001 - type: manhattan_accuracy value: 99.66633663366336 - type: manhattan_ap value: 
91.2241619496737 - type: manhattan_f1 value: 82.20472440944883 - type: manhattan_precision value: 86.51933701657458 - type: manhattan_recall value: 78.3 - type: max_accuracy value: 99.66633663366336 - type: max_ap value: 91.2241619496737 - type: max_f1 value: 82.20472440944883 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 66.85101268897951 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 42.461184054706905 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 51.44542568873886 - type: mrr value: 52.33656151854681 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.75982974997539 - type: cos_sim_spearman value: 30.385405026539914 - type: dot_pearson value: 30.75982433546523 - type: dot_spearman value: 30.385405026539914 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.22799999999999998 - type: map_at_10 value: 2.064 - type: map_at_100 value: 13.056000000000001 - type: map_at_1000 value: 31.747999999999998 - type: map_at_3 value: 0.67 - type: map_at_5 value: 1.097 - type: mrr_at_1 value: 90.0 - type: mrr_at_10 value: 94.667 - type: mrr_at_100 value: 94.667 - type: mrr_at_1000 value: 94.667 - type: mrr_at_3 value: 94.667 - type: mrr_at_5 value: 94.667 - type: ndcg_at_1 value: 86.0 - type: ndcg_at_10 value: 82.0 - 
type: ndcg_at_100 value: 64.307 - type: ndcg_at_1000 value: 57.023999999999994 - type: ndcg_at_3 value: 85.816 - type: ndcg_at_5 value: 84.904 - type: precision_at_1 value: 90.0 - type: precision_at_10 value: 85.8 - type: precision_at_100 value: 66.46 - type: precision_at_1000 value: 25.202 - type: precision_at_3 value: 90.0 - type: precision_at_5 value: 89.2 - type: recall_at_1 value: 0.22799999999999998 - type: recall_at_10 value: 2.235 - type: recall_at_100 value: 16.185 - type: recall_at_1000 value: 53.620999999999995 - type: recall_at_3 value: 0.7040000000000001 - type: recall_at_5 value: 1.172 - task: type: BitextMining dataset: name: MTEB Tatoeba (sqi-eng) type: mteb/tatoeba-bitext-mining config: sqi-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.39999999999999 - type: f1 value: 96.75 - type: precision value: 96.45 - type: recall value: 97.39999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (fry-eng) type: mteb/tatoeba-bitext-mining config: fry-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85.54913294797689 - type: f1 value: 82.46628131021194 - type: precision value: 81.1175337186898 - type: recall value: 85.54913294797689 - task: type: BitextMining dataset: name: MTEB Tatoeba (kur-eng) type: mteb/tatoeba-bitext-mining config: kur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 81.21951219512195 - type: f1 value: 77.33333333333334 - type: precision value: 75.54878048780488 - type: recall value: 81.21951219512195 - task: type: BitextMining dataset: name: MTEB Tatoeba (tur-eng) type: mteb/tatoeba-bitext-mining config: tur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 98.6 - type: f1 value: 98.26666666666665 - type: precision value: 98.1 - type: recall value: 98.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (deu-eng) 
type: mteb/tatoeba-bitext-mining config: deu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 99.5 - type: f1 value: 99.33333333333333 - type: precision value: 99.25 - type: recall value: 99.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (nld-eng) type: mteb/tatoeba-bitext-mining config: nld-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.8 - type: f1 value: 97.2 - type: precision value: 96.89999999999999 - type: recall value: 97.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (ron-eng) type: mteb/tatoeba-bitext-mining config: ron-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.8 - type: f1 value: 97.18333333333334 - type: precision value: 96.88333333333333 - type: recall value: 97.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (ang-eng) type: mteb/tatoeba-bitext-mining config: ang-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.61194029850746 - type: f1 value: 72.81094527363183 - type: precision value: 70.83333333333333 - type: recall value: 77.61194029850746 - task: type: BitextMining dataset: name: MTEB Tatoeba (ido-eng) type: mteb/tatoeba-bitext-mining config: ido-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.7 - type: f1 value: 91.91666666666667 - type: precision value: 91.08333333333334 - type: recall value: 93.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (jav-eng) type: mteb/tatoeba-bitext-mining config: jav-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.29268292682927 - type: f1 value: 85.27642276422765 - type: precision value: 84.01277584204414 - type: recall value: 88.29268292682927 - task: type: BitextMining dataset: name: MTEB Tatoeba (isl-eng) type: mteb/tatoeba-bitext-mining config: 
isl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.1 - type: f1 value: 95.0 - type: precision value: 94.46666666666668 - type: recall value: 96.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (slv-eng) type: mteb/tatoeba-bitext-mining config: slv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.681652490887 - type: f1 value: 91.90765492102065 - type: precision value: 91.05913325232888 - type: recall value: 93.681652490887 - task: type: BitextMining dataset: name: MTEB Tatoeba (cym-eng) type: mteb/tatoeba-bitext-mining config: cym-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.17391304347827 - type: f1 value: 89.97101449275361 - type: precision value: 88.96811594202899 - type: recall value: 92.17391304347827 - task: type: BitextMining dataset: name: MTEB Tatoeba (kaz-eng) type: mteb/tatoeba-bitext-mining config: kaz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.43478260869566 - type: f1 value: 87.72173913043478 - type: precision value: 86.42028985507245 - type: recall value: 90.43478260869566 - task: type: BitextMining dataset: name: MTEB Tatoeba (est-eng) type: mteb/tatoeba-bitext-mining config: est-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.4 - type: f1 value: 88.03 - type: precision value: 86.95 - type: recall value: 90.4 - task: type: BitextMining dataset: name: MTEB Tatoeba (heb-eng) type: mteb/tatoeba-bitext-mining config: heb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.4 - type: f1 value: 91.45666666666666 - type: precision value: 90.525 - type: recall value: 93.4 - task: type: BitextMining dataset: name: MTEB Tatoeba (gla-eng) type: mteb/tatoeba-bitext-mining config: gla-eng split: test revision: 
9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 81.9059107358263 - type: f1 value: 78.32557872364869 - type: precision value: 76.78260286824823 - type: recall value: 81.9059107358263 - task: type: BitextMining dataset: name: MTEB Tatoeba (mar-eng) type: mteb/tatoeba-bitext-mining config: mar-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.3 - type: f1 value: 92.58333333333333 - type: precision value: 91.73333333333332 - type: recall value: 94.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (lat-eng) type: mteb/tatoeba-bitext-mining config: lat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 79.10000000000001 - type: f1 value: 74.50500000000001 - type: precision value: 72.58928571428571 - type: recall value: 79.10000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (bel-eng) type: mteb/tatoeba-bitext-mining config: bel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.6 - type: f1 value: 95.55 - type: precision value: 95.05 - type: recall value: 96.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (pms-eng) type: mteb/tatoeba-bitext-mining config: pms-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 82.0952380952381 - type: f1 value: 77.98458049886621 - type: precision value: 76.1968253968254 - type: recall value: 82.0952380952381 - task: type: BitextMining dataset: name: MTEB Tatoeba (gle-eng) type: mteb/tatoeba-bitext-mining config: gle-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.9 - type: f1 value: 84.99190476190476 - type: precision value: 83.65 - type: recall value: 87.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (pes-eng) type: mteb/tatoeba-bitext-mining config: pes-eng split: test revision: 
9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.7 - type: f1 value: 94.56666666666666 - type: precision value: 94.01666666666667 - type: recall value: 95.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (nob-eng) type: mteb/tatoeba-bitext-mining config: nob-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 98.6 - type: f1 value: 98.2 - type: precision value: 98.0 - type: recall value: 98.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (bul-eng) type: mteb/tatoeba-bitext-mining config: bul-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.6 - type: f1 value: 94.38333333333334 - type: precision value: 93.78333333333335 - type: recall value: 95.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (cbk-eng) type: mteb/tatoeba-bitext-mining config: cbk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.4 - type: f1 value: 84.10380952380952 - type: precision value: 82.67 - type: recall value: 87.4 - task: type: BitextMining dataset: name: MTEB Tatoeba (hun-eng) type: mteb/tatoeba-bitext-mining config: hun-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.5 - type: f1 value: 94.33333333333334 - type: precision value: 93.78333333333333 - type: recall value: 95.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (uig-eng) type: mteb/tatoeba-bitext-mining config: uig-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.4 - type: f1 value: 86.82000000000001 - type: precision value: 85.64500000000001 - type: recall value: 89.4 - task: type: BitextMining dataset: name: MTEB Tatoeba (rus-eng) type: mteb/tatoeba-bitext-mining config: rus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.1 - type: f1 value: 
93.56666666666668 - type: precision value: 92.81666666666666 - type: recall value: 95.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (spa-eng) type: mteb/tatoeba-bitext-mining config: spa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 98.9 - type: f1 value: 98.6 - type: precision value: 98.45 - type: recall value: 98.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (hye-eng) type: mteb/tatoeba-bitext-mining config: hye-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.01347708894879 - type: f1 value: 93.51752021563343 - type: precision value: 92.82794249775381 - type: recall value: 95.01347708894879 - task: type: BitextMining dataset: name: MTEB Tatoeba (tel-eng) type: mteb/tatoeba-bitext-mining config: tel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.00854700854701 - type: f1 value: 96.08262108262107 - type: precision value: 95.65527065527067 - type: recall value: 97.00854700854701 - task: type: BitextMining dataset: name: MTEB Tatoeba (afr-eng) type: mteb/tatoeba-bitext-mining config: afr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.5 - type: f1 value: 95.39999999999999 - type: precision value: 94.88333333333333 - type: recall value: 96.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (mon-eng) type: mteb/tatoeba-bitext-mining config: mon-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.5909090909091 - type: f1 value: 95.49242424242425 - type: precision value: 94.9621212121212 - type: recall value: 96.5909090909091 - task: type: BitextMining dataset: name: MTEB Tatoeba (arz-eng) type: mteb/tatoeba-bitext-mining config: arz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.90566037735849 - type: f1 value: 
81.85883997204752 - type: precision value: 80.54507337526205 - type: recall value: 84.90566037735849 - task: type: BitextMining dataset: name: MTEB Tatoeba (hrv-eng) type: mteb/tatoeba-bitext-mining config: hrv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.5 - type: f1 value: 96.75 - type: precision value: 96.38333333333333 - type: recall value: 97.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (nov-eng) type: mteb/tatoeba-bitext-mining config: nov-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.7704280155642 - type: f1 value: 82.99610894941635 - type: precision value: 81.32295719844358 - type: recall value: 86.7704280155642 - task: type: BitextMining dataset: name: MTEB Tatoeba (gsw-eng) type: mteb/tatoeba-bitext-mining config: gsw-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 67.52136752136752 - type: f1 value: 61.89662189662191 - type: precision value: 59.68660968660969 - type: recall value: 67.52136752136752 - task: type: BitextMining dataset: name: MTEB Tatoeba (nds-eng) type: mteb/tatoeba-bitext-mining config: nds-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.2 - type: f1 value: 86.32 - type: precision value: 85.015 - type: recall value: 89.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (ukr-eng) type: mteb/tatoeba-bitext-mining config: ukr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.0 - type: f1 value: 94.78333333333333 - type: precision value: 94.18333333333334 - type: recall value: 96.0 - task: type: BitextMining dataset: name: MTEB Tatoeba (uzb-eng) type: mteb/tatoeba-bitext-mining config: uzb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 83.8785046728972 - type: f1 value: 80.54517133956385 - type: 
precision value: 79.154984423676 - type: recall value: 83.8785046728972 - task: type: BitextMining dataset: name: MTEB Tatoeba (lit-eng) type: mteb/tatoeba-bitext-mining config: lit-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.60000000000001 - type: f1 value: 92.01333333333334 - type: precision value: 91.28333333333333 - type: recall value: 93.60000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (ina-eng) type: mteb/tatoeba-bitext-mining config: ina-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.1 - type: f1 value: 96.26666666666667 - type: precision value: 95.85000000000001 - type: recall value: 97.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (lfn-eng) type: mteb/tatoeba-bitext-mining config: lfn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.3 - type: f1 value: 80.67833333333333 - type: precision value: 79.03928571428571 - type: recall value: 84.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (zsm-eng) type: mteb/tatoeba-bitext-mining config: zsm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.3 - type: f1 value: 96.48333333333332 - type: precision value: 96.08333333333331 - type: recall value: 97.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (ita-eng) type: mteb/tatoeba-bitext-mining config: ita-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.7 - type: f1 value: 94.66666666666667 - type: precision value: 94.16666666666667 - type: recall value: 95.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (cmn-eng) type: mteb/tatoeba-bitext-mining config: cmn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.2 - type: f1 value: 96.36666666666667 - type: precision value: 95.96666666666668 - 
type: recall value: 97.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (lvs-eng) type: mteb/tatoeba-bitext-mining config: lvs-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.3 - type: f1 value: 92.80666666666667 - type: precision value: 92.12833333333333 - type: recall value: 94.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (glg-eng) type: mteb/tatoeba-bitext-mining config: glg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.0 - type: f1 value: 96.22333333333334 - type: precision value: 95.875 - type: recall value: 97.0 - task: type: BitextMining dataset: name: MTEB Tatoeba (ceb-eng) type: mteb/tatoeba-bitext-mining config: ceb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 74.33333333333333 - type: f1 value: 70.78174603174602 - type: precision value: 69.28333333333332 - type: recall value: 74.33333333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (bre-eng) type: mteb/tatoeba-bitext-mining config: bre-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 37.6 - type: f1 value: 32.938348952090365 - type: precision value: 31.2811038961039 - type: recall value: 37.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (ben-eng) type: mteb/tatoeba-bitext-mining config: ben-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.5 - type: f1 value: 89.13333333333333 - type: precision value: 88.03333333333333 - type: recall value: 91.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (swg-eng) type: mteb/tatoeba-bitext-mining config: swg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 82.14285714285714 - type: f1 value: 77.67857142857143 - type: precision value: 75.59523809523809 - type: recall value: 82.14285714285714 - task: 
type: BitextMining dataset: name: MTEB Tatoeba (arq-eng) type: mteb/tatoeba-bitext-mining config: arq-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 69.0450054884742 - type: f1 value: 63.070409283362075 - type: precision value: 60.58992781824835 - type: recall value: 69.0450054884742 - task: type: BitextMining dataset: name: MTEB Tatoeba (kab-eng) type: mteb/tatoeba-bitext-mining config: kab-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 63.1 - type: f1 value: 57.848333333333336 - type: precision value: 55.69500000000001 - type: recall value: 63.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (fra-eng) type: mteb/tatoeba-bitext-mining config: fra-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.1 - type: f1 value: 95.01666666666667 - type: precision value: 94.5 - type: recall value: 96.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (por-eng) type: mteb/tatoeba-bitext-mining config: por-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.89999999999999 - type: f1 value: 94.90666666666667 - type: precision value: 94.425 - type: recall value: 95.89999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (tat-eng) type: mteb/tatoeba-bitext-mining config: tat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.6 - type: f1 value: 84.61333333333333 - type: precision value: 83.27 - type: recall value: 87.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (oci-eng) type: mteb/tatoeba-bitext-mining config: oci-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 76.4 - type: f1 value: 71.90746031746032 - type: precision value: 70.07027777777778 - type: recall value: 76.4 - task: type: BitextMining dataset: name: MTEB Tatoeba (pol-eng) 
type: mteb/tatoeba-bitext-mining config: pol-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.89999999999999 - type: f1 value: 97.26666666666667 - type: precision value: 96.95 - type: recall value: 97.89999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (war-eng) type: mteb/tatoeba-bitext-mining config: war-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.8 - type: f1 value: 74.39555555555555 - type: precision value: 72.59416666666667 - type: recall value: 78.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (aze-eng) type: mteb/tatoeba-bitext-mining config: aze-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.19999999999999 - type: f1 value: 93.78999999999999 - type: precision value: 93.125 - type: recall value: 95.19999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (vie-eng) type: mteb/tatoeba-bitext-mining config: vie-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.8 - type: f1 value: 97.1 - type: precision value: 96.75 - type: recall value: 97.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (nno-eng) type: mteb/tatoeba-bitext-mining config: nno-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.6 - type: f1 value: 94.25666666666666 - type: precision value: 93.64166666666668 - type: recall value: 95.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (cha-eng) type: mteb/tatoeba-bitext-mining config: cha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 56.934306569343065 - type: f1 value: 51.461591936044485 - type: precision value: 49.37434827945776 - type: recall value: 56.934306569343065 - task: type: BitextMining dataset: name: MTEB Tatoeba (mhr-eng) type: mteb/tatoeba-bitext-mining config: 
mhr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 20.200000000000003 - type: f1 value: 16.91799284049284 - type: precision value: 15.791855158730158 - type: recall value: 20.200000000000003 - task: type: BitextMining dataset: name: MTEB Tatoeba (dan-eng) type: mteb/tatoeba-bitext-mining config: dan-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.2 - type: f1 value: 95.3 - type: precision value: 94.85 - type: recall value: 96.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (ell-eng) type: mteb/tatoeba-bitext-mining config: ell-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.3 - type: f1 value: 95.11666666666667 - type: precision value: 94.53333333333333 - type: recall value: 96.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (amh-eng) type: mteb/tatoeba-bitext-mining config: amh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.88095238095238 - type: f1 value: 87.14285714285714 - type: precision value: 85.96230158730161 - type: recall value: 89.88095238095238 - task: type: BitextMining dataset: name: MTEB Tatoeba (pam-eng) type: mteb/tatoeba-bitext-mining config: pam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 24.099999999999998 - type: f1 value: 19.630969083349783 - type: precision value: 18.275094905094907 - type: recall value: 24.099999999999998 - task: type: BitextMining dataset: name: MTEB Tatoeba (hsb-eng) type: mteb/tatoeba-bitext-mining config: hsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 83.4368530020704 - type: f1 value: 79.45183870649709 - type: precision value: 77.7432712215321 - type: recall value: 83.4368530020704 - task: type: BitextMining dataset: name: MTEB Tatoeba (srp-eng) type: 
mteb/tatoeba-bitext-mining config: srp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.8 - type: f1 value: 94.53333333333333 - type: precision value: 93.91666666666666 - type: recall value: 95.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (epo-eng) type: mteb/tatoeba-bitext-mining config: epo-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 98.8 - type: f1 value: 98.48333333333332 - type: precision value: 98.33333333333334 - type: recall value: 98.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (kzj-eng) type: mteb/tatoeba-bitext-mining config: kzj-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 17.5 - type: f1 value: 14.979285714285714 - type: precision value: 14.23235060690943 - type: recall value: 17.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (awa-eng) type: mteb/tatoeba-bitext-mining config: awa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.93939393939394 - type: f1 value: 91.991341991342 - type: precision value: 91.05339105339105 - type: recall value: 93.93939393939394 - task: type: BitextMining dataset: name: MTEB Tatoeba (fao-eng) type: mteb/tatoeba-bitext-mining config: fao-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.31297709923665 - type: f1 value: 86.76844783715012 - type: precision value: 85.63613231552164 - type: recall value: 89.31297709923665 - task: type: BitextMining dataset: name: MTEB Tatoeba (mal-eng) type: mteb/tatoeba-bitext-mining config: mal-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 99.12663755458514 - type: f1 value: 98.93255701115964 - type: precision value: 98.83551673944687 - type: recall value: 99.12663755458514 - task: type: BitextMining dataset: name: MTEB Tatoeba (ile-eng) 
type: mteb/tatoeba-bitext-mining config: ile-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.0 - type: f1 value: 89.77999999999999 - type: precision value: 88.78333333333333 - type: recall value: 92.0 - task: type: BitextMining dataset: name: MTEB Tatoeba (bos-eng) type: mteb/tatoeba-bitext-mining config: bos-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.89265536723164 - type: f1 value: 95.85687382297553 - type: precision value: 95.33898305084746 - type: recall value: 96.89265536723164 - task: type: BitextMining dataset: name: MTEB Tatoeba (cor-eng) type: mteb/tatoeba-bitext-mining config: cor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 14.6 - type: f1 value: 11.820611790170615 - type: precision value: 11.022616224355355 - type: recall value: 14.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (cat-eng) type: mteb/tatoeba-bitext-mining config: cat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.89999999999999 - type: f1 value: 94.93333333333334 - type: precision value: 94.48666666666666 - type: recall value: 95.89999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (eus-eng) type: mteb/tatoeba-bitext-mining config: eus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.6 - type: f1 value: 84.72333333333334 - type: precision value: 83.44166666666666 - type: recall value: 87.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (yue-eng) type: mteb/tatoeba-bitext-mining config: yue-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.8 - type: f1 value: 93.47333333333333 - type: precision value: 92.875 - type: recall value: 94.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (swe-eng) type: 
mteb/tatoeba-bitext-mining config: swe-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.6 - type: f1 value: 95.71666666666665 - type: precision value: 95.28333333333335 - type: recall value: 96.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (dtp-eng) type: mteb/tatoeba-bitext-mining config: dtp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 17.8 - type: f1 value: 14.511074040901628 - type: precision value: 13.503791000666002 - type: recall value: 17.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (kat-eng) type: mteb/tatoeba-bitext-mining config: kat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.10187667560321 - type: f1 value: 92.46648793565683 - type: precision value: 91.71134941912423 - type: recall value: 94.10187667560321 - task: type: BitextMining dataset: name: MTEB Tatoeba (jpn-eng) type: mteb/tatoeba-bitext-mining config: jpn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.0 - type: f1 value: 96.11666666666666 - type: precision value: 95.68333333333334 - type: recall value: 97.0 - task: type: BitextMining dataset: name: MTEB Tatoeba (csb-eng) type: mteb/tatoeba-bitext-mining config: csb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 72.72727272727273 - type: f1 value: 66.58949745906267 - type: precision value: 63.86693017127799 - type: recall value: 72.72727272727273 - task: type: BitextMining dataset: name: MTEB Tatoeba (xho-eng) type: mteb/tatoeba-bitext-mining config: xho-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.14084507042254 - type: f1 value: 88.26291079812206 - type: precision value: 87.32394366197182 - type: recall value: 90.14084507042254 - task: type: BitextMining dataset: name: MTEB Tatoeba (orv-eng) 
type: mteb/tatoeba-bitext-mining config: orv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 64.67065868263472 - type: f1 value: 58.2876627696987 - type: precision value: 55.79255774165953 - type: recall value: 64.67065868263472 - task: type: BitextMining dataset: name: MTEB Tatoeba (ind-eng) type: mteb/tatoeba-bitext-mining config: ind-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.6 - type: f1 value: 94.41666666666667 - type: precision value: 93.85 - type: recall value: 95.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (tuk-eng) type: mteb/tatoeba-bitext-mining config: tuk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 55.172413793103445 - type: f1 value: 49.63992493549144 - type: precision value: 47.71405113769646 - type: recall value: 55.172413793103445 - task: type: BitextMining dataset: name: MTEB Tatoeba (max-eng) type: mteb/tatoeba-bitext-mining config: max-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.46478873239437 - type: f1 value: 73.4417616811983 - type: precision value: 71.91607981220658 - type: recall value: 77.46478873239437 - task: type: BitextMining dataset: name: MTEB Tatoeba (swh-eng) type: mteb/tatoeba-bitext-mining config: swh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.61538461538461 - type: f1 value: 80.91452991452994 - type: precision value: 79.33760683760683 - type: recall value: 84.61538461538461 - task: type: BitextMining dataset: name: MTEB Tatoeba (hin-eng) type: mteb/tatoeba-bitext-mining config: hin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 98.2 - type: f1 value: 97.6 - type: precision value: 97.3 - type: recall value: 98.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (dsb-eng) type: 
mteb/tatoeba-bitext-mining config: dsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 75.5741127348643 - type: f1 value: 72.00417536534445 - type: precision value: 70.53467872883321 - type: recall value: 75.5741127348643 - task: type: BitextMining dataset: name: MTEB Tatoeba (ber-eng) type: mteb/tatoeba-bitext-mining config: ber-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 62.2 - type: f1 value: 55.577460317460314 - type: precision value: 52.98583333333333 - type: recall value: 62.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (tam-eng) type: mteb/tatoeba-bitext-mining config: tam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.18241042345277 - type: f1 value: 90.6468124709167 - type: precision value: 89.95656894679696 - type: recall value: 92.18241042345277 - task: type: BitextMining dataset: name: MTEB Tatoeba (slk-eng) type: mteb/tatoeba-bitext-mining config: slk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.1 - type: f1 value: 95.13333333333333 - type: precision value: 94.66666666666667 - type: recall value: 96.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (tgl-eng) type: mteb/tatoeba-bitext-mining config: tgl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.8 - type: f1 value: 95.85000000000001 - type: precision value: 95.39999999999999 - type: recall value: 96.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (ast-eng) type: mteb/tatoeba-bitext-mining config: ast-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.1259842519685 - type: f1 value: 89.76377952755905 - type: precision value: 88.71391076115485 - type: recall value: 92.1259842519685 - task: type: BitextMining dataset: name: MTEB Tatoeba (mkd-eng) type: 
mteb/tatoeba-bitext-mining config: mkd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.1 - type: f1 value: 92.49 - type: precision value: 91.725 - type: recall value: 94.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (khm-eng) type: mteb/tatoeba-bitext-mining config: khm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.5623268698061 - type: f1 value: 73.27364463791058 - type: precision value: 71.51947852086357 - type: recall value: 77.5623268698061 - task: type: BitextMining dataset: name: MTEB Tatoeba (ces-eng) type: mteb/tatoeba-bitext-mining config: ces-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.39999999999999 - type: f1 value: 96.56666666666666 - type: precision value: 96.16666666666667 - type: recall value: 97.39999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (tzl-eng) type: mteb/tatoeba-bitext-mining config: tzl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 66.34615384615384 - type: f1 value: 61.092032967032964 - type: precision value: 59.27197802197802 - type: recall value: 66.34615384615384 - task: type: BitextMining dataset: name: MTEB Tatoeba (urd-eng) type: mteb/tatoeba-bitext-mining config: urd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.89999999999999 - type: f1 value: 93.41190476190476 - type: precision value: 92.7 - type: recall value: 94.89999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (ara-eng) type: mteb/tatoeba-bitext-mining config: ara-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.10000000000001 - type: f1 value: 91.10000000000001 - type: precision value: 90.13333333333333 - type: recall value: 93.10000000000001 - task: type: BitextMining dataset: name: MTEB 
Tatoeba (kor-eng) type: mteb/tatoeba-bitext-mining config: kor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.7 - type: f1 value: 91.97333333333334 - type: precision value: 91.14166666666667 - type: recall value: 93.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (yid-eng) type: mteb/tatoeba-bitext-mining config: yid-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.21698113207547 - type: f1 value: 90.3796046720575 - type: precision value: 89.56367924528303 - type: recall value: 92.21698113207547 - task: type: BitextMining dataset: name: MTEB Tatoeba (fin-eng) type: mteb/tatoeba-bitext-mining config: fin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.6 - type: f1 value: 96.91666666666667 - type: precision value: 96.6 - type: recall value: 97.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (tha-eng) type: mteb/tatoeba-bitext-mining config: tha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.44525547445255 - type: f1 value: 96.71532846715328 - type: precision value: 96.35036496350365 - type: recall value: 97.44525547445255 - task: type: BitextMining dataset: name: MTEB Tatoeba (wuu-eng) type: mteb/tatoeba-bitext-mining config: wuu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.1 - type: f1 value: 92.34000000000002 - type: precision value: 91.49166666666667 - type: recall value: 94.1 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 3.2910000000000004 - type: map_at_10 value: 10.373000000000001 - type: map_at_100 value: 15.612 - type: map_at_1000 value: 17.06 - type: map_at_3 value: 6.119 - type: map_at_5 value: 7.917000000000001 - type: mrr_at_1 value: 44.897999999999996 - 
type: mrr_at_10 value: 56.054 - type: mrr_at_100 value: 56.82000000000001 - type: mrr_at_1000 value: 56.82000000000001 - type: mrr_at_3 value: 52.381 - type: mrr_at_5 value: 53.81 - type: ndcg_at_1 value: 42.857 - type: ndcg_at_10 value: 27.249000000000002 - type: ndcg_at_100 value: 36.529 - type: ndcg_at_1000 value: 48.136 - type: ndcg_at_3 value: 33.938 - type: ndcg_at_5 value: 29.951 - type: precision_at_1 value: 44.897999999999996 - type: precision_at_10 value: 22.653000000000002 - type: precision_at_100 value: 7.000000000000001 - type: precision_at_1000 value: 1.48 - type: precision_at_3 value: 32.653 - type: precision_at_5 value: 27.755000000000003 - type: recall_at_1 value: 3.2910000000000004 - type: recall_at_10 value: 16.16 - type: recall_at_100 value: 43.908 - type: recall_at_1000 value: 79.823 - type: recall_at_3 value: 7.156 - type: recall_at_5 value: 10.204 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 71.05879999999999 - type: ap value: 14.609748142799111 - type: f1 value: 54.878956295843096 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 64.61799660441426 - type: f1 value: 64.8698191961434 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 51.32860036611885 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 88.34714192048638 - 
type: cos_sim_ap value: 80.26732975975634 - type: cos_sim_f1 value: 73.53415148134374 - type: cos_sim_precision value: 69.34767360299276 - type: cos_sim_recall value: 78.25857519788919 - type: dot_accuracy value: 88.34714192048638 - type: dot_ap value: 80.26733698491206 - type: dot_f1 value: 73.53415148134374 - type: dot_precision value: 69.34767360299276 - type: dot_recall value: 78.25857519788919 - type: euclidean_accuracy value: 88.34714192048638 - type: euclidean_ap value: 80.26734337771738 - type: euclidean_f1 value: 73.53415148134374 - type: euclidean_precision value: 69.34767360299276 - type: euclidean_recall value: 78.25857519788919 - type: manhattan_accuracy value: 88.30541813196639 - type: manhattan_ap value: 80.19415808104145 - type: manhattan_f1 value: 73.55143870713441 - type: manhattan_precision value: 73.25307511122743 - type: manhattan_recall value: 73.85224274406332 - type: max_accuracy value: 88.34714192048638 - type: max_ap value: 80.26734337771738 - type: max_f1 value: 73.55143870713441 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.81061047075717 - type: cos_sim_ap value: 87.11747055081017 - type: cos_sim_f1 value: 80.04355498817256 - type: cos_sim_precision value: 78.1165262000733 - type: cos_sim_recall value: 82.06806282722513 - type: dot_accuracy value: 89.81061047075717 - type: dot_ap value: 87.11746902745236 - type: dot_f1 value: 80.04355498817256 - type: dot_precision value: 78.1165262000733 - type: dot_recall value: 82.06806282722513 - type: euclidean_accuracy value: 89.81061047075717 - type: euclidean_ap value: 87.11746919324248 - type: euclidean_f1 value: 80.04355498817256 - type: euclidean_precision value: 78.1165262000733 - type: euclidean_recall value: 82.06806282722513 - type: manhattan_accuracy value: 89.79508673885202 - type: manhattan_ap 
value: 87.11074390832218 - type: manhattan_f1 value: 80.13002540726349 - type: manhattan_precision value: 77.83826945412311 - type: manhattan_recall value: 82.56082537727133 - type: max_accuracy value: 89.81061047075717 - type: max_ap value: 87.11747055081017 - type: max_f1 value: 80.13002540726349 --- # yoeven/multilingual-e5-large-instruct-Q5_K_M-GGUF This model was converted to GGUF format from [`intfloat/multilingual-e5-large-instruct`](https://huggingface.co/intfloat/multilingual-e5-large-instruct) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/intfloat/multilingual-e5-large-instruct) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo yoeven/multilingual-e5-large-instruct-Q5_K_M-GGUF --hf-file multilingual-e5-large-instruct-q5_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo yoeven/multilingual-e5-large-instruct-Q5_K_M-GGUF --hf-file multilingual-e5-large-instruct-q5_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. 
``` ./llama-cli --hf-repo yoeven/multilingual-e5-large-instruct-Q5_K_M-GGUF --hf-file multilingual-e5-large-instruct-q5_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo yoeven/multilingual-e5-large-instruct-Q5_K_M-GGUF --hf-file multilingual-e5-large-instruct-q5_k_m.gguf -c 2048 ```
[ "BIOSSES", "SCIFACT" ]
Teradata/gte-multilingual-base
Teradata
sentence-similarity
[ "onnx", "mteb", "multilingual", "sentence-similarity", "teradata", "af", "ar", "az", "be", "bg", "bn", "ca", "ceb", "cs", "cy", "da", "de", "el", "en", "es", "et", "eu", "fa", "fi", "fr", "gl", "gu", "he", "hi", "hr", "ht", "hu", "hy", "id", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ky", "lo", "lt", "lv", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "pa", "pl", "pt", "qu", "ro", "ru", "si", "sk", "sl", "so", "sq", "sr", "sv", "sw", "ta", "te", "th", "tl", "tr", "uk", "ur", "vi", "yo", "zh", "license:apache-2.0", "model-index", "region:us" ]
2025-02-12T16:57:01Z
2025-03-04T09:41:47+00:00
19
0
--- language: - af - ar - az - be - bg - bn - ca - ceb - cs - cy - da - de - el - en - es - et - eu - fa - fi - fr - gl - gu - he - hi - hr - ht - hu - hy - id - is - it - ja - jv - ka - kk - km - kn - ko - ky - lo - lt - lv - mk - ml - mn - mr - ms - my - ne - nl - 'no' - pa - pl - pt - qu - ro - ru - si - sk - sl - so - sq - sr - sv - sw - ta - te - th - tl - tr - uk - ur - vi - yo - zh license: apache-2.0 tags: - mteb - multilingual - sentence-similarity - onnx - teradata model-index: - name: gte-multilingual-base (dense) results: - task: type: Clustering dataset: name: MTEB 8TagsClustering type: PL-MTEB/8tags-clustering config: default split: test revision: None metrics: - type: v_measure value: 33.66681726329994 - task: type: STS dataset: name: MTEB AFQMC type: C-MTEB/AFQMC config: default split: validation revision: b44c3b011063adb25877c13823db83bb193913c4 metrics: - type: cos_sim_spearman value: 43.54760696384009 - task: type: STS dataset: name: MTEB ATEC type: C-MTEB/ATEC config: default split: test revision: 0f319b1142f28d00e055a6770f3f726ae9b7d865 metrics: - type: cos_sim_spearman value: 48.91186363417501 - task: type: Classification dataset: name: MTEB AllegroReviews type: PL-MTEB/allegro-reviews config: default split: test revision: None metrics: - type: accuracy value: 41.689860834990064 - task: type: Clustering dataset: name: MTEB AlloProfClusteringP2P type: lyon-nlp/alloprof config: default split: test revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b metrics: - type: v_measure value: 54.20241337977897 - type: v_measure value: 44.34083695608643 - task: type: Reranking dataset: name: MTEB AlloprofReranking type: lyon-nlp/mteb-fr-reranking-alloprof-s2p config: default split: test revision: 666fdacebe0291776e86f29345663dfaf80a0db9 metrics: - type: map value: 64.91495250072002 - task: type: Retrieval dataset: name: MTEB AlloprofRetrieval type: lyon-nlp/alloprof config: default split: test revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b metrics: - 
type: ndcg_at_10 value: 53.638 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 75.95522388059702 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 80.717625 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 43.64199999999999 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (de) type: mteb/amazon_reviews_multi config: de split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.108 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (es) type: mteb/amazon_reviews_multi config: es split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.169999999999995 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (fr) type: mteb/amazon_reviews_multi config: fr split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 39.56799999999999 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (ja) type: mteb/amazon_reviews_multi config: ja split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 35.75000000000001 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (zh) type: mteb/amazon_reviews_multi config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 33.342000000000006 - task: type: Retrieval dataset: name: MTEB ArguAna type: 
mteb/arguana config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: ndcg_at_10 value: 58.231 - task: type: Retrieval dataset: name: MTEB ArguAna-PL type: clarin-knext/arguana-pl config: default split: test revision: 63fc86750af76253e8c760fc9e534bbf24d260a2 metrics: - type: ndcg_at_10 value: 53.166000000000004 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 46.01900557959478 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 41.06626465345723 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 61.87514497610431 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_spearman value: 81.21450112991194 - task: type: STS dataset: name: MTEB BQ type: C-MTEB/BQ config: default split: test revision: e3dda5e115e487b39ec7e618c0c6a29137052a55 metrics: - type: cos_sim_spearman value: 51.71589543397271 - task: type: Retrieval dataset: name: MTEB BSARDRetrieval type: maastrichtlawtech/bsard config: default split: test revision: 5effa1b9b5fa3b0f9e12523e6e43e5f86a6e6d59 metrics: - type: ndcg_at_10 value: 26.115 - task: type: BitextMining dataset: name: MTEB BUCC (de-en) type: mteb/bucc-bitext-mining config: de-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: f1 value: 98.6169102296451 - task: type: BitextMining dataset: name: MTEB BUCC (fr-en) type: mteb/bucc-bitext-mining config: fr-en split: test revision: 
d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: f1 value: 97.89603052314916 - task: type: BitextMining dataset: name: MTEB BUCC (ru-en) type: mteb/bucc-bitext-mining config: ru-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: f1 value: 97.12388869645537 - task: type: BitextMining dataset: name: MTEB BUCC (zh-en) type: mteb/bucc-bitext-mining config: zh-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: f1 value: 98.15692469720906 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 85.36038961038962 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 37.5903826674123 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 34.21474277151329 - task: type: Classification dataset: name: MTEB CBD type: PL-MTEB/cbd config: default split: test revision: None metrics: - type: accuracy value: 62.519999999999996 - task: type: PairClassification dataset: name: MTEB CDSC-E type: PL-MTEB/cdsce-pairclassification config: default split: test revision: None metrics: - type: cos_sim_ap value: 74.90132799162956 - task: type: STS dataset: name: MTEB CDSC-R type: PL-MTEB/cdscr-sts config: default split: test revision: None metrics: - type: cos_sim_spearman value: 90.30727955142524 - task: type: Clustering dataset: name: MTEB CLSClusteringP2P type: C-MTEB/CLSClusteringP2P config: default split: test revision: 4b6227591c6c1a73bc76b1055f3b7f3588e72476 metrics: - type: v_measure value: 37.94850105022274 - task: type: Clustering dataset: name: MTEB 
CLSClusteringS2S type: C-MTEB/CLSClusteringS2S config: default split: test revision: e458b3f5414b62b7f9f83499ac1f5497ae2e869f metrics: - type: v_measure value: 38.11958675421534 - task: type: Reranking dataset: name: MTEB CMedQAv1 type: C-MTEB/CMedQAv1-reranking config: default split: test revision: 8d7f1e942507dac42dc58017c1a001c3717da7df metrics: - type: map value: 86.10950950485399 - task: type: Reranking dataset: name: MTEB CMedQAv2 type: C-MTEB/CMedQAv2-reranking config: default split: test revision: 23d186750531a14a0357ca22cd92d712fd512ea0 metrics: - type: map value: 87.28038294231966 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: mteb/cqadupstack-android config: default split: test revision: f46a197baaae43b4f621051089b82a364682dfeb metrics: - type: ndcg_at_10 value: 47.099000000000004 - task: type: Retrieval dataset: name: MTEB CQADupstackEnglishRetrieval type: mteb/cqadupstack-english config: default split: test revision: ad9991cb51e31e31e430383c75ffb2885547b5f0 metrics: - type: ndcg_at_10 value: 45.973000000000006 - task: type: Retrieval dataset: name: MTEB CQADupstackGamingRetrieval type: mteb/cqadupstack-gaming config: default split: test revision: 4885aa143210c98657558c04aaf3dc47cfb54340 metrics: - type: ndcg_at_10 value: 55.606 - task: type: Retrieval dataset: name: MTEB CQADupstackGisRetrieval type: mteb/cqadupstack-gis config: default split: test revision: 5003b3064772da1887988e05400cf3806fe491f2 metrics: - type: ndcg_at_10 value: 36.638 - task: type: Retrieval dataset: name: MTEB CQADupstackMathematicaRetrieval type: mteb/cqadupstack-mathematica config: default split: test revision: 90fceea13679c63fe563ded68f3b6f06e50061de metrics: - type: ndcg_at_10 value: 30.711 - task: type: Retrieval dataset: name: MTEB CQADupstackPhysicsRetrieval type: mteb/cqadupstack-physics config: default split: test revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4 metrics: - type: ndcg_at_10 value: 44.523 - task: type: Retrieval dataset: 
name: MTEB CQADupstackProgrammersRetrieval type: mteb/cqadupstack-programmers config: default split: test revision: 6184bc1440d2dbc7612be22b50686b8826d22b32 metrics: - type: ndcg_at_10 value: 37.940000000000005 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval type: mteb/cqadupstack config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: ndcg_at_10 value: 38.12183333333333 - task: type: Retrieval dataset: name: MTEB CQADupstackStatsRetrieval type: mteb/cqadupstack-stats config: default split: test revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a metrics: - type: ndcg_at_10 value: 32.684000000000005 - task: type: Retrieval dataset: name: MTEB CQADupstackTexRetrieval type: mteb/cqadupstack-tex config: default split: test revision: 46989137a86843e03a6195de44b09deda022eec7 metrics: - type: ndcg_at_10 value: 26.735 - task: type: Retrieval dataset: name: MTEB CQADupstackUnixRetrieval type: mteb/cqadupstack-unix config: default split: test revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53 metrics: - type: ndcg_at_10 value: 36.933 - task: type: Retrieval dataset: name: MTEB CQADupstackWebmastersRetrieval type: mteb/cqadupstack-webmasters config: default split: test revision: 160c094312a0e1facb97e55eeddb698c0abe3571 metrics: - type: ndcg_at_10 value: 33.747 - task: type: Retrieval dataset: name: MTEB CQADupstackWordpressRetrieval type: mteb/cqadupstack-wordpress config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: ndcg_at_10 value: 28.872999999999998 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: mteb/climate-fever config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: ndcg_at_10 value: 34.833 - task: type: Retrieval dataset: name: MTEB CmedqaRetrieval type: C-MTEB/CmedqaRetrieval config: default split: dev revision: cd540c506dae1cf9e9a59c3e06f42030d54e7301 metrics: - type: ndcg_at_10 value: 43.78 - task: type: 
PairClassification dataset: name: MTEB Cmnli type: C-MTEB/CMNLI config: default split: validation revision: 41bc36f332156f7adc9e38f53777c959b2ae9766 metrics: - type: cos_sim_ap value: 84.00640599186677 - task: type: Retrieval dataset: name: MTEB CovidRetrieval type: C-MTEB/CovidRetrieval config: default split: dev revision: 1271c7809071a13532e05f25fb53511ffce77117 metrics: - type: ndcg_at_10 value: 80.60000000000001 - task: type: Retrieval dataset: name: MTEB DBPedia type: mteb/dbpedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: ndcg_at_10 value: 40.116 - task: type: Retrieval dataset: name: MTEB DBPedia-PL type: clarin-knext/dbpedia-pl config: default split: test revision: 76afe41d9af165cc40999fcaa92312b8b012064a metrics: - type: ndcg_at_10 value: 32.498 - task: type: Retrieval dataset: name: MTEB DuRetrieval type: C-MTEB/DuRetrieval config: default split: dev revision: a1a333e290fe30b10f3f56498e3a0d911a693ced metrics: - type: ndcg_at_10 value: 87.547 - task: type: Retrieval dataset: name: MTEB EcomRetrieval type: C-MTEB/EcomRetrieval config: default split: dev revision: 687de13dc7294d6fd9be10c6945f9e8fec8166b9 metrics: - type: ndcg_at_10 value: 64.85 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 47.949999999999996 - task: type: Retrieval dataset: name: MTEB FEVER type: mteb/fever config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: ndcg_at_10 value: 92.111 - task: type: Retrieval dataset: name: MTEB FiQA-PL type: clarin-knext/fiqa-pl config: default split: test revision: 2e535829717f8bf9dc829b7f911cc5bbd4e6608e metrics: - type: ndcg_at_10 value: 28.962 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: mteb/fiqa config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: 
ndcg_at_10 value: 45.005 - task: type: Clustering dataset: name: MTEB HALClusteringS2S type: lyon-nlp/clustering-hal-s2s config: default split: test revision: e06ebbbb123f8144bef1a5d18796f3dec9ae2915 metrics: - type: v_measure value: 25.133776435657595 - task: type: Retrieval dataset: name: MTEB HotpotQA type: mteb/hotpotqa config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: ndcg_at_10 value: 63.036 - task: type: Retrieval dataset: name: MTEB HotpotQA-PL type: clarin-knext/hotpotqa-pl config: default split: test revision: a0bd479ac97b4ccb5bd6ce320c415d0bb4beb907 metrics: - type: ndcg_at_10 value: 56.904999999999994 - task: type: Classification dataset: name: MTEB IFlyTek type: C-MTEB/IFlyTek-classification config: default split: validation revision: 421605374b29664c5fc098418fe20ada9bd55f8a metrics: - type: accuracy value: 44.59407464409388 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 74.912 - task: type: Classification dataset: name: MTEB JDReview type: C-MTEB/JDReview-classification config: default split: test revision: b7c64bd89eb87f8ded463478346f76731f07bf8b metrics: - type: accuracy value: 79.26829268292683 - task: type: STS dataset: name: MTEB LCQMC type: C-MTEB/LCQMC config: default split: test revision: 17f9b096f80380fce5ed12a9be8be7784b337daf metrics: - type: cos_sim_spearman value: 74.8601229809791 - task: type: Clustering dataset: name: MTEB MLSUMClusteringP2P type: mlsum config: default split: test revision: b5d54f8f3b61ae17845046286940f03c6bc79bc7 metrics: - type: v_measure value: 42.331902754246556 - type: v_measure value: 40.92029335502153 - task: type: Reranking dataset: name: MTEB MMarcoReranking type: C-MTEB/Mmarco-reranking config: default split: dev revision: 8e0c766dbe9e16e1d221116a3f36795fbade07f6 metrics: - type: map value: 32.19266316591337 - task: 
type: Retrieval dataset: name: MTEB MMarcoRetrieval type: C-MTEB/MMarcoRetrieval config: default split: dev revision: 539bbde593d947e2a124ba72651aafc09eb33fc2 metrics: - type: ndcg_at_10 value: 79.346 - task: type: Retrieval dataset: name: MTEB MSMARCO type: mteb/msmarco config: default split: dev revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: ndcg_at_10 value: 39.922999999999995 - task: type: Retrieval dataset: name: MTEB MSMARCO-PL type: clarin-knext/msmarco-pl config: default split: test revision: 8634c07806d5cce3a6138e260e59b81760a0a640 metrics: - type: ndcg_at_10 value: 55.620999999999995 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 92.53989968080255 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (de) type: mteb/mtop_domain config: de split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 88.26993519301212 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (es) type: mteb/mtop_domain config: es split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 90.87725150100067 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (fr) type: mteb/mtop_domain config: fr split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 87.48512370811149 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (hi) type: mteb/mtop_domain config: hi split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 89.45141627823591 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (th) type: mteb/mtop_domain config: th split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 83.45750452079565 - task: type: 
Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 72.57637938896488 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (de) type: mteb/mtop_intent config: de split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 63.50803043110736 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (es) type: mteb/mtop_intent config: es split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 71.6577718478986 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (fr) type: mteb/mtop_intent config: fr split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 64.05887879736925 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (hi) type: mteb/mtop_intent config: hi split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 65.27070634636071 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (th) type: mteb/mtop_intent config: th split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 63.04520795660037 - task: type: Classification dataset: name: MTEB MasakhaNEWSClassification (fra) type: masakhane/masakhanews config: fra split: test revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60 metrics: - type: accuracy value: 80.66350710900474 - task: type: Clustering dataset: name: MTEB MasakhaNEWSClusteringP2P (fra) type: masakhane/masakhanews config: fra split: test revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60 metrics: - type: v_measure value: 44.016506455899425 - type: v_measure value: 40.67730129573544 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (af) type: mteb/amazon_massive_intent config: af split: test 
revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.94552790854068 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (am) type: mteb/amazon_massive_intent config: am split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 49.273705447209146 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ar) type: mteb/amazon_massive_intent config: ar split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 55.490921318090116 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (az) type: mteb/amazon_massive_intent config: az split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.97511768661733 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (bn) type: mteb/amazon_massive_intent config: bn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.5689307330195 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (cy) type: mteb/amazon_massive_intent config: cy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 48.34902488231337 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (da) type: mteb/amazon_massive_intent config: da split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.6684599865501 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (de) type: mteb/amazon_massive_intent config: de split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.54539340954942 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (el) type: mteb/amazon_massive_intent config: el split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy 
value: 63.08675184936112 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.12508406186953 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (es) type: mteb/amazon_massive_intent config: es split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.41425689307331 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fa) type: mteb/amazon_massive_intent config: fa split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.59515803631474 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fi) type: mteb/amazon_massive_intent config: fi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.90517821116342 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fr) type: mteb/amazon_massive_intent config: fr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.91526563550774 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (he) type: mteb/amazon_massive_intent config: he split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 55.198386012104905 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hi) type: mteb/amazon_massive_intent config: hi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.04371217215869 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hu) type: mteb/amazon_massive_intent config: hu split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.31203765971756 - task: type: Classification dataset: name: MTEB 
MassiveIntentClassification (hy) type: mteb/amazon_massive_intent config: hy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 55.521183591123055 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (id) type: mteb/amazon_massive_intent config: id split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.06254203093476 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (is) type: mteb/amazon_massive_intent config: is split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.01546738399461 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (it) type: mteb/amazon_massive_intent config: it split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.27975790181574 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ja) type: mteb/amazon_massive_intent config: ja split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.79556153328849 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (jv) type: mteb/amazon_massive_intent config: jv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 50.18493611297915 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ka) type: mteb/amazon_massive_intent config: ka split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 47.888365837256224 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (km) type: mteb/amazon_massive_intent config: km split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 50.79690652320108 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (kn) type: mteb/amazon_massive_intent config: kn 
split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.225958305312716 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ko) type: mteb/amazon_massive_intent config: ko split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.58641560188299 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (lv) type: mteb/amazon_massive_intent config: lv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.08204438466711 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ml) type: mteb/amazon_massive_intent config: ml split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.54606590450572 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (mn) type: mteb/amazon_massive_intent config: mn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 53.443174176193665 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ms) type: mteb/amazon_massive_intent config: ms split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.65097511768661 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (my) type: mteb/amazon_massive_intent config: my split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 53.45662407531944 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (nb) type: mteb/amazon_massive_intent config: nb split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.739071956960316 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (nl) type: mteb/amazon_massive_intent config: nl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - 
type: accuracy value: 66.36180228648286 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pl) type: mteb/amazon_massive_intent config: pl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.3920645595158 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pt) type: mteb/amazon_massive_intent config: pt split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.06993947545395 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ro) type: mteb/amazon_massive_intent config: ro split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.123739071956955 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ru) type: mteb/amazon_massive_intent config: ru split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.46133154001346 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sl) type: mteb/amazon_massive_intent config: sl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.54472091459314 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sq) type: mteb/amazon_massive_intent config: sq split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.204438466711494 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sv) type: mteb/amazon_massive_intent config: sv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.69603227975792 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sw) type: mteb/amazon_massive_intent config: sw split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 51.684599865501 - task: type: Classification dataset: 
name: MTEB MassiveIntentClassification (ta) type: mteb/amazon_massive_intent config: ta split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.523873570948226 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (te) type: mteb/amazon_massive_intent config: te split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.53396099529253 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (th) type: mteb/amazon_massive_intent config: th split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.88298587760591 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (tl) type: mteb/amazon_massive_intent config: tl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.65097511768662 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (tr) type: mteb/amazon_massive_intent config: tr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.8453261600538 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ur) type: mteb/amazon_massive_intent config: ur split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.6247478143914 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (vi) type: mteb/amazon_massive_intent config: vi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.16274377942166 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-CN) type: mteb/amazon_massive_intent config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.61667787491594 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-TW) type: 
mteb/amazon_massive_intent config: zh-TW split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.17283120376598 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (af) type: mteb/amazon_massive_scenario config: af split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 64.89912575655683 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (am) type: mteb/amazon_massive_scenario config: am split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 57.27975790181573 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ar) type: mteb/amazon_massive_scenario config: ar split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.269670477471415 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (az) type: mteb/amazon_massive_scenario config: az split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.10423671822461 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (bn) type: mteb/amazon_massive_scenario config: bn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.40753194351043 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (cy) type: mteb/amazon_massive_scenario config: cy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 55.369872225958304 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (da) type: mteb/amazon_massive_scenario config: da split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.60726294552792 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (de) type: mteb/amazon_massive_scenario config: de 
split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.30262273032952 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (el) type: mteb/amazon_massive_scenario config: el split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.52925353059851 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 76.28446536650976 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (es) type: mteb/amazon_massive_scenario config: es split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.45460659045058 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fa) type: mteb/amazon_massive_scenario config: fa split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.26563550773368 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fi) type: mteb/amazon_massive_scenario config: fi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.20578345662408 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fr) type: mteb/amazon_massive_scenario config: fr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.64963012777405 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (he) type: mteb/amazon_massive_scenario config: he split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.698049764626774 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hi) type: mteb/amazon_massive_scenario config: hi split: test revision: 
7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.14458641560188 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hu) type: mteb/amazon_massive_scenario config: hu split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.51445864156018 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hy) type: mteb/amazon_massive_scenario config: hy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 60.13786146603901 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (id) type: mteb/amazon_massive_scenario config: id split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.61533288500337 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (is) type: mteb/amazon_massive_scenario config: is split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.526563550773375 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (it) type: mteb/amazon_massive_scenario config: it split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.99731002017484 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ja) type: mteb/amazon_massive_scenario config: ja split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.59381304640216 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (jv) type: mteb/amazon_massive_scenario config: jv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 57.010759919300604 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ka) type: mteb/amazon_massive_scenario config: ka split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 
metrics: - type: accuracy value: 53.26160053799597 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (km) type: mteb/amazon_massive_scenario config: km split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 57.800941492938804 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (kn) type: mteb/amazon_massive_scenario config: kn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.387357094821795 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ko) type: mteb/amazon_massive_scenario config: ko split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.5359784801614 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (lv) type: mteb/amazon_massive_scenario config: lv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.36919973100203 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ml) type: mteb/amazon_massive_scenario config: ml split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 64.81506388702084 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (mn) type: mteb/amazon_massive_scenario config: mn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.35104236718225 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ms) type: mteb/amazon_massive_scenario config: ms split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.67787491593813 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (my) type: mteb/amazon_massive_scenario config: my split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 
59.4250168123739 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (nb) type: mteb/amazon_massive_scenario config: nb split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.49630127774043 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (nl) type: mteb/amazon_massive_scenario config: nl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.95696032279758 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (pl) type: mteb/amazon_massive_scenario config: pl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.11768661735036 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (pt) type: mteb/amazon_massive_scenario config: pt split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.86953597848016 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ro) type: mteb/amazon_massive_scenario config: ro split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.51042367182247 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ru) type: mteb/amazon_massive_scenario config: ru split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.65097511768661 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sl) type: mteb/amazon_massive_scenario config: sl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.81573638197713 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sq) type: mteb/amazon_massive_scenario config: sq split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.26227303295225 - task: type: Classification 
dataset: name: MTEB MassiveScenarioClassification (sv) type: mteb/amazon_massive_scenario config: sv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.51513113651646 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sw) type: mteb/amazon_massive_scenario config: sw split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 58.29858776059179 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ta) type: mteb/amazon_massive_scenario config: ta split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.72696704774714 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (te) type: mteb/amazon_massive_scenario config: te split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.57700067249496 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (th) type: mteb/amazon_massive_scenario config: th split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.22797579018157 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (tl) type: mteb/amazon_massive_scenario config: tl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.97041022192333 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (tr) type: mteb/amazon_massive_scenario config: tr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.72629455279085 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ur) type: mteb/amazon_massive_scenario config: ur split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.16072629455278 - task: type: Classification dataset: name: MTEB 
MassiveScenarioClassification (vi) type: mteb/amazon_massive_scenario config: vi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.92199058507062 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-CN) type: mteb/amazon_massive_scenario config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.40484196368527 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-TW) type: mteb/amazon_massive_scenario config: zh-TW split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.61398789509079 - task: type: Retrieval dataset: name: MTEB MedicalRetrieval type: C-MTEB/MedicalRetrieval config: default split: dev revision: 2039188fb5800a9803ba5048df7b76e6fb151fc6 metrics: - type: ndcg_at_10 value: 61.934999999999995 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 33.052031054565205 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 31.969909524076794 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 31.7530992892652 - task: type: Retrieval dataset: name: MTEB MintakaRetrieval (fr) type: jinaai/mintakaqa config: fr split: test revision: efa78cc2f74bbcd21eff2261f9e13aebe40b814e metrics: - type: ndcg_at_10 value: 34.705999999999996 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (ar) type: Shitao/MLDR config: ar split: test revision: None metrics: - type: ndcg_at_10 value: 55.166000000000004 - task: 
type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (de) type: Shitao/MLDR config: de split: test revision: None metrics: - type: ndcg_at_10 value: 55.155 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (en) type: Shitao/MLDR config: en split: test revision: None metrics: - type: ndcg_at_10 value: 50.993 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (es) type: Shitao/MLDR config: es split: test revision: None metrics: - type: ndcg_at_10 value: 81.228 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (fr) type: Shitao/MLDR config: fr split: test revision: None metrics: - type: ndcg_at_10 value: 76.19 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (hi) type: Shitao/MLDR config: hi split: test revision: None metrics: - type: ndcg_at_10 value: 45.206 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (it) type: Shitao/MLDR config: it split: test revision: None metrics: - type: ndcg_at_10 value: 66.741 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (ja) type: Shitao/MLDR config: ja split: test revision: None metrics: - type: ndcg_at_10 value: 52.111 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (ko) type: Shitao/MLDR config: ko split: test revision: None metrics: - type: ndcg_at_10 value: 46.733000000000004 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (pt) type: Shitao/MLDR config: pt split: test revision: None metrics: - type: ndcg_at_10 value: 79.105 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (ru) type: Shitao/MLDR config: ru split: test revision: None metrics: - type: ndcg_at_10 value: 64.21 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (th) type: Shitao/MLDR config: th split: test revision: None metrics: - type: ndcg_at_10 value: 35.467 - task: type: Retrieval dataset: name: MTEB MultiLongDocRetrieval (zh) type: Shitao/MLDR config: zh split: test revision: None metrics: - 
type: ndcg_at_10 value: 27.419 - task: type: Classification dataset: name: MTEB MultilingualSentiment type: C-MTEB/MultilingualSentiment-classification config: default split: validation revision: 46958b007a63fdbf239b7672c25d0bea67b5ea1a metrics: - type: accuracy value: 61.02000000000001 - task: type: Retrieval dataset: name: MTEB NFCorpus type: mteb/nfcorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: ndcg_at_10 value: 36.65 - task: type: Retrieval dataset: name: MTEB NFCorpus-PL type: clarin-knext/nfcorpus-pl config: default split: test revision: 9a6f9567fda928260afed2de480d79c98bf0bec0 metrics: - type: ndcg_at_10 value: 26.831 - task: type: Retrieval dataset: name: MTEB NQ type: mteb/nq config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: ndcg_at_10 value: 58.111000000000004 - task: type: Retrieval dataset: name: MTEB NQ-PL type: clarin-knext/nq-pl config: default split: test revision: f171245712cf85dd4700b06bef18001578d0ca8d metrics: - type: ndcg_at_10 value: 43.126999999999995 - task: type: PairClassification dataset: name: MTEB Ocnli type: C-MTEB/OCNLI config: default split: validation revision: 66e76a618a34d6d565d5538088562851e6daa7ec metrics: - type: cos_sim_ap value: 72.67630697316041 - task: type: Classification dataset: name: MTEB OnlineShopping type: C-MTEB/OnlineShopping-classification config: default split: test revision: e610f2ebd179a8fda30ae534c3878750a96db120 metrics: - type: accuracy value: 84.85000000000001 - task: type: PairClassification dataset: name: MTEB OpusparcusPC (fr) type: GEM/opusparcus config: fr split: test revision: 9e9b1f8ef51616073f47f306f7f47dd91663f86a metrics: - type: cos_sim_ap value: 100 - task: type: Classification dataset: name: MTEB PAC type: laugustyniak/abusive-clauses-pl config: default split: test revision: None metrics: - type: accuracy value: 65.99189110918043 - task: type: STS dataset: name: MTEB PAWSX type: C-MTEB/PAWSX 
config: default split: test revision: 9c6a90e430ac22b5779fb019a23e820b11a8b5e1 metrics: - type: cos_sim_spearman value: 16.124364530596228 - task: type: PairClassification dataset: name: MTEB PPC type: PL-MTEB/ppc-pairclassification config: default split: test revision: None metrics: - type: cos_sim_ap value: 92.43431057460192 - task: type: PairClassification dataset: name: MTEB PSC type: PL-MTEB/psc-pairclassification config: default split: test revision: None metrics: - type: cos_sim_ap value: 99.06090138049724 - task: type: PairClassification dataset: name: MTEB PawsX (fr) type: paws-x config: fr split: test revision: 8a04d940a42cd40658986fdd8e3da561533a3646 metrics: - type: cos_sim_ap value: 58.9314954874314 - task: type: Classification dataset: name: MTEB PolEmo2.0-IN type: PL-MTEB/polemo2_in config: default split: test revision: None metrics: - type: accuracy value: 69.59833795013851 - task: type: Classification dataset: name: MTEB PolEmo2.0-OUT type: PL-MTEB/polemo2_out config: default split: test revision: None metrics: - type: accuracy value: 44.73684210526315 - task: type: STS dataset: name: MTEB QBQTC type: C-MTEB/QBQTC config: default split: test revision: 790b0510dc52b1553e8c49f3d2afb48c0e5c48b7 metrics: - type: cos_sim_spearman value: 39.36450754137984 - task: type: Retrieval dataset: name: MTEB Quora-PL type: clarin-knext/quora-pl config: default split: test revision: 0be27e93455051e531182b85e85e425aba12e9d4 metrics: - type: ndcg_at_10 value: 80.76299999999999 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: mteb/quora config: default split: test revision: None metrics: - type: ndcg_at_10 value: 88.022 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 55.719165988934385 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default 
split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 62.25390069273025 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: mteb/scidocs config: default split: test revision: None metrics: - type: ndcg_at_10 value: 18.243000000000002 - task: type: Retrieval dataset: name: MTEB SCIDOCS-PL type: clarin-knext/scidocs-pl config: default split: test revision: 45452b03f05560207ef19149545f168e596c9337 metrics: - type: ndcg_at_10 value: 14.219000000000001 - task: type: PairClassification dataset: name: MTEB SICK-E-PL type: PL-MTEB/sicke-pl-pairclassification config: default split: test revision: None metrics: - type: cos_sim_ap value: 75.4022630307816 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_spearman value: 79.34269390198548 - task: type: STS dataset: name: MTEB SICK-R-PL type: PL-MTEB/sickr-pl-sts config: default split: test revision: None metrics: - type: cos_sim_spearman value: 74.0651660446132 - task: type: STS dataset: name: MTEB SICKFr type: Lajavaness/SICK-fr config: default split: test revision: e077ab4cf4774a1e36d86d593b150422fafd8e8a metrics: - type: cos_sim_spearman value: 78.62693119733123 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_spearman value: 77.50660544631359 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_spearman value: 85.55415077723738 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_spearman value: 81.67550814479077 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test 
revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_spearman value: 88.94601412322764 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_spearman value: 84.33844259337481 - task: type: STS dataset: name: MTEB STS17 (ko-ko) type: mteb/sts17-crosslingual-sts config: ko-ko split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 81.58650681159105 - task: type: STS dataset: name: MTEB STS17 (ar-ar) type: mteb/sts17-crosslingual-sts config: ar-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 78.82472265884256 - task: type: STS dataset: name: MTEB STS17 (en-ar) type: mteb/sts17-crosslingual-sts config: en-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 76.43637938260397 - task: type: STS dataset: name: MTEB STS17 (en-de) type: mteb/sts17-crosslingual-sts config: en-de split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 84.71008299464059 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 88.88074713413747 - task: type: STS dataset: name: MTEB STS17 (en-tr) type: mteb/sts17-crosslingual-sts config: en-tr split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 76.36405640457285 - task: type: STS dataset: name: MTEB STS17 (es-en) type: mteb/sts17-crosslingual-sts config: es-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 83.84737910084762 - task: type: STS dataset: name: MTEB STS17 (es-es) type: mteb/sts17-crosslingual-sts config: es-es split: test revision: 
af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 87.03931621433031 - task: type: STS dataset: name: MTEB STS17 (fr-en) type: mteb/sts17-crosslingual-sts config: fr-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 84.43335591752246 - task: type: STS dataset: name: MTEB STS17 (it-en) type: mteb/sts17-crosslingual-sts config: it-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 83.85268648747021 - task: type: STS dataset: name: MTEB STS17 (nl-en) type: mteb/sts17-crosslingual-sts config: nl-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 82.45786516224341 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 67.20227303970304 - task: type: STS dataset: name: MTEB STS22 (de) type: mteb/sts22-crosslingual-sts config: de split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 60.892838305537126 - task: type: STS dataset: name: MTEB STS22 (es) type: mteb/sts22-crosslingual-sts config: es split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 72.01876318464508 - task: type: STS dataset: name: MTEB STS22 (pl) type: mteb/sts22-crosslingual-sts config: pl split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 42.3879320510127 - task: type: STS dataset: name: MTEB STS22 (tr) type: mteb/sts22-crosslingual-sts config: tr split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 65.54048784845729 - task: type: STS dataset: name: MTEB STS22 (ar) type: mteb/sts22-crosslingual-sts config: ar split: test revision: 
eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 58.55244068334867 - task: type: STS dataset: name: MTEB STS22 (ru) type: mteb/sts22-crosslingual-sts config: ru split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 66.48710288440624 - task: type: STS dataset: name: MTEB STS22 (zh) type: mteb/sts22-crosslingual-sts config: zh split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 66.585754901838 - task: type: STS dataset: name: MTEB STS22 (fr) type: mteb/sts22-crosslingual-sts config: fr split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 81.03001290557805 - task: type: STS dataset: name: MTEB STS22 (de-en) type: mteb/sts22-crosslingual-sts config: de-en split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 62.28001859884359 - task: type: STS dataset: name: MTEB STS22 (es-en) type: mteb/sts22-crosslingual-sts config: es-en split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 79.64106342105019 - task: type: STS dataset: name: MTEB STS22 (it) type: mteb/sts22-crosslingual-sts config: it split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 78.27915339361124 - task: type: STS dataset: name: MTEB STS22 (pl-en) type: mteb/sts22-crosslingual-sts config: pl-en split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 78.28574268257462 - task: type: STS dataset: name: MTEB STS22 (zh-en) type: mteb/sts22-crosslingual-sts config: zh-en split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 72.92658860751482 - task: type: STS dataset: name: MTEB STS22 (es-it) type: mteb/sts22-crosslingual-sts config: es-it split: test revision: 
eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 74.83418886368217 - task: type: STS dataset: name: MTEB STS22 (de-fr) type: mteb/sts22-crosslingual-sts config: de-fr split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 56.01064022625769 - task: type: STS dataset: name: MTEB STS22 (de-pl) type: mteb/sts22-crosslingual-sts config: de-pl split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 53.64332829635126 - task: type: STS dataset: name: MTEB STS22 (fr-pl) type: mteb/sts22-crosslingual-sts config: fr-pl split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_spearman value: 73.24670207647144 - task: type: STS dataset: name: MTEB STSB type: C-MTEB/STSB config: default split: test revision: 0cde68302b3541bb8b3c340dc0644b0b745b3dc0 metrics: - type: cos_sim_spearman value: 80.7157790971544 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_spearman value: 86.45763616928973 - task: type: STS dataset: name: MTEB STSBenchmarkMultilingualSTS (fr) type: stsb_multi_mt config: fr split: test revision: 93d57ef91790589e3ce9c365164337a8a78b7632 metrics: - type: cos_sim_spearman value: 84.4335500335282 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 84.15276484499303 - task: type: Retrieval dataset: name: MTEB SciFact type: mteb/scifact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: ndcg_at_10 value: 73.433 - task: type: Retrieval dataset: name: MTEB SciFact-PL type: clarin-knext/scifact-pl config: default split: test revision: 47932a35f045ef8ed01ba82bf9ff67f6e109207e metrics: - type: ndcg_at_10 
value: 58.919999999999995 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_ap value: 95.40564890916419 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 63.41856697730145 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 31.709285904909112 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 52.09341030060322 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_spearman value: 30.58262517835034 - task: type: Summarization dataset: name: MTEB SummEvalFr type: lyon-nlp/summarization-summeval-fr-p2p config: default split: test revision: b385812de6a9577b6f4d0f88c6a6e35395a94054 metrics: - type: cos_sim_spearman value: 29.744542072951358 - task: type: Reranking dataset: name: MTEB SyntecReranking type: lyon-nlp/mteb-fr-reranking-syntec-s2p config: default split: test revision: b205c5084a0934ce8af14338bf03feb19499c84d metrics: - type: map value: 88.03333333333333 - task: type: Retrieval dataset: name: MTEB SyntecRetrieval type: lyon-nlp/mteb-fr-retrieval-syntec-s2p config: default split: test revision: 77f7e271bf4a92b24fce5119f3486b583ca016ff metrics: - type: ndcg_at_10 value: 83.043 - task: type: Reranking dataset: name: MTEB T2Reranking type: 
C-MTEB/T2Reranking config: default split: dev revision: 76631901a18387f85eaa53e5450019b87ad58ef9 metrics: - type: map value: 67.08577894804324 - task: type: Retrieval dataset: name: MTEB T2Retrieval type: C-MTEB/T2Retrieval config: default split: dev revision: 8731a845f1bf500a4f111cf1070785c793d10e64 metrics: - type: ndcg_at_10 value: 84.718 - task: type: Classification dataset: name: MTEB TNews type: C-MTEB/TNews-classification config: default split: validation revision: 317f262bf1e6126357bbe89e875451e4b0938fe4 metrics: - type: accuracy value: 48.726 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: mteb/trec-covid config: default split: test revision: None metrics: - type: ndcg_at_10 value: 57.56 - task: type: Retrieval dataset: name: MTEB TRECCOVID-PL type: clarin-knext/trec-covid-pl config: default split: test revision: 81bcb408f33366c2a20ac54adafad1ae7e877fdd metrics: - type: ndcg_at_10 value: 59.355999999999995 - task: type: BitextMining dataset: name: MTEB Tatoeba (sqi-eng) type: mteb/tatoeba-bitext-mining config: sqi-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 82.765 - task: type: BitextMining dataset: name: MTEB Tatoeba (fry-eng) type: mteb/tatoeba-bitext-mining config: fry-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 73.69942196531792 - task: type: BitextMining dataset: name: MTEB Tatoeba (kur-eng) type: mteb/tatoeba-bitext-mining config: kur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 32.86585365853657 - task: type: BitextMining dataset: name: MTEB Tatoeba (tur-eng) type: mteb/tatoeba-bitext-mining config: tur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 95.81666666666666 - task: type: BitextMining dataset: name: MTEB Tatoeba (deu-eng) type: mteb/tatoeba-bitext-mining config: deu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 
metrics: - type: f1 value: 97.75 - task: type: BitextMining dataset: name: MTEB Tatoeba (nld-eng) type: mteb/tatoeba-bitext-mining config: nld-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 93.78333333333335 - task: type: BitextMining dataset: name: MTEB Tatoeba (ron-eng) type: mteb/tatoeba-bitext-mining config: ron-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 90.72333333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (ang-eng) type: mteb/tatoeba-bitext-mining config: ang-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 42.45202558635395 - task: type: BitextMining dataset: name: MTEB Tatoeba (ido-eng) type: mteb/tatoeba-bitext-mining config: ido-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 77.59238095238095 - task: type: BitextMining dataset: name: MTEB Tatoeba (jav-eng) type: mteb/tatoeba-bitext-mining config: jav-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 35.69686411149825 - task: type: BitextMining dataset: name: MTEB Tatoeba (isl-eng) type: mteb/tatoeba-bitext-mining config: isl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 82.59333333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (slv-eng) type: mteb/tatoeba-bitext-mining config: slv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 84.1456922987907 - task: type: BitextMining dataset: name: MTEB Tatoeba (cym-eng) type: mteb/tatoeba-bitext-mining config: cym-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 52.47462133594857 - task: type: BitextMining dataset: name: MTEB Tatoeba (kaz-eng) type: mteb/tatoeba-bitext-mining config: kaz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - 
type: f1 value: 67.62965440356746 - task: type: BitextMining dataset: name: MTEB Tatoeba (est-eng) type: mteb/tatoeba-bitext-mining config: est-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 79.48412698412699 - task: type: BitextMining dataset: name: MTEB Tatoeba (heb-eng) type: mteb/tatoeba-bitext-mining config: heb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 75.85 - task: type: BitextMining dataset: name: MTEB Tatoeba (gla-eng) type: mteb/tatoeba-bitext-mining config: gla-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 27.32600866497127 - task: type: BitextMining dataset: name: MTEB Tatoeba (mar-eng) type: mteb/tatoeba-bitext-mining config: mar-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 84.38 - task: type: BitextMining dataset: name: MTEB Tatoeba (lat-eng) type: mteb/tatoeba-bitext-mining config: lat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 42.98888712165028 - task: type: BitextMining dataset: name: MTEB Tatoeba (bel-eng) type: mteb/tatoeba-bitext-mining config: bel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 85.55690476190476 - task: type: BitextMining dataset: name: MTEB Tatoeba (pms-eng) type: mteb/tatoeba-bitext-mining config: pms-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 46.68466031323174 - task: type: BitextMining dataset: name: MTEB Tatoeba (gle-eng) type: mteb/tatoeba-bitext-mining config: gle-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 32.73071428571428 - task: type: BitextMining dataset: name: MTEB Tatoeba (pes-eng) type: mteb/tatoeba-bitext-mining config: pes-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 
88.26333333333334 - task: type: BitextMining dataset: name: MTEB Tatoeba (nob-eng) type: mteb/tatoeba-bitext-mining config: nob-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 96.61666666666666 - task: type: BitextMining dataset: name: MTEB Tatoeba (bul-eng) type: mteb/tatoeba-bitext-mining config: bul-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 91.30666666666666 - task: type: BitextMining dataset: name: MTEB Tatoeba (cbk-eng) type: mteb/tatoeba-bitext-mining config: cbk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 70.03714285714285 - task: type: BitextMining dataset: name: MTEB Tatoeba (hun-eng) type: mteb/tatoeba-bitext-mining config: hun-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 89.09 - task: type: BitextMining dataset: name: MTEB Tatoeba (uig-eng) type: mteb/tatoeba-bitext-mining config: uig-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 59.570476190476185 - task: type: BitextMining dataset: name: MTEB Tatoeba (rus-eng) type: mteb/tatoeba-bitext-mining config: rus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 92.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (spa-eng) type: mteb/tatoeba-bitext-mining config: spa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 97.68333333333334 - task: type: BitextMining dataset: name: MTEB Tatoeba (hye-eng) type: mteb/tatoeba-bitext-mining config: hye-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 80.40880503144653 - task: type: BitextMining dataset: name: MTEB Tatoeba (tel-eng) type: mteb/tatoeba-bitext-mining config: tel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 89.7008547008547 - task: 
type: BitextMining dataset: name: MTEB Tatoeba (afr-eng) type: mteb/tatoeba-bitext-mining config: afr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 81.84833333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (mon-eng) type: mteb/tatoeba-bitext-mining config: mon-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 71.69696969696969 - task: type: BitextMining dataset: name: MTEB Tatoeba (arz-eng) type: mteb/tatoeba-bitext-mining config: arz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 55.76985790822269 - task: type: BitextMining dataset: name: MTEB Tatoeba (hrv-eng) type: mteb/tatoeba-bitext-mining config: hrv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 91.66666666666666 - task: type: BitextMining dataset: name: MTEB Tatoeba (nov-eng) type: mteb/tatoeba-bitext-mining config: nov-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 68.36668519547896 - task: type: BitextMining dataset: name: MTEB Tatoeba (gsw-eng) type: mteb/tatoeba-bitext-mining config: gsw-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 36.73992673992674 - task: type: BitextMining dataset: name: MTEB Tatoeba (nds-eng) type: mteb/tatoeba-bitext-mining config: nds-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 63.420952380952365 - task: type: BitextMining dataset: name: MTEB Tatoeba (ukr-eng) type: mteb/tatoeba-bitext-mining config: ukr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 91.28999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (uzb-eng) type: mteb/tatoeba-bitext-mining config: uzb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 40.95392490046146 - task: 
type: BitextMining dataset: name: MTEB Tatoeba (lit-eng) type: mteb/tatoeba-bitext-mining config: lit-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 77.58936507936508 - task: type: BitextMining dataset: name: MTEB Tatoeba (ina-eng) type: mteb/tatoeba-bitext-mining config: ina-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 91.28999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (lfn-eng) type: mteb/tatoeba-bitext-mining config: lfn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 63.563650793650794 - task: type: BitextMining dataset: name: MTEB Tatoeba (zsm-eng) type: mteb/tatoeba-bitext-mining config: zsm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 94.35 - task: type: BitextMining dataset: name: MTEB Tatoeba (ita-eng) type: mteb/tatoeba-bitext-mining config: ita-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 91.43 - task: type: BitextMining dataset: name: MTEB Tatoeba (cmn-eng) type: mteb/tatoeba-bitext-mining config: cmn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 95.73333333333332 - task: type: BitextMining dataset: name: MTEB Tatoeba (lvs-eng) type: mteb/tatoeba-bitext-mining config: lvs-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 79.38666666666667 - task: type: BitextMining dataset: name: MTEB Tatoeba (glg-eng) type: mteb/tatoeba-bitext-mining config: glg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 89.64 - task: type: BitextMining dataset: name: MTEB Tatoeba (ceb-eng) type: mteb/tatoeba-bitext-mining config: ceb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 21.257184628237262 - task: type: BitextMining dataset: name: 
MTEB Tatoeba (bre-eng) type: mteb/tatoeba-bitext-mining config: bre-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 13.592316017316017 - task: type: BitextMining dataset: name: MTEB Tatoeba (ben-eng) type: mteb/tatoeba-bitext-mining config: ben-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 73.22666666666666 - task: type: BitextMining dataset: name: MTEB Tatoeba (swg-eng) type: mteb/tatoeba-bitext-mining config: swg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 51.711309523809526 - task: type: BitextMining dataset: name: MTEB Tatoeba (arq-eng) type: mteb/tatoeba-bitext-mining config: arq-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 24.98790634904795 - task: type: BitextMining dataset: name: MTEB Tatoeba (kab-eng) type: mteb/tatoeba-bitext-mining config: kab-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 17.19218192918193 - task: type: BitextMining dataset: name: MTEB Tatoeba (fra-eng) type: mteb/tatoeba-bitext-mining config: fra-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 93.26666666666667 - task: type: BitextMining dataset: name: MTEB Tatoeba (por-eng) type: mteb/tatoeba-bitext-mining config: por-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 94.57333333333334 - task: type: BitextMining dataset: name: MTEB Tatoeba (tat-eng) type: mteb/tatoeba-bitext-mining config: tat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 42.35127206127206 - task: type: BitextMining dataset: name: MTEB Tatoeba (oci-eng) type: mteb/tatoeba-bitext-mining config: oci-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 51.12318903318903 - task: type: BitextMining dataset: name: 
MTEB Tatoeba (pol-eng) type: mteb/tatoeba-bitext-mining config: pol-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 94.89999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (war-eng) type: mteb/tatoeba-bitext-mining config: war-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 23.856320290390055 - task: type: BitextMining dataset: name: MTEB Tatoeba (aze-eng) type: mteb/tatoeba-bitext-mining config: aze-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 79.52833333333334 - task: type: BitextMining dataset: name: MTEB Tatoeba (vie-eng) type: mteb/tatoeba-bitext-mining config: vie-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 95.93333333333334 - task: type: BitextMining dataset: name: MTEB Tatoeba (nno-eng) type: mteb/tatoeba-bitext-mining config: nno-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 90.75333333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (cha-eng) type: mteb/tatoeba-bitext-mining config: cha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 30.802919708029197 - task: type: BitextMining dataset: name: MTEB Tatoeba (mhr-eng) type: mteb/tatoeba-bitext-mining config: mhr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 15.984076294076294 - task: type: BitextMining dataset: name: MTEB Tatoeba (dan-eng) type: mteb/tatoeba-bitext-mining config: dan-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 91.82666666666667 - task: type: BitextMining dataset: name: MTEB Tatoeba (ell-eng) type: mteb/tatoeba-bitext-mining config: ell-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 91.9 - task: type: BitextMining dataset: name: MTEB 
Tatoeba (amh-eng) type: mteb/tatoeba-bitext-mining config: amh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 76.36054421768706 - task: type: BitextMining dataset: name: MTEB Tatoeba (pam-eng) type: mteb/tatoeba-bitext-mining config: pam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 9.232711399711398 - task: type: BitextMining dataset: name: MTEB Tatoeba (hsb-eng) type: mteb/tatoeba-bitext-mining config: hsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 45.640803181175855 - task: type: BitextMining dataset: name: MTEB Tatoeba (srp-eng) type: mteb/tatoeba-bitext-mining config: srp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 86.29 - task: type: BitextMining dataset: name: MTEB Tatoeba (epo-eng) type: mteb/tatoeba-bitext-mining config: epo-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 88.90833333333332 - task: type: BitextMining dataset: name: MTEB Tatoeba (kzj-eng) type: mteb/tatoeba-bitext-mining config: kzj-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 11.11880248978075 - task: type: BitextMining dataset: name: MTEB Tatoeba (awa-eng) type: mteb/tatoeba-bitext-mining config: awa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 48.45839345839346 - task: type: BitextMining dataset: name: MTEB Tatoeba (fao-eng) type: mteb/tatoeba-bitext-mining config: fao-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 65.68157033805888 - task: type: BitextMining dataset: name: MTEB Tatoeba (mal-eng) type: mteb/tatoeba-bitext-mining config: mal-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 94.63852498786997 - task: type: BitextMining dataset: name: MTEB Tatoeba 
(ile-eng) type: mteb/tatoeba-bitext-mining config: ile-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 81.67904761904761 - task: type: BitextMining dataset: name: MTEB Tatoeba (bos-eng) type: mteb/tatoeba-bitext-mining config: bos-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 89.35969868173258 - task: type: BitextMining dataset: name: MTEB Tatoeba (cor-eng) type: mteb/tatoeba-bitext-mining config: cor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 5.957229437229437 - task: type: BitextMining dataset: name: MTEB Tatoeba (cat-eng) type: mteb/tatoeba-bitext-mining config: cat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 91.50333333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (eus-eng) type: mteb/tatoeba-bitext-mining config: eus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 63.75498778998778 - task: type: BitextMining dataset: name: MTEB Tatoeba (yue-eng) type: mteb/tatoeba-bitext-mining config: yue-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 82.99190476190476 - task: type: BitextMining dataset: name: MTEB Tatoeba (swe-eng) type: mteb/tatoeba-bitext-mining config: swe-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 92.95 - task: type: BitextMining dataset: name: MTEB Tatoeba (dtp-eng) type: mteb/tatoeba-bitext-mining config: dtp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 9.054042624042623 - task: type: BitextMining dataset: name: MTEB Tatoeba (kat-eng) type: mteb/tatoeba-bitext-mining config: kat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 72.77064981488574 - task: type: BitextMining dataset: name: MTEB Tatoeba (jpn-eng) 
type: mteb/tatoeba-bitext-mining config: jpn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 93.14 - task: type: BitextMining dataset: name: MTEB Tatoeba (csb-eng) type: mteb/tatoeba-bitext-mining config: csb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 29.976786498525627 - task: type: BitextMining dataset: name: MTEB Tatoeba (xho-eng) type: mteb/tatoeba-bitext-mining config: xho-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 67.6525821596244 - task: type: BitextMining dataset: name: MTEB Tatoeba (orv-eng) type: mteb/tatoeba-bitext-mining config: orv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 33.12964812964813 - task: type: BitextMining dataset: name: MTEB Tatoeba (ind-eng) type: mteb/tatoeba-bitext-mining config: ind-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 92.30666666666666 - task: type: BitextMining dataset: name: MTEB Tatoeba (tuk-eng) type: mteb/tatoeba-bitext-mining config: tuk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 34.36077879427633 - task: type: BitextMining dataset: name: MTEB Tatoeba (max-eng) type: mteb/tatoeba-bitext-mining config: max-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 52.571845212690285 - task: type: BitextMining dataset: name: MTEB Tatoeba (swh-eng) type: mteb/tatoeba-bitext-mining config: swh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 58.13107263107262 - task: type: BitextMining dataset: name: MTEB Tatoeba (hin-eng) type: mteb/tatoeba-bitext-mining config: hin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 93.33333333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (dsb-eng) type: 
mteb/tatoeba-bitext-mining config: dsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 42.87370133925458 - task: type: BitextMining dataset: name: MTEB Tatoeba (ber-eng) type: mteb/tatoeba-bitext-mining config: ber-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 20.394327616827614 - task: type: BitextMining dataset: name: MTEB Tatoeba (tam-eng) type: mteb/tatoeba-bitext-mining config: tam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 84.29967426710098 - task: type: BitextMining dataset: name: MTEB Tatoeba (slk-eng) type: mteb/tatoeba-bitext-mining config: slk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 88.80666666666667 - task: type: BitextMining dataset: name: MTEB Tatoeba (tgl-eng) type: mteb/tatoeba-bitext-mining config: tgl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 67.23062271062273 - task: type: BitextMining dataset: name: MTEB Tatoeba (ast-eng) type: mteb/tatoeba-bitext-mining config: ast-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 78.08398950131233 - task: type: BitextMining dataset: name: MTEB Tatoeba (mkd-eng) type: mteb/tatoeba-bitext-mining config: mkd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 77.85166666666666 - task: type: BitextMining dataset: name: MTEB Tatoeba (khm-eng) type: mteb/tatoeba-bitext-mining config: khm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 67.63004001231148 - task: type: BitextMining dataset: name: MTEB Tatoeba (ces-eng) type: mteb/tatoeba-bitext-mining config: ces-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 89.77000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (tzl-eng) type: 
mteb/tatoeba-bitext-mining config: tzl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 40.2654503616042 - task: type: BitextMining dataset: name: MTEB Tatoeba (urd-eng) type: mteb/tatoeba-bitext-mining config: urd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 83.90333333333334 - task: type: BitextMining dataset: name: MTEB Tatoeba (ara-eng) type: mteb/tatoeba-bitext-mining config: ara-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 77.80666666666666 - task: type: BitextMining dataset: name: MTEB Tatoeba (kor-eng) type: mteb/tatoeba-bitext-mining config: kor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 84.08 - task: type: BitextMining dataset: name: MTEB Tatoeba (yid-eng) type: mteb/tatoeba-bitext-mining config: yid-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 60.43098607367475 - task: type: BitextMining dataset: name: MTEB Tatoeba (fin-eng) type: mteb/tatoeba-bitext-mining config: fin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 88.19333333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (tha-eng) type: mteb/tatoeba-bitext-mining config: tha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 90.55352798053529 - task: type: BitextMining dataset: name: MTEB Tatoeba (wuu-eng) type: mteb/tatoeba-bitext-mining config: wuu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: f1 value: 88.44999999999999 - task: type: Clustering dataset: name: MTEB ThuNewsClusteringP2P type: C-MTEB/ThuNewsClusteringP2P config: default split: test revision: 5798586b105c0434e4f0fe5e767abe619442cf93 metrics: - type: v_measure value: 57.25416429643288 - task: type: Clustering dataset: name: MTEB ThuNewsClusteringS2S type: 
C-MTEB/ThuNewsClusteringS2S config: default split: test revision: 8a8b2caeda43f39e13c4bc5bea0f8a667896e10d metrics: - type: v_measure value: 56.616646560243524 - task: type: Retrieval dataset: name: MTEB Touche2020 type: mteb/touche2020 config: default split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: ndcg_at_10 value: 22.819 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 71.02579999999999 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 57.60045274476514 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 50.346666699466205 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_ap value: 71.88199004440489 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_ap value: 85.41587779677383 - task: type: Retrieval dataset: name: MTEB VideoRetrieval type: C-MTEB/VideoRetrieval config: default split: dev revision: 58c2597a5943a2ba48f4668c3b90d796283c5639 metrics: - type: ndcg_at_10 value: 72.792 - task: type: Classification dataset: name: MTEB Waimai type: C-MTEB/waimai-classification config: default split: test revision: 339287def212450dcaa9df8c22bf93e9980c7023 metrics: - type: 
accuracy value: 82.58000000000001 - task: type: Retrieval dataset: name: MTEB XPQARetrieval (fr) type: jinaai/xpqa config: fr split: test revision: c99d599f0a6ab9b85b065da6f9d94f9cf731679f metrics: - type: ndcg_at_10 value: 67.327 --- ***See Disclaimer below*** ---- # A Teradata Vantage compatible Embeddings Model # Alibaba-NLP/gte-multilingual-base ## Overview of this Model An Embedding Model which maps text (sentence/ paragraphs) into a vector. The [Alibaba-NLP/gte-multilingual-base](https://huggingface.co/Alibaba-NLP/gte-multilingual-base) model well known for its effectiveness in capturing semantic meanings in text data. It's a state-of-the-art model trained on a large corpus, capable of generating high-quality text embeddings. - 305.37M params (Sizes in ONNX format - "fp32": 1197.36MB, "int8": 324.17MB, "uint8": 324.17MB) - 8192 maximum input tokens - 768 dimensions of output vector - Licence: apache-2.0. The released models can be used for commercial purposes free of charge. - Reference to Original Model: https://huggingface.co/Alibaba-NLP/gte-multilingual-base ## Quickstart: Deploying this Model in Teradata Vantage We have pre-converted the model into the ONNX format compatible with BYOM 6.0, eliminating the need for manual conversion. **Note:** Ensure you have access to a Teradata Database with BYOM 6.0 installed. To get started, clone the pre-converted model directly from the Teradata HuggingFace repository. 
```python import teradataml as tdml import getpass from huggingface_hub import hf_hub_download model_name = "gte-multilingual-base" number_dimensions_output = 768 model_file_name = "model.onnx" # Step 1: Download Model from Teradata HuggingFace Page hf_hub_download(repo_id=f"Teradata/{model_name}", filename=f"onnx/{model_file_name}", local_dir="./") hf_hub_download(repo_id=f"Teradata/{model_name}", filename=f"tokenizer.json", local_dir="./") # Step 2: Create Connection to Vantage tdml.create_context(host = input('enter your hostname'), username=input('enter your username'), password = getpass.getpass("enter your password")) # Step 3: Load Models into Vantage # a) Embedding model tdml.save_byom(model_id = model_name, # must be unique in the models table model_file = f"onnx/{model_file_name}", table_name = 'embeddings_models' ) # b) Tokenizer tdml.save_byom(model_id = model_name, # must be unique in the models table model_file = 'tokenizer.json', table_name = 'embeddings_tokenizers') # Step 4: Test ONNXEmbeddings Function # Note that ONNXEmbeddings expects the 'payload' column to be 'txt'. # If it has got a different name, just rename it in a subquery/CTE. input_table = "emails.emails" embeddings_query = f""" SELECT * from mldb.ONNXEmbeddings( on {input_table} as InputTable on (select * from embeddings_models where model_id = '{model_name}') as ModelTable DIMENSION on (select model as tokenizer from embeddings_tokenizers where model_id = '{model_name}') as TokenizerTable DIMENSION using Accumulate('id', 'txt') ModelOutputTensor('sentence_embedding') EnableMemoryCheck('false') OutputFormat('FLOAT32({number_dimensions_output})') OverwriteCachedModel('true') ) a """ DF_embeddings = tdml.DataFrame.from_query(embeddings_query) DF_embeddings ``` ## What Can I Do with the Embeddings? Teradata Vantage includes pre-built in-database functions to process embeddings further. 
Explore the following examples: - **Semantic Clustering with TD_KMeans:** [Semantic Clustering Python Notebook](https://github.com/Teradata/jupyter-demos/blob/main/UseCases/Language_Models_InVantage/Semantic_Clustering_Python.ipynb) - **Semantic Distance with TD_VectorDistance:** [Semantic Similarity Python Notebook](https://github.com/Teradata/jupyter-demos/blob/main/UseCases/Language_Models_InVantage/Semantic_Similarity_Python.ipynb) - **RAG-Based Application with TD_VectorDistance:** [RAG and Bedrock Query PDF Notebook](https://github.com/Teradata/jupyter-demos/blob/main/UseCases/Language_Models_InVantage/RAG_and_Bedrock_QueryPDF.ipynb) ## Deep Dive into Model Conversion to ONNX **The steps below outline how we converted the open-source Hugging Face model into an ONNX file compatible with the in-database ONNXEmbeddings function.** You do not need to perform these steps—they are provided solely for documentation and transparency. However, they may be helpful if you wish to convert another model to the required format. ### Part 1. Importing and Converting Model using optimum We start by importing the pre-trained [Alibaba-NLP/gte-multilingual-base](https://huggingface.co/Alibaba-NLP/gte-multilingual-base) model from Hugging Face. To enhance performance and ensure compatibility with various execution environments, we'll use the [Optimum](https://github.com/huggingface/optimum) utility to convert the model into the ONNX (Open Neural Network Exchange) format. After conversion to ONNX, we are fixing the opset in the ONNX file for compatibility with ONNX runtime used in Teradata Vantage We are generating ONNX files for multiple different precisions: fp32, int8, uint8 You can find the detailed conversion steps in the file [convert.py](./convert.py) ### Part 2. 
Running the model in Python with onnxruntime & compare results Once the fixes are applied, we proceed to test the correctness of the ONNX model by calculating cosine similarity between two texts using native SentenceTransformers and ONNX runtime, comparing the results. If the results are identical, it confirms that the ONNX model gives the same result as the native models, validating its correctness and suitability for further use in the database. ```python import onnxruntime as rt from sentence_transformers.util import cos_sim from sentence_transformers import SentenceTransformer import transformers sentences_1 = 'How is the weather today?' sentences_2 = 'What is the current weather like today?' # Calculate ONNX result tokenizer = transformers.AutoTokenizer.from_pretrained("Alibaba-NLP/gte-multilingual-base") predef_sess = rt.InferenceSession("onnx/model.onnx") enc1 = tokenizer(sentences_1) embeddings_1_onnx = predef_sess.run(None, {"input_ids": [enc1.input_ids], "attention_mask": [enc1.attention_mask]}) enc2 = tokenizer(sentences_2) embeddings_2_onnx = predef_sess.run(None, {"input_ids": [enc2.input_ids], "attention_mask": [enc2.attention_mask]}) # Calculate embeddings with SentenceTransformer model = SentenceTransformer(model_id, trust_remote_code=True) embeddings_1_sentence_transformer = model.encode(sentences_1, normalize_embeddings=True, trust_remote_code=True) embeddings_2_sentence_transformer = model.encode(sentences_2, normalize_embeddings=True, trust_remote_code=True) # Compare results print("Cosine similiarity for embeddings calculated with ONNX:" + str(cos_sim(embeddings_1_onnx[1][0], embeddings_2_onnx[1][0]))) print("Cosine similiarity for embeddings calculated with SentenceTransformer:" + str(cos_sim(embeddings_1_sentence_transformer, embeddings_2_sentence_transformer))) ``` You can find the detailed ONNX vs. 
SentenceTransformer result comparison steps in the file [test_local.py](./test_local.py) ----- DISCLAIMER: The content herein (“Content”) is provided “AS IS” and is not covered by any Teradata Operations, Inc. and its affiliates (“Teradata”) agreements. Its listing here does not constitute certification or endorsement by Teradata. To the extent any of the Content contains or is related to any artificial intelligence (“AI”) or other language learning models (“Models”) that interoperate with the products and services of Teradata, by accessing, bringing, deploying or using such Models, you acknowledge and agree that you are solely responsible for ensuring compliance with all applicable laws, regulations, and restrictions governing the use, deployment, and distribution of AI technologies. This includes, but is not limited to, AI Diffusion Rules, European Union AI Act, AI-related laws and regulations, privacy laws, export controls, and financial or sector-specific regulations. While Teradata may provide support, guidance, or assistance in the deployment or implementation of Models to interoperate with Teradata’s products and/or services, you remain fully responsible for ensuring that your Models, data, and applications comply with all relevant legal and regulatory obligations. Our assistance does not constitute legal or regulatory approval, and Teradata disclaims any liability arising from non-compliance with applicable laws. You must determine the suitability of the Models for any purpose. Given the probabilistic nature of machine learning and modeling, the use of the Models may in some situations result in incorrect output that does not accurately reflect the action generated. You should evaluate the accuracy of any output as appropriate for your use case, including by using human review of the output.
[ "BIOSSES", "SCIFACT" ]
FriendliAI/Phi-3-small-8k-instruct
FriendliAI
text-generation
[ "safetensors", "phi3small", "nlp", "code", "text-generation", "conversational", "custom_code", "multilingual", "license:mit", "region:us" ]
2025-03-06T02:29:49Z
2025-03-06T02:29:50+00:00
19
0
--- language: - multilingual license: mit license_link: https://huggingface.co/microsoft/Phi-3-small-8k-instruct/resolve/main/LICENSE pipeline_tag: text-generation tags: - nlp - code inference: parameters: temperature: 0.7 widget: - messages: - role: user content: Can you provide ways to eat combinations of bananas and dragonfruits? --- 🎉 **Phi-3.5**: [[mini-instruct]](https://huggingface.co/microsoft/Phi-3.5-mini-instruct); [[MoE-instruct]](https://huggingface.co/microsoft/Phi-3.5-MoE-instruct) ; [[vision-instruct]](https://huggingface.co/microsoft/Phi-3.5-vision-instruct) ## Model Summary The Phi-3-Small-8K-Instruct is a 7B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties. The model belongs to the Phi-3 family with the Small version in two variants [8K](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) which is the context length (in tokens) that it can support. The model has underwent a post-training process that incorporates both supervised fine-tuning and direct preference optimization for the instruction following and safety measures. When assessed against benchmarks testing common sense, language understanding, math, code, long context and logical reasoning, Phi-3-Small-8K-Instruct showcased a robust and state-of-the-art performance among models of the same-size and next-size-up. 
Resources and Technical Documentation: + [Phi-3 Microsoft Blog](https://aka.ms/Phi-3Build2024) + [Phi-3 Technical Report](https://aka.ms/phi3-tech-report) + [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai) + [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook) | | Short Context | Long Context | | ------- | ------------- | ------------ | | Mini | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx) ; [[GGUF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct-onnx)| | Small | 8K [[HF]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct-onnx-cuda)| | Medium | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct-onnx-cuda)| | Vision | | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct-onnx-cuda)| ## Intended Uses **Primary use cases** The model is intended for broad commercial and research use in English. The model provides uses for general purpose AI systems and applications which require: 1) Memory/compute constrained environments 2) Latency bound scenarios 3) Strong reasoning (especially code, math and logic) Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. 
**Use case considerations** Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. ## How to Use Phi-3-Small-8K-Instruct has been integrated in the development version (4.40.2) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following: * Install tiktoken (0.6.0) ans triton (2.3.0) * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source. The current `transformers` version can be verified with: `pip list | grep transformers`. Phi-3-Small-8K-Instruct is also available in [Azure AI](https://ai.azure.com/explore/models?&selectedCollection=phi). ### Tokenizer Phi-3-Small-8K-Instruct supports a vocabulary size of up to `100352` tokens. ### Chat Format Given the nature of the training data, the Phi-3-Small-8K-Instruct model is best suited for prompts using the chat format as follows. 
You can provide the prompt as a question with a generic template as follow: ```markdown <|endoftext|><|user|>\nQuestion <|end|>\n<|assistant|> ``` For example: ```markdown <|endoftext|><|user|> How to explain Internet for a medieval knight?<|end|> <|assistant|> ``` where the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following: ```markdown <|endoftext|><|user|> I am going to Paris, what should I see?<|end|> <|assistant|> Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."<|end|> <|user|> What is so great about #1?<|end|> <|assistant|> ``` ### Sample inference code This code snippets show how to get quickly started with running the model on a GPU: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline torch.random.manual_seed(0) model_id = "microsoft/Phi-3-small-8k-instruct" model = AutoModelForCausalLM.from_pretrained( model_id, torch_dtype="auto", trust_remote_code=True, ) assert torch.cuda.is_available(), "This model needs a GPU to run ..." 
device = torch.cuda.current_device() model = model.to(device) tokenizer = AutoTokenizer.from_pretrained(model_id) messages = [ {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."}, {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"}, ] pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, device=device ) generation_args = { "max_new_tokens": 500, "return_full_text": False, "temperature": 0.0, "do_sample": False, } output = pipe(messages, **generation_args) print(output[0]['generated_text']) ``` *Some applications/frameworks might not include a BOS token (`<|endoftext|>`) at the start of the conversation. Please ensure that it is included since it provides more reliable results.* ## Responsible AI Considerations Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: + Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. 
Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. + Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. + Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. + Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include: + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. + Misinformation: Models may produce inaccurate information. 
Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). + Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. + Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations. ## Training ### Model * Architecture: Phi-3 Small-8K-Instruct has 7B parameters and is a dense decoder-only Transformer model with alternating dense and blocksparse attentions. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidlines. * Inputs: Text. It is best suited for prompts using chat format. * Context length: 8K tokens * GPUs: 1024 H100-80G * Training time: 18 days * Training data: 4.8T tokens * Outputs: Generated text in response to the input * Dates: Our models were trained between February and April 2024 * Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models. * Release dates The model weight is released on May 21, 2024. 
### Datasets Our training data includes a wide variety of sources, totaling 4.8 trillion tokens (including 10% multilingual), and is a combination of 1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness. We are focusing on the quality of data that could potentially improve the reasoning ability for the model, and we filter the publicly available documents to contain the correct level of knowledge. As an example, the result of a game in premier league in a particular day might be good training data for frontier models, but we need to remove such information to leave more model capacity for reasoning for the small size models. More details about data can be found in the [Phi-3 Technical Report](https://aka.ms/phi3-tech-report). ## Benchmarks We report the results for Phi-3-Small-8K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Mixtral-8x7b, Gemini-Pro, Gemma 7B, Llama-3-8B-Instruct, GPT-3.5-Turbo-1106, and GPT-4-Turbo-1106. All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation. As is now standard, we use few-shot prompts to evaluate the models, at temperature 0. The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3. 
More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model. The number of k–shot examples is listed per-benchmark. |Benchmark|Phi-3-Small-8K-Instruct<br>7b|Gemma<br>7B|Mixtral<br>8x7B|Llama-3-Instruct<br>8b|GPT-3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)| |---------|-----------------------|--------|-------------|-------------------|-----------------|----------|------------------------| |AGI Eval<br>5-shot|45.1|42.1|45.2|42.0|48.4|49.0|59.6| |MMLU<br>5-shot|75.7|63.6|70.5|66.5|71.4|66.7|84.0| |BigBench Hard<br>3-shot|79.1|59.6|69.7|51.5|68.3|75.6|87.7| |ANLI<br>7-shot|58.1|48.7|55.2|57.3|58.1|64.2|71.7| |HellaSwag<br>5-shot|77.0|49.8|70.4|71.1|78.8|76.2|88.3| |ARC Challenge<br>10-shot|90.7|78.3|87.3|82.8|87.4|88.3|95.6| |ARC Easy<br>10-shot|97.0|91.4|95.6|93.4|96.3|96.1|98.8| |BoolQ<br>2-shot|84.8|66.0|76.6|80.9|79.1|86.4|91.3| |CommonsenseQA<br>10-shot|80.0|76.2|78.1|79.0|79.6|81.8|86.7| |MedQA<br>2-shot|65.4|49.6|62.2|60.5|63.4|58.2|83.7| |OpenBookQA<br>10-shot|88.0|78.6|85.8|82.6|86.0|86.4|93.4| |PIQA<br>5-shot|86.9|78.1|86.0|75.7|86.6|86.2|90.1| |Social IQA<br>5-shot|79.2|65.5|75.9|73.9|68.3|75.4|81.7| |TruthfulQA (MC2)<br>10-shot|70.2|52.1|60.1|63.2|67.7|72.6|85.2| |WinoGrande<br>5-shot|81.5|55.6|62.0|65.0|68.8|72.2|86.7| |TriviaQA<br>5-shot|58.1|72.3|82.2|67.7|85.8|80.2|73.3| |GSM8K Chain of Thought<br>8-shot|89.6|59.8|64.7|77.4|78.1|80.4|94.2| |HumanEval<br>0-shot|61.0|34.1|37.8|60.4|62.2|64.4|79.9| |MBPP<br>3-shot|71.7|51.5|60.2|67.7|77.8|73.2|86.7| |Average|75.7|61.8|69.8|69.4|74.3|75.4|85.2| We take a closer look at different categories across 80 public benchmark datasets at the table below: |Benchmark|Phi-3-Small-8K-Instruct<br>7b|Gemma<br>7B|Mixtral<br>8x7B|Llama-3-Instruct<br>8b|GPT-3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)| 
|--------|------------------------|--------|-------------|-------------------|-------------------|----------|------------------------| |Popular aggregated benchmark|71.1|59.4|66.2|59.9|67.0|67.5|80.5| |Reasoning|82.4|69.1|77.0|75.7|78.3|80.4|89.3| |Language understanding|70.6|58.4|64.9|65.4|70.4|75.3|81.6| |Code generation|60.7|45.6|52.7|56.4|70.4|66.7|76.1| |Math|51.6|35.8|40.3|41.1|52.8|50.9|67.1| |Factual knowledge|38.6|46.7|58.6|43.1|63.4|54.6|45.9| |Multilingual|62.5|63.2|63.4|65.0|69.1|76.5|82.0| |Robustness|72.9|38.4|51.0|64.5|69.3|69.7|84.6| ## Software * [PyTorch](https://github.com/pytorch/pytorch) * [DeepSpeed](https://github.com/microsoft/DeepSpeed) * [Transformers](https://github.com/huggingface/transformers) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) * [Tiktoken](https://github.com/openai/tiktoken) * [Triton](https://github.com/openai/triton) ## Hardware Note that by default, the Phi-3-Small model uses flash attention 2 and Triton blocksparse attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types: * NVIDIA A100 * NVIDIA A6000 * NVIDIA H100 If you want to run the model on: + Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [8K](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) ## Cross Platform Support ONNX runtime ecosystem now supports Phi3 small models across platforms and hardware. Optimized phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML GPU acceleration is supported for Windows desktops GPUs (AMD, Intel, and NVIDIA). Along with DML, ONNX Runtime provides cross platform support for Phi3 Small across a range of devices CPU, GPU, and mobile. Here are some of the optimized configurations we have added: 1. 
ONNX models for int4 DML: Quantized to int4 via AWQ 2. ONNX model for fp16 CUDA 3. ONNX model for int4 CUDA: Quantized to int4 via RTN 4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN ## License The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-small-8k/resolve/main/LICENSE). ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.
[ "MEDQA" ]
Lowenzahn/KoBioMed-Llama-3.1-8B
Lowenzahn
null
[ "safetensors", "llama", "biology", "medical", "Llama", "pre-trained", "ko", "en", "base_model:meta-llama/Llama-3.1-8B", "base_model:finetune:meta-llama/Llama-3.1-8B", "license:llama3.1", "region:us" ]
2025-03-14T09:10:45Z
2025-03-17T12:22:34+00:00
19
1
--- base_model: - meta-llama/Llama-3.1-8B language: - ko - en license: llama3.1 tags: - biology - medical - Llama - pre-trained --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/646704281dd5854d4de2cdda/vBbfKhWIBJVIjqSv6yVDk.png) # KoBioMed-Llama-3.1-8B ## Instroduction We introduce KoBioMed-Llama-3.1-8B, a bilingual (English and Korean) generative model specialized in the BioMedical domain, developed by ezCaretech. This model has been continual pre-trained (CPT) on a dataset from PubMed abstracts and their translated Korean counterparts, undergoing extensive preprocessing that includes cleansing, de-duplication, and quality filtering. Our KoBioMed-Llama-3.1-8B has achieved state-of-the-art performance on both Korean and English BioMedical benchmarks. We hope this model will contribute significantly to the biomedical and medical research community. This repository contains an 8 Billion generative language model with the following key features: - Developed by: AI Team, ezCaretech R&D Center - Language Support: English and Korean - Context Length: 8,192 tokens - Vocab Size: 12,800 - License: llama3.1 ### Notice! - **This is a pre-trained model. It will be a great starting point for post-training, such as instruction tuning.** - **This model was developed with support from the Korea Artificial Intelligence Industry Cluster Agency (AICA).** - **The model is currently in post-training (Instruction Tuning, DPO) and is scheduled to be released within March 2025.** ## Evaluation We evaluated the KoBioMed-Llama-3.1-8B using various Korean and English biomedical benchmarks. - Benchmark evaluations were carried out using EleutherAI/lm-evaluation-harness and performed with 5-shot examples. - The subsets used for the KMMLU and MMLU evaluations are listed below. 
- KMMLU: 'kmmlu_direct_biology' - MMLU: 'mmlu_college_biology', 'mmlu_clinical_knowledge', 'mmlu_anatomy', 'mmlu_college_medicine', 'mmlu_medical_genetics', 'mmlu_professional_medicine' <table> <tr> <th>Models</th> <th>KMMLU</th> <th>KorMedMCQA</th> <th>MedMCQA</th> <th>MMLU</th> <th>PubMedQA</th> <th>Mean</th> </tr> <tr> <td><a href="https://huggingface.co/Lowenzahn/KoBioMed-Llama-3.1-8B">KoBioMed-Llama-3.1-8B</a></td> <td align="center"><strong>0.4010</strong></td> <td align="center"><strong>0.5705</strong></td> <td align="center"><u>0.5367</u></td> <td align="center">0.6837</td> <td align="center"><strong>0.7800</strong></td> <td align="center"><strong>0.5944</strong></td> </tr> <tr> <td><a href="https://huggingface.co/meta-llama/Llama-3.1-8B">Llama-3.1-8B</a></td> <td align="center"><u>0.3620</u></td> <td align="center">0.5105</td> <td align="center"><strong>0.5635</strong></td> <td align="center"><strong>0.7159</strong></td> <td align="center">0.7600</td> <td align="center"><u>0.5824</u></td> </tr> <tr> <td><a href="https://huggingface.co/mistralai/Mistral-7B-v0.3">Mistral-7B-v0.3</a></td> <td align="center">0.3130</td> <td align="center">0.3958</td> <td align="center">0.4927</td> <td align="center">0.6693</td> <td align="center">0.7740</td> <td align="center">0.5290</td> </tr> <tr> <td><a href="https://huggingface.co/beomi/Llama-3-Open-Ko-8B">Llama-3-Open-Ko-8B</a></td> <td align="center">0.3340</td> <td align="center">0.4941</td> <td align="center">0.4743</td> <td align="center">0.6251</td> <td align="center">0.7320</td> <td align="center">0.5319</td> </tr> <tr> <td><a href="https://huggingface.co/upstage/SOLAR-10.7B-v1.0">SOLAR-10.7B-v1.0</a></td> <td align="center">0.3200</td> <td align="center"><u>0.5146</u></td> <td align="center">0.5075</td> <td align="center"><u>0.7050</u></td> <td align="center"><u>0.7760</u></td> <td align="center">0.5646</td> </tr> </table> ## Quickstart Here is a code snippet for model inference. 
``` python from transformers import AutoTokenizer, AutoModelForCausalLM import torch repo = 'Lowenzahn/KoBioMed-Llama-3.1-8B' # Load model model = AutoModelForCausalLM.from_pretrained( repo, trust_remote_code=True, device_map="auto", torch_dtype=torch.bfloat16, ) # Load tokenizer tokenizer = AutoTokenizer.from_pretrained(repo) # Inference prompts = ["Machine learning is"] inputs = tokenizer(prompts, return_tensors="pt") gen_kwargs = {"max_new_tokens": 1024, "top_p": 0.8, "temperature": 0.8, "do_sample": False, "repetition_penalty": 1.2} output = model.generate(inputs['input_ids'], **gen_kwargs) output = tokenizer.decode(output[0].tolist(), skip_special_tokens=True) print(output) ``` ## Limitations KoBioMed-Llama-3.1-8B demonstrates strong performance in the biomedical domain, but it can sometimes generate inappropriate responses. While we have made considerable efforts to avoid providing sensitive data, racial discrimination, harm, or biased information in the training data, issues may still arise. We emphasize that the text generated by KoBioMed-Llama-3.1-8B does not reflect the views of the ezCaretech R&D center AI Team. - The model may generate responses containing biased information related to age, gender, or race. - The model may generate responses containing personal information, harmful content, or other inappropriate information. - Since the model does not reflect the most up-to-date information, its responses may be outdated or contradictory. - The performance of model may degrade on tasks unrelated to the biomedical and healthcare domains. - KoBioMed-Llama-3.1-8B can make mistakes. Critical information should be verified independently. ## Training Data This model was trained on preprocessed abstracts of papers published in PubMed from 2000 to 2023. 
The preprocessing includes the following steps: - Removal of URLs - Removal of HTML tags - Removal of reference citations - Removal of Identifiable information - Min-Hash based duplication removal - Scoring model based low quality text removal ## License This model is released under llama3.1 license. ## Supported by This model was developed with support from the Korea Artificial Intelligence Industry Cluster Agency (AICA). ## Contact 조형민(Hyeongmin Cho), [email protected] </br> 김인후(Inhu Kim), [email protected] </br> 이동형(Donghyoung Lee), [email protected] </br> 박달호(Dalho Park), [email protected] </br> ## Citation **KoBioMed-Llama-3.1-8B** ``` @article{kobiomedllama, title={KoBioMed-Llama-3.1-8B}, author={Hyeongmin Cho and Inhu Kim and Donghyoung Lee and Sanghwan Kim and Dalho Park and Inchul Kang and Kyul Kim and Jihoon Cho and Jongbeom Park}, year={2025}, url={https://huggingface.co/Lowenzahn/KoBioMed-Llama-3.1-8B} } ```
[ "PUBMEDQA" ]
kalex/bert-finetuned-ner
kalex
token-classification
[ "transformers", "pytorch", "tensorboard", "bert", "token-classification", "generated_from_trainer", "dataset:ncbi_disease", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-04-16T23:04:01Z
2022-04-17T03:43:25+00:00
18
0
--- datasets: - ncbi_disease license: apache-2.0 tags: - generated_from_trainer model-index: - name: bert-finetuned-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-ner This model is a fine-tuned version of [bert-base-uncased](https://huggingface.co/bert-base-uncased) on the ncbi_disease dataset. It achieves the following results on the evaluation set: - Loss: 0.0591 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | 0.1127 | 1.0 | 680 | 0.0593 | | 0.0442 | 2.0 | 1360 | 0.0557 | | 0.0181 | 3.0 | 2040 | 0.0591 | ### Framework versions - Transformers 4.18.0 - Pytorch 1.10.0+cu111 - Datasets 2.1.0 - Tokenizers 0.12.1
[ "NCBI DISEASE" ]
Dizex/InstaFoodBERT-NER
Dizex
token-classification
[ "transformers", "pytorch", "bert", "token-classification", "Instagram", "NER", "Named Entity Recognition", "Food Entity Extraction", "Social Media", "Informal text", "en", "dataset:Dizex/InstaFoodSet", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-11-06T19:35:59Z
2022-11-17T20:32:15+00:00
18
0
--- datasets: - Dizex/InstaFoodSet language: en license: mit tags: - Instagram - NER - Named Entity Recognition - Food Entity Extraction - Social Media - Informal text widget: - text: 'Today''s meal: Fresh olive poké bowl topped with chia seeds. Very delicious!' example_title: Food example 1 - text: Tartufo Pasta with garlic flavoured butter and olive oil, egg yolk, parmigiano and pasta water. example_title: Food example 2 --- # InstaFoodBERT-NER ## Model description **InstaFoodBERT-NER** is a fine-tuned BERT model that is ready to use for **Named Entity Recognition** of Food entities on informal text (social media like). It has been trained to recognize a single entity: food (FOOD). Specifically, this model is a *bert-base-cased* model that was fine-tuned on a dataset consisting of 400 English Instagram posts related to food. The [dataset](https://huggingface.co/datasets/Dizex/InstaFoodSet) is open source. ## Intended uses #### How to use You can use this model with Transformers *pipeline* for NER. ```python from transformers import AutoTokenizer, AutoModelForTokenClassification from transformers import pipeline tokenizer = AutoTokenizer.from_pretrained("Dizex/InstaFoodBERT-NER") model = AutoModelForTokenClassification.from_pretrained("Dizex/InstaFoodBERT-NER") pipe = pipeline("ner", model=model, tokenizer=tokenizer) example = "Today's meal: Fresh olive poké bowl topped with chia seeds. Very delicious!" ner_entity_results = pipe(example) print(ner_entity_results) ```
[ "CHIA" ]
aimarsg/pharmacoNER
aimarsg
token-classification
[ "transformers", "pytorch", "tensorboard", "roberta", "token-classification", "generated_from_trainer", "dataset:pharmaconer", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-02-08T20:41:00Z
2023-02-08T20:53:19+00:00
18
0
--- datasets: - pharmaconer license: apache-2.0 metrics: - precision - recall - f1 - accuracy tags: - generated_from_trainer model-index: - name: pharmacoNER results: - task: type: token-classification name: Token Classification dataset: name: pharmaconer type: pharmaconer config: PharmaCoNER split: validation args: PharmaCoNER metrics: - type: precision value: 0.9057634526085769 name: Precision - type: recall value: 0.9025585193249864 name: Recall - type: f1 value: 0.9041581458759373 name: F1 - type: accuracy value: 0.9948434782608696 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # pharmacoNER This model is a fine-tuned version of [PlanTL-GOB-ES/bsc-bio-ehr-es](https://huggingface.co/PlanTL-GOB-ES/bsc-bio-ehr-es) on the pharmaconer dataset. It achieves the following results on the evaluation set: - Loss: 0.0251 - Precision: 0.9058 - Recall: 0.9026 - F1: 0.9042 - Accuracy: 0.9948 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.0272 | 1.0 | 1017 | 0.0288 | 0.8047 | 0.8503 | 0.8269 | 0.9914 | | 0.0114 | 2.0 | 2034 | 0.0240 | 0.8950 | 0.8998 | 0.8974 | 0.9945 | | 0.006 | 3.0 | 3051 | 0.0251 | 0.9058 | 0.9026 | 0.9042 | 0.9948 | ### Framework versions - Transformers 4.26.0 - Pytorch 1.13.1+cu116 - Datasets 2.9.0 - 
Tokenizers 0.13.2
[ "PHARMACONER" ]
Pedrambbk/Opinerium
Pedrambbk
text2text-generation
[ "transformers", "pytorch", "t5", "text2text-generation", "en", "license:afl-3.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-02-14T09:31:36Z
2024-02-05T13:36:07+00:00
18
0
--- language: - en library_name: transformers license: afl-3.0 widget: - text: 'Generate an opinion-based question from the text:' --- # Opinerium: Fine-Tuned flan-T5 for Generating Subjective Inquiries ## Abstract This model is the culmination of extensive research into generating subjective inquiries to enhance public interaction with media content. Our approach diverges from the norm by shifting focus from objective to subjective question generation, aiming to elicit personal preferences and opinions based on given texts. Employing fine-tuning techniques on flan-T5 and GPT3 models for Seq2Seq generation, this model has undergone rigorous evaluation against a custom dataset of 40,000 news articles, supplemented with human-generated questions. The comparative analysis highlights Opinerium's superiority, especially when measured against a suite of lexical and semantic metrics. ## Introduction Opinerium is a groundbreaking model fine-tuned from the flan-T5-large architecture, designed to generate poll or opinion-based questions from textual content. This innovation aims to foster public engagement by inviting personal perspectives on various topics, primarily focusing on news media posts. Unlike traditional models that target factual questions with definitive answers, Opinerium delves into the realm of subjective questioning, enabling a deeper interaction with trending media topics. ## Model Training Opinerium was meticulously fine-tuned using the flan-T5 variants from the Hugging Face platform, specifically tailored for the task of generating subjective questions. The fine-tuning process was meticulously crafted to address the unique challenges of subjective question generation, such as capturing nuances in tone, understanding context deeply, and generating engaging, open-ended questions that prompt personal reflection. ### Training Details The training was conducted on a Tesla P100-16GB GPU, utilizing the Transformer library in PyTorch. 
We adopted a comprehensive approach to hyperparameter optimization, exploring various configurations to find the optimal balance between model performance and computational efficiency. Key training parameters included: - Batch size: 32 for training, ensuring robust gradient estimates while maintaining a manageable computational load. - Gradient accumulation: Set to 64, this technique allowed us to effectively simulate a larger batch size, enhancing the stability and quality of the model updates. - Learning rate: Initially set to 3e-4, with careful adjustments based on performance metrics to ensure steady and effective learning. - Optimizer: AdaFactor was chosen for its efficiency and effectiveness in handling sparse data and adapting learning rates dynamically. ## Dataset The training dataset comprised 40,000 news articles spanning a wide array of topics, ensuring the model's exposure to diverse content and question formats. Each article was paired with binary subjective questions, providing a rich ground for learning how to formulate inquiries that elicit personal opinions. The multilingual nature of the original articles added an extra layer of complexity, which was mitigated by translating all content into English to leverage the extensive training data available for English-centric models. ## Usage To utilize Opinerium for generating subjective inquiries, simply prepend your input text with the prompt "generate an opinion-based question from the text:" This signals the model to analyze the content and craft a question designed to engage users in sharing their perspectives. ## Example ```plaintext Title: Standard charging socket for all devices until 2024 Context: Other electrical devices will have to have a standard charging socket in the EU from mid-2024. Negotiators from the EU states and the European Parliament agreed on USB-C as the standard charging socket to prevent thousands of tons of electrical waste from precisely charging sockets. 
Poll: Do you think a universal charging socket helps sustainability? ``` ## Conclusion Opinerium stands at the forefront of subjective question generation, offering a novel tool for engaging with content across multiple domains. By fostering the creation of opinion-based inquiries, it encourages more interactive and thought-provoking discussions, contributing to a richer public discourse. --- ## Hugging Face Web UI Usage For optimal results when using Opinerium on the Hugging Face web UI, prefix your text with: ``` generate an opinion-based question from the text: ``` This prompt is essential for directing the model to generate subjective questions from your input. ---
[ "CRAFT" ]
system-technologies/biogpt
system-technologies
text-generation
[ "transformers", "pytorch", "biogpt", "text-generation", "en", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-03-22T16:15:05Z
2023-03-22T17:10:41+00:00
18
0
--- language: en license: mit widget: - text: COVID-19 is duplicated_from: microsoft/biogpt --- ## BioGPT Pre-trained language models have attracted increasing attention in the biomedical domain, inspired by their great success in the general natural language domain. Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants), the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT. While they have achieved great success on a variety of discriminative downstream biomedical tasks, the lack of generation ability constrains their application scope. In this paper, we propose BioGPT, a domain-specific generative Transformer language model pre-trained on large-scale biomedical literature. We evaluate BioGPT on six biomedical natural language processing tasks and demonstrate that our model outperforms previous models on most tasks. Especially, we get 44.98%, 38.42% and 40.76% F1 score on BC5CDR, KD-DTI and DDI end-to-end relation extraction tasks, respectively, and 78.2% accuracy on PubMedQA, creating a new record. Our case study on text generation further demonstrates the advantage of BioGPT on biomedical literature to generate fluent descriptions for biomedical terms. You can use this model directly with a pipeline for text generation. 
Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> from transformers import BioGptTokenizer, BioGptForCausalLM >>> model = BioGptForCausalLM.from_pretrained("microsoft/biogpt") >>> tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") >>> generator = pipeline('text-generation', model=model, tokenizer=tokenizer) >>> set_seed(42) >>> generator("COVID-19 is", max_length=20, num_return_sequences=5, do_sample=True) [{'generated_text': 'COVID-19 is a disease that spreads worldwide and is currently found in a growing proportion of the population'}, {'generated_text': 'COVID-19 is one of the largest viral epidemics in the world.'}, {'generated_text': 'COVID-19 is a common condition affecting an estimated 1.1 million people in the United States alone.'}, {'generated_text': 'COVID-19 is a pandemic, the incidence has been increased in a manner similar to that in other'}, {'generated_text': 'COVID-19 is transmitted via droplets, air-borne, or airborne transmission.'}] ``` Here is how to use this model to get the features of a given text in PyTorch: ```python from transformers import BioGptTokenizer, BioGptForCausalLM tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") model = BioGptForCausalLM.from_pretrained("microsoft/biogpt") text = "Replace me by any text you'd like." 
encoded_input = tokenizer(text, return_tensors='pt') output = model(**encoded_input) ``` Beam-search decoding: ```python import torch from transformers import BioGptTokenizer, BioGptForCausalLM, set_seed tokenizer = BioGptTokenizer.from_pretrained("microsoft/biogpt") model = BioGptForCausalLM.from_pretrained("microsoft/biogpt") sentence = "COVID-19 is" inputs = tokenizer(sentence, return_tensors="pt") set_seed(42) with torch.no_grad(): beam_output = model.generate(**inputs, min_length=100, max_length=1024, num_beams=5, early_stopping=True ) tokenizer.decode(beam_output[0], skip_special_tokens=True) 'COVID-19 is a global pandemic caused by severe acute respiratory syndrome coronavirus 2 (SARS-CoV-2), the causative agent of coronavirus disease 2019 (COVID-19), which has spread to more than 200 countries and territories, including the United States (US), Canada, Australia, New Zealand, the United Kingdom (UK), and the United States of America (USA), as of March 11, 2020, with more than 800,000 confirmed cases and more than 800,000 deaths.' ``` ## Citation If you find BioGPT useful in your research, please cite the following paper: ```latex @article{10.1093/bib/bbac409, author = {Luo, Renqian and Sun, Liai and Xia, Yingce and Qin, Tao and Zhang, Sheng and Poon, Hoifung and Liu, Tie-Yan}, title = "{BioGPT: generative pre-trained transformer for biomedical text generation and mining}", journal = {Briefings in Bioinformatics}, volume = {23}, number = {6}, year = {2022}, month = {09}, abstract = "{Pre-trained language models have attracted increasing attention in the biomedical domain, inspired by their great success in the general natural language domain. Among the two main branches of pre-trained language models in the general language domain, i.e. BERT (and its variants) and GPT (and its variants), the first one has been extensively studied in the biomedical domain, such as BioBERT and PubMedBERT. 
While they have achieved great success on a variety of discriminative downstream biomedical tasks, the lack of generation ability constrains their application scope. In this paper, we propose BioGPT, a domain-specific generative Transformer language model pre-trained on large-scale biomedical literature. We evaluate BioGPT on six biomedical natural language processing tasks and demonstrate that our model outperforms previous models on most tasks. Especially, we get 44.98\%, 38.42\% and 40.76\% F1 score on BC5CDR, KD-DTI and DDI end-to-end relation extraction tasks, respectively, and 78.2\% accuracy on PubMedQA, creating a new record. Our case study on text generation further demonstrates the advantage of BioGPT on biomedical literature to generate fluent descriptions for biomedical terms.}", issn = {1477-4054}, doi = {10.1093/bib/bbac409}, url = {https://doi.org/10.1093/bib/bbac409}, note = {bbac409}, eprint = {https://academic.oup.com/bib/article-pdf/23/6/bbac409/47144271/bbac409.pdf}, } ```
[ "BC5CDR", "PUBMEDQA" ]
hmahmoud/flan-t5-large-lfqa-fr-v3
hmahmoud
text2text-generation
[ "transformers", "pytorch", "tensorboard", "safetensors", "t5", "text2text-generation", "flan-t5", "qa", "lfqa", "information retrieval", "fr", "dataset:vblagoje/lfqa", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-04-26T14:00:38Z
2023-06-20T12:50:53+00:00
18
0
--- datasets: - vblagoje/lfqa language: - fr license: apache-2.0 metrics: - rouge tags: - flan-t5 - qa - lfqa - information retrieval widget: - text: 'Please answer to the following question : Comment fonctionne un modèle de langue ? Que signifi un modèle de question réponse générative ? context : Les modèles de langage basés sur le deep learning sont des modèles dapprentissage automatique qui utilisent des techniques dapprentissage profond pour effectuer des tâches de langage.En traitement automatique des langues, un modèle de langage est un modèle statistique qui modélise la distribution de séquences de mots, plus généralement de séquences de symboles discrets (lettres, phonèmes, mots), dans une langue naturelle. Un modèle de langage peut par exemple prédire le mot suivant une séquence de mots1.BERT, GPT-3 et Bloom sont des modèles de langage.Les modèles de Question Réponse (QA) permette d''automatiser la réponse aux questions fréquemment posées en utilisant une base de connaissances (documents) comme contexte. Les réponses aux questions des clients peuvent être tirées de ces documents.Il existe différentes variantes de modèle de question réponse : question réponse extractive : le modèle extrait la réponse d''un contexte. Le contexte ici peut être un texte fourni, un tableau ou même du HTML ! Ceci est généralement résolu avec des modèles de type BERT. question réponse générative ouverte : le modèle génère du texte libre directement en fonction du contexte. question réponse générative fermée : dans ce cas, aucun contexte n''est fourni. 
La réponse est entièrement générée par un modèle.Les modèles de langage basés sur le deep learning sont des modèles dapprentissage automatique qui utilisent des techniques dapprentissage profond pour effectuer des tâches de langage.En traitement automatique des langues, un modèle de langage est un modèle statistique qui modélise la distribution de séquences de mots, plus généralement de séquences de symboles discrets (lettres, phonèmes, mots), dans une langue naturelle. Un modèle de langage peut par exemple prédire le mot suivant une séquence de mots.Les modèles de Question Réponse (QA) permette d''automatiser la réponse aux questions fréquemment posées en utilisant une base de connaissances (documents) comme contexte. Les réponses aux questions des clients peuvent être tirées de ces documents.Il existe différentes variantes de modèle de question réponse : question réponse extractive : le modèle extrait la réponse d''un contexte. Le contexte ici peut être un texte fourni, un tableau ou même du HTML ! Ceci est généralement résolu avec des modèles de type BERT. question réponse générative ouverte : le modèle génère du texte libre directement en fonction du contexte. question réponse générative fermée : dans ce cas, aucun contexte n''est fourni. La réponse est entièrement générée par un modèle. ' example_title: Les modèles de langage inference: parameters: max_length: 512 num_return_sequences: 1 min_length: 80 no_repeat_ngram_size: 4 do_sample: false num_beams: 8 early_stopping: true model-index: - name: flan-t5-large-lfqa-fr-v3 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. 
--> # flan-t5-large-lfqa-fr This model is a fine-tuned version of [google/flan-t5-large](https://huggingface.co/google/flan-t5-large) on some examples (50000) of the vblagoje/lfqa dataset translated automatically to French using Helsinki-NLP/opus-mt-en-fr model. Therefore the main task this model can perform is abstractive question answering given certain context paragraphs which can be used to answer that question. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - gradient_accumulation_steps: 16 - total_train_batch_size: 16 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 1.0 ### Training results ### Usage ```python from transformers import AutoTokenizer, AutoModel, AutoModelForSeq2SeqLM model_name = "hmahmoud/flan-t5-large-lfqa-fr-v3" tokenizer = AutoTokenizer.from_pretrained(model_name) model = AutoModelForSeq2SeqLM.from_pretrained(model_name) query = "Comment fonctionne un modèle de langue ? Que signifi un modèle de question réponse générative ?" document = "Les modèles de langage basés sur le deep learning sont des modèles dapprentissage automatique qui utilisent des techniques dapprentissage profond pour effectuer des tâches de langage.En traitement automatique des langues, un modèle de langage est un modèle statistique qui modélise la distribution de séquences de mots, plus généralement de séquences de symboles discrets (lettres, phonèmes, mots), dans une langue naturelle. 
Un modèle de langage peut par exemple prédire le mot suivant une séquence de mots1.BERT, GPT-3 et Bloom sont des modèles de langage.Les modèles de Question Réponse (QA) permette d'automatiser la réponse aux questions fréquemment posées en utilisant une base de connaissances (documents) comme contexte. Les réponses aux questions des clients peuvent être tirées de ces documents.Il existe différentes variantes de modèle de question réponse : question réponse extractive : le modèle extrait la réponse d'un contexte. Le contexte ici peut être un texte fourni, un tableau ou même du HTML ! Ceci est généralement résolu avec des modèles de type BERT. question réponse générative ouverte : le modèle génère du texte libre directement en fonction du contexte. question réponse générative fermée : dans ce cas, aucun contexte n'est fourni. La réponse est entièrement générée par un modèle.Les modèles de langage basés sur le deep learning sont des modèles dapprentissage automatique qui utilisent des techniques dapprentissage profond pour effectuer des tâches de langage.En traitement automatique des langues, un modèle de langage est un modèle statistique qui modélise la distribution de séquences de mots, plus généralement de séquences de symboles discrets (lettres, phonèmes, mots), dans une langue naturelle. Un modèle de langage peut par exemple prédire le mot suivant une séquence de mots.Les modèles de Question Réponse (QA) permette d'automatiser la réponse aux questions fréquemment posées en utilisant une base de connaissances (documents) comme contexte. Les réponses aux questions des clients peuvent être tirées de ces documents.Il existe différentes variantes de modèle de question réponse : question réponse extractive : le modèle extrait la réponse d'un contexte. Le contexte ici peut être un texte fourni, un tableau ou même du HTML ! Ceci est généralement résolu avec des modèles de type BERT. 
question réponse générative ouverte : le modèle génère du texte libre directement en fonction du contexte. question réponse générative fermée : dans ce cas, aucun contexte n'est fourni. La réponse est entièrement générée par un modèle." query_and_docs = "Please answer to the following question : {} context: {}".format(query, document) model_input = tokenizer(query_and_docs, truncation=True, padding=True, return_tensors="pt") generated_answers_encoded = model.generate(input_ids=model_input["input_ids"], attention_mask=model_input["attention_mask"], min_length=80, max_length=512, do_sample=False, early_stopping=True, num_beams=8, temperature=None, top_k=None, top_p=None, eos_token_id=tokenizer.eos_token_id, no_repeat_ngram_size=4, num_return_sequences=1) tokenizer.batch_decode(generated_answers_encoded, skip_special_tokens=True,clean_up_tokenization_spaces=True) ```
[ "CAS" ]
nimeeshachan/mlma_nchan19_biogpt_gpt2
nimeeshachan
token-classification
[ "transformers", "pytorch", "tensorboard", "gpt2", "token-classification", "generated_from_trainer", "dataset:ncbi_disease", "license:mit", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-05-15T03:10:59Z
2023-05-15T16:05:48+00:00
18
0
--- datasets: - ncbi_disease license: mit metrics: - precision - recall - f1 - accuracy tags: - generated_from_trainer model-index: - name: mlma_nchan19_biogpt_gpt2 results: - task: type: token-classification name: Token Classification dataset: name: ncbi_disease type: ncbi_disease config: ncbi_disease split: validation args: ncbi_disease metrics: - type: precision value: 0.44350580781414994 name: Precision - type: recall value: 0.5336721728081322 name: Recall - type: f1 value: 0.4844290657439446 name: F1 - type: accuracy value: 0.956318798864211 name: Accuracy --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # mlma_nchan19_biogpt_gpt2 This model is a fine-tuned version of [microsoft/biogpt](https://huggingface.co/microsoft/biogpt) on the ncbi_disease dataset. It achieves the following results on the evaluation set: - Loss: 0.1545 - Precision: 0.4435 - Recall: 0.5337 - F1: 0.4844 - Accuracy: 0.9563 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Precision | Recall | F1 | Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:------:|:------:|:--------:| | 0.3269 | 1.0 | 679 | 0.1626 | 0.3330 | 0.3850 | 0.3571 | 0.9469 | | 0.1703 | 2.0 | 1358 | 0.1466 | 0.3958 | 0.5070 | 0.4446 | 0.9544 | | 0.0988 | 3.0 | 2037 | 0.1545 | 0.4435 | 0.5337 | 0.4844 | 0.9563 | ### Framework versions - Transformers 4.29.1 - Pytorch 2.0.0+cu118 - Datasets 2.12.0 - 
Tokenizers 0.13.3
[ "NCBI DISEASE" ]
Monero/Guanaco-13b-Merged-4bit
Monero
text-generation
[ "transformers", "llama", "text-generation", "dataset:timdettmers/guanaco-13b", "dataset:JosephusCheung/GuanacoDataset", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-05-22T20:08:06Z
2023-05-22T22:20:39+00:00
18
1
--- datasets: - timdettmers/guanaco-13b - JosephusCheung/GuanacoDataset --- <center><h1><b>Guanaco</b> - Generative Universal Assistant for Natural-language Adaptive Context-aware Omnilingual outputs</h1></center> <p><strong><font size="5">Information</font></strong></p> Guanaco 13b LoRa from timdettmers/guanaco-13b that was merged to Llama 13b and is compatible with transformers 4.28.0 <br>This was made using https://huggingface.co/timdettmers/guanaco-13b and https://huggingface.co/datasets/JosephusCheung/GuanacoDataset The details of the guanaco dataset and parameters of the LoRa that Tim Dettmers' released is not available at this time. <html> <head> <style> table { border:1px solid #b3adad; border-collapse:collapse; padding:5px; } table th { border:1px solid #b3adad; padding:5px; background: #f0f0f0; color: #313030; } table td { border:1px solid #b3adad; text-align:center; padding:5px; background: #ffffff; color: #313030; } </style> </head> <body> <table> <thead> <tr> <th>Model:</th> <th>Wikitext2</th> <th>Ptb-New</th> <th>C4-New</th> </tr> </thead> <tbody> <tr> <td>Guanaco 13b 4bit TS 128g</td> <td>5.960791110992432</td> <td>10.849588394165039</td> <td>7.968928813934326</td> </tr> </tbody> </table> </body> </html> More information can be found here and below: https://huggingface.co/datasets/JosephusCheung/GuanacoDataset Below is a description of Guanaco from https://guanaco-model.github.io/: Guanaco is an advanced instruction-following language model built on Meta's LLaMA 13B model. Expanding upon the initial 52K dataset from the Alpaca model, an additional 534,530 entries have been incorporated, covering English, Simplified Chinese, Traditional Chinese (Taiwan), Traditional Chinese (Hong Kong), Japanese, Deutsch, and various linguistic and grammatical tasks. This wealth of data enables Guanaco to perform exceptionally well in multilingual environments. 
In an effort to foster openness and replicability in research, we have made the [Guanaco Dataset](https://huggingface.co/datasets/JosephusCheung/GuanacoDataset) publicly accessible and released the [model weights](https://huggingface.co/JosephusCheung/Guanaco). By providing these resources, we aim to inspire more researchers to pursue related research and collectively advance the development of instruction-following language models. When utilizing the Guanaco model, please bear in mind the following points: * The Guanaco model has not been filtered for harmful, biased, or explicit content. As a result, outputs that do not adhere to ethical norms may be generated during use. Please exercise caution when using the model in research or practical applications. 1\. Improved context and prompt role support: --------------------------------------------- The new format is designed to be similar to ChatGPT, allowing for better integration with the Alpaca format and enhancing the overall user experience. Instruction is utilized as a few-shot context to support diverse inputs and responses, making it easier for the model to understand and provide accurate responses to user queries. The format is as follows: ### Instruction: User: History User Input Assistant: History Assistant Answer ### Input: System: Knowledge User: New User Input ### Response: New Assistant Answer This structured format allows for easier tracking of the conversation history and maintaining context throughout a multi-turn dialogue. 2\. Role-playing support: ------------------------- Guanaco now offers advanced role-playing support, similar to Character.AI, in English, Simplified Chinese, Traditional Chinese, Japanese, and Deutsch, making it more versatile for users from different linguistic backgrounds. Users can instruct the model to assume specific roles, historical figures, or fictional characters, as well as personalities based on their input. This allows for more engaging and immersive conversations. 
The model can use various sources of information to provide knowledge and context for the character's background and behavior, such as encyclopedic entries, first-person narrations, or a list of personality traits. The model will consistently output responses in the format "Character Name: Reply" to maintain the chosen role throughout the conversation, enhancing the user's experience. 3\. Rejection of answers and avoidance of erroneous responses: -------------------------------------------------------------- The model has been updated to handle situations where it lacks sufficient knowledge or is unable to provide a valid response more effectively. Reserved keywords have been introduced to indicate different scenarios and provide clearer communication with the user: * NO IDEA: Indicates that the model lacks the necessary knowledge to provide an accurate answer, and will explain this to the user, encouraging them to seek alternative sources. * FORBIDDEN: Indicates that the model refuses to answer due to specific reasons (e.g., legal, ethical, or safety concerns), which will be inferred based on the context of the query. * SFW: Indicates that the model refuses to answer a question because it has been filtered for NSFW content, ensuring a safer and more appropriate user experience. 4\. Continuation of responses for ongoing topics: ------------------------------------------------- The Guanaco model can now continue answering questions or discussing topics upon the user's request, making it more adaptable and better suited for extended conversations. The contextual structure consisting of System, Assistant, and User roles allows the model to engage in multi-turn dialogues, maintain context-aware conversations, and provide more coherent responses. The model can now accommodate role specification and character settings, providing a more immersive and tailored conversational experience based on the user's preferences. 
It is important to remember that Guanaco is a 13B-parameter model, and any knowledge-based content should be considered potentially inaccurate. We strongly recommend providing verifiable sources, such as Wikipedia, for knowledge-based answers. In the absence of sources, it is crucial to inform users of this limitation to prevent the dissemination of false information and to maintain transparency. 5\. Multimodal Visual Question Answering (VQA) Support: ------------------------------------------------------- Guanaco expands its capabilities into the realm of multimodal interactions, now offering support for Visual Question Answering (VQA). The model achieves this by integrating data from the blip2-flan-t5-xxl for multilingual VQA tasks, marking a significant milestone in the development of multimodal chatbots. This new feature allows the model to interpret and respond to queries that involve both text and visual inputs, providing a richer, more interactive, and comprehensive user experience. Users can now ask questions about an image, and the model will analyze the visual content in conjunction with the textual query to provide a response. A noteworthy addition is the [Guanaco VQA Dataset](https://huggingface.co/datasets/JosephusCheung/GuanacoVQADataset), publicly accessible now. Now as a multimodal chatbot, Guanaco can bridge the gap between visual and linguistic understanding, making it an incredibly versatile tool for a wide array of applications. However, as always, we encourage responsible and ethical use of this model. Please note that while Guanaco strives to provide accurate and helpful responses, it is still crucial to cross-verify the information from reliable sources for knowledge-based queries.
[ "BEAR" ]
AntoineBlanot/roberta-nli
AntoineBlanot
zero-shot-classification
[ "transformers", "pytorch", "roberta", "feature-extraction", "zero-shot-classification", "en", "dataset:multi_nli", "dataset:snli", "dataset:scitail", "endpoints_compatible", "region:us" ]
2023-05-25T02:29:17Z
2023-06-05T08:41:52+00:00
18
0
--- datasets: - multi_nli - snli - scitail language: - en metrics: - accuracy - f1 pipeline_tag: zero-shot-classification --- # RoBERTa NLI (Natural Language Inference) This model is a fine-tuned model of [roberta-large](https://huggingface.co/roberta-large) after being trained on a **mixture of NLI datasets**. This model can classify a pair of sentences (a <u>premise</u> and a <u>claim</u>) into 3 classes: - 'entailment': the claim can logically be inferred from the premise - 'contradiction': the claim contradicts the premise - 'neutral': the premise is unrelated or does not provide sufficient information to validate the claim This model can also be used for **zero-shot classification tasks** ! Please take a look at this [repo](https://github.com/AntoineBlanot/zero-nlp) for more information on zero-shot classification tasks. # Usage This model has been trained in an efficient way and thus cannot be loaded directly from HuggingFace's hub. To use that model, please follow instructions on this [repo](https://github.com/AntoineBlanot/efficient-llm). For **zero-shot classification** tasks, please take a look at this [repo](https://github.com/AntoineBlanot/zero-nlp). # Data used for training - multi_nli - snli - scitail # Evaluation results | Data | Accuracy | |:---:|:---------:| | MNLI (val. m) | 0.894 | | MNLI (val. mm) | 0.895 | | SNLI (val.) | 0.920 | | SciTail (val.) | 0.934 |
[ "SCITAIL" ]
openaccess-ai-collective/pythia-6.9b-deduped-8k
openaccess-ai-collective
text-generation
[ "transformers", "pytorch", "gpt_neox", "text-generation", "causal-lm", "pythia", "en", "dataset:EleutherAI/the_pile_deduplicated", "arxiv:2101.00027", "arxiv:2201.07311", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-06-01T04:29:40Z
2023-06-01T07:23:38+00:00
18
1
--- datasets: - EleutherAI/the_pile_deduplicated language: - en license: apache-2.0 tags: - pytorch - causal-lm - pythia --- [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) The *Pythia Scaling Suite* is a collection of models developed to facilitate interpretability research. It contains two sets of eight models of sizes 70M, 160M, 410M, 1B, 1.4B, 2.8B, 6.9B, and 12B. For each size, there are two models: one trained on the Pile, and one trained on the Pile after the dataset has been globally deduplicated. All 8 model sizes are trained on the exact same data, in the exact same order. We also provide 154 intermediate checkpoints per model, hosted on Hugging Face as branches. The Pythia model suite was designed to promote scientific research on large language models, especially interpretability research. Despite not centering downstream performance as a design goal, we find the models <a href="#evaluations">match or exceed</a> the performance of similar and same-sized models, such as those in the OPT and GPT-Neo suites. <details> <summary style="font-weight:600">Details on previous early release and naming convention.</summary> Previously, we released an early version of the Pythia suite to the public. However, we decided to retrain the model suite to address a few hyperparameter discrepancies. This model card <a href="#changelog">lists the changes</a>; see appendix B in the Pythia paper for further discussion. We found no difference in benchmark performance between the two Pythia versions. The old models are [still available](https://huggingface.co/models?other=pythia_v0), but we suggest the retrained suite if you are just starting to use Pythia.<br> **This is the current release.** Please note that all models in the *Pythia* suite were renamed in January 2023. 
For clarity, a <a href="#naming-convention-and-parameter-count">table comparing the old and new names</a> is provided in this model card, together with exact parameter counts. </details> <br> # Pythia-6.9B-deduped - 8K Context Window ## Model Details - Developed by: [EleutherAI](http://eleuther.ai) - Model type: Transformer-based Language Model - Language: English - Learn more: [Pythia's GitHub repository](https://github.com/EleutherAI/pythia) for training procedure, config files, and details on how to use. - Library: [GPT-NeoX](https://github.com/EleutherAI/gpt-neox) - License: Apache 2.0 - Contact: to ask questions about this model, join the [EleutherAI Discord](https://discord.gg/zBGx3azzUn), and post them in `#release-discussion`. Please read the existing *Pythia* documentation before asking about it in the EleutherAI Discord. For general correspondence: [contact@eleuther. ai](mailto:[email protected]). <figure> | Pythia model | Non-Embedding Params | Layers | Model Dim | Heads | Batch Size | Learning Rate | Equivalent Models | | -----------: | -------------------: | :----: | :-------: | :---: | :--------: | :-------------------: | :--------------------: | | 70M | 18,915,328 | 6 | 512 | 8 | 2M | 1.0 x 10<sup>-3</sup> | — | | 160M | 85,056,000 | 12 | 768 | 12 | 4M | 6.0 x 10<sup>-4</sup> | GPT-Neo 125M, OPT-125M | | 410M | 302,311,424 | 24 | 1024 | 16 | 4M | 3.0 x 10<sup>-4</sup> | OPT-350M | | 1.0B | 805,736,448 | 16 | 2048 | 8 | 2M | 3.0 x 10<sup>-4</sup> | — | | 1.4B | 1,208,602,624 | 24 | 2048 | 16 | 4M | 2.0 x 10<sup>-4</sup> | GPT-Neo 1.3B, OPT-1.3B | | 2.8B | 2,517,652,480 | 32 | 2560 | 32 | 2M | 1.6 x 10<sup>-4</sup> | GPT-Neo 2.7B, OPT-2.7B | | 6.9B | 6,444,163,072 | 32 | 4096 | 32 | 2M | 1.2 x 10<sup>-4</sup> | OPT-6.7B | | 12B | 11,327,027,200 | 36 | 5120 | 40 | 2M | 1.2 x 10<sup>-4</sup> | — | <figcaption>Engineering details for the <i>Pythia Suite</i>. Deduped and non-deduped models of a given size have the same hyperparameters. 
“Equivalent” models have <b>exactly</b> the same architecture, and the same number of non-embedding parameters.</figcaption> </figure> ## Uses and Limitations ### Intended Use The primary intended use of Pythia is research on the behavior, functionality, and limitations of large language models. This suite is intended to provide a controlled setting for performing scientific experiments. We also provide 154 checkpoints per model: initial `step0`, 10 log-spaced checkpoints `step{1,2,4...512}`, and 143 evenly-spaced checkpoints from `step1000` to `step143000`. These checkpoints are hosted on Hugging Face as branches. Note that branch `143000` corresponds exactly to the model checkpoint on the `main` branch of each model. You may also further fine-tune and adapt Pythia-6.9B-deduped for deployment, as long as your use is in accordance with the Apache 2.0 license. Pythia models work with the Hugging Face [Transformers Library](https://huggingface.co/docs/transformers/index). If you decide to use pre-trained Pythia-6.9B-deduped as a basis for your fine-tuned model, please conduct your own risk and bias assessment. ### Out-of-scope use The Pythia Suite is **not** intended for deployment. It is not in itself a product and cannot be used for human-facing interactions. For example, the model may generate harmful or offensive text. Please evaluate the risks associated with your particular use case. Pythia models are English-language only, and are not suitable for translation or generating text in other languages. Pythia-6.9B-deduped has not been fine-tuned for downstream contexts in which language models are commonly deployed, such as writing genre prose, or commercial chatbots. This means Pythia-6.9B-deduped will **not** respond to a given prompt the way a product like ChatGPT does. This is because, unlike this model, ChatGPT was fine-tuned using methods such as Reinforcement Learning from Human Feedback (RLHF) to better “follow” human instructions.
### Limitations and biases The core functionality of a large language model is to take a string of text and predict the next token. The token used by the model need not produce the most “accurate” text. Never rely on Pythia-6.9B-deduped to produce factually accurate output. This model was trained on [the Pile](https://pile.eleuther.ai/), a dataset known to contain profanity and texts that are lewd or otherwise offensive. See [Section 6 of the Pile paper](https://arxiv.org/abs/2101.00027) for a discussion of documented biases with regards to gender, religion, and race. Pythia-6.9B-deduped may produce socially unacceptable or undesirable text, *even if* the prompt itself does not include anything explicitly offensive. If you plan on using text generated through, for example, the Hosted Inference API, we recommend having a human curate the outputs of this language model before presenting it to other people. Please inform your audience that the text was generated by Pythia-6.9B-deduped. ### Quickstart Pythia models can be loaded and used via the following code, demonstrated here for the third `pythia-70m-deduped` checkpoint: ```python from transformers import GPTNeoXForCausalLM, AutoTokenizer model = GPTNeoXForCausalLM.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) tokenizer = AutoTokenizer.from_pretrained( "EleutherAI/pythia-70m-deduped", revision="step3000", cache_dir="./pythia-70m-deduped/step3000", ) inputs = tokenizer("Hello, I am", return_tensors="pt") tokens = model.generate(**inputs) tokenizer.decode(tokens[0]) ``` Revision/branch `step143000` corresponds exactly to the model checkpoint on the `main` branch of each model.<br> For more information on how to use all Pythia models, see [documentation on GitHub](https://github.com/EleutherAI/pythia). 
## Training ### Training data Pythia-6.9B-deduped was trained on the Pile **after the dataset has been globally deduplicated**.<br> [The Pile](https://pile.eleuther.ai/) is an 825GiB general-purpose dataset in English. It was created by EleutherAI specifically for training large language models. It contains texts from 22 diverse sources, roughly broken down into five categories: academic writing (e.g. arXiv), internet (e.g. CommonCrawl), prose (e.g. Project Gutenberg), dialogue (e.g. YouTube subtitles), and miscellaneous (e.g. GitHub, Enron Emails). See [the Pile paper](https://arxiv.org/abs/2101.00027) for a breakdown of all data sources, methodology, and a discussion of ethical implications. Consult [the datasheet](https://arxiv.org/abs/2201.07311) for more detailed documentation about the Pile and its component datasets. The Pile can be downloaded from the [official website](https://pile.eleuther.ai/), or from a [community mirror](https://the-eye.eu/public/AI/pile/). ### Training procedure All models were trained on the exact same data, in the exact same order. Each model saw 299,892,736,000 tokens during training, and 143 checkpoints for each model are saved every 2,097,152,000 tokens, spaced evenly throughout training, from `step1000` to `step143000` (which is the same as `main`). In addition, we also provide frequent early checkpoints: `step0` and `step{1,2,4...512}`. This corresponds to training for just under 1 epoch on the Pile for non-deduplicated models, and about 1.5 epochs on the deduplicated Pile. All *Pythia* models trained for 143000 steps at a batch size of 2M (2,097,152 tokens).<br> See [GitHub](https://github.com/EleutherAI/pythia) for more details on training procedure, including [how to reproduce it](https://github.com/EleutherAI/pythia/blob/main/README.md#reproducing-training).<br> Pythia uses the same tokenizer as [GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b). 
## Evaluations All 16 *Pythia* models were evaluated using the [LM Evaluation Harness](https://github.com/EleutherAI/lm-evaluation-harness). You can access the results by model and step at `results/json/*` in the [GitHub repository](https://github.com/EleutherAI/pythia/tree/main/results/json/).<br> Expand the sections below to see plots of evaluation results for all Pythia and Pythia-deduped models compared with OPT and BLOOM. <details> <summary>LAMBADA – OpenAI</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/lambada_openai_v1.png" style="width:auto"/> </details> <details> <summary>Physical Interaction: Question Answering (PIQA)</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/piqa_v1.png" style="width:auto"/> </details> <details> <summary>WinoGrande</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/winogrande_v1.png" style="width:auto"/> </details> <details> <summary>AI2 Reasoning Challenge—Easy Set</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/arc_easy_v1.png" style="width:auto"/> </details> <details> <summary>SciQ</summary> <img src="/EleutherAI/pythia-12b/resolve/main/eval_plots/sciq_v1.png" style="width:auto"/> </details> ## Changelog This section compares differences between previously released [Pythia v0](https://huggingface.co/models?other=pythia_v0) and the current models. See Appendix B of the Pythia paper for further discussion of these changes and the motivation behind them. We found that retraining Pythia had no impact on benchmark performance. - All model sizes are now trained with uniform batch size of 2M tokens. Previously, the models of size 160M, 410M, and 1.4B parameters were trained with batch sizes of 4M tokens. - We added checkpoints at initialization (step 0) and steps {1,2,4,8,16,32,64, 128,256,512} in addition to every 1000 training steps. - Flash Attention was used in the new retrained suite. 
- We remedied a minor inconsistency that existed in the original suite: all models of size 2.8B parameters or smaller had a learning rate (LR) schedule which decayed to a minimum LR of 10% the starting LR rate, but the 6.9B and 12B models all used an LR schedule which decayed to a minimum LR of 0. In the redone training runs, we rectified this inconsistency: all models now were trained with LR decaying to a minimum of 0.1× their maximum LR. ### Naming convention and parameter count *Pythia* models were renamed in January 2023. It is possible that the old naming convention still persists in some documentation by accident. The current naming convention (70M, 160M, etc.) is based on total parameter count. <figure style="width:32em"> | current Pythia suffix | old suffix | total params | non-embedding params | | --------------------: | ---------: | -------------: | -------------------: | | 70M | 19M | 70,426,624 | 18,915,328 | | 160M | 125M | 162,322,944 | 85,056,000 | | 410M | 350M | 405,334,016 | 302,311,424 | | 1B | 800M | 1,011,781,632 | 805,736,448 | | 1.4B | 1.3B | 1,414,647,808 | 1,208,602,624 | | 2.8B | 2.7B | 2,775,208,960 | 2,517,652,480 | | 6.9B | 6.7B | 6,857,302,016 | 6,444,163,072 | | 12B | 13B | 11,846,072,320 | 11,327,027,200 | </figure>
[ "SCIQ" ]
Tune-A-Video-library/df-cpt-mo-di-bear-guitar
Tune-A-Video-library
text-to-video
[ "diffusers", "tune-a-video", "text-to-video", "arxiv:2212.11565", "arxiv:2112.10752", "base_model:nitrosocke/mo-di-diffusion", "base_model:finetune:nitrosocke/mo-di-diffusion", "license:creativeml-openrail-m", "diffusers:TuneAVideoPipeline", "region:us" ]
2023-06-09T10:05:29Z
2023-09-24T03:58:29+00:00
18
2
--- base_model: nitrosocke/mo-di-diffusion license: creativeml-openrail-m tags: - tune-a-video - text-to-video - diffusers training_prompt: A bear is playing guitar. inference: false --- # Tune-A-Video - Modern Disney ## Model Description This is a diffusers compatible checkpoint. When used with DiffusionPipeline, returns an instance of TuneAVideoPipeline >df-cpt is used to indicate that its a diffusers compatible equivalent of Tune-A-Video-library/mo-di-bear-guitar . - Base model: [nitrosocke/mo-di-diffusion](https://huggingface.co/nitrosocke/mo-di-diffusion) - Training prompt: a bear is playing guitar. ![sample-train](samples/train.gif) ## Samples ![sample-500](samples/princess.gif) Test prompt: "A princess playing a guitar, modern disney style" ## Usage ### Loading with a pre-existing Text2Image checkpoint ```python import torch from diffusers import TuneAVideoPipeline, DDIMScheduler, UNet3DConditionModel from diffusers.utils import export_to_video from PIL import Image # Use any pretrained Text2Image checkpoint based on stable diffusion pretrained_model_path = "nitrosocke/mo-di-diffusion" unet = UNet3DConditionModel.from_pretrained( "Tune-A-Video-library/df-cpt-mo-di-bear-guitar", subfolder="unet", torch_dtype=torch.float16 ).to("cuda") pipe = TuneAVideoPipeline.from_pretrained(pretrained_model_path, unet=unet, torch_dtype=torch.float16).to("cuda") prompt = "A princess playing a guitar, modern disney style" generator = torch.Generator(device="cuda").manual_seed(42) video_frames = pipe(prompt, video_length=3, generator=generator, num_inference_steps=50, output_type="np").frames # Saving to gif. 
pil_frames = [Image.fromarray(frame) for frame in video_frames] duration = len(pil_frames) / 8 pil_frames[0].save( "animation.gif", save_all=True, append_images=pil_frames[1:], # append rest of the images duration=duration * 1000, # in milliseconds loop=0, ) # Saving to video video_path = export_to_video(video_frames) ``` ### Loading a saved Tune-A-Video checkpoint ```python import torch from diffusers import DiffusionPipeline, DDIMScheduler from diffusers.utils import export_to_video from PIL import Image pipe = DiffusionPipeline.from_pretrained( "Tune-A-Video-library/df-cpt-mo-di-bear-guitar", torch_dtype=torch.float16 ).to("cuda") prompt = "A princess playing a guitar, modern disney style" generator = torch.Generator(device="cuda").manual_seed(42) video_frames = pipe(prompt, video_length=3, generator=generator, num_inference_steps=50, output_type="np").frames # Saving to gif. pil_frames = [Image.fromarray(frame) for frame in video_frames] duration = len(pil_frames) / 8 pil_frames[0].save( "animation.gif", save_all=True, append_images=pil_frames[1:], # append rest of the images duration=duration * 1000, # in milliseconds loop=0, ) # Saving to video video_path = export_to_video(video_frames) ``` ## Related Papers: - [Tune-A-Video](https://arxiv.org/abs/2212.11565): One-Shot Tuning of Image Diffusion Models for Text-to-Video Generation - [Stable Diffusion](https://arxiv.org/abs/2112.10752): High-Resolution Image Synthesis with Latent Diffusion Models
[ "BEAR" ]
IIC/roberta-large-bne-meddocan
IIC
token-classification
[ "transformers", "pytorch", "safetensors", "roberta", "text-classification", "biomedical", "clinical", "spanish", "roberta-large-bne", "token-classification", "es", "dataset:bigbio/meddocan", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-21T15:44:31Z
2023-07-18T07:10:39+00:00
18
0
--- datasets: - bigbio/meddocan language: es license: apache-2.0 metrics: - f1 pipeline_tag: token-classification tags: - biomedical - clinical - spanish - roberta-large-bne model-index: - name: IIC/roberta-large-bne-meddocan results: - task: type: token-classification dataset: name: meddocan type: bigbio/meddocan split: test metrics: - type: f1 value: 0.977 name: f1 --- # roberta-large-bne-meddocan This model is a finetuned version of roberta-large-bne for the meddocan dataset used in a benchmark in the paper TODO. The model has a F1 of 0.977 Please refer to the original publication for more information TODO LINK ## Parameters used | parameter | Value | |-------------------------|:-----:| | batch size | 16 | | learning rate | 3e-05 | | classifier dropout | 0.2 | | warmup ratio | 0 | | warmup steps | 0 | | weight decay | 0 | | optimizer | AdamW | | epochs | 10 | | early stopping patience | 3 | ## BibTeX entry and citation info ```bibtex TODO ```
[ "MEDDOCAN" ]
IIC/XLM_R_Galen-meddocan
IIC
token-classification
[ "transformers", "pytorch", "xlm-roberta", "text-classification", "biomedical", "clinical", "spanish", "XLM_R_Galen", "token-classification", "es", "dataset:bigbio/meddocan", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-21T15:51:34Z
2023-06-21T15:53:44+00:00
18
0
--- datasets: - bigbio/meddocan language: es license: mit metrics: - f1 pipeline_tag: token-classification tags: - biomedical - clinical - spanish - XLM_R_Galen model-index: - name: IIC/XLM_R_Galen-meddocan results: - task: type: token-classification dataset: name: meddocan type: bigbio/meddocan split: test metrics: - type: f1 value: 0.947 name: f1 --- # XLM_R_Galen-meddocan This model is a finetuned version of XLM_R_Galen for the meddocan dataset used in a benchmark in the paper TODO. The model has a F1 of 0.947 Please refer to the original publication for more information TODO LINK ## Parameters used | parameter | Value | |-------------------------|:-----:| | batch size | 16 | | learning rate | 4e-05 | | classifier dropout | 0.2 | | warmup ratio | 0 | | warmup steps | 0 | | weight decay | 0 | | optimizer | AdamW | | epochs | 10 | | early stopping patience | 3 | ## BibTeX entry and citation info ```bibtex TODO ```
[ "MEDDOCAN" ]
zwellington/bart-pubhealth-expanded-hi-grad
zwellington
text2text-generation
[ "transformers", "pytorch", "bart", "text2text-generation", "generated_from_trainer", "dataset:clupubhealth", "base_model:facebook/bart-large", "base_model:finetune:facebook/bart-large", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-08T15:25:28Z
2023-08-09T12:17:02+00:00
18
0
--- base_model: facebook/bart-large datasets: - clupubhealth license: apache-2.0 metrics: - rouge tags: - generated_from_trainer model-index: - name: bart-pubhealth-expanded-hi-grad results: - task: type: text2text-generation name: Sequence-to-sequence Language Modeling dataset: name: clupubhealth type: clupubhealth config: expanded split: test args: expanded metrics: - type: rouge value: 30.2592 name: Rouge1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-pubhealth-expanded-hi-grad This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on the clupubhealth dataset. It achieves the following results on the evaluation set: - Loss: 2.0581 - Rouge1: 30.2592 - Rouge2: 11.7027 - Rougel: 24.1706 - Rougelsum: 24.3596 - Gen Len: 19.95 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 950 - total_train_batch_size: 15200 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 3.7893 | 0.49 | 2 | 2.3943 | 20.5187 | 5.4764 | 15.9378 | 16.2797 | 20.0 | | 3.4045 | 0.98 | 4 | 2.1599 | 24.0858 | 7.8207 | 19.0412 | 19.1609 | 19.88 | | 3.2488 | 1.47 | 6 | 2.1026 | 27.3466 | 9.369 | 21.1419 | 21.3136 | 19.865 | | 3.1823 | 1.96 | 8 | 2.1324 | 28.825 | 9.6007 | 22.0963 | 22.3776 | 19.82 | | 3.1263 | 2.44 | 
10 | 2.1105 | 29.2694 | 10.5001 | 23.2842 | 23.5473 | 19.85 | | 3.0834 | 2.93 | 12 | 2.0837 | 28.5975 | 10.2016 | 22.048 | 22.1341 | 19.915 | | 3.0283 | 3.42 | 14 | 2.0773 | 28.5813 | 10.447 | 22.7456 | 22.8496 | 19.91 | | 3.0301 | 3.91 | 16 | 2.0730 | 30.1049 | 11.4375 | 24.083 | 24.3045 | 19.945 | | 2.9851 | 4.4 | 18 | 2.0775 | 29.2224 | 10.2722 | 22.7019 | 23.0038 | 19.95 | | 2.9769 | 4.89 | 20 | 2.0777 | 29.6981 | 10.7044 | 23.2487 | 23.5232 | 19.96 | | 2.9623 | 5.38 | 22 | 2.0711 | 29.0438 | 10.5105 | 23.1751 | 23.415 | 19.92 | | 2.9421 | 5.87 | 24 | 2.0676 | 29.096 | 10.6599 | 23.1381 | 23.3765 | 19.985 | | 2.9234 | 6.36 | 26 | 2.0646 | 29.6561 | 10.9096 | 23.2384 | 23.4265 | 19.985 | | 2.9107 | 6.85 | 28 | 2.0616 | 29.7134 | 11.1686 | 23.272 | 23.4475 | 19.985 | | 2.9077 | 7.33 | 30 | 2.0593 | 29.5055 | 11.0256 | 23.4406 | 23.6653 | 19.955 | | 2.9072 | 7.82 | 32 | 2.0585 | 30.0504 | 11.433 | 23.9176 | 24.1728 | 19.95 | | 2.8951 | 8.31 | 34 | 2.0583 | 29.9401 | 11.602 | 23.948 | 24.1323 | 19.95 | | 2.8955 | 8.8 | 36 | 2.0584 | 30.1158 | 11.4745 | 24.0509 | 24.2465 | 19.94 | | 2.8774 | 9.29 | 38 | 2.0582 | 30.0476 | 11.4465 | 23.8956 | 24.0527 | 19.945 | | 2.8851 | 9.78 | 40 | 2.0581 | 30.2592 | 11.7027 | 24.1706 | 24.3596 | 19.95 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
[ "PUBHEALTH" ]
jncraton/gte-small-ct2-int8
jncraton
sentence-similarity
[ "sentence-transformers", "mteb", "sentence-similarity", "Sentence Transformers", "en", "arxiv:2308.03281", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-16T20:38:41Z
2023-08-16T20:48:49+00:00
18
0
--- language: - en license: mit tags: - mteb - sentence-similarity - sentence-transformers - Sentence Transformers model-index: - name: gte-small results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 73.22388059701493 - type: ap value: 36.09895941426988 - type: f1 value: 67.3205651539195 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 91.81894999999999 - type: ap value: 88.5240138417305 - type: f1 value: 91.80367382706962 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 48.032 - type: f1 value: 47.4490665674719 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 30.725 - type: map_at_10 value: 46.604 - type: map_at_100 value: 47.535 - type: map_at_1000 value: 47.538000000000004 - type: map_at_3 value: 41.833 - type: map_at_5 value: 44.61 - type: mrr_at_1 value: 31.223 - type: mrr_at_10 value: 46.794000000000004 - type: mrr_at_100 value: 47.725 - type: mrr_at_1000 value: 47.727000000000004 - type: mrr_at_3 value: 42.07 - type: mrr_at_5 value: 44.812000000000005 - type: ndcg_at_1 value: 30.725 - type: ndcg_at_10 value: 55.440999999999995 - type: ndcg_at_100 value: 59.134 - type: ndcg_at_1000 value: 59.199 - type: ndcg_at_3 value: 45.599000000000004 - type: ndcg_at_5 value: 50.637 - type: precision_at_1 value: 30.725 - type: precision_at_10 value: 8.364 - type: precision_at_100 value: 0.991 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 
18.848000000000003 - type: precision_at_5 value: 13.77 - type: recall_at_1 value: 30.725 - type: recall_at_10 value: 83.64200000000001 - type: recall_at_100 value: 99.14699999999999 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 56.543 - type: recall_at_5 value: 68.848 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 47.90178078197678 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 40.25728393431922 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 61.720297062897764 - type: mrr value: 75.24139295607439 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 89.43527309184616 - type: cos_sim_spearman value: 88.17128615100206 - type: euclidean_pearson value: 87.89922623089282 - type: euclidean_spearman value: 87.96104039655451 - type: manhattan_pearson value: 87.9818290932077 - type: manhattan_spearman value: 88.00923426576885 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 84.0844155844156 - type: f1 value: 84.01485017302213 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 38.36574769259432 - task: type: Clustering dataset: 
name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 35.4857033165287 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 30.261 - type: map_at_10 value: 42.419000000000004 - type: map_at_100 value: 43.927 - type: map_at_1000 value: 44.055 - type: map_at_3 value: 38.597 - type: map_at_5 value: 40.701 - type: mrr_at_1 value: 36.91 - type: mrr_at_10 value: 48.02 - type: mrr_at_100 value: 48.658 - type: mrr_at_1000 value: 48.708 - type: mrr_at_3 value: 44.945 - type: mrr_at_5 value: 46.705000000000005 - type: ndcg_at_1 value: 36.91 - type: ndcg_at_10 value: 49.353 - type: ndcg_at_100 value: 54.456 - type: ndcg_at_1000 value: 56.363 - type: ndcg_at_3 value: 43.483 - type: ndcg_at_5 value: 46.150999999999996 - type: precision_at_1 value: 36.91 - type: precision_at_10 value: 9.700000000000001 - type: precision_at_100 value: 1.557 - type: precision_at_1000 value: 0.202 - type: precision_at_3 value: 21.078 - type: precision_at_5 value: 15.421999999999999 - type: recall_at_1 value: 30.261 - type: recall_at_10 value: 63.242 - type: recall_at_100 value: 84.09100000000001 - type: recall_at_1000 value: 96.143 - type: recall_at_3 value: 46.478 - type: recall_at_5 value: 53.708 - type: map_at_1 value: 31.145 - type: map_at_10 value: 40.996 - type: map_at_100 value: 42.266999999999996 - type: map_at_1000 value: 42.397 - type: map_at_3 value: 38.005 - type: map_at_5 value: 39.628 - type: mrr_at_1 value: 38.344 - type: mrr_at_10 value: 46.827000000000005 - type: mrr_at_100 value: 47.446 - type: mrr_at_1000 value: 47.489 - type: mrr_at_3 value: 44.448 - type: mrr_at_5 value: 45.747 - type: ndcg_at_1 value: 38.344 - type: ndcg_at_10 value: 46.733000000000004 - type: ndcg_at_100 value: 51.103 - type: ndcg_at_1000 value: 53.075 - type: ndcg_at_3 
value: 42.366 - type: ndcg_at_5 value: 44.242 - type: precision_at_1 value: 38.344 - type: precision_at_10 value: 8.822000000000001 - type: precision_at_100 value: 1.417 - type: precision_at_1000 value: 0.187 - type: precision_at_3 value: 20.403 - type: precision_at_5 value: 14.306 - type: recall_at_1 value: 31.145 - type: recall_at_10 value: 56.909 - type: recall_at_100 value: 75.274 - type: recall_at_1000 value: 87.629 - type: recall_at_3 value: 43.784 - type: recall_at_5 value: 49.338 - type: map_at_1 value: 38.83 - type: map_at_10 value: 51.553000000000004 - type: map_at_100 value: 52.581 - type: map_at_1000 value: 52.638 - type: map_at_3 value: 48.112 - type: map_at_5 value: 50.095 - type: mrr_at_1 value: 44.513999999999996 - type: mrr_at_10 value: 54.998000000000005 - type: mrr_at_100 value: 55.650999999999996 - type: mrr_at_1000 value: 55.679 - type: mrr_at_3 value: 52.602000000000004 - type: mrr_at_5 value: 53.931 - type: ndcg_at_1 value: 44.513999999999996 - type: ndcg_at_10 value: 57.67400000000001 - type: ndcg_at_100 value: 61.663999999999994 - type: ndcg_at_1000 value: 62.743 - type: ndcg_at_3 value: 51.964 - type: ndcg_at_5 value: 54.773 - type: precision_at_1 value: 44.513999999999996 - type: precision_at_10 value: 9.423 - type: precision_at_100 value: 1.2309999999999999 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 23.323 - type: precision_at_5 value: 16.163 - type: recall_at_1 value: 38.83 - type: recall_at_10 value: 72.327 - type: recall_at_100 value: 89.519 - type: recall_at_1000 value: 97.041 - type: recall_at_3 value: 57.206 - type: recall_at_5 value: 63.88399999999999 - type: map_at_1 value: 25.484 - type: map_at_10 value: 34.527 - type: map_at_100 value: 35.661 - type: map_at_1000 value: 35.739 - type: map_at_3 value: 32.199 - type: map_at_5 value: 33.632 - type: mrr_at_1 value: 27.458 - type: mrr_at_10 value: 36.543 - type: mrr_at_100 value: 37.482 - type: mrr_at_1000 value: 37.543 - type: mrr_at_3 value: 
34.256 - type: mrr_at_5 value: 35.618 - type: ndcg_at_1 value: 27.458 - type: ndcg_at_10 value: 39.396 - type: ndcg_at_100 value: 44.742 - type: ndcg_at_1000 value: 46.708 - type: ndcg_at_3 value: 34.817 - type: ndcg_at_5 value: 37.247 - type: precision_at_1 value: 27.458 - type: precision_at_10 value: 5.976999999999999 - type: precision_at_100 value: 0.907 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 14.878 - type: precision_at_5 value: 10.35 - type: recall_at_1 value: 25.484 - type: recall_at_10 value: 52.317 - type: recall_at_100 value: 76.701 - type: recall_at_1000 value: 91.408 - type: recall_at_3 value: 40.043 - type: recall_at_5 value: 45.879 - type: map_at_1 value: 16.719 - type: map_at_10 value: 25.269000000000002 - type: map_at_100 value: 26.442 - type: map_at_1000 value: 26.557 - type: map_at_3 value: 22.56 - type: map_at_5 value: 24.082 - type: mrr_at_1 value: 20.896 - type: mrr_at_10 value: 29.982999999999997 - type: mrr_at_100 value: 30.895 - type: mrr_at_1000 value: 30.961 - type: mrr_at_3 value: 27.239 - type: mrr_at_5 value: 28.787000000000003 - type: ndcg_at_1 value: 20.896 - type: ndcg_at_10 value: 30.814000000000004 - type: ndcg_at_100 value: 36.418 - type: ndcg_at_1000 value: 39.182 - type: ndcg_at_3 value: 25.807999999999996 - type: ndcg_at_5 value: 28.143 - type: precision_at_1 value: 20.896 - type: precision_at_10 value: 5.821 - type: precision_at_100 value: 0.991 - type: precision_at_1000 value: 0.136 - type: precision_at_3 value: 12.562000000000001 - type: precision_at_5 value: 9.254 - type: recall_at_1 value: 16.719 - type: recall_at_10 value: 43.155 - type: recall_at_100 value: 67.831 - type: recall_at_1000 value: 87.617 - type: recall_at_3 value: 29.259 - type: recall_at_5 value: 35.260999999999996 - type: map_at_1 value: 29.398999999999997 - type: map_at_10 value: 39.876 - type: map_at_100 value: 41.205999999999996 - type: map_at_1000 value: 41.321999999999996 - type: map_at_3 value: 36.588 - type: 
map_at_5 value: 38.538 - type: mrr_at_1 value: 35.9 - type: mrr_at_10 value: 45.528 - type: mrr_at_100 value: 46.343 - type: mrr_at_1000 value: 46.388 - type: mrr_at_3 value: 42.862 - type: mrr_at_5 value: 44.440000000000005 - type: ndcg_at_1 value: 35.9 - type: ndcg_at_10 value: 45.987 - type: ndcg_at_100 value: 51.370000000000005 - type: ndcg_at_1000 value: 53.400000000000006 - type: ndcg_at_3 value: 40.841 - type: ndcg_at_5 value: 43.447 - type: precision_at_1 value: 35.9 - type: precision_at_10 value: 8.393 - type: precision_at_100 value: 1.283 - type: precision_at_1000 value: 0.166 - type: precision_at_3 value: 19.538 - type: precision_at_5 value: 13.975000000000001 - type: recall_at_1 value: 29.398999999999997 - type: recall_at_10 value: 58.361 - type: recall_at_100 value: 81.081 - type: recall_at_1000 value: 94.004 - type: recall_at_3 value: 43.657000000000004 - type: recall_at_5 value: 50.519999999999996 - type: map_at_1 value: 21.589 - type: map_at_10 value: 31.608999999999998 - type: map_at_100 value: 33.128 - type: map_at_1000 value: 33.247 - type: map_at_3 value: 28.671999999999997 - type: map_at_5 value: 30.233999999999998 - type: mrr_at_1 value: 26.712000000000003 - type: mrr_at_10 value: 36.713 - type: mrr_at_100 value: 37.713 - type: mrr_at_1000 value: 37.771 - type: mrr_at_3 value: 34.075 - type: mrr_at_5 value: 35.451 - type: ndcg_at_1 value: 26.712000000000003 - type: ndcg_at_10 value: 37.519999999999996 - type: ndcg_at_100 value: 43.946000000000005 - type: ndcg_at_1000 value: 46.297 - type: ndcg_at_3 value: 32.551 - type: ndcg_at_5 value: 34.660999999999994 - type: precision_at_1 value: 26.712000000000003 - type: precision_at_10 value: 7.066 - type: precision_at_100 value: 1.216 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 15.906 - type: precision_at_5 value: 11.437999999999999 - type: recall_at_1 value: 21.589 - type: recall_at_10 value: 50.090999999999994 - type: recall_at_100 value: 77.43900000000001 - type: 
recall_at_1000 value: 93.35900000000001 - type: recall_at_3 value: 36.028999999999996 - type: recall_at_5 value: 41.698 - type: map_at_1 value: 25.121666666666663 - type: map_at_10 value: 34.46258333333334 - type: map_at_100 value: 35.710499999999996 - type: map_at_1000 value: 35.82691666666666 - type: map_at_3 value: 31.563249999999996 - type: map_at_5 value: 33.189750000000004 - type: mrr_at_1 value: 29.66441666666667 - type: mrr_at_10 value: 38.5455 - type: mrr_at_100 value: 39.39566666666667 - type: mrr_at_1000 value: 39.45325 - type: mrr_at_3 value: 36.003333333333345 - type: mrr_at_5 value: 37.440916666666666 - type: ndcg_at_1 value: 29.66441666666667 - type: ndcg_at_10 value: 39.978416666666675 - type: ndcg_at_100 value: 45.278666666666666 - type: ndcg_at_1000 value: 47.52275 - type: ndcg_at_3 value: 35.00058333333334 - type: ndcg_at_5 value: 37.34908333333333 - type: precision_at_1 value: 29.66441666666667 - type: precision_at_10 value: 7.094500000000001 - type: precision_at_100 value: 1.1523333333333332 - type: precision_at_1000 value: 0.15358333333333332 - type: precision_at_3 value: 16.184166666666663 - type: precision_at_5 value: 11.6005 - type: recall_at_1 value: 25.121666666666663 - type: recall_at_10 value: 52.23975000000001 - type: recall_at_100 value: 75.48408333333333 - type: recall_at_1000 value: 90.95316666666668 - type: recall_at_3 value: 38.38458333333333 - type: recall_at_5 value: 44.39933333333333 - type: map_at_1 value: 23.569000000000003 - type: map_at_10 value: 30.389 - type: map_at_100 value: 31.396 - type: map_at_1000 value: 31.493 - type: map_at_3 value: 28.276 - type: map_at_5 value: 29.459000000000003 - type: mrr_at_1 value: 26.534000000000002 - type: mrr_at_10 value: 33.217999999999996 - type: mrr_at_100 value: 34.054 - type: mrr_at_1000 value: 34.12 - type: mrr_at_3 value: 31.058000000000003 - type: mrr_at_5 value: 32.330999999999996 - type: ndcg_at_1 value: 26.534000000000002 - type: ndcg_at_10 value: 34.608 - type: ndcg_at_100 
value: 39.391999999999996 - type: ndcg_at_1000 value: 41.837999999999994 - type: ndcg_at_3 value: 30.564999999999998 - type: ndcg_at_5 value: 32.509 - type: precision_at_1 value: 26.534000000000002 - type: precision_at_10 value: 5.414 - type: precision_at_100 value: 0.847 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 12.986 - type: precision_at_5 value: 9.202 - type: recall_at_1 value: 23.569000000000003 - type: recall_at_10 value: 44.896 - type: recall_at_100 value: 66.476 - type: recall_at_1000 value: 84.548 - type: recall_at_3 value: 33.79 - type: recall_at_5 value: 38.512 - type: map_at_1 value: 16.36 - type: map_at_10 value: 23.57 - type: map_at_100 value: 24.698999999999998 - type: map_at_1000 value: 24.834999999999997 - type: map_at_3 value: 21.093 - type: map_at_5 value: 22.418 - type: mrr_at_1 value: 19.718 - type: mrr_at_10 value: 27.139999999999997 - type: mrr_at_100 value: 28.097 - type: mrr_at_1000 value: 28.177999999999997 - type: mrr_at_3 value: 24.805 - type: mrr_at_5 value: 26.121 - type: ndcg_at_1 value: 19.718 - type: ndcg_at_10 value: 28.238999999999997 - type: ndcg_at_100 value: 33.663 - type: ndcg_at_1000 value: 36.763 - type: ndcg_at_3 value: 23.747 - type: ndcg_at_5 value: 25.796000000000003 - type: precision_at_1 value: 19.718 - type: precision_at_10 value: 5.282 - type: precision_at_100 value: 0.9390000000000001 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 11.264000000000001 - type: precision_at_5 value: 8.341 - type: recall_at_1 value: 16.36 - type: recall_at_10 value: 38.669 - type: recall_at_100 value: 63.184 - type: recall_at_1000 value: 85.33800000000001 - type: recall_at_3 value: 26.214 - type: recall_at_5 value: 31.423000000000002 - type: map_at_1 value: 25.618999999999996 - type: map_at_10 value: 34.361999999999995 - type: map_at_100 value: 35.534 - type: map_at_1000 value: 35.634 - type: map_at_3 value: 31.402 - type: map_at_5 value: 32.815 - type: mrr_at_1 
value: 30.037000000000003 - type: mrr_at_10 value: 38.284 - type: mrr_at_100 value: 39.141999999999996 - type: mrr_at_1000 value: 39.2 - type: mrr_at_3 value: 35.603 - type: mrr_at_5 value: 36.867 - type: ndcg_at_1 value: 30.037000000000003 - type: ndcg_at_10 value: 39.87 - type: ndcg_at_100 value: 45.243 - type: ndcg_at_1000 value: 47.507 - type: ndcg_at_3 value: 34.371 - type: ndcg_at_5 value: 36.521 - type: precision_at_1 value: 30.037000000000003 - type: precision_at_10 value: 6.819 - type: precision_at_100 value: 1.0699999999999998 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 15.392 - type: precision_at_5 value: 10.821 - type: recall_at_1 value: 25.618999999999996 - type: recall_at_10 value: 52.869 - type: recall_at_100 value: 76.395 - type: recall_at_1000 value: 92.19500000000001 - type: recall_at_3 value: 37.943 - type: recall_at_5 value: 43.342999999999996 - type: map_at_1 value: 23.283 - type: map_at_10 value: 32.155 - type: map_at_100 value: 33.724 - type: map_at_1000 value: 33.939 - type: map_at_3 value: 29.018 - type: map_at_5 value: 30.864000000000004 - type: mrr_at_1 value: 28.063 - type: mrr_at_10 value: 36.632 - type: mrr_at_100 value: 37.606 - type: mrr_at_1000 value: 37.671 - type: mrr_at_3 value: 33.992 - type: mrr_at_5 value: 35.613 - type: ndcg_at_1 value: 28.063 - type: ndcg_at_10 value: 38.024 - type: ndcg_at_100 value: 44.292 - type: ndcg_at_1000 value: 46.818 - type: ndcg_at_3 value: 32.965 - type: ndcg_at_5 value: 35.562 - type: precision_at_1 value: 28.063 - type: precision_at_10 value: 7.352 - type: precision_at_100 value: 1.514 - type: precision_at_1000 value: 0.23800000000000002 - type: precision_at_3 value: 15.481 - type: precision_at_5 value: 11.542 - type: recall_at_1 value: 23.283 - type: recall_at_10 value: 49.756 - type: recall_at_100 value: 78.05 - type: recall_at_1000 value: 93.854 - type: recall_at_3 value: 35.408 - type: recall_at_5 value: 42.187000000000005 - type: map_at_1 value: 
19.201999999999998 - type: map_at_10 value: 26.826 - type: map_at_100 value: 27.961000000000002 - type: map_at_1000 value: 28.066999999999997 - type: map_at_3 value: 24.237000000000002 - type: map_at_5 value: 25.811 - type: mrr_at_1 value: 20.887 - type: mrr_at_10 value: 28.660000000000004 - type: mrr_at_100 value: 29.660999999999998 - type: mrr_at_1000 value: 29.731 - type: mrr_at_3 value: 26.155 - type: mrr_at_5 value: 27.68 - type: ndcg_at_1 value: 20.887 - type: ndcg_at_10 value: 31.523 - type: ndcg_at_100 value: 37.055 - type: ndcg_at_1000 value: 39.579 - type: ndcg_at_3 value: 26.529000000000003 - type: ndcg_at_5 value: 29.137 - type: precision_at_1 value: 20.887 - type: precision_at_10 value: 5.065 - type: precision_at_100 value: 0.856 - type: precision_at_1000 value: 0.11900000000000001 - type: precision_at_3 value: 11.399 - type: precision_at_5 value: 8.392 - type: recall_at_1 value: 19.201999999999998 - type: recall_at_10 value: 44.285000000000004 - type: recall_at_100 value: 69.768 - type: recall_at_1000 value: 88.302 - type: recall_at_3 value: 30.804 - type: recall_at_5 value: 37.039 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 11.244 - type: map_at_10 value: 18.956 - type: map_at_100 value: 20.674 - type: map_at_1000 value: 20.863 - type: map_at_3 value: 15.923000000000002 - type: map_at_5 value: 17.518 - type: mrr_at_1 value: 25.080999999999996 - type: mrr_at_10 value: 35.94 - type: mrr_at_100 value: 36.969 - type: mrr_at_1000 value: 37.013 - type: mrr_at_3 value: 32.617000000000004 - type: mrr_at_5 value: 34.682 - type: ndcg_at_1 value: 25.080999999999996 - type: ndcg_at_10 value: 26.539 - type: ndcg_at_100 value: 33.601 - type: ndcg_at_1000 value: 37.203 - type: ndcg_at_3 value: 21.695999999999998 - type: ndcg_at_5 value: 23.567 - type: precision_at_1 value: 25.080999999999996 - type: precision_at_10 value: 8.143 - type: precision_at_100 
value: 1.5650000000000002 - type: precision_at_1000 value: 0.22300000000000003 - type: precision_at_3 value: 15.983 - type: precision_at_5 value: 12.417 - type: recall_at_1 value: 11.244 - type: recall_at_10 value: 31.457 - type: recall_at_100 value: 55.92 - type: recall_at_1000 value: 76.372 - type: recall_at_3 value: 19.784 - type: recall_at_5 value: 24.857000000000003 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 8.595 - type: map_at_10 value: 18.75 - type: map_at_100 value: 26.354 - type: map_at_1000 value: 27.912 - type: map_at_3 value: 13.794 - type: map_at_5 value: 16.021 - type: mrr_at_1 value: 65.75 - type: mrr_at_10 value: 73.837 - type: mrr_at_100 value: 74.22800000000001 - type: mrr_at_1000 value: 74.234 - type: mrr_at_3 value: 72.5 - type: mrr_at_5 value: 73.387 - type: ndcg_at_1 value: 52.625 - type: ndcg_at_10 value: 39.101 - type: ndcg_at_100 value: 43.836000000000006 - type: ndcg_at_1000 value: 51.086 - type: ndcg_at_3 value: 44.229 - type: ndcg_at_5 value: 41.555 - type: precision_at_1 value: 65.75 - type: precision_at_10 value: 30.45 - type: precision_at_100 value: 9.81 - type: precision_at_1000 value: 2.045 - type: precision_at_3 value: 48.667 - type: precision_at_5 value: 40.8 - type: recall_at_1 value: 8.595 - type: recall_at_10 value: 24.201 - type: recall_at_100 value: 50.096 - type: recall_at_1000 value: 72.677 - type: recall_at_3 value: 15.212 - type: recall_at_5 value: 18.745 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 46.565 - type: f1 value: 41.49914329345582 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 66.60000000000001 - type: map_at_10 value: 76.838 - type: map_at_100 value: 77.076 - 
type: map_at_1000 value: 77.09 - type: map_at_3 value: 75.545 - type: map_at_5 value: 76.39 - type: mrr_at_1 value: 71.707 - type: mrr_at_10 value: 81.514 - type: mrr_at_100 value: 81.64099999999999 - type: mrr_at_1000 value: 81.645 - type: mrr_at_3 value: 80.428 - type: mrr_at_5 value: 81.159 - type: ndcg_at_1 value: 71.707 - type: ndcg_at_10 value: 81.545 - type: ndcg_at_100 value: 82.477 - type: ndcg_at_1000 value: 82.73899999999999 - type: ndcg_at_3 value: 79.292 - type: ndcg_at_5 value: 80.599 - type: precision_at_1 value: 71.707 - type: precision_at_10 value: 10.035 - type: precision_at_100 value: 1.068 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 30.918 - type: precision_at_5 value: 19.328 - type: recall_at_1 value: 66.60000000000001 - type: recall_at_10 value: 91.353 - type: recall_at_100 value: 95.21 - type: recall_at_1000 value: 96.89999999999999 - type: recall_at_3 value: 85.188 - type: recall_at_5 value: 88.52 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 19.338 - type: map_at_10 value: 31.752000000000002 - type: map_at_100 value: 33.516 - type: map_at_1000 value: 33.694 - type: map_at_3 value: 27.716 - type: map_at_5 value: 29.67 - type: mrr_at_1 value: 38.117000000000004 - type: mrr_at_10 value: 47.323 - type: mrr_at_100 value: 48.13 - type: mrr_at_1000 value: 48.161 - type: mrr_at_3 value: 45.062000000000005 - type: mrr_at_5 value: 46.358 - type: ndcg_at_1 value: 38.117000000000004 - type: ndcg_at_10 value: 39.353 - type: ndcg_at_100 value: 46.044000000000004 - type: ndcg_at_1000 value: 49.083 - type: ndcg_at_3 value: 35.891 - type: ndcg_at_5 value: 36.661 - type: precision_at_1 value: 38.117000000000004 - type: precision_at_10 value: 11.187999999999999 - type: precision_at_100 value: 1.802 - type: precision_at_1000 value: 0.234 - type: precision_at_3 value: 24.126 - type: precision_at_5 value: 17.562 - type: recall_at_1 
value: 19.338 - type: recall_at_10 value: 45.735 - type: recall_at_100 value: 71.281 - type: recall_at_1000 value: 89.537 - type: recall_at_3 value: 32.525 - type: recall_at_5 value: 37.671 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 36.995 - type: map_at_10 value: 55.032000000000004 - type: map_at_100 value: 55.86 - type: map_at_1000 value: 55.932 - type: map_at_3 value: 52.125 - type: map_at_5 value: 53.884 - type: mrr_at_1 value: 73.991 - type: mrr_at_10 value: 80.096 - type: mrr_at_100 value: 80.32000000000001 - type: mrr_at_1000 value: 80.331 - type: mrr_at_3 value: 79.037 - type: mrr_at_5 value: 79.719 - type: ndcg_at_1 value: 73.991 - type: ndcg_at_10 value: 63.786 - type: ndcg_at_100 value: 66.78 - type: ndcg_at_1000 value: 68.255 - type: ndcg_at_3 value: 59.501000000000005 - type: ndcg_at_5 value: 61.82299999999999 - type: precision_at_1 value: 73.991 - type: precision_at_10 value: 13.157 - type: precision_at_100 value: 1.552 - type: precision_at_1000 value: 0.17500000000000002 - type: precision_at_3 value: 37.519999999999996 - type: precision_at_5 value: 24.351 - type: recall_at_1 value: 36.995 - type: recall_at_10 value: 65.78699999999999 - type: recall_at_100 value: 77.583 - type: recall_at_1000 value: 87.421 - type: recall_at_3 value: 56.279999999999994 - type: recall_at_5 value: 60.878 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 86.80239999999999 - type: ap value: 81.97305141128378 - type: f1 value: 86.76976305549273 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 21.166 - type: map_at_10 value: 33.396 - type: map_at_100 value: 34.588 - type: map_at_1000 value: 34.637 - type: map_at_3 value: 29.509999999999998 - 
type: map_at_5 value: 31.719 - type: mrr_at_1 value: 21.762 - type: mrr_at_10 value: 33.969 - type: mrr_at_100 value: 35.099000000000004 - type: mrr_at_1000 value: 35.141 - type: mrr_at_3 value: 30.148000000000003 - type: mrr_at_5 value: 32.324000000000005 - type: ndcg_at_1 value: 21.776999999999997 - type: ndcg_at_10 value: 40.306999999999995 - type: ndcg_at_100 value: 46.068 - type: ndcg_at_1000 value: 47.3 - type: ndcg_at_3 value: 32.416 - type: ndcg_at_5 value: 36.345 - type: precision_at_1 value: 21.776999999999997 - type: precision_at_10 value: 6.433 - type: precision_at_100 value: 0.932 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 13.897 - type: precision_at_5 value: 10.324 - type: recall_at_1 value: 21.166 - type: recall_at_10 value: 61.587 - type: recall_at_100 value: 88.251 - type: recall_at_1000 value: 97.727 - type: recall_at_3 value: 40.196 - type: recall_at_5 value: 49.611 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.04605563155496 - type: f1 value: 92.78007303978372 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 69.65116279069767 - type: f1 value: 52.75775172527262 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 70.34633490248822 - type: f1 value: 68.15345065392562 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 75.63887020847343 - type: f1 value: 
76.08074680233685 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 33.77933406071333 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 32.06504927238196 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 32.20682480490871 - type: mrr value: 33.41462721527003 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.548 - type: map_at_10 value: 13.086999999999998 - type: map_at_100 value: 16.698 - type: map_at_1000 value: 18.151999999999997 - type: map_at_3 value: 9.576 - type: map_at_5 value: 11.175 - type: mrr_at_1 value: 44.272 - type: mrr_at_10 value: 53.635999999999996 - type: mrr_at_100 value: 54.228 - type: mrr_at_1000 value: 54.26499999999999 - type: mrr_at_3 value: 51.754 - type: mrr_at_5 value: 53.086 - type: ndcg_at_1 value: 42.724000000000004 - type: ndcg_at_10 value: 34.769 - type: ndcg_at_100 value: 32.283 - type: ndcg_at_1000 value: 40.843 - type: ndcg_at_3 value: 39.852 - type: ndcg_at_5 value: 37.858999999999995 - type: precision_at_1 value: 44.272 - type: precision_at_10 value: 26.068 - type: precision_at_100 value: 8.328000000000001 - type: precision_at_1000 value: 2.1 - type: precision_at_3 value: 37.874 - type: precision_at_5 value: 33.065 - type: recall_at_1 value: 5.548 - type: recall_at_10 value: 16.936999999999998 - type: recall_at_100 value: 33.72 - type: recall_at_1000 value: 64.348 - type: recall_at_3 value: 10.764999999999999 - type: recall_at_5 value: 13.361 - task: type: Retrieval dataset: 
name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 28.008 - type: map_at_10 value: 42.675000000000004 - type: map_at_100 value: 43.85 - type: map_at_1000 value: 43.884 - type: map_at_3 value: 38.286 - type: map_at_5 value: 40.78 - type: mrr_at_1 value: 31.518 - type: mrr_at_10 value: 45.015 - type: mrr_at_100 value: 45.924 - type: mrr_at_1000 value: 45.946999999999996 - type: mrr_at_3 value: 41.348 - type: mrr_at_5 value: 43.428 - type: ndcg_at_1 value: 31.489 - type: ndcg_at_10 value: 50.285999999999994 - type: ndcg_at_100 value: 55.291999999999994 - type: ndcg_at_1000 value: 56.05 - type: ndcg_at_3 value: 41.976 - type: ndcg_at_5 value: 46.103 - type: precision_at_1 value: 31.489 - type: precision_at_10 value: 8.456 - type: precision_at_100 value: 1.125 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 19.09 - type: precision_at_5 value: 13.841000000000001 - type: recall_at_1 value: 28.008 - type: recall_at_10 value: 71.21499999999999 - type: recall_at_100 value: 92.99 - type: recall_at_1000 value: 98.578 - type: recall_at_3 value: 49.604 - type: recall_at_5 value: 59.094 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 70.351 - type: map_at_10 value: 84.163 - type: map_at_100 value: 84.785 - type: map_at_1000 value: 84.801 - type: map_at_3 value: 81.16 - type: map_at_5 value: 83.031 - type: mrr_at_1 value: 80.96 - type: mrr_at_10 value: 87.241 - type: mrr_at_100 value: 87.346 - type: mrr_at_1000 value: 87.347 - type: mrr_at_3 value: 86.25699999999999 - type: mrr_at_5 value: 86.907 - type: ndcg_at_1 value: 80.97 - type: ndcg_at_10 value: 88.017 - type: ndcg_at_100 value: 89.241 - type: ndcg_at_1000 value: 89.34299999999999 - type: ndcg_at_3 value: 85.053 - type: ndcg_at_5 value: 86.663 - type: precision_at_1 value: 80.97 - type: precision_at_10 value: 13.358 - type: precision_at_100 value: 1.525 - 
type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.143 - type: precision_at_5 value: 24.451999999999998 - type: recall_at_1 value: 70.351 - type: recall_at_10 value: 95.39800000000001 - type: recall_at_100 value: 99.55199999999999 - type: recall_at_1000 value: 99.978 - type: recall_at_3 value: 86.913 - type: recall_at_5 value: 91.448 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 55.62406719814139 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 61.386700035141736 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.618 - type: map_at_10 value: 12.920000000000002 - type: map_at_100 value: 15.304 - type: map_at_1000 value: 15.656999999999998 - type: map_at_3 value: 9.187 - type: map_at_5 value: 10.937 - type: mrr_at_1 value: 22.8 - type: mrr_at_10 value: 35.13 - type: mrr_at_100 value: 36.239 - type: mrr_at_1000 value: 36.291000000000004 - type: mrr_at_3 value: 31.917 - type: mrr_at_5 value: 33.787 - type: ndcg_at_1 value: 22.8 - type: ndcg_at_10 value: 21.382 - type: ndcg_at_100 value: 30.257 - type: ndcg_at_1000 value: 36.001 - type: ndcg_at_3 value: 20.43 - type: ndcg_at_5 value: 17.622 - type: precision_at_1 value: 22.8 - type: precision_at_10 value: 11.26 - type: precision_at_100 value: 2.405 - type: precision_at_1000 value: 0.377 - type: precision_at_3 value: 19.633 - type: precision_at_5 value: 15.68 - type: recall_at_1 value: 4.618 - type: recall_at_10 value: 22.811999999999998 - type: recall_at_100 value: 48.787000000000006 - type: recall_at_1000 value: 76.63799999999999 - type: recall_at_3 value: 11.952 - type: recall_at_5 value: 
15.892000000000001 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 84.01529458252244 - type: cos_sim_spearman value: 77.92985224770254 - type: euclidean_pearson value: 81.04251429422487 - type: euclidean_spearman value: 77.92838490549133 - type: manhattan_pearson value: 80.95892251458979 - type: manhattan_spearman value: 77.81028089705941 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 83.97885282534388 - type: cos_sim_spearman value: 75.1221970851712 - type: euclidean_pearson value: 80.34455956720097 - type: euclidean_spearman value: 74.5894274239938 - type: manhattan_pearson value: 80.38999766325465 - type: manhattan_spearman value: 74.68524557166975 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 82.95746064915672 - type: cos_sim_spearman value: 85.08683458043946 - type: euclidean_pearson value: 84.56699492836385 - type: euclidean_spearman value: 85.66089116133713 - type: manhattan_pearson value: 84.47553323458541 - type: manhattan_spearman value: 85.56142206781472 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 82.71377893595067 - type: cos_sim_spearman value: 81.03453291428589 - type: euclidean_pearson value: 82.57136298308613 - type: euclidean_spearman value: 81.15839961890875 - type: manhattan_pearson value: 82.55157879373837 - type: manhattan_spearman value: 81.1540163767054 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 
metrics: - type: cos_sim_pearson value: 86.64197832372373 - type: cos_sim_spearman value: 88.31966852492485 - type: euclidean_pearson value: 87.98692129976983 - type: euclidean_spearman value: 88.6247340837856 - type: manhattan_pearson value: 87.90437827826412 - type: manhattan_spearman value: 88.56278787131457 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 81.84159950146693 - type: cos_sim_spearman value: 83.90678384140168 - type: euclidean_pearson value: 83.19005018860221 - type: euclidean_spearman value: 84.16260415876295 - type: manhattan_pearson value: 83.05030612994494 - type: manhattan_spearman value: 83.99605629718336 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.49935350176666 - type: cos_sim_spearman value: 87.59086606735383 - type: euclidean_pearson value: 88.06537181129983 - type: euclidean_spearman value: 87.6687448086014 - type: manhattan_pearson value: 87.96599131972935 - type: manhattan_spearman value: 87.63295748969642 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 67.68232799482763 - type: cos_sim_spearman value: 67.99930378085793 - type: euclidean_pearson value: 68.50275360001696 - type: euclidean_spearman value: 67.81588179309259 - type: manhattan_pearson value: 68.5892154749763 - type: manhattan_spearman value: 67.84357259640682 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 84.37049618406554 - type: cos_sim_spearman value: 85.57014313159492 - type: 
euclidean_pearson value: 85.57469513908282 - type: euclidean_spearman value: 85.661948135258 - type: manhattan_pearson value: 85.36866831229028 - type: manhattan_spearman value: 85.5043455368843 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 84.83259065376154 - type: mrr value: 95.58455433455433 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 58.817 - type: map_at_10 value: 68.459 - type: map_at_100 value: 68.951 - type: map_at_1000 value: 68.979 - type: map_at_3 value: 65.791 - type: map_at_5 value: 67.583 - type: mrr_at_1 value: 61.667 - type: mrr_at_10 value: 69.368 - type: mrr_at_100 value: 69.721 - type: mrr_at_1000 value: 69.744 - type: mrr_at_3 value: 67.278 - type: mrr_at_5 value: 68.611 - type: ndcg_at_1 value: 61.667 - type: ndcg_at_10 value: 72.70100000000001 - type: ndcg_at_100 value: 74.928 - type: ndcg_at_1000 value: 75.553 - type: ndcg_at_3 value: 68.203 - type: ndcg_at_5 value: 70.804 - type: precision_at_1 value: 61.667 - type: precision_at_10 value: 9.533 - type: precision_at_100 value: 1.077 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 26.444000000000003 - type: precision_at_5 value: 17.599999999999998 - type: recall_at_1 value: 58.817 - type: recall_at_10 value: 84.789 - type: recall_at_100 value: 95.0 - type: recall_at_1000 value: 99.667 - type: recall_at_3 value: 72.8 - type: recall_at_5 value: 79.294 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.8108910891089 - type: cos_sim_ap value: 95.5743678558349 - type: cos_sim_f1 value: 90.43133366385722 - type: 
cos_sim_precision value: 89.67551622418878 - type: cos_sim_recall value: 91.2 - type: dot_accuracy value: 99.75841584158415 - type: dot_ap value: 94.00786363627253 - type: dot_f1 value: 87.51910341314316 - type: dot_precision value: 89.20041536863967 - type: dot_recall value: 85.9 - type: euclidean_accuracy value: 99.81485148514851 - type: euclidean_ap value: 95.4752113136905 - type: euclidean_f1 value: 90.44334975369456 - type: euclidean_precision value: 89.126213592233 - type: euclidean_recall value: 91.8 - type: manhattan_accuracy value: 99.81584158415842 - type: manhattan_ap value: 95.5163172682464 - type: manhattan_f1 value: 90.51987767584097 - type: manhattan_precision value: 92.3076923076923 - type: manhattan_recall value: 88.8 - type: max_accuracy value: 99.81584158415842 - type: max_ap value: 95.5743678558349 - type: max_f1 value: 90.51987767584097 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 62.63235986949449 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 36.334795589585575 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 52.02955214518782 - type: mrr value: 52.8004838298956 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.63769566275453 - type: cos_sim_spearman value: 30.422379185989335 - type: dot_pearson value: 26.88493071882256 - type: dot_spearman value: 26.505249740971305 - task: type: 
Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.21 - type: map_at_10 value: 1.654 - type: map_at_100 value: 10.095 - type: map_at_1000 value: 25.808999999999997 - type: map_at_3 value: 0.594 - type: map_at_5 value: 0.9289999999999999 - type: mrr_at_1 value: 78.0 - type: mrr_at_10 value: 87.019 - type: mrr_at_100 value: 87.019 - type: mrr_at_1000 value: 87.019 - type: mrr_at_3 value: 86.333 - type: mrr_at_5 value: 86.733 - type: ndcg_at_1 value: 73.0 - type: ndcg_at_10 value: 66.52900000000001 - type: ndcg_at_100 value: 53.433 - type: ndcg_at_1000 value: 51.324000000000005 - type: ndcg_at_3 value: 72.02199999999999 - type: ndcg_at_5 value: 69.696 - type: precision_at_1 value: 78.0 - type: precision_at_10 value: 70.39999999999999 - type: precision_at_100 value: 55.46 - type: precision_at_1000 value: 22.758 - type: precision_at_3 value: 76.667 - type: precision_at_5 value: 74.0 - type: recall_at_1 value: 0.21 - type: recall_at_10 value: 1.8849999999999998 - type: recall_at_100 value: 13.801 - type: recall_at_1000 value: 49.649 - type: recall_at_3 value: 0.632 - type: recall_at_5 value: 1.009 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 1.797 - type: map_at_10 value: 9.01 - type: map_at_100 value: 14.682 - type: map_at_1000 value: 16.336000000000002 - type: map_at_3 value: 4.546 - type: map_at_5 value: 5.9270000000000005 - type: mrr_at_1 value: 24.490000000000002 - type: mrr_at_10 value: 41.156 - type: mrr_at_100 value: 42.392 - type: mrr_at_1000 value: 42.408 - type: mrr_at_3 value: 38.775999999999996 - type: mrr_at_5 value: 40.102 - type: ndcg_at_1 value: 21.429000000000002 - type: ndcg_at_10 value: 22.222 - type: ndcg_at_100 value: 34.405 - type: ndcg_at_1000 value: 46.599000000000004 - type: ndcg_at_3 value: 25.261 - type: ndcg_at_5 value: 22.695999999999998 - type: 
precision_at_1 value: 24.490000000000002 - type: precision_at_10 value: 19.796 - type: precision_at_100 value: 7.306 - type: precision_at_1000 value: 1.5350000000000001 - type: precision_at_3 value: 27.211000000000002 - type: precision_at_5 value: 22.857 - type: recall_at_1 value: 1.797 - type: recall_at_10 value: 15.706000000000001 - type: recall_at_100 value: 46.412 - type: recall_at_1000 value: 83.159 - type: recall_at_3 value: 6.1370000000000005 - type: recall_at_5 value: 8.599 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 70.3302 - type: ap value: 14.169121204575601 - type: f1 value: 54.229345975274235 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 58.22297679683077 - type: f1 value: 58.62984908377875 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 49.952922428464255 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 84.68140907194373 - type: cos_sim_ap value: 70.12180123666836 - type: cos_sim_f1 value: 65.77501791258658 - type: cos_sim_precision value: 60.07853403141361 - type: cos_sim_recall value: 72.66490765171504 - type: dot_accuracy value: 81.92167848840674 - type: dot_ap value: 60.49837581423469 - type: dot_f1 value: 58.44186046511628 - type: dot_precision value: 52.24532224532224 - type: dot_recall value: 66.3060686015831 - 
type: euclidean_accuracy value: 84.73505394289802 - type: euclidean_ap value: 70.3278904593286 - type: euclidean_f1 value: 65.98851124940161 - type: euclidean_precision value: 60.38107752956636 - type: euclidean_recall value: 72.74406332453826 - type: manhattan_accuracy value: 84.73505394289802 - type: manhattan_ap value: 70.00737738537337 - type: manhattan_f1 value: 65.80150784822642 - type: manhattan_precision value: 61.892583120204606 - type: manhattan_recall value: 70.23746701846966 - type: max_accuracy value: 84.73505394289802 - type: max_ap value: 70.3278904593286 - type: max_f1 value: 65.98851124940161 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.44258159661582 - type: cos_sim_ap value: 84.91926704880888 - type: cos_sim_f1 value: 77.07651086632926 - type: cos_sim_precision value: 74.5894554883319 - type: cos_sim_recall value: 79.73514012935017 - type: dot_accuracy value: 85.88116583226608 - type: dot_ap value: 78.9753854779923 - type: dot_f1 value: 72.17757637979255 - type: dot_precision value: 66.80647486729143 - type: dot_recall value: 78.48783492454572 - type: euclidean_accuracy value: 88.5299025885823 - type: euclidean_ap value: 85.08006075642194 - type: euclidean_f1 value: 77.29637336504163 - type: euclidean_precision value: 74.69836253950014 - type: euclidean_recall value: 80.08161379735141 - type: manhattan_accuracy value: 88.55124771995187 - type: manhattan_ap value: 85.00941529932851 - type: manhattan_f1 value: 77.33100233100232 - type: manhattan_precision value: 73.37572573956317 - type: manhattan_recall value: 81.73698798891284 - type: max_accuracy value: 88.55124771995187 - type: max_ap value: 85.08006075642194 - type: max_f1 value: 77.33100233100232 --- # gte-small General Text Embeddings (GTE) model. 
[Towards General Text Embeddings with Multi-stage Contrastive Learning](https://arxiv.org/abs/2308.03281) The GTE models are trained by Alibaba DAMO Academy. They are mainly based on the BERT framework and currently offer three different sizes of models, including [GTE-large](https://huggingface.co/thenlper/gte-large), [GTE-base](https://huggingface.co/thenlper/gte-base), and [GTE-small](https://huggingface.co/thenlper/gte-small). The GTE models are trained on a large-scale corpus of relevance text pairs, covering a wide range of domains and scenarios. This enables the GTE models to be applied to various downstream tasks of text embeddings, including **information retrieval**, **semantic textual similarity**, **text reranking**, etc. ## Metrics We compared the performance of the GTE models with other popular text embedding models on the MTEB benchmark. For more detailed comparison results, please refer to the [MTEB leaderboard](https://huggingface.co/spaces/mteb/leaderboard). | Model Name | Model Size (GB) | Dimension | Sequence Length | Average (56) | Clustering (11) | Pair Classification (3) | Reranking (4) | Retrieval (15) | STS (10) | Summarization (1) | Classification (12) | |:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | [**gte-large**](https://huggingface.co/thenlper/gte-large) | 0.67 | 1024 | 512 | **63.13** | 46.84 | 85.00 | 59.13 | 52.22 | 83.35 | 31.66 | 73.33 | | [**gte-base**](https://huggingface.co/thenlper/gte-base) | 0.22 | 768 | 512 | **62.39** | 46.2 | 84.57 | 58.61 | 51.14 | 82.3 | 31.17 | 73.01 | | [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1.34 | 1024| 512 | 62.25 | 44.49 | 86.03 | 56.61 | 50.56 | 82.05 | 30.19 | 75.24 | | [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 0.44 | 768 | 512 | 61.5 | 43.80 | 85.73 | 55.91 | 50.29 | 81.05 | 30.28 | 73.84 | | [**gte-small**](https://huggingface.co/thenlper/gte-small) | 0.07 | 384 | 512 | **61.36** | 44.89 | 83.54 | 57.7 | 49.46 | 82.07 | 30.42 
| 72.31 | | [text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | - | 1536 | 8192 | 60.99 | 45.9 | 84.89 | 56.32 | 49.25 | 80.97 | 30.8 | 70.93 | | [e5-small-v2](https://huggingface.co/intfloat/e5-small-v2) | 0.13 | 384 | 512 | 59.93 | 39.92 | 84.67 | 54.32 | 49.04 | 80.39 | 31.16 | 72.94 | | [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 9.73 | 768 | 512 | 59.51 | 43.72 | 85.06 | 56.42 | 42.24 | 82.63 | 30.08 | 73.42 | | [all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) | 0.44 | 768 | 514 | 57.78 | 43.69 | 83.04 | 59.36 | 43.81 | 80.28 | 27.49 | 65.07 | | [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) | 28.27 | 4096 | 2048 | 57.59 | 38.93 | 81.9 | 55.65 | 48.22 | 77.74 | 33.6 | 66.19 | | [all-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2) | 0.13 | 384 | 512 | 56.53 | 41.81 | 82.41 | 58.44 | 42.69 | 79.8 | 27.9 | 63.21 | | [all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) | 0.09 | 384 | 512 | 56.26 | 42.35 | 82.37 | 58.04 | 41.95 | 78.9 | 30.81 | 63.05 | | [contriever-base-msmarco](https://huggingface.co/nthakur/contriever-base-msmarco) | 0.44 | 768 | 512 | 56.00 | 41.1 | 82.54 | 53.14 | 41.88 | 76.51 | 30.36 | 66.68 | | [sentence-t5-base](https://huggingface.co/sentence-transformers/sentence-t5-base) | 0.22 | 768 | 512 | 55.27 | 40.21 | 85.18 | 53.09 | 33.63 | 81.14 | 31.39 | 69.81 | ## Usage Code example ```python import torch.nn.functional as F from torch import Tensor from transformers import AutoTokenizer, AutoModel def average_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor: last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0) return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] input_texts = [ "what is the capital of China?", "how to implement quick sort in python?", "Beijing", "sorting algorithms" ] 
tokenizer = AutoTokenizer.from_pretrained("thenlper/gte-small") model = AutoModel.from_pretrained("thenlper/gte-small") # Tokenize the input texts batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt') outputs = model(**batch_dict) embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask']) # (Optionally) normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) scores = (embeddings[:1] @ embeddings[1:].T) * 100 print(scores.tolist()) ``` Use with sentence-transformers: ```python from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim sentences = ['That is a happy person', 'That is a very happy person'] model = SentenceTransformer('thenlper/gte-large') embeddings = model.encode(sentences) print(cos_sim(embeddings[0], embeddings[1])) ``` ### Limitation This model exclusively caters to English texts, and any lengthy texts will be truncated to a maximum of 512 tokens. ### Citation If you find our paper or models helpful, please consider citing them as follows: ``` @misc{li2023general, title={Towards General Text Embeddings with Multi-stage Contrastive Learning}, author={Zehan Li and Xin Zhang and Yanzhao Zhang and Dingkun Long and Pengjun Xie and Meishan Zhang}, year={2023}, eprint={2308.03281}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
[ "BIOSSES", "SCIFACT" ]
vectoriseai/bge-small-en
vectoriseai
sentence-similarity
[ "sentence-transformers", "pytorch", "onnx", "safetensors", "bert", "mteb", "sentence transformers", "sentence-similarity", "en", "license:mit", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2023-08-25T13:00:02Z
2023-08-28T14:17:25+00:00
18
0
--- language: - en library_name: sentence-transformers license: mit pipeline_tag: sentence-similarity tags: - mteb - sentence transformers model-index: - name: bge-small-en results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 74.34328358208955 - type: ap value: 37.59947775195661 - type: f1 value: 68.548415491933 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 93.04527499999999 - type: ap value: 89.60696356772135 - type: f1 value: 93.03361469382438 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 46.08 - type: f1 value: 45.66249835363254 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 35.205999999999996 - type: map_at_10 value: 50.782000000000004 - type: map_at_100 value: 51.547 - type: map_at_1000 value: 51.554 - type: map_at_3 value: 46.515 - type: map_at_5 value: 49.296 - type: mrr_at_1 value: 35.632999999999996 - type: mrr_at_10 value: 50.958999999999996 - type: mrr_at_100 value: 51.724000000000004 - type: mrr_at_1000 value: 51.731 - type: mrr_at_3 value: 46.669 - type: mrr_at_5 value: 49.439 - type: ndcg_at_1 value: 35.205999999999996 - type: ndcg_at_10 value: 58.835 - type: ndcg_at_100 value: 62.095 - type: ndcg_at_1000 value: 62.255 - type: ndcg_at_3 value: 50.255 - type: ndcg_at_5 value: 55.296 - type: precision_at_1 value: 35.205999999999996 - type: precision_at_10 value: 8.421 - type: precision_at_100 value: 0.984 - type: precision_at_1000 value: 
0.1 - type: precision_at_3 value: 20.365 - type: precision_at_5 value: 14.680000000000001 - type: recall_at_1 value: 35.205999999999996 - type: recall_at_10 value: 84.211 - type: recall_at_100 value: 98.43499999999999 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 61.095 - type: recall_at_5 value: 73.4 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 47.52644476278646 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 39.973045724188964 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 62.28285314871488 - type: mrr value: 74.52743701358659 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 80.09041909160327 - type: cos_sim_spearman value: 79.96266537706944 - type: euclidean_pearson value: 79.50774978162241 - type: euclidean_spearman value: 79.9144715078551 - type: manhattan_pearson value: 79.2062139879302 - type: manhattan_spearman value: 79.35000081468212 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 85.31493506493506 - type: f1 value: 85.2704557977762 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 39.6837242810816 - 
task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 35.38881249555897 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 27.884999999999998 - type: map_at_10 value: 39.574 - type: map_at_100 value: 40.993 - type: map_at_1000 value: 41.129 - type: map_at_3 value: 36.089 - type: map_at_5 value: 38.191 - type: mrr_at_1 value: 34.477999999999994 - type: mrr_at_10 value: 45.411 - type: mrr_at_100 value: 46.089999999999996 - type: mrr_at_1000 value: 46.147 - type: mrr_at_3 value: 42.346000000000004 - type: mrr_at_5 value: 44.292 - type: ndcg_at_1 value: 34.477999999999994 - type: ndcg_at_10 value: 46.123999999999995 - type: ndcg_at_100 value: 51.349999999999994 - type: ndcg_at_1000 value: 53.578 - type: ndcg_at_3 value: 40.824 - type: ndcg_at_5 value: 43.571 - type: precision_at_1 value: 34.477999999999994 - type: precision_at_10 value: 8.841000000000001 - type: precision_at_100 value: 1.4460000000000002 - type: precision_at_1000 value: 0.192 - type: precision_at_3 value: 19.742 - type: precision_at_5 value: 14.421000000000001 - type: recall_at_1 value: 27.884999999999998 - type: recall_at_10 value: 59.087 - type: recall_at_100 value: 80.609 - type: recall_at_1000 value: 95.054 - type: recall_at_3 value: 44.082 - type: recall_at_5 value: 51.593999999999994 - type: map_at_1 value: 30.639 - type: map_at_10 value: 40.047 - type: map_at_100 value: 41.302 - type: map_at_1000 value: 41.425 - type: map_at_3 value: 37.406 - type: map_at_5 value: 38.934000000000005 - type: mrr_at_1 value: 37.707 - type: mrr_at_10 value: 46.082 - type: mrr_at_100 value: 46.745 - type: mrr_at_1000 value: 46.786 - type: mrr_at_3 value: 43.980999999999995 - type: mrr_at_5 value: 45.287 - type: ndcg_at_1 value: 37.707 - type: 
ndcg_at_10 value: 45.525 - type: ndcg_at_100 value: 49.976 - type: ndcg_at_1000 value: 51.94499999999999 - type: ndcg_at_3 value: 41.704 - type: ndcg_at_5 value: 43.596000000000004 - type: precision_at_1 value: 37.707 - type: precision_at_10 value: 8.465 - type: precision_at_100 value: 1.375 - type: precision_at_1000 value: 0.183 - type: precision_at_3 value: 19.979 - type: precision_at_5 value: 14.115 - type: recall_at_1 value: 30.639 - type: recall_at_10 value: 54.775 - type: recall_at_100 value: 73.678 - type: recall_at_1000 value: 86.142 - type: recall_at_3 value: 43.230000000000004 - type: recall_at_5 value: 48.622 - type: map_at_1 value: 38.038 - type: map_at_10 value: 49.922 - type: map_at_100 value: 51.032 - type: map_at_1000 value: 51.085 - type: map_at_3 value: 46.664 - type: map_at_5 value: 48.588 - type: mrr_at_1 value: 43.95 - type: mrr_at_10 value: 53.566 - type: mrr_at_100 value: 54.318999999999996 - type: mrr_at_1000 value: 54.348 - type: mrr_at_3 value: 51.066 - type: mrr_at_5 value: 52.649 - type: ndcg_at_1 value: 43.95 - type: ndcg_at_10 value: 55.676 - type: ndcg_at_100 value: 60.126000000000005 - type: ndcg_at_1000 value: 61.208 - type: ndcg_at_3 value: 50.20400000000001 - type: ndcg_at_5 value: 53.038 - type: precision_at_1 value: 43.95 - type: precision_at_10 value: 8.953 - type: precision_at_100 value: 1.2109999999999999 - type: precision_at_1000 value: 0.135 - type: precision_at_3 value: 22.256999999999998 - type: precision_at_5 value: 15.524 - type: recall_at_1 value: 38.038 - type: recall_at_10 value: 69.15 - type: recall_at_100 value: 88.31599999999999 - type: recall_at_1000 value: 95.993 - type: recall_at_3 value: 54.663 - type: recall_at_5 value: 61.373 - type: map_at_1 value: 24.872 - type: map_at_10 value: 32.912 - type: map_at_100 value: 33.972 - type: map_at_1000 value: 34.046 - type: map_at_3 value: 30.361 - type: map_at_5 value: 31.704 - type: mrr_at_1 value: 26.779999999999998 - type: mrr_at_10 value: 34.812 - type: mrr_at_100 
value: 35.754999999999995 - type: mrr_at_1000 value: 35.809000000000005 - type: mrr_at_3 value: 32.335 - type: mrr_at_5 value: 33.64 - type: ndcg_at_1 value: 26.779999999999998 - type: ndcg_at_10 value: 37.623 - type: ndcg_at_100 value: 42.924 - type: ndcg_at_1000 value: 44.856 - type: ndcg_at_3 value: 32.574 - type: ndcg_at_5 value: 34.842 - type: precision_at_1 value: 26.779999999999998 - type: precision_at_10 value: 5.729 - type: precision_at_100 value: 0.886 - type: precision_at_1000 value: 0.109 - type: precision_at_3 value: 13.559 - type: precision_at_5 value: 9.469 - type: recall_at_1 value: 24.872 - type: recall_at_10 value: 50.400999999999996 - type: recall_at_100 value: 74.954 - type: recall_at_1000 value: 89.56 - type: recall_at_3 value: 36.726 - type: recall_at_5 value: 42.138999999999996 - type: map_at_1 value: 16.803 - type: map_at_10 value: 24.348 - type: map_at_100 value: 25.56 - type: map_at_1000 value: 25.668000000000003 - type: map_at_3 value: 21.811 - type: map_at_5 value: 23.287 - type: mrr_at_1 value: 20.771 - type: mrr_at_10 value: 28.961 - type: mrr_at_100 value: 29.979 - type: mrr_at_1000 value: 30.046 - type: mrr_at_3 value: 26.555 - type: mrr_at_5 value: 28.060000000000002 - type: ndcg_at_1 value: 20.771 - type: ndcg_at_10 value: 29.335 - type: ndcg_at_100 value: 35.188 - type: ndcg_at_1000 value: 37.812 - type: ndcg_at_3 value: 24.83 - type: ndcg_at_5 value: 27.119 - type: precision_at_1 value: 20.771 - type: precision_at_10 value: 5.4350000000000005 - type: precision_at_100 value: 0.9480000000000001 - type: precision_at_1000 value: 0.13 - type: precision_at_3 value: 11.982 - type: precision_at_5 value: 8.831 - type: recall_at_1 value: 16.803 - type: recall_at_10 value: 40.039 - type: recall_at_100 value: 65.83200000000001 - type: recall_at_1000 value: 84.478 - type: recall_at_3 value: 27.682000000000002 - type: recall_at_5 value: 33.535 - type: map_at_1 value: 28.345 - type: map_at_10 value: 37.757000000000005 - type: map_at_100 value: 
39.141 - type: map_at_1000 value: 39.262 - type: map_at_3 value: 35.183 - type: map_at_5 value: 36.592 - type: mrr_at_1 value: 34.649 - type: mrr_at_10 value: 43.586999999999996 - type: mrr_at_100 value: 44.481 - type: mrr_at_1000 value: 44.542 - type: mrr_at_3 value: 41.29 - type: mrr_at_5 value: 42.642 - type: ndcg_at_1 value: 34.649 - type: ndcg_at_10 value: 43.161 - type: ndcg_at_100 value: 48.734 - type: ndcg_at_1000 value: 51.046 - type: ndcg_at_3 value: 39.118 - type: ndcg_at_5 value: 41.022 - type: precision_at_1 value: 34.649 - type: precision_at_10 value: 7.603 - type: precision_at_100 value: 1.209 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 18.319 - type: precision_at_5 value: 12.839 - type: recall_at_1 value: 28.345 - type: recall_at_10 value: 53.367 - type: recall_at_100 value: 76.453 - type: recall_at_1000 value: 91.82000000000001 - type: recall_at_3 value: 41.636 - type: recall_at_5 value: 46.760000000000005 - type: map_at_1 value: 22.419 - type: map_at_10 value: 31.716 - type: map_at_100 value: 33.152 - type: map_at_1000 value: 33.267 - type: map_at_3 value: 28.74 - type: map_at_5 value: 30.48 - type: mrr_at_1 value: 28.310999999999996 - type: mrr_at_10 value: 37.039 - type: mrr_at_100 value: 38.09 - type: mrr_at_1000 value: 38.145 - type: mrr_at_3 value: 34.437 - type: mrr_at_5 value: 36.024 - type: ndcg_at_1 value: 28.310999999999996 - type: ndcg_at_10 value: 37.41 - type: ndcg_at_100 value: 43.647999999999996 - type: ndcg_at_1000 value: 46.007 - type: ndcg_at_3 value: 32.509 - type: ndcg_at_5 value: 34.943999999999996 - type: precision_at_1 value: 28.310999999999996 - type: precision_at_10 value: 6.963 - type: precision_at_100 value: 1.1860000000000002 - type: precision_at_1000 value: 0.154 - type: precision_at_3 value: 15.867999999999999 - type: precision_at_5 value: 11.507000000000001 - type: recall_at_1 value: 22.419 - type: recall_at_10 value: 49.28 - type: recall_at_100 value: 75.802 - type: recall_at_1000 value: 
92.032 - type: recall_at_3 value: 35.399 - type: recall_at_5 value: 42.027 - type: map_at_1 value: 24.669249999999998 - type: map_at_10 value: 33.332583333333325 - type: map_at_100 value: 34.557833333333335 - type: map_at_1000 value: 34.67141666666666 - type: map_at_3 value: 30.663166666666662 - type: map_at_5 value: 32.14883333333333 - type: mrr_at_1 value: 29.193833333333334 - type: mrr_at_10 value: 37.47625 - type: mrr_at_100 value: 38.3545 - type: mrr_at_1000 value: 38.413166666666676 - type: mrr_at_3 value: 35.06741666666667 - type: mrr_at_5 value: 36.450666666666656 - type: ndcg_at_1 value: 29.193833333333334 - type: ndcg_at_10 value: 38.505416666666676 - type: ndcg_at_100 value: 43.81125 - type: ndcg_at_1000 value: 46.09558333333333 - type: ndcg_at_3 value: 33.90916666666667 - type: ndcg_at_5 value: 36.07666666666666 - type: precision_at_1 value: 29.193833333333334 - type: precision_at_10 value: 6.7251666666666665 - type: precision_at_100 value: 1.1058333333333332 - type: precision_at_1000 value: 0.14833333333333332 - type: precision_at_3 value: 15.554166666666665 - type: precision_at_5 value: 11.079250000000002 - type: recall_at_1 value: 24.669249999999998 - type: recall_at_10 value: 49.75583333333332 - type: recall_at_100 value: 73.06908333333332 - type: recall_at_1000 value: 88.91316666666667 - type: recall_at_3 value: 36.913250000000005 - type: recall_at_5 value: 42.48641666666666 - type: map_at_1 value: 24.044999999999998 - type: map_at_10 value: 30.349999999999998 - type: map_at_100 value: 31.273 - type: map_at_1000 value: 31.362000000000002 - type: map_at_3 value: 28.508 - type: map_at_5 value: 29.369 - type: mrr_at_1 value: 26.994 - type: mrr_at_10 value: 33.12 - type: mrr_at_100 value: 33.904 - type: mrr_at_1000 value: 33.967000000000006 - type: mrr_at_3 value: 31.365 - type: mrr_at_5 value: 32.124 - type: ndcg_at_1 value: 26.994 - type: ndcg_at_10 value: 34.214 - type: ndcg_at_100 value: 38.681 - type: ndcg_at_1000 value: 40.926 - type: ndcg_at_3 
value: 30.725 - type: ndcg_at_5 value: 31.967000000000002 - type: precision_at_1 value: 26.994 - type: precision_at_10 value: 5.215 - type: precision_at_100 value: 0.807 - type: precision_at_1000 value: 0.108 - type: precision_at_3 value: 12.986 - type: precision_at_5 value: 8.712 - type: recall_at_1 value: 24.044999999999998 - type: recall_at_10 value: 43.456 - type: recall_at_100 value: 63.675000000000004 - type: recall_at_1000 value: 80.05499999999999 - type: recall_at_3 value: 33.561 - type: recall_at_5 value: 36.767 - type: map_at_1 value: 15.672 - type: map_at_10 value: 22.641 - type: map_at_100 value: 23.75 - type: map_at_1000 value: 23.877000000000002 - type: map_at_3 value: 20.219 - type: map_at_5 value: 21.648 - type: mrr_at_1 value: 18.823 - type: mrr_at_10 value: 26.101999999999997 - type: mrr_at_100 value: 27.038 - type: mrr_at_1000 value: 27.118 - type: mrr_at_3 value: 23.669 - type: mrr_at_5 value: 25.173000000000002 - type: ndcg_at_1 value: 18.823 - type: ndcg_at_10 value: 27.176000000000002 - type: ndcg_at_100 value: 32.42 - type: ndcg_at_1000 value: 35.413 - type: ndcg_at_3 value: 22.756999999999998 - type: ndcg_at_5 value: 25.032 - type: precision_at_1 value: 18.823 - type: precision_at_10 value: 5.034000000000001 - type: precision_at_100 value: 0.895 - type: precision_at_1000 value: 0.132 - type: precision_at_3 value: 10.771 - type: precision_at_5 value: 8.1 - type: recall_at_1 value: 15.672 - type: recall_at_10 value: 37.296 - type: recall_at_100 value: 60.863 - type: recall_at_1000 value: 82.234 - type: recall_at_3 value: 25.330000000000002 - type: recall_at_5 value: 30.964000000000002 - type: map_at_1 value: 24.633 - type: map_at_10 value: 32.858 - type: map_at_100 value: 34.038000000000004 - type: map_at_1000 value: 34.141 - type: map_at_3 value: 30.209000000000003 - type: map_at_5 value: 31.567 - type: mrr_at_1 value: 28.358 - type: mrr_at_10 value: 36.433 - type: mrr_at_100 value: 37.352000000000004 - type: mrr_at_1000 value: 37.41 - type: 
mrr_at_3 value: 34.033 - type: mrr_at_5 value: 35.246 - type: ndcg_at_1 value: 28.358 - type: ndcg_at_10 value: 37.973 - type: ndcg_at_100 value: 43.411 - type: ndcg_at_1000 value: 45.747 - type: ndcg_at_3 value: 32.934999999999995 - type: ndcg_at_5 value: 35.013 - type: precision_at_1 value: 28.358 - type: precision_at_10 value: 6.418 - type: precision_at_100 value: 1.02 - type: precision_at_1000 value: 0.133 - type: precision_at_3 value: 14.677000000000001 - type: precision_at_5 value: 10.335999999999999 - type: recall_at_1 value: 24.633 - type: recall_at_10 value: 50.048 - type: recall_at_100 value: 73.821 - type: recall_at_1000 value: 90.046 - type: recall_at_3 value: 36.284 - type: recall_at_5 value: 41.370000000000005 - type: map_at_1 value: 23.133 - type: map_at_10 value: 31.491999999999997 - type: map_at_100 value: 33.062000000000005 - type: map_at_1000 value: 33.256 - type: map_at_3 value: 28.886 - type: map_at_5 value: 30.262 - type: mrr_at_1 value: 28.063 - type: mrr_at_10 value: 36.144 - type: mrr_at_100 value: 37.14 - type: mrr_at_1000 value: 37.191 - type: mrr_at_3 value: 33.762 - type: mrr_at_5 value: 34.997 - type: ndcg_at_1 value: 28.063 - type: ndcg_at_10 value: 36.951 - type: ndcg_at_100 value: 43.287 - type: ndcg_at_1000 value: 45.777 - type: ndcg_at_3 value: 32.786 - type: ndcg_at_5 value: 34.65 - type: precision_at_1 value: 28.063 - type: precision_at_10 value: 7.055 - type: precision_at_100 value: 1.476 - type: precision_at_1000 value: 0.22899999999999998 - type: precision_at_3 value: 15.481 - type: precision_at_5 value: 11.186 - type: recall_at_1 value: 23.133 - type: recall_at_10 value: 47.285 - type: recall_at_100 value: 76.176 - type: recall_at_1000 value: 92.176 - type: recall_at_3 value: 35.223 - type: recall_at_5 value: 40.142 - type: map_at_1 value: 19.547 - type: map_at_10 value: 26.374 - type: map_at_100 value: 27.419 - type: map_at_1000 value: 27.539 - type: map_at_3 value: 23.882 - type: map_at_5 value: 25.163999999999998 - type: 
mrr_at_1 value: 21.442 - type: mrr_at_10 value: 28.458 - type: mrr_at_100 value: 29.360999999999997 - type: mrr_at_1000 value: 29.448999999999998 - type: mrr_at_3 value: 25.97 - type: mrr_at_5 value: 27.273999999999997 - type: ndcg_at_1 value: 21.442 - type: ndcg_at_10 value: 30.897000000000002 - type: ndcg_at_100 value: 35.99 - type: ndcg_at_1000 value: 38.832 - type: ndcg_at_3 value: 25.944 - type: ndcg_at_5 value: 28.126 - type: precision_at_1 value: 21.442 - type: precision_at_10 value: 4.9910000000000005 - type: precision_at_100 value: 0.8109999999999999 - type: precision_at_1000 value: 0.11800000000000001 - type: precision_at_3 value: 11.029 - type: precision_at_5 value: 7.911 - type: recall_at_1 value: 19.547 - type: recall_at_10 value: 42.886 - type: recall_at_100 value: 66.64999999999999 - type: recall_at_1000 value: 87.368 - type: recall_at_3 value: 29.143 - type: recall_at_5 value: 34.544000000000004 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 15.572 - type: map_at_10 value: 25.312 - type: map_at_100 value: 27.062 - type: map_at_1000 value: 27.253 - type: map_at_3 value: 21.601 - type: map_at_5 value: 23.473 - type: mrr_at_1 value: 34.984 - type: mrr_at_10 value: 46.406 - type: mrr_at_100 value: 47.179 - type: mrr_at_1000 value: 47.21 - type: mrr_at_3 value: 43.485 - type: mrr_at_5 value: 45.322 - type: ndcg_at_1 value: 34.984 - type: ndcg_at_10 value: 34.344 - type: ndcg_at_100 value: 41.015 - type: ndcg_at_1000 value: 44.366 - type: ndcg_at_3 value: 29.119 - type: ndcg_at_5 value: 30.825999999999997 - type: precision_at_1 value: 34.984 - type: precision_at_10 value: 10.358 - type: precision_at_100 value: 1.762 - type: precision_at_1000 value: 0.23900000000000002 - type: precision_at_3 value: 21.368000000000002 - type: precision_at_5 value: 15.948 - type: recall_at_1 value: 15.572 - type: recall_at_10 value: 39.367999999999995 - type: 
recall_at_100 value: 62.183 - type: recall_at_1000 value: 80.92200000000001 - type: recall_at_3 value: 26.131999999999998 - type: recall_at_5 value: 31.635999999999996 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 8.848 - type: map_at_10 value: 19.25 - type: map_at_100 value: 27.193 - type: map_at_1000 value: 28.721999999999998 - type: map_at_3 value: 13.968 - type: map_at_5 value: 16.283 - type: mrr_at_1 value: 68.75 - type: mrr_at_10 value: 76.25 - type: mrr_at_100 value: 76.534 - type: mrr_at_1000 value: 76.53999999999999 - type: mrr_at_3 value: 74.667 - type: mrr_at_5 value: 75.86699999999999 - type: ndcg_at_1 value: 56.00000000000001 - type: ndcg_at_10 value: 41.426 - type: ndcg_at_100 value: 45.660000000000004 - type: ndcg_at_1000 value: 53.02 - type: ndcg_at_3 value: 46.581 - type: ndcg_at_5 value: 43.836999999999996 - type: precision_at_1 value: 68.75 - type: precision_at_10 value: 32.800000000000004 - type: precision_at_100 value: 10.440000000000001 - type: precision_at_1000 value: 1.9980000000000002 - type: precision_at_3 value: 49.667 - type: precision_at_5 value: 42.25 - type: recall_at_1 value: 8.848 - type: recall_at_10 value: 24.467 - type: recall_at_100 value: 51.344 - type: recall_at_1000 value: 75.235 - type: recall_at_3 value: 15.329 - type: recall_at_5 value: 18.892999999999997 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 48.95 - type: f1 value: 43.44563593360779 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 78.036 - type: map_at_10 value: 85.639 - type: map_at_100 value: 85.815 - type: map_at_1000 value: 85.829 - type: map_at_3 value: 84.795 - type: map_at_5 value: 85.336 - type: mrr_at_1 value: 
84.353 - type: mrr_at_10 value: 90.582 - type: mrr_at_100 value: 90.617 - type: mrr_at_1000 value: 90.617 - type: mrr_at_3 value: 90.132 - type: mrr_at_5 value: 90.447 - type: ndcg_at_1 value: 84.353 - type: ndcg_at_10 value: 89.003 - type: ndcg_at_100 value: 89.60000000000001 - type: ndcg_at_1000 value: 89.836 - type: ndcg_at_3 value: 87.81400000000001 - type: ndcg_at_5 value: 88.478 - type: precision_at_1 value: 84.353 - type: precision_at_10 value: 10.482 - type: precision_at_100 value: 1.099 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 33.257999999999996 - type: precision_at_5 value: 20.465 - type: recall_at_1 value: 78.036 - type: recall_at_10 value: 94.517 - type: recall_at_100 value: 96.828 - type: recall_at_1000 value: 98.261 - type: recall_at_3 value: 91.12 - type: recall_at_5 value: 92.946 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 20.191 - type: map_at_10 value: 32.369 - type: map_at_100 value: 34.123999999999995 - type: map_at_1000 value: 34.317 - type: map_at_3 value: 28.71 - type: map_at_5 value: 30.607 - type: mrr_at_1 value: 40.894999999999996 - type: mrr_at_10 value: 48.842 - type: mrr_at_100 value: 49.599 - type: mrr_at_1000 value: 49.647000000000006 - type: mrr_at_3 value: 46.785 - type: mrr_at_5 value: 47.672 - type: ndcg_at_1 value: 40.894999999999996 - type: ndcg_at_10 value: 39.872 - type: ndcg_at_100 value: 46.126 - type: ndcg_at_1000 value: 49.476 - type: ndcg_at_3 value: 37.153000000000006 - type: ndcg_at_5 value: 37.433 - type: precision_at_1 value: 40.894999999999996 - type: precision_at_10 value: 10.818 - type: precision_at_100 value: 1.73 - type: precision_at_1000 value: 0.231 - type: precision_at_3 value: 25.051000000000002 - type: precision_at_5 value: 17.531 - type: recall_at_1 value: 20.191 - type: recall_at_10 value: 45.768 - type: recall_at_100 value: 68.82000000000001 - type: recall_at_1000 
value: 89.133 - type: recall_at_3 value: 33.296 - type: recall_at_5 value: 38.022 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 39.257 - type: map_at_10 value: 61.467000000000006 - type: map_at_100 value: 62.364 - type: map_at_1000 value: 62.424 - type: map_at_3 value: 58.228 - type: map_at_5 value: 60.283 - type: mrr_at_1 value: 78.515 - type: mrr_at_10 value: 84.191 - type: mrr_at_100 value: 84.378 - type: mrr_at_1000 value: 84.385 - type: mrr_at_3 value: 83.284 - type: mrr_at_5 value: 83.856 - type: ndcg_at_1 value: 78.515 - type: ndcg_at_10 value: 69.78999999999999 - type: ndcg_at_100 value: 72.886 - type: ndcg_at_1000 value: 74.015 - type: ndcg_at_3 value: 65.23 - type: ndcg_at_5 value: 67.80199999999999 - type: precision_at_1 value: 78.515 - type: precision_at_10 value: 14.519000000000002 - type: precision_at_100 value: 1.694 - type: precision_at_1000 value: 0.184 - type: precision_at_3 value: 41.702 - type: precision_at_5 value: 27.046999999999997 - type: recall_at_1 value: 39.257 - type: recall_at_10 value: 72.59299999999999 - type: recall_at_100 value: 84.679 - type: recall_at_1000 value: 92.12 - type: recall_at_3 value: 62.552 - type: recall_at_5 value: 67.616 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 91.5152 - type: ap value: 87.64584669595709 - type: f1 value: 91.50605576428437 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 21.926000000000002 - type: map_at_10 value: 34.049 - type: map_at_100 value: 35.213 - type: map_at_1000 value: 35.265 - type: map_at_3 value: 30.309 - type: map_at_5 value: 32.407000000000004 - type: mrr_at_1 value: 22.55 - type: mrr_at_10 value: 34.657 - type: mrr_at_100 value: 
35.760999999999996 - type: mrr_at_1000 value: 35.807 - type: mrr_at_3 value: 30.989 - type: mrr_at_5 value: 33.039 - type: ndcg_at_1 value: 22.55 - type: ndcg_at_10 value: 40.842 - type: ndcg_at_100 value: 46.436 - type: ndcg_at_1000 value: 47.721999999999994 - type: ndcg_at_3 value: 33.209 - type: ndcg_at_5 value: 36.943 - type: precision_at_1 value: 22.55 - type: precision_at_10 value: 6.447 - type: precision_at_100 value: 0.9249999999999999 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.136000000000001 - type: precision_at_5 value: 10.381 - type: recall_at_1 value: 21.926000000000002 - type: recall_at_10 value: 61.724999999999994 - type: recall_at_100 value: 87.604 - type: recall_at_1000 value: 97.421 - type: recall_at_3 value: 40.944 - type: recall_at_5 value: 49.915 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.54765161878704 - type: f1 value: 93.3298945415573 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 75.71591427268582 - type: f1 value: 59.32113870474471 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 75.83053127101547 - type: f1 value: 73.60757944876475 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 78.72562205783457 - type: f1 value: 78.63761662505502 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: 
default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 33.37935633767996 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 31.55270546130387 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 30.462692753143834 - type: mrr value: 31.497569753511563 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.646 - type: map_at_10 value: 12.498 - type: map_at_100 value: 15.486 - type: map_at_1000 value: 16.805999999999997 - type: map_at_3 value: 9.325 - type: map_at_5 value: 10.751 - type: mrr_at_1 value: 43.034 - type: mrr_at_10 value: 52.662 - type: mrr_at_100 value: 53.189 - type: mrr_at_1000 value: 53.25 - type: mrr_at_3 value: 50.929 - type: mrr_at_5 value: 51.92 - type: ndcg_at_1 value: 41.796 - type: ndcg_at_10 value: 33.477000000000004 - type: ndcg_at_100 value: 29.996000000000002 - type: ndcg_at_1000 value: 38.864 - type: ndcg_at_3 value: 38.940000000000005 - type: ndcg_at_5 value: 36.689 - type: precision_at_1 value: 43.034 - type: precision_at_10 value: 24.799 - type: precision_at_100 value: 7.432999999999999 - type: precision_at_1000 value: 1.9929999999999999 - type: precision_at_3 value: 36.842000000000006 - type: precision_at_5 value: 32.135999999999996 - type: recall_at_1 value: 5.646 - type: recall_at_10 value: 15.963 - type: recall_at_100 value: 29.492 - type: recall_at_1000 value: 61.711000000000006 - type: recall_at_3 value: 10.585 - type: recall_at_5 value: 12.753999999999998 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 27.602 - 
type: map_at_10 value: 41.545 - type: map_at_100 value: 42.644999999999996 - type: map_at_1000 value: 42.685 - type: map_at_3 value: 37.261 - type: map_at_5 value: 39.706 - type: mrr_at_1 value: 31.141000000000002 - type: mrr_at_10 value: 44.139 - type: mrr_at_100 value: 44.997 - type: mrr_at_1000 value: 45.025999999999996 - type: mrr_at_3 value: 40.503 - type: mrr_at_5 value: 42.64 - type: ndcg_at_1 value: 31.141000000000002 - type: ndcg_at_10 value: 48.995 - type: ndcg_at_100 value: 53.788000000000004 - type: ndcg_at_1000 value: 54.730000000000004 - type: ndcg_at_3 value: 40.844 - type: ndcg_at_5 value: 44.955 - type: precision_at_1 value: 31.141000000000002 - type: precision_at_10 value: 8.233 - type: precision_at_100 value: 1.093 - type: precision_at_1000 value: 0.11800000000000001 - type: precision_at_3 value: 18.579 - type: precision_at_5 value: 13.533999999999999 - type: recall_at_1 value: 27.602 - type: recall_at_10 value: 69.216 - type: recall_at_100 value: 90.252 - type: recall_at_1000 value: 97.27 - type: recall_at_3 value: 47.987 - type: recall_at_5 value: 57.438 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 70.949 - type: map_at_10 value: 84.89999999999999 - type: map_at_100 value: 85.531 - type: map_at_1000 value: 85.548 - type: map_at_3 value: 82.027 - type: map_at_5 value: 83.853 - type: mrr_at_1 value: 81.69999999999999 - type: mrr_at_10 value: 87.813 - type: mrr_at_100 value: 87.917 - type: mrr_at_1000 value: 87.91799999999999 - type: mrr_at_3 value: 86.938 - type: mrr_at_5 value: 87.53999999999999 - type: ndcg_at_1 value: 81.75 - type: ndcg_at_10 value: 88.55499999999999 - type: ndcg_at_100 value: 89.765 - type: ndcg_at_1000 value: 89.871 - type: ndcg_at_3 value: 85.905 - type: ndcg_at_5 value: 87.41 - type: precision_at_1 value: 81.75 - type: precision_at_10 value: 13.403 - type: precision_at_100 value: 1.528 - type: precision_at_1000 value: 
0.157 - type: precision_at_3 value: 37.597 - type: precision_at_5 value: 24.69 - type: recall_at_1 value: 70.949 - type: recall_at_10 value: 95.423 - type: recall_at_100 value: 99.509 - type: recall_at_1000 value: 99.982 - type: recall_at_3 value: 87.717 - type: recall_at_5 value: 92.032 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 51.76962893449579 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 62.32897690686379 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.478 - type: map_at_10 value: 11.994 - type: map_at_100 value: 13.977 - type: map_at_1000 value: 14.295 - type: map_at_3 value: 8.408999999999999 - type: map_at_5 value: 10.024 - type: mrr_at_1 value: 22.1 - type: mrr_at_10 value: 33.526 - type: mrr_at_100 value: 34.577000000000005 - type: mrr_at_1000 value: 34.632000000000005 - type: mrr_at_3 value: 30.217 - type: mrr_at_5 value: 31.962000000000003 - type: ndcg_at_1 value: 22.1 - type: ndcg_at_10 value: 20.191 - type: ndcg_at_100 value: 27.954 - type: ndcg_at_1000 value: 33.491 - type: ndcg_at_3 value: 18.787000000000003 - type: ndcg_at_5 value: 16.378999999999998 - type: precision_at_1 value: 22.1 - type: precision_at_10 value: 10.69 - type: precision_at_100 value: 2.1919999999999997 - type: precision_at_1000 value: 0.35200000000000004 - type: precision_at_3 value: 17.732999999999997 - type: precision_at_5 value: 14.499999999999998 - type: recall_at_1 value: 4.478 - type: recall_at_10 value: 21.657 - type: recall_at_100 value: 44.54 - type: recall_at_1000 value: 71.542 - type: recall_at_3 value: 10.778 - type: recall_at_5 value: 14.687 - task: 
type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 82.82325259156718 - type: cos_sim_spearman value: 79.2463589100662 - type: euclidean_pearson value: 80.48318380496771 - type: euclidean_spearman value: 79.34451935199979 - type: manhattan_pearson value: 80.39041824178759 - type: manhattan_spearman value: 79.23002892700211 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 85.74130231431258 - type: cos_sim_spearman value: 78.36856568042397 - type: euclidean_pearson value: 82.48301631890303 - type: euclidean_spearman value: 78.28376980722732 - type: manhattan_pearson value: 82.43552075450525 - type: manhattan_spearman value: 78.22702443947126 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 79.96138619461459 - type: cos_sim_spearman value: 81.85436343502379 - type: euclidean_pearson value: 81.82895226665367 - type: euclidean_spearman value: 82.22707349602916 - type: manhattan_pearson value: 81.66303369445873 - type: manhattan_spearman value: 82.05030197179455 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 80.05481244198648 - type: cos_sim_spearman value: 80.85052504637808 - type: euclidean_pearson value: 80.86728419744497 - type: euclidean_spearman value: 81.033786401512 - type: manhattan_pearson value: 80.90107531061103 - type: manhattan_spearman value: 81.11374116827795 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson 
value: 84.615220756399 - type: cos_sim_spearman value: 86.46858500002092 - type: euclidean_pearson value: 86.08307800247586 - type: euclidean_spearman value: 86.72691443870013 - type: manhattan_pearson value: 85.96155594487269 - type: manhattan_spearman value: 86.605909505275 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 82.14363913634436 - type: cos_sim_spearman value: 84.48430226487102 - type: euclidean_pearson value: 83.75303424801902 - type: euclidean_spearman value: 84.56762380734538 - type: manhattan_pearson value: 83.6135447165928 - type: manhattan_spearman value: 84.39898212616731 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 85.09909252554525 - type: cos_sim_spearman value: 85.70951402743276 - type: euclidean_pearson value: 87.1991936239908 - type: euclidean_spearman value: 86.07745840612071 - type: manhattan_pearson value: 87.25039137549952 - type: manhattan_spearman value: 85.99938746659761 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 63.529332093413615 - type: cos_sim_spearman value: 65.38177340147439 - type: euclidean_pearson value: 66.35278011412136 - type: euclidean_spearman value: 65.47147267032997 - type: manhattan_pearson value: 66.71804682408693 - type: manhattan_spearman value: 65.67406521423597 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 82.45802942885662 - type: cos_sim_spearman value: 84.8853341842566 - type: euclidean_pearson value: 84.60915021096707 - 
type: euclidean_spearman value: 85.11181242913666 - type: manhattan_pearson value: 84.38600521210364 - type: manhattan_spearman value: 84.89045417981723 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 85.92793380635129 - type: mrr value: 95.85834191226348 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 55.74400000000001 - type: map_at_10 value: 65.455 - type: map_at_100 value: 66.106 - type: map_at_1000 value: 66.129 - type: map_at_3 value: 62.719 - type: map_at_5 value: 64.441 - type: mrr_at_1 value: 58.667 - type: mrr_at_10 value: 66.776 - type: mrr_at_100 value: 67.363 - type: mrr_at_1000 value: 67.384 - type: mrr_at_3 value: 64.889 - type: mrr_at_5 value: 66.122 - type: ndcg_at_1 value: 58.667 - type: ndcg_at_10 value: 69.904 - type: ndcg_at_100 value: 72.807 - type: ndcg_at_1000 value: 73.423 - type: ndcg_at_3 value: 65.405 - type: ndcg_at_5 value: 67.86999999999999 - type: precision_at_1 value: 58.667 - type: precision_at_10 value: 9.3 - type: precision_at_100 value: 1.08 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 25.444 - type: precision_at_5 value: 17 - type: recall_at_1 value: 55.74400000000001 - type: recall_at_10 value: 82.122 - type: recall_at_100 value: 95.167 - type: recall_at_1000 value: 100 - type: recall_at_3 value: 70.14399999999999 - type: recall_at_5 value: 76.417 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.86534653465347 - type: cos_sim_ap value: 96.54142419791388 - type: cos_sim_f1 value: 93.07535641547861 - type: cos_sim_precision value: 94.81327800829875 - type: 
cos_sim_recall value: 91.4 - type: dot_accuracy value: 99.86435643564356 - type: dot_ap value: 96.53682260449868 - type: dot_f1 value: 92.98515104966718 - type: dot_precision value: 95.27806925498426 - type: dot_recall value: 90.8 - type: euclidean_accuracy value: 99.86336633663366 - type: euclidean_ap value: 96.5228676185697 - type: euclidean_f1 value: 92.9735234215886 - type: euclidean_precision value: 94.70954356846472 - type: euclidean_recall value: 91.3 - type: manhattan_accuracy value: 99.85841584158416 - type: manhattan_ap value: 96.50392760934032 - type: manhattan_f1 value: 92.84642321160581 - type: manhattan_precision value: 92.8928928928929 - type: manhattan_recall value: 92.80000000000001 - type: max_accuracy value: 99.86534653465347 - type: max_ap value: 96.54142419791388 - type: max_f1 value: 93.07535641547861 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 61.08285408766616 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 35.640675309010604 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 53.20333913710715 - type: mrr value: 54.088813555725324 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.79465221925075 - type: cos_sim_spearman value: 30.530816059163634 - type: dot_pearson value: 31.364837244718043 - type: dot_spearman value: 30.79726823684003 - task: type: Retrieval dataset: name: MTEB 
TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.22599999999999998 - type: map_at_10 value: 1.735 - type: map_at_100 value: 8.978 - type: map_at_1000 value: 20.851 - type: map_at_3 value: 0.613 - type: map_at_5 value: 0.964 - type: mrr_at_1 value: 88 - type: mrr_at_10 value: 92.867 - type: mrr_at_100 value: 92.867 - type: mrr_at_1000 value: 92.867 - type: mrr_at_3 value: 92.667 - type: mrr_at_5 value: 92.667 - type: ndcg_at_1 value: 82 - type: ndcg_at_10 value: 73.164 - type: ndcg_at_100 value: 51.878 - type: ndcg_at_1000 value: 44.864 - type: ndcg_at_3 value: 79.184 - type: ndcg_at_5 value: 76.39 - type: precision_at_1 value: 88 - type: precision_at_10 value: 76.2 - type: precision_at_100 value: 52.459999999999994 - type: precision_at_1000 value: 19.692 - type: precision_at_3 value: 82.667 - type: precision_at_5 value: 80 - type: recall_at_1 value: 0.22599999999999998 - type: recall_at_10 value: 1.942 - type: recall_at_100 value: 12.342 - type: recall_at_1000 value: 41.42 - type: recall_at_3 value: 0.637 - type: recall_at_5 value: 1.034 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 3.567 - type: map_at_10 value: 13.116 - type: map_at_100 value: 19.39 - type: map_at_1000 value: 20.988 - type: map_at_3 value: 7.109 - type: map_at_5 value: 9.950000000000001 - type: mrr_at_1 value: 42.857 - type: mrr_at_10 value: 57.404999999999994 - type: mrr_at_100 value: 58.021 - type: mrr_at_1000 value: 58.021 - type: mrr_at_3 value: 54.762 - type: mrr_at_5 value: 56.19 - type: ndcg_at_1 value: 38.775999999999996 - type: ndcg_at_10 value: 30.359 - type: ndcg_at_100 value: 41.284 - type: ndcg_at_1000 value: 52.30200000000001 - type: ndcg_at_3 value: 36.744 - type: ndcg_at_5 value: 34.326 - type: precision_at_1 value: 42.857 - type: precision_at_10 value: 26.122 - type: precision_at_100 value: 8.082 - type: 
precision_at_1000 value: 1.559 - type: precision_at_3 value: 40.136 - type: precision_at_5 value: 35.510000000000005 - type: recall_at_1 value: 3.567 - type: recall_at_10 value: 19.045 - type: recall_at_100 value: 49.979 - type: recall_at_1000 value: 84.206 - type: recall_at_3 value: 8.52 - type: recall_at_5 value: 13.103000000000002 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 68.8394 - type: ap value: 13.454399712443099 - type: f1 value: 53.04963076364322 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 60.546123372948514 - type: f1 value: 60.86952793277713 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 49.10042955060234 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 85.03308100375514 - type: cos_sim_ap value: 71.08284605869684 - type: cos_sim_f1 value: 65.42539436255494 - type: cos_sim_precision value: 64.14807302231237 - type: cos_sim_recall value: 66.75461741424802 - type: dot_accuracy value: 84.68736961316088 - type: dot_ap value: 69.20524036530992 - type: dot_f1 value: 63.54893953365829 - type: dot_precision value: 63.45698500394633 - type: dot_recall value: 63.641160949868066 - type: euclidean_accuracy value: 85.07480479227513 - type: euclidean_ap value: 71.14592761009864 - type: euclidean_f1 value: 65.43814432989691 - type: 
euclidean_precision value: 63.95465994962216 - type: euclidean_recall value: 66.99208443271768 - type: manhattan_accuracy value: 85.06288370984085 - type: manhattan_ap value: 71.07289742593868 - type: manhattan_f1 value: 65.37585421412301 - type: manhattan_precision value: 62.816147859922175 - type: manhattan_recall value: 68.15303430079156 - type: max_accuracy value: 85.07480479227513 - type: max_ap value: 71.14592761009864 - type: max_f1 value: 65.43814432989691 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 87.79058485659952 - type: cos_sim_ap value: 83.7183187008759 - type: cos_sim_f1 value: 75.86921142180798 - type: cos_sim_precision value: 73.00683371298405 - type: cos_sim_recall value: 78.96519864490298 - type: dot_accuracy value: 87.0085768618776 - type: dot_ap value: 81.87467488474279 - type: dot_f1 value: 74.04188363990559 - type: dot_precision value: 72.10507114191901 - type: dot_recall value: 76.08561749307053 - type: euclidean_accuracy value: 87.8332751193387 - type: euclidean_ap value: 83.83585648120315 - type: euclidean_f1 value: 76.02582177042369 - type: euclidean_precision value: 73.36388371759989 - type: euclidean_recall value: 78.88820449645827 - type: manhattan_accuracy value: 87.87208444910156 - type: manhattan_ap value: 83.8101950642973 - type: manhattan_f1 value: 75.90454195535027 - type: manhattan_precision value: 72.44419564761039 - type: manhattan_recall value: 79.71204188481676 - type: max_accuracy value: 87.87208444910156 - type: max_ap value: 83.83585648120315 - type: max_f1 value: 76.02582177042369 --- <h1 align="center">FlagEmbedding</h1> <h4 align="center"> <p> <a href=#model-list>Model List</a> | <a href=#usage>Usage</a> | <a href="#evaluation">Evaluation</a> | <a href="#train">Train</a> | <a href="#contact">Contact</a> | <a 
href="#license">License</a> <p> </h4> For more details, please refer to our GitHub: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding). [English](README.md) | [中文](https://github.com/FlagOpen/FlagEmbedding/blob/master/README_zh.md) FlagEmbedding can map any text to a low-dimensional dense vector which can be used for tasks like retrieval, classification, clustering, or semantic search. It can also be used in vector databases for LLMs. ************* 🌟**Updates**🌟 ************* - 08/09/2023: BGE Models are integrated into **Langchain**, you can use it like [**this**](#using-langchain); the C-MTEB **leaderboard** is [available](https://huggingface.co/spaces/mteb/leaderboard). - 08/05/2023: Release base-scale and small-scale models, **best performance among the models of the same size 🤗** - 08/02/2023: Release `bge-large-*`(short for BAAI General Embedding) Models, **rank 1st on MTEB and C-MTEB benchmark!** - 08/01/2023: We release the [Chinese Massive Text Embedding Benchmark](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB) (**C-MTEB**), consisting of 31 test datasets. ## Model List `bge` is short for `BAAI general embedding`. 
| Model | Language | Description | query instruction for retrieval\* | |:-------------------------------|:--------:| :--------:| :--------:| | [BAAI/bge-large-en](https://huggingface.co/BAAI/bge-large-en) | English | rank **1st** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-base-en](https://huggingface.co/BAAI/bge-base-en) | English | rank **2nd** in [MTEB](https://huggingface.co/spaces/mteb/leaderboard) leaderboard | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-small-en](https://huggingface.co/BAAI/bge-small-en) | English | a small-scale model but with competitive performance | `Represent this sentence for searching relevant passages: ` | | [BAAI/bge-large-zh](https://huggingface.co/BAAI/bge-large-zh) | Chinese | rank **1st** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-large-zh-noinstruct](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | Chinese | This model is trained without instruction, and rank **2nd** in [C-MTEB](https://github.com/FlagOpen/FlagEmbedding/tree/master/C_MTEB) benchmark | | | [BAAI/bge-base-zh](https://huggingface.co/BAAI/bge-base-zh) | Chinese | a base-scale model but has similar ability with `bge-large-zh` | `为这个句子生成表示以用于检索相关文章:` | | [BAAI/bge-small-zh](https://huggingface.co/BAAI/bge-small-zh) | Chinese | a small-scale model but with competitive performance | `为这个句子生成表示以用于检索相关文章:` | \*: If you need to search the **long** relevant passages to a **short** query (s2p retrieval task), you need to add the instruction to the query; in other cases, no instruction is needed, just use the original query directly. In all cases, **no instruction** need to be added to passages. 
## Usage Here are some examples to use `bge` models with [FlagEmbedding](#using-flagembedding), [Sentence-Transformers](#using-sentence-transformers), [Langchain](#using-langchain), or [Huggingface Transformers](#using-huggingface-transformers). #### Using FlagEmbedding ``` pip install -U FlagEmbedding ``` If it doesn't work for you, you can see [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md) for more methods to install FlagEmbedding. ```python from FlagEmbedding import FlagModel sentences = ["样例数据-1", "样例数据-2"] model = FlagModel('BAAI/bge-large-zh', query_instruction_for_retrieval="为这个句子生成表示以用于检索相关文章:") embeddings_1 = model.encode(sentences) embeddings_2 = model.encode(sentences) similarity = embeddings_1 @ embeddings_2.T print(similarity) # for s2p(short query to long passage) retrieval task, please use encode_queries() which will automatically add the instruction to each query # corpus in retrieval task can still use encode() or encode_corpus(), since they don't need instruction queries = ['query_1', 'query_2'] passages = ["样例文档-1", "样例文档-2"] q_embeddings = model.encode_queries(queries) p_embeddings = model.encode(passages) scores = q_embeddings @ p_embeddings.T ``` The value of argument `query_instruction_for_retrieval` see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list). FlagModel will use all available GPUs when encoding, please set `os.environ["CUDA_VISIBLE_DEVICES"]` to choose GPU. 
#### Using Sentence-Transformers Using this model also is easy when you have [sentence-transformers](https://www.SBERT.net) installed: ``` pip install -U sentence-transformers ``` ```python from sentence_transformers import SentenceTransformer sentences = ["样例数据-1", "样例数据-2"] model = SentenceTransformer('BAAI/bge-large-zh') embeddings_1 = model.encode(sentences, normalize_embeddings=True) embeddings_2 = model.encode(sentences, normalize_embeddings=True) similarity = embeddings_1 @ embeddings_2.T print(similarity) ``` For s2p(short query to long passage) retrieval task, each short query should start with an instruction (instructions see [Model List](https://github.com/FlagOpen/FlagEmbedding/tree/master#model-list)). But the instruction is not needed for passages. ```python from sentence_transformers import SentenceTransformer queries = ['query_1', 'query_2'] passages = ["样例文档-1", "样例文档-2"] instruction = "为这个句子生成表示以用于检索相关文章:" model = SentenceTransformer('BAAI/bge-large-zh') q_embeddings = model.encode([instruction+q for q in queries], normalize_embeddings=True) p_embeddings = model.encode(passages, normalize_embeddings=True) scores = q_embeddings @ p_embeddings.T ``` #### Using Langchain You can use `bge` in langchain like this: ```python from langchain.embeddings import HuggingFaceBgeEmbeddings model_name = "BAAI/bge-small-en" model_kwargs = {'device': 'cuda'} encode_kwargs = {'normalize_embeddings': True} # set True to compute cosine similarity model_norm = HuggingFaceBgeEmbeddings( model_name=model_name, model_kwargs=model_kwargs, encode_kwargs=encode_kwargs ) ``` #### Using HuggingFace Transformers With transformers package, you can use the model like this: First, you pass your input through the transformer model, then you select the last hidden state of first token (i.e., [CLS]) as the sentence embedding. 
```python from transformers import AutoTokenizer, AutoModel import torch # Sentences we want sentence embeddings for sentences = ["样例数据-1", "样例数据-2"] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-large-zh') model = AutoModel.from_pretrained('BAAI/bge-large-zh') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, return_tensors='pt') # for s2p(short query to long passage) retrieval task, add an instruction to query (not add instruction for passages) # encoded_input = tokenizer([instruction + q for q in queries], padding=True, truncation=True, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling. In this case, cls pooling. sentence_embeddings = model_output[0][:, 0] # normalize embeddings sentence_embeddings = torch.nn.functional.normalize(sentence_embeddings, p=2, dim=1) print("Sentence embeddings:", sentence_embeddings) ``` ## Evaluation `baai-general-embedding` models achieve **state-of-the-art performance on both MTEB and C-MTEB leaderboard!** More details and evaluation tools see our [scripts](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md). 
- **MTEB**: | Model Name | Dimension | Sequence Length | Average (56) | Retrieval (15) |Clustering (11) | Pair Classification (3) | Reranking (4) | STS (10) | Summarization (1) | Classification (12) | |:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | [**bge-large-en**](https://huggingface.co/BAAI/bge-large-en) | 1024 | 512 | **63.98** | **53.9** | **46.98** | 85.8 | **59.48** | 81.56 | 32.06 | **76.21** | | [**bge-base-en**](https://huggingface.co/BAAI/bge-base-en) | 768 | 512 | 63.36 | 53.0 | 46.32 | 85.86 | 58.7 | 81.84 | 29.27 | 75.27 | | [gte-large](https://huggingface.co/thenlper/gte-large) | 1024 | 512 | 63.13 | 52.22 | 46.84 | 85.00 | 59.13 | 83.35 | 31.66 | 73.33 | | [gte-base](https://huggingface.co/thenlper/gte-base) | 768 | 512 | 62.39 | 51.14 | 46.2 | 84.57 | 58.61 | 82.3 | 31.17 | 73.01 | | [e5-large-v2](https://huggingface.co/intfloat/e5-large-v2) | 1024| 512 | 62.25 | 50.56 | 44.49 | 86.03 | 56.61 | 82.05 | 30.19 | 75.24 | | [**bge-small-en**](https://huggingface.co/BAAI/bge-small-en) | 384 | 512 | 62.11 | 51.82 | 44.31 | 83.78 | 57.97 | 80.72 | 30.53 | 74.37 | | [instructor-xl](https://huggingface.co/hkunlp/instructor-xl) | 768 | 512 | 61.79 | 49.26 | 44.74 | 86.62 | 57.29 | 83.06 | 32.32 | 61.79 | | [e5-base-v2](https://huggingface.co/intfloat/e5-base-v2) | 768 | 512 | 61.5 | 50.29 | 43.80 | 85.73 | 55.91 | 81.05 | 30.28 | 73.84 | | [gte-small](https://huggingface.co/thenlper/gte-small) | 384 | 512 | 61.36 | 49.46 | 44.89 | 83.54 | 57.7 | 82.07 | 30.42 | 72.31 | | [text-embedding-ada-002](https://platform.openai.com/docs/guides/embeddings) | 1536 | 8192 | 60.99 | 49.25 | 45.9 | 84.89 | 56.32 | 80.97 | 30.8 | 70.93 | | [e5-small-v2](https://huggingface.co/intfloat/e5-base-v2) | 384 | 512 | 59.93 | 49.04 | 39.92 | 84.67 | 54.32 | 80.39 | 31.16 | 72.94 | | [sentence-t5-xxl](https://huggingface.co/sentence-transformers/sentence-t5-xxl) | 768 | 512 | 59.51 | 42.24 | 43.72 | 85.06 | 56.42 | 82.63 | 30.08 | 73.42 | | 
[all-mpnet-base-v2](https://huggingface.co/sentence-transformers/all-mpnet-base-v2) | 768 | 514 | 57.78 | 43.81 | 43.69 | 83.04 | 59.36 | 80.28 | 27.49 | 65.07 | | [sgpt-bloom-7b1-msmarco](https://huggingface.co/bigscience/sgpt-bloom-7b1-msmarco) | 4096 | 2048 | 57.59 | 48.22 | 38.93 | 81.9 | 55.65 | 77.74 | 33.6 | 66.19 | | [all-MiniLM-L12-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L12-v2) | 384 | 512 | 56.53 | 42.69 | 41.81 | 82.41 | 58.44 | 79.8 | 27.9 | 63.21 | | [all-MiniLM-L6-v2](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) | 384 | 512 | 56.26 | 41.95 | 42.35 | 82.37 | 58.04 | 78.9 | 30.81 | 63.05 | | [contriever-base-msmarco](https://huggingface.co/nthakur/contriever-base-msmarco) | 768 | 512 | 56.00 | 41.88 | 41.1 | 82.54 | 53.14 | 76.51 | 30.36 | 66.68 | | [sentence-t5-base](https://huggingface.co/sentence-transformers/sentence-t5-base) | 768 | 512 | 55.27 | 33.63 | 40.21 | 85.18 | 53.09 | 81.14 | 31.39 | 69.81 | - **C-MTEB**: We create a benchmark C-MTEB for chinese text embedding which consists of 31 datasets from 6 tasks. Please refer to [C_MTEB](https://github.com/FlagOpen/FlagEmbedding/blob/master/C_MTEB/README.md) for a detailed introduction. 
| Model | Embedding dimension | Avg | Retrieval | STS | PairClassification | Classification | Reranking | Clustering | |:-------------------------------|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:|:--------:| | [**bge-large-zh**](https://huggingface.co/BAAI/bge-large-zh) | 1024 | **64.20** | **71.53** | **53.23** | **78.94** | 72.26 | **65.11** | 48.39 | | [**bge-large-zh-noinstruct**](https://huggingface.co/BAAI/bge-large-zh-noinstruct) | 1024 | 63.53 | 70.55 | 50.98 | 76.77 | **72.49** | 64.91 | **50.01** | | [**BAAI/bge-base-zh**](https://huggingface.co/BAAI/bge-base-zh) | 768 | 62.96 | 69.53 | 52.05 | 77.5 | 70.98 | 64.91 | 47.63 | | [**BAAI/bge-small-zh**](https://huggingface.co/BAAI/bge-small-zh) | 512 | 58.27 | 63.07 | 46.87 | 70.35 | 67.78 | 61.48 | 45.09 | | [m3e-base](https://huggingface.co/moka-ai/m3e-base) | 768 | 57.10 |56.91 | 48.15 | 63.99 | 70.28 | 59.34 | 47.68 | | [m3e-large](https://huggingface.co/moka-ai/m3e-large) | 1024 | 57.05 |54.75 | 48.64 | 64.3 | 71.22 | 59.66 | 48.88 | | [text-embedding-ada-002(OpenAI)](https://platform.openai.com/docs/guides/embeddings/what-are-embeddings) | 1536 | 53.02 | 52.0 | 40.61 | 69.56 | 67.38 | 54.28 | 45.68 | | [luotuo](https://huggingface.co/silk-road/luotuo-bert-medium) | 1024 | 49.37 | 44.4 | 39.41 | 66.62 | 65.29 | 49.25 | 44.39 | | [text2vec](https://huggingface.co/shibing624/text2vec-base-chinese) | 768 | 47.63 | 38.79 | 41.71 | 67.41 | 65.18 | 49.45 | 37.66 | | [text2vec-large](https://huggingface.co/GanymedeNil/text2vec-large-chinese) | 1024 | 47.36 | 41.94 | 41.98 | 70.86 | 63.42 | 49.16 | 30.02 | ## Train This section will introduce the way we used to train the general embedding. 
The training scripts are in [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md), and we provide some examples to do [pre-train](https://github.com/FlagOpen/FlagEmbedding/blob/master/examples/pretrain/README.md) and [fine-tune](https://github.com/FlagOpen/FlagEmbedding/blob/master/examples/finetune/README.md). **1. RetroMAE Pre-train** We pre-train the model following the method [retromae](https://github.com/staoxiao/RetroMAE), which shows promising improvement in retrieval tasks ([paper](https://aclanthology.org/2022.emnlp-main.35.pdf)). The pre-training was conducted on 24 A100(40G) GPUs with a batch size of 720. In retromae, the mask ratios of the encoder and decoder are 0.3 and 0.5, respectively. We used the AdamW optimizer with a learning rate of 2e-5. **Pre-training data**: - English: - [Pile](https://pile.eleuther.ai/) - [wikipedia](https://huggingface.co/datasets/wikipedia) - [msmarco](https://huggingface.co/datasets/Tevatron/msmarco-passage-corpus) - Chinese: - [wudao](https://github.com/BAAI-WuDao/Data) **2. Finetune** We fine-tune the model using a contrastive objective. The format of the input data is a triple `(query, positive, negative)`. Besides the negative in the triple, we also adopt an in-batch negatives strategy. We employ the cross-device negatives sharing method to share negatives among different GPUs, which can dramatically **increase the number of negatives**. We trained our model on 48 A100(40G) GPUs with a large batch size of 32,768 (so there are **65,535** negatives for each query in a batch). We used the AdamW optimizer with a learning rate of 1e-5. The temperature for the contrastive loss is 0.01. Besides, we add an instruction to the query for the s2p(short query to long passage) retrieval task in the training (adding nothing to passages). For English, the instruction is `Represent this sentence for searching relevant passages: `; For Chinese, the instruction is `为这个句子生成表示以用于检索相关文章:`. 
In the evaluation, the instruction should be added for queries in retrieval task, not be added for other tasks. Noted that the instruction is not needed for passages. The finetune script is accessible in this repository: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/blob/master/FlagEmbedding/baai_general_embedding/README.md). You can easily finetune your model with it. **Training data**: - For English, we collect 230M text pairs from [wikipedia](https://huggingface.co/datasets/wikipedia), [cc-net](https://github.com/facebookresearch/cc_net), and so on. - For chinese, we collect 120M text pairs from [wudao](https://github.com/BAAI-WuDao/Data), [simclue](https://github.com/CLUEbenchmark/SimCLUE) and so on. **The data collection is to be released in the future.** We will continually update the embedding models and training codes, hoping to promote the development of the embedding model community. ## License FlagEmbedding is licensed under [MIT License](https://github.com/FlagOpen/FlagEmbedding/blob/master/LICENSE). The released models can be used for commercial purposes free of charge.
[ "BIOSSES", "SCIFACT" ]
mhenrichsen/context-aware-splitter-7b
mhenrichsen
text-generation
[ "transformers", "pytorch", "llama", "text-generation", "da", "dataset:mhenrichsen/context-aware-splits", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-09-18T19:17:46Z
2023-09-19T10:58:01+00:00
18
9
--- datasets: - mhenrichsen/context-aware-splits language: - da license: apache-2.0 --- # Context Aware Splitter 1b model available [here](https://huggingface.co/mhenrichsen/context-aware-splitter-1b). CAS is a text splitter for Retrieval Augmented Generation. It's trained on 12.3k danish texts with a token count of 13.4m. ## What does it do? CAS takes a text (str), reads and understands the contexts and then provides the best splits based on a defined word count. It returns a dict with the keys: - splits: list[str] - topic: str ## Code example ```python from transformers import AutoTokenizer, TextStreamer, AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained("mhenrichsen/context-aware-splitter-7b") tokenizer = AutoTokenizer.from_pretrained("mhenrichsen/context-aware-splitter-7b") streamer = TextStreamer(tokenizer, skip_special_tokens=True) WORD_SPLIT_COUNT = 50 prompt_template = """### Instruction: Din opgave er at segmentere en given tekst i separate dele, så hver del giver mening og kan læses uafhængigt af de andre. Hvis det giver mening, må der kan være et overlap mellem delene. Hver del skal ideelt indeholde {word_count} ord. ### Input: {text} ### Response: """ artikel = """Kina er stærkt utilfreds med, at Tysklands udenrigsminister, Annalena Baerbock, har omtalt den kinesiske præsident Xi Jinping som en diktator. - Bemærkningerne fra Tyskland er ekstremt absurde, krænker Kinas politiske værdighed alvorligt og er en åben politisk provokation, udtalte talsperson fra det kinesiske udenrigsministerium Mao Ning i går ifølge CNN. Bemærkningen fra udenrigsminister Annalena Baerbock faldt i et interview om krigen i Ukraine med Fox News i sidste uge. - Hvis Putin skulle vinde denne krig, hvilket signal ville det så sende til andre diktatorer i verden, som Xi, som den kinesiske præsident?, sagde hun. 
Tysklands ambassadør i Kina, Patricia Flor, har som konsekvens af udtalelsen været til en kammeratlig samtale, oplyser det tyske udenrigsministerium til CNN.""" tokens = tokenizer( prompt_template.format(text=artikel, word_count=WORD_SPLIT_COUNT), return_tensors='pt' )['input_ids'] # Generate output generation_output = model.generate( tokens, streamer=streamer, max_length = 8194, eos_token_id = 29913 ) ``` Example: ``` ### Instruction: Din opgave er at segmentere en given tekst i separate dele, så hver del giver mening og kan læses uafhængigt af de andre. Hvis det giver mening, må der kan være et overlap mellem delene. Hver del skal ideelt indeholde 50 ord. ### Input: Munkebjerg er et overvejende middelklassekvarter beliggende i det centrale Odense Munkebjerg grænser op til Hunderup i vest, hvor det afgrænses af Hjallesevej, og byens centrum i nord. Kvarteret har status som et familievenligt boligkvarter med både lejligheder (i området omkring H.C Andersensgade) og parcelhuse som på og omkring Munkebjergvej og Munkebjergskolen. Socialdemokratiet står traditionelt set stærkt i området, som det også ses på resultaterne af stemmer afgivet ved valgstedet Munkebjergskolen fra folketingsvalget i 2011, hvor partiet fik 24,8% af stemmerne. Dog vinder partiet Venstre samt Det Radikale Venstre også bred opbakning i kvarteret med henholdsvis 20,7 og 12,6% af stemmerne ligeledes fra valget i 2011. De fleste af kvarterets børn går på den lokale Munkebjergskolen, mens enkelte går på Odense Friskole og/eller Giersings Realskole. Munkebjergkvarteret er desuden hjemsted for fodboldklubben OKS. Munkebjergkvarteret kaldes i dagligtale for "Munken". ### Response: ``` This returns the following dictionary: ``` {'splits': ['Munkebjerg er et overvejende middelklassekvarter beliggende i det centrale Odense. Munkebjerg grænser op til Hunderup i vest, hvor det afgrænses af Hjallesevej, og byens centrum i nord. 
Kvarteret har status som et familievenligt boligkvarter med både lejligheder (i området omkring H.C Andersensgade) og parcelhuse som på og omkring Munkebjergvej og Munkebjergskolen.', 'Socialdemokratiet står traditionelt set stærkt i området, som det også ses på resultaterne af stemmer afgivet ved valgstedet Munkebjergskolen fra folketingsvalget i 2011, hvor partiet fik 24,8% af stemmerne. Dog vinder partiet Venstre samt Det Radikale Venstre også bred opbakning i kvarteret med henholdsvis 20,7 og 12,6% af stemmerne ligeledes fra valget i 2011.', "De fleste af kvarterets børn går på den lokale Munkebjergskolen, mens enkelte går på Odense Friskole og/eller Giersings Realskole. Munkebjergkvarteret er desuden hjemsted for fodboldklubben OKS. Munkebjergkvarteret kaldes i dagligtale for 'Munken'."], 'topic': 'Beskrivelse af Munkebjergkvarteret i Odense.'} ``` ## Prompt format The model follows alpaca format. ``` ### Instruction: Din opgave er at segmentere en given tekst i separate dele, så hver del giver mening og kan læses uafhængigt af de andre. Hvis det giver mening, må der kan være et overlap mellem delene. Hver del skal ideelt indeholde {WORD_COUNT} ord. ### Input: {TEXT} ### Response: ```
[ "CAS" ]
medspaner/xlm-roberta-large-spanish-trials-cases-neg-spec
medspaner
token-classification
[ "transformers", "pytorch", "xlm-roberta", "token-classification", "generated_from_trainer", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-02T16:36:42Z
2024-10-01T06:32:31+00:00
18
0
--- license: cc-by-nc-4.0 metrics: - precision - recall - f1 - accuracy tags: - generated_from_trainer widget: - text: Pacientes sanos, sin ninguna enfermedad, que no tomen tengan ningún tratamiento model-index: - name: xlm-roberta-large-spanish-trials-cases-neg-spec results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # xlm-roberta-large-spanish-trials-cases-neg-spec This named entity recognition model detects negation and speculation entities, and negated and speculated concepts: - Neg_cue: negation cue (e.g. *no*, *sin*) - Negated: negated entity or event (e.g. *sin **dolor***) - Spec_cue: speculation cue (e.g. *posiblemente*) - Speculated: speculated entity or event (e.g. *posiblemente **sobreviva***) The model achieves the following results on the test set (results are averaged over 5 evaluation rounds): - Precision: 0.871 (±0.006) - Recall: 0.874 (±0.009) - F1: 0.873 (±0.004) - Accuracy: 0.984 (±0.001) ## Model description This model adapts the pre-trained model [xlm-roberta-large-spanish-clinical](https://huggingface.co/llange/xlm-roberta-large-spanish-clinical), presented in [Lange et al. (2022)](https://academic.oup.com/bioinformatics/article/38/12/3267/6575884). It is fine-tuned to conduct medical named entity recognition on texts about in Spanish. The model is fine-tuned on the [CT-EBM-ES corpus (Campillos-Llanos et al. 2021)](https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-021-01395-z) and 100 clinical cases with Creative Commons License. 
If you use this model, please, cite as follows: ``` @article{campillosetal2024,         title = {{Hybrid tool for semantic annotation and concept extraction of medical texts in Spanish}},         author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\'o}n, Adri{\'a}n},         journal = {BMC Bioinformatics}, year={2024}, publisher={BioMed Central} } ``` ## Intended uses & limitations **Disclosure**: *This model is under development and needs to be improved. It should not be used for medical decision making without human assistance and supervision* This model is intended for a generalist purpose, and may have bias and/or any other undesirable distortions. Third parties who deploy or provide systems and/or services using any of these models (or using systems based on these models) should note that it is their responsibility to mitigate the risks arising from their use. Third parties, in any event, need to comply with applicable regulations, including regulations concerning the use of artificial intelligence. The owner or creator of the models will in no event be liable for any results arising from the use made by third parties of these models. **Descargo de responsabilidad**: *Esta herramienta se encuentra en desarrollo y no debe ser empleada para la toma de decisiones médicas* La finalidad de este modelo es generalista, y se advierte que puede tener sesgos y/u otro tipo de distorsiones indeseables. Terceras partes que desplieguen o proporcionen sistemas y/o servicios usando alguno de estos modelos (o utilizando sistemas basados en estos modelos) han tener presente que es su responsabilidad abordar y minimizar los riesgos derivados de su uso. Las terceras partes, en cualquier circunstancia, deben cumplir con la normativa aplicable, incluyendo la normativa que concierne al uso de la inteligencia artificial. 
El propietario o creador de los modelos de ningún modo será responsable de los resultados derivados del uso que las terceras partes hagan de estos modelos. ## Training and evaluation data The model is fine-tuned on the [NUBEs corpus (Lima et al. 2020)](https://aclanthology.org/2020.lrec-1.708/), 100 clinical cases with Creative Commons licence and the [Clinical Trials for Evidence-Based-Medicine in Spanish (CT-EBM-SP) corpus](http://www.lllf.uam.es/ESP/nlpdata/wp2/). The CT-EBM-SP corpus is a collection of 1200 texts about clinical trials studies and clinical trials announcements: - 500 abstracts from journals published under a Creative Commons license, e.g. available in PubMed or the Scientific Electronic Library Online (SciELO) - 700 clinical trials announcements published in the European Clinical Trials Register and Repositorio Español de Estudios Clínicos If you use the CT-EBM-ES resource, please, cite as follows: ``` @article{campillosetal-midm2021,         title = {A clinical trials corpus annotated with UMLS© entities to enhance the access to Evidence-Based Medicine},         author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\'o}n, Adri{\'a}n and Moreno-Sandoval, Antonio},         journal = {BMC Medical Informatics and Decision Making},         volume={21}, number={1}, pages={1--19}, year={2021}, publisher={BioMed Central} } ``` ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: we used different seeds for 5 evaluation rounds, and uploaded the model with the best results - optimizer: Adam - num_epochs: average 19.6 epochs (±7.09); trained with early stopping if no improvement after 5 epochs (early stopping patience: 5) ### Training results (test set; average and standard deviation of 5 rounds with different seeds) | Precision | Recall | F1 | Accuracy | 
|:--------------:|:--------------:|:--------------:|:--------------:| | 0.871 (±0.006) | 0.874 (±0.009) | 0.873 (±0.004) | 0.984 (±0.001) | ### Framework versions - Transformers 4.17.0 - Pytorch 1.10.2+cu113 - Datasets 1.18.4 - Tokenizers 0.11.6
[ "CT-EBM-SP", "SCIELO" ]
medspaner/roberta-es-clinical-trials-cases-temporal-ner
medspaner
token-classification
[ "transformers", "pytorch", "roberta", "token-classification", "generated_from_trainer", "license:cc-by-nc-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-10-03T08:37:59Z
2024-10-01T06:25:39+00:00
18
0
--- license: cc-by-nc-4.0 metrics: - precision - recall - f1 - accuracy tags: - generated_from_trainer widget: - text: Edad ≥ 18 años (en todos los centros), o edad ≥12 y <18 años con peso igual o superior a 40kg - text: Estudio realizado en un hospital desde julio de 2010 hasta diciembre de 2011 (18 meses) - text: Pacientes que hayan recibido bifosfonatos diarios, semanales o mensuales durante al menos 3 años. - text: 50 g (40 g la noche anterior y 10 g por la mañana) de L-glutamina model-index: - name: roberta-es-clinical-trials-cases-temporal-ner results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-es-clinical-trials-temporal-ner This named entity recognition model detects temporal expressions (TIMEX) according to the [TimeML scheme](https://en.wikipedia.org/wiki/ISO-TimeML) ([Pustejovsky et al. 2005](http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.85.5610&rep=rep1&type=pdf)), in addition to Age entities: - Age: e.g. *18 años* - Date: e.g. *2022*, *26 de noviembre* - Duration: e.g. *3 horas* - Frequency: e.g. *semanal* - Time: e.g. *noche* The model achieves the following results on the test set (when trained with the training and development set; results are averaged over 5 evaluation rounds): - Precision: 0.898 (±0.008) - Recall: 0.899 (±0.006) - F1: 0.899 (±0.003) - Accuracy: 0.996 (±0.001) ## Model description This model adapts the pre-trained model [bsc-bio-ehr-es](https://huggingface.co/PlanTL-GOB-ES/bsc-bio-ehr-es), presented in [Pio Carriño et al. (2022)](https://aclanthology.org/2022.bionlp-1.19/). It is fine-tuned to conduct temporal named entity recognition on Spanish texts about clinical trials and clinical cases. The model is fine-tuned on the [CT-EBM-ES corpus (Campillos-Llanos et al. 
2021)](https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-021-01395-z) and 100 clinical cases with Creative Commons license. If you use this model, please, cite as follows: ``` @article{campillosetal2024,         title = {{Hybrid tool for semantic annotation and concept extraction of medical texts in Spanish}},         author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\'o}n, Adri{\'a}n},         journal = {BMC Bioinformatics}, year={2024}, publisher={BioMed Central} } ``` ## Intended uses & limitations **Disclosure**: *This model is under development and needs to be improved. It should not be used for medical decision making without human assistance and supervision* This model is intended for a generalist purpose, and may have bias and/or any other undesirable distortions. Third parties who deploy or provide systems and/or services using any of these models (or using systems based on these models) should note that it is their responsibility to mitigate the risks arising from their use. Third parties, in any event, need to comply with applicable regulations, including regulations concerning the use of artificial intelligence. The owner or creator of the models will in no event be liable for any results arising from the use made by third parties of these models. **Descargo de responsabilidad**: *Esta herramienta se encuentra en desarrollo y no debe ser empleada para la toma de decisiones médicas* La finalidad de este modelo es generalista, y se advierte que puede tener sesgos y/u otro tipo de distorsiones indeseables. Terceras partes que desplieguen o proporcionen sistemas y/o servicios usando alguno de estos modelos (o utilizando sistemas basados en estos modelos) han tener presente que es su responsabilidad abordar y minimizar los riesgos derivados de su uso. 
Las terceras partes, en cualquier circunstancia, deben cumplir con la normativa aplicable, incluyendo la normativa que concierne al uso de la inteligencia artificial. El propietario o creador de los modelos de ningún modo será responsable de los resultados derivados del uso que las terceras partes hagan de estos modelos. ## Training and evaluation data To fine-tune the model we used the [Clinical Trials for Evidence-Based-Medicine in Spanish (CT-EBM-SP) corpus](http://www.lllf.uam.es/ESP/nlpdata/wp2/) and 100 clinical cases with Creative Commons license. The CT-EBM-SP corpus is a collection of 1200 texts about clinical trials studies and clinical trials announcements: - 500 abstracts from journals published under a Creative Commons license, e.g. available in PubMed or the Scientific Electronic Library Online (SciELO) - 700 clinical trials announcements published in the European Clinical Trials Register and Repositorio Español de Estudios Clínicos If you use the CT-EBM-ES resource, please, cite as follows: ``` @article{campillosetal-midm2021,         title = {A clinical trials corpus annotated with UMLS© entities to enhance the access to Evidence-Based Medicine},         author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\'o}n, Adri{\'a}n and Moreno-Sandoval, Antonio},         journal = {BMC Medical Informatics and Decision Making},         volume={21}, number={1}, pages={1--19}, year={2021}, publisher={BioMed Central} } ``` ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: we used different seeds for 5 evaluation rounds, and uploaded the model with the best results - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: average of 16.2 epochs (±1.76) ### Training results (test set; average and standard deviation of 5 rounds with different seeds) | Precision | 
Recall | F1 | Accuracy | |:--------------:|:--------------:|:--------------:|:--------------:| | 0.898 (±0.008) | 0.899 (±0.006) | 0.899 (±0.003) | 0.996 (±0.001) | **Results per class (test set; average and standard deviation of 5 rounds with different seeds)** | Class | Precision | Recall | F1 | Support | |:---------:|:--------------:|:--------------:|:--------------:|:---------:| | Age | 0.924 (±0.013) | 0.946 (±0.009) | 0.934 (±0.006) | 372 | | Date | 0.924 (±0.021) | 0.898 (±0.021) | 0.910 (±0.004) | 412 | | Duration | 0.907 (±0.012) | 0.887 (±0.011) | 0.897 (±0.007) | 629 | | Frequency | 0.858 (±0.053) | 0.890 (±0.017) | 0.873 (±0.029) | 73 | | Time | 0.730 (±0.034) | 0.825 (±0.029) | 0.774 (±0.012) | 113 | ### Framework versions - Transformers 4.17.0 - Pytorch 1.10.2+cu113 - Datasets 1.18.4 - Tokenizers 0.11.6
[ "CT-EBM-SP", "SCIELO" ]
usvsnsp/pythia-70m-ppo
usvsnsp
text-generation
[ "transformers", "safetensors", "gpt_neox", "text-generation", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-10-04T13:53:13Z
2023-10-04T15:45:21+00:00
18
0
--- {} --- Wandb Run: https://wandb.ai/eleutherai/pythia-rlhf/runs/gy2g8jj1 Model Evals: | Tasks |Version|Filter| Metric |Value | |Stderr| |--------------|-------|------|----------|-----:|---|-----:| |arc_challenge |Yaml |none |acc |0.2253|± |0.0122| | | |none |acc_norm |0.2278|± |0.0123| |arc_easy |Yaml |none |acc |0.2551|± |0.0089| | | |none |acc_norm |0.2567|± |0.0090| |lambada_openai|Yaml |none |perplexity| NaN|± | NaN| | | |none |acc |0.0016|± |0.0005| |logiqa |Yaml |none |acc |0.2028|± |0.0158| | | |none |acc_norm |0.2028|± |0.0158| |piqa |Yaml |none |acc |0.4946|± |0.0117| | | |none |acc_norm |0.4924|± |0.0117| |sciq |Yaml |none |acc |0.0140|± |0.0037| | | |none |acc_norm |0.0140|± |0.0037| |winogrande |Yaml |none |acc |0.5036|± |0.0141| |wsc |Yaml |none |acc |0.6346|± |0.0474|
[ "SCIQ" ]
lomahony/pythia-1b-helpful-sft
lomahony
text-generation
[ "transformers", "pytorch", "safetensors", "gpt_neox", "text-generation", "causal-lm", "pythia", "en", "dataset:Anthropic/hh-rlhf", "arxiv:2101.00027", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-11-08T15:49:11Z
2024-11-26T02:08:22+00:00
18
0
--- datasets: - Anthropic/hh-rlhf language: - en license: apache-2.0 tags: - pytorch - causal-lm - pythia --- [Pythia-1b](https://huggingface.co/EleutherAI/pythia-1b) supervised finetuned using TRLx library with the helpful subset of [Anthropic-hh-rlhf dataset](https://huggingface.co/datasets/Anthropic/hh-rlhf) for 1 epoch. Checkpoints are also uploaded. Fully reproducible finetuning code is available on [GitHub](https://github.com/lauraaisling/trlx-pythia/tree/main) [wandb log](https://wandb.ai/lauraomahony999/sft-pythia/runs/azscanfe) See [Pythia-1b](https://huggingface.co/EleutherAI/pythia-1b) for model details [(paper)](https://arxiv.org/abs/2101.00027). See further details of these models in the paper [Attributing Mode Collapse in the Fine-Tuning of Large Language Models](https://openreview.net/pdf?id=3pDMYjpOxk). You can cite these models if they are helpful as follows: <pre> @inproceedings{o2024attributing, title={Attributing Mode Collapse in the Fine-Tuning of Large Language Models}, author={O’Mahony, Laura and Grinsztajn, Leo and Schoelkopf, Hailey and Biderman, Stella}, booktitle={ICLR 2024, Mathematical and Empirical Understanding of Foundation Models (ME-FoMo) workshop}, year={2024} } </pre> hf (pretrained=lomahony/pythia-1b-helpful-sft), gen_kwargs: (None), limit: None, num_fewshot: 0, batch_size: 16 | Tasks |Version|Filter|n-shot| Metric | Value | |Stderr| |--------------|------:|------|-----:|---------------|------:|---|------| |arc_challenge | 1|none | 0|acc | 0.2543|± |0.0127| | | |none | 0|acc_norm | 0.2739|± |0.0130| |arc_easy | 1|none | 0|acc | 0.5724|± |0.0102| | | |none | 0|acc_norm | 0.4941|± |0.0103| |boolq | 2|none | 0|acc | 0.6199|± |0.0085| |hellaswag | 1|none | 0|acc | 0.3819|± |0.0048| | | |none | 0|acc_norm | 0.4736|± |0.0050| |lambada_openai| 1|none | 0|perplexity | 7.1374|± |0.2014| | | |none | 0|acc | 0.5626|± |0.0069| |openbookqa | 1|none | 0|acc | 0.2040|± |0.0180| | | |none | 0|acc_norm | 0.3140|± |0.0208| |piqa | 1|none | 0|acc 
| 0.7138|± |0.0105| | | |none | 0|acc_norm | 0.6997|± |0.0107| |sciq | 1|none | 0|acc | 0.8400|± |0.0116| | | |none | 0|acc_norm | 0.7620|± |0.0135| |wikitext | 2|none | 0|word_perplexity|16.9719|± |N/A | | | |none | 0|byte_perplexity| 1.6981|± |N/A | | | |none | 0|bits_per_byte | 0.7639|± |N/A | |winogrande | 1|none | 0|acc | 0.5343|± |0.0140| hf (pretrained=lomahony/pythia-1b-helpful-sft), gen_kwargs: (None), limit: None, num_fewshot: 5, batch_size: 16 | Tasks |Version|Filter|n-shot| Metric | Value | |Stderr| |--------------|------:|------|-----:|---------------|------:|---|------| |arc_challenge | 1|none | 5|acc | 0.2628|± |0.0129| | | |none | 5|acc_norm | 0.2918|± |0.0133| |arc_easy | 1|none | 5|acc | 0.6040|± |0.0100| | | |none | 5|acc_norm | 0.5816|± |0.0101| |boolq | 2|none | 5|acc | 0.5963|± |0.0086| |hellaswag | 1|none | 5|acc | 0.3780|± |0.0048| | | |none | 5|acc_norm | 0.4719|± |0.0050| |lambada_openai| 1|none | 5|perplexity |10.2584|± |0.2936| | | |none | 5|acc | 0.4832|± |0.0070| |openbookqa | 1|none | 5|acc | 0.1980|± |0.0178| | | |none | 5|acc_norm | 0.3220|± |0.0209| |piqa | 1|none | 5|acc | 0.7057|± |0.0106| | | |none | 5|acc_norm | 0.7095|± |0.0106| |sciq | 1|none | 5|acc | 0.8980|± |0.0096| | | |none | 5|acc_norm | 0.9000|± |0.0095| |wikitext | 2|none | 5|word_perplexity|16.9719|± |N/A | | | |none | 5|byte_perplexity| 1.6981|± |N/A | | | |none | 5|bits_per_byte | 0.7639|± |N/A | |winogrande | 1|none | 5|acc | 0.5446|± |0.0140|
[ "SCIQ" ]
RalFinger/origami-style-sdxl-lora
RalFinger
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "template:sd-lora", "paper", "art", "style", "folded", "origami", "handcraft", "paperart", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:other", "region:us" ]
2023-11-22T13:31:11Z
2023-11-22T13:31:13+00:00
18
1
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 license: other license_name: bespoke-lora-trained-license license_link: https://multimodal.art/civitai-licenses?allowNoCredit=True&allowCommercialUse=Sell&allowDerivatives=True&allowDifferentLicense=True tags: - text-to-image - stable-diffusion - lora - diffusers - template:sd-lora - paper - art - style - folded - origami - handcraft - paperart instance_prompt: ral-orgmi widget: - text: 'ral-orgmi, a origami paper samurai standing in front of a mountain ' output: url: 3774222.jpeg - text: 'ral-orgmi, a origami paper bird is sitting on a tree branch ' output: url: 3774206.jpeg - text: 'ral-orgmi, a origami paper policeman in a uniform is standing in the middle of a busy street ' output: url: 3774211.jpeg - text: 'ral-orgmi, origami paper sculpture of a ape in a forest ' output: url: 3774207.jpeg - text: 'ral-orgmi, a origami paper car on a city street ' output: url: 3774212.jpeg - text: 'ral-orgmi, a bear made out of origami paper in a forest ' output: url: 3774205.jpeg - text: 'ral-orgmi, a origami paper man in a hat and coat, in front of a flower field and a village ' output: url: 3774221.jpeg - text: 'ral-orgmi, a origami paper fish that is floating in the water ' output: url: 3774219.jpeg - text: 'ral-orgmi, a dog made out of origami paper sitting in a garden ' output: url: 3774215.jpeg - text: 'ral-orgmi, a clown made out of origami paper in a circus ' output: url: 3774213.jpeg --- # Origami Style [SDXL LoRA] <Gallery /> ([CivitAI](https://civitai.com/models/206575)) ## Model description <p><u>SDXL:<br /></u><span style="color:rgb(193, 194, 197)">Trigger word: </span><strong><span style="color:rgb(193, 194, 197)">ral-orgmi</span></strong><br /><br /><span style="color:rgb(193, 194, 197)">☕ Buy me a coffee: </span><a target="_blank" rel="ugc" href="https://ko-fi.com/ralfingerai">https://ko-fi.com/ralfingerai</a></p> ## Trigger words You should use `ral-orgmi` to trigger the image generation. 
## Download model Weights for this model are available in Safetensors format. [Download](/RalFinger/origami-style-sdxl-lora/tree/main) them in the Files & versions tab. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('stabilityai/stable-diffusion-xl-base-1.0', torch_dtype=torch.float16).to('cuda') pipeline.load_lora_weights('RalFinger/origami-style-sdxl-lora', weight_name='ral-orgmi-sdxl.safetensors') image = pipeline('ral-orgmi, a clown made out of origami paper in a circus ').images[0] ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
[ "BEAR" ]
ntc-ai/SDXL-LoRA-slider.dreamscape
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-10T05:09:44Z
2024-02-06T00:27:24+00:00
18
1
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/dreamscape_17_3.0.png widget: - text: dreamscape output: url: images/dreamscape_17_3.0.png - text: dreamscape output: url: images/dreamscape_19_3.0.png - text: dreamscape output: url: images/dreamscape_20_3.0.png - text: dreamscape output: url: images/dreamscape_21_3.0.png - text: dreamscape output: url: images/dreamscape_22_3.0.png inference: false instance_prompt: dreamscape --- # ntcai.xyz slider - dreamscape (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/dreamscape_17_-3.0.png" width=256 height=256 /> | <img src="images/dreamscape_17_0.0.png" width=256 height=256 /> | <img src="images/dreamscape_17_3.0.png" width=256 height=256 /> | | <img src="images/dreamscape_19_-3.0.png" width=256 height=256 /> | <img src="images/dreamscape_19_0.0.png" width=256 height=256 /> | <img src="images/dreamscape_19_3.0.png" width=256 height=256 /> | | <img src="images/dreamscape_20_-3.0.png" width=256 height=256 /> | <img src="images/dreamscape_20_0.0.png" width=256 height=256 /> | <img src="images/dreamscape_20_3.0.png" width=256 height=256 /> | See more at [https://sliders.ntcai.xyz/sliders/app/loras/ed83e177-aabb-4fba-bfa1-023a5beccbed](https://sliders.ntcai.xyz/sliders/app/loras/ed83e177-aabb-4fba-bfa1-023a5beccbed) ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` dreamscape ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.dreamscape', weight_name='dreamscape.safetensors', adapter_name="dreamscape") # Activate the LoRA pipe.set_adapters(["dreamscape"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, dreamscape" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 1496+ unique and diverse LoRAs along with 14600+ slider merges, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful <strong>NTC Slider Factory</strong> LoRA creator, allowing you to craft your own custom LoRAs and merges opening up endless possibilities. Your support on Patreon will allow us to continue developing new models and tools. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.asleep
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-16T16:31:21Z
2024-02-06T00:33:57+00:00
18
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/asleep_17_3.0.png widget: - text: asleep output: url: images/asleep_17_3.0.png - text: asleep output: url: images/asleep_19_3.0.png - text: asleep output: url: images/asleep_20_3.0.png - text: asleep output: url: images/asleep_21_3.0.png - text: asleep output: url: images/asleep_22_3.0.png inference: false instance_prompt: asleep --- # ntcai.xyz slider - asleep (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/asleep_17_-3.0.png" width=256 height=256 /> | <img src="images/asleep_17_0.0.png" width=256 height=256 /> | <img src="images/asleep_17_3.0.png" width=256 height=256 /> | | <img src="images/asleep_19_-3.0.png" width=256 height=256 /> | <img src="images/asleep_19_0.0.png" width=256 height=256 /> | <img src="images/asleep_19_3.0.png" width=256 height=256 /> | | <img src="images/asleep_20_-3.0.png" width=256 height=256 /> | <img src="images/asleep_20_0.0.png" width=256 height=256 /> | <img src="images/asleep_20_3.0.png" width=256 height=256 /> | See more at [https://sliders.ntcai.xyz/sliders/app/loras/f0b67511-4a49-4ed0-81d7-3e1de12b0d16](https://sliders.ntcai.xyz/sliders/app/loras/f0b67511-4a49-4ed0-81d7-3e1de12b0d16) ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` asleep ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.asleep', weight_name='asleep.safetensors', adapter_name="asleep") # Activate the LoRA pipe.set_adapters(["asleep"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, asleep" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 1496+ unique and diverse LoRAs along with 14602+ slider merges, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful <strong>NTC Slider Factory</strong> LoRA creator, allowing you to craft your own custom LoRAs and merges opening up endless possibilities. Your support on Patreon will allow us to continue developing new models and tools. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.maniacal-laughter
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-19T13:36:02Z
2023-12-19T13:36:05+00:00
18
2
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/maniacal laughter.../maniacal laughter_17_3.0.png widget: - text: maniacal laughter output: url: images/maniacal laughter_17_3.0.png - text: maniacal laughter output: url: images/maniacal laughter_19_3.0.png - text: maniacal laughter output: url: images/maniacal laughter_20_3.0.png - text: maniacal laughter output: url: images/maniacal laughter_21_3.0.png - text: maniacal laughter output: url: images/maniacal laughter_22_3.0.png inference: false instance_prompt: maniacal laughter --- # ntcai.xyz slider - maniacal laughter (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/maniacal laughter_17_-3.0.png" width=256 height=256 /> | <img src="images/maniacal laughter_17_0.0.png" width=256 height=256 /> | <img src="images/maniacal laughter_17_3.0.png" width=256 height=256 /> | | <img src="images/maniacal laughter_19_-3.0.png" width=256 height=256 /> | <img src="images/maniacal laughter_19_0.0.png" width=256 height=256 /> | <img src="images/maniacal laughter_19_3.0.png" width=256 height=256 /> | | <img src="images/maniacal laughter_20_-3.0.png" width=256 height=256 /> | <img src="images/maniacal laughter_20_0.0.png" width=256 height=256 /> | <img src="images/maniacal laughter_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` maniacal laughter ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.maniacal-laughter', weight_name='maniacal laughter.safetensors', adapter_name="maniacal laughter") # Activate the LoRA pipe.set_adapters(["maniacal laughter"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, maniacal laughter" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 480+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.snes-screenshot
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-24T07:44:47Z
2023-12-24T07:44:50+00:00
18
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/snes screenshot...realistic/snes screenshot_17_3.0.png widget: - text: snes screenshot output: url: images/snes screenshot_17_3.0.png - text: snes screenshot output: url: images/snes screenshot_19_3.0.png - text: snes screenshot output: url: images/snes screenshot_20_3.0.png - text: snes screenshot output: url: images/snes screenshot_21_3.0.png - text: snes screenshot output: url: images/snes screenshot_22_3.0.png inference: false instance_prompt: snes screenshot --- # ntcai.xyz slider - snes screenshot (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/snes screenshot_17_-3.0.png" width=256 height=256 /> | <img src="images/snes screenshot_17_0.0.png" width=256 height=256 /> | <img src="images/snes screenshot_17_3.0.png" width=256 height=256 /> | | <img src="images/snes screenshot_19_-3.0.png" width=256 height=256 /> | <img src="images/snes screenshot_19_0.0.png" width=256 height=256 /> | <img src="images/snes screenshot_19_3.0.png" width=256 height=256 /> | | <img src="images/snes screenshot_20_-3.0.png" width=256 height=256 /> | <img src="images/snes screenshot_20_0.0.png" width=256 height=256 /> | <img src="images/snes screenshot_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` snes screenshot ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.snes-screenshot', weight_name='snes screenshot.safetensors', adapter_name="snes screenshot") # Activate the LoRA pipe.set_adapters(["snes screenshot"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, snes screenshot" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 590+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
retrainai/instructor-xl
retrainai
sentence-similarity
[ "sentence-transformers", "pytorch", "t5", "text-embedding", "embeddings", "information-retrieval", "beir", "text-classification", "language-model", "text-clustering", "text-semantic-similarity", "text-evaluation", "prompt-retrieval", "text-reranking", "feature-extraction", "sentence-similarity", "transformers", "English", "Sentence Similarity", "natural_questions", "ms_marco", "fever", "hotpot_qa", "mteb", "en", "arxiv:2212.09741", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-12-28T09:33:09Z
2023-12-28T14:53:21+00:00
18
0
--- language: en license: apache-2.0 pipeline_tag: sentence-similarity tags: - text-embedding - embeddings - information-retrieval - beir - text-classification - language-model - text-clustering - text-semantic-similarity - text-evaluation - prompt-retrieval - text-reranking - sentence-transformers - feature-extraction - sentence-similarity - transformers - t5 - English - Sentence Similarity - natural_questions - ms_marco - fever - hotpot_qa - mteb inference: false model-index: - name: final_xl_results results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 85.08955223880596 - type: ap value: 52.66066378722476 - type: f1 value: 79.63340218960269 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 86.542 - type: ap value: 81.92695193008987 - type: f1 value: 86.51466132573681 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 42.964 - type: f1 value: 41.43146249774862 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 29.872 - type: map_at_10 value: 46.342 - type: map_at_100 value: 47.152 - type: map_at_1000 value: 47.154 - type: map_at_3 value: 41.216 - type: map_at_5 value: 44.035999999999994 - type: mrr_at_1 value: 30.939 - type: mrr_at_10 value: 46.756 - type: mrr_at_100 value: 47.573 - type: mrr_at_1000 value: 47.575 - type: mrr_at_3 value: 41.548 - type: mrr_at_5 value: 44.425 - type: ndcg_at_1 value: 29.872 - type: ndcg_at_10 value: 55.65 - type: ndcg_at_100 
value: 58.88099999999999 - type: ndcg_at_1000 value: 58.951 - type: ndcg_at_3 value: 45.0 - type: ndcg_at_5 value: 50.09 - type: precision_at_1 value: 29.872 - type: precision_at_10 value: 8.549 - type: precision_at_100 value: 0.991 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 18.658 - type: precision_at_5 value: 13.669999999999998 - type: recall_at_1 value: 29.872 - type: recall_at_10 value: 85.491 - type: recall_at_100 value: 99.075 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 55.974000000000004 - type: recall_at_5 value: 68.35 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 42.452729850641276 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 32.21141846480423 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 65.34710928952622 - type: mrr value: 77.61124301983028 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_spearman value: 84.15312230525639 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 82.66233766233766 - type: f1 value: 82.04175284777669 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 
37.36697339826455 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 30.551241447593092 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 36.797000000000004 - type: map_at_10 value: 48.46 - type: map_at_100 value: 49.968 - type: map_at_1000 value: 50.080000000000005 - type: map_at_3 value: 44.71 - type: map_at_5 value: 46.592 - type: mrr_at_1 value: 45.494 - type: mrr_at_10 value: 54.747 - type: mrr_at_100 value: 55.43599999999999 - type: mrr_at_1000 value: 55.464999999999996 - type: mrr_at_3 value: 52.361000000000004 - type: mrr_at_5 value: 53.727000000000004 - type: ndcg_at_1 value: 45.494 - type: ndcg_at_10 value: 54.989 - type: ndcg_at_100 value: 60.096000000000004 - type: ndcg_at_1000 value: 61.58 - type: ndcg_at_3 value: 49.977 - type: ndcg_at_5 value: 51.964999999999996 - type: precision_at_1 value: 45.494 - type: precision_at_10 value: 10.558 - type: precision_at_100 value: 1.6049999999999998 - type: precision_at_1000 value: 0.203 - type: precision_at_3 value: 23.796 - type: precision_at_5 value: 16.881 - type: recall_at_1 value: 36.797000000000004 - type: recall_at_10 value: 66.83 - type: recall_at_100 value: 88.34100000000001 - type: recall_at_1000 value: 97.202 - type: recall_at_3 value: 51.961999999999996 - type: recall_at_5 value: 57.940000000000005 - type: map_at_1 value: 32.597 - type: map_at_10 value: 43.424 - type: map_at_100 value: 44.78 - type: map_at_1000 value: 44.913 - type: map_at_3 value: 40.315 - type: map_at_5 value: 41.987 - type: mrr_at_1 value: 40.382 - type: mrr_at_10 value: 49.219 - type: mrr_at_100 value: 49.895 - type: mrr_at_1000 value: 49.936 - type: mrr_at_3 value: 46.996 - type: mrr_at_5 value: 48.231 - type: ndcg_at_1 value: 40.382 - type: ndcg_at_10 
value: 49.318 - type: ndcg_at_100 value: 53.839999999999996 - type: ndcg_at_1000 value: 55.82899999999999 - type: ndcg_at_3 value: 44.914 - type: ndcg_at_5 value: 46.798 - type: precision_at_1 value: 40.382 - type: precision_at_10 value: 9.274000000000001 - type: precision_at_100 value: 1.497 - type: precision_at_1000 value: 0.198 - type: precision_at_3 value: 21.592 - type: precision_at_5 value: 15.159 - type: recall_at_1 value: 32.597 - type: recall_at_10 value: 59.882000000000005 - type: recall_at_100 value: 78.446 - type: recall_at_1000 value: 90.88000000000001 - type: recall_at_3 value: 46.9 - type: recall_at_5 value: 52.222 - type: map_at_1 value: 43.8 - type: map_at_10 value: 57.293000000000006 - type: map_at_100 value: 58.321 - type: map_at_1000 value: 58.361 - type: map_at_3 value: 53.839999999999996 - type: map_at_5 value: 55.838 - type: mrr_at_1 value: 49.592000000000006 - type: mrr_at_10 value: 60.643 - type: mrr_at_100 value: 61.23499999999999 - type: mrr_at_1000 value: 61.251999999999995 - type: mrr_at_3 value: 58.265 - type: mrr_at_5 value: 59.717 - type: ndcg_at_1 value: 49.592000000000006 - type: ndcg_at_10 value: 63.364 - type: ndcg_at_100 value: 67.167 - type: ndcg_at_1000 value: 67.867 - type: ndcg_at_3 value: 57.912 - type: ndcg_at_5 value: 60.697 - type: precision_at_1 value: 49.592000000000006 - type: precision_at_10 value: 10.088 - type: precision_at_100 value: 1.2930000000000001 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 25.789 - type: precision_at_5 value: 17.541999999999998 - type: recall_at_1 value: 43.8 - type: recall_at_10 value: 77.635 - type: recall_at_100 value: 93.748 - type: recall_at_1000 value: 98.468 - type: recall_at_3 value: 63.223 - type: recall_at_5 value: 70.122 - type: map_at_1 value: 27.721 - type: map_at_10 value: 35.626999999999995 - type: map_at_100 value: 36.719 - type: map_at_1000 value: 36.8 - type: map_at_3 value: 32.781 - type: map_at_5 value: 34.333999999999996 - type: 
mrr_at_1 value: 29.604999999999997 - type: mrr_at_10 value: 37.564 - type: mrr_at_100 value: 38.505 - type: mrr_at_1000 value: 38.565 - type: mrr_at_3 value: 34.727000000000004 - type: mrr_at_5 value: 36.207 - type: ndcg_at_1 value: 29.604999999999997 - type: ndcg_at_10 value: 40.575 - type: ndcg_at_100 value: 45.613 - type: ndcg_at_1000 value: 47.676 - type: ndcg_at_3 value: 34.811 - type: ndcg_at_5 value: 37.491 - type: precision_at_1 value: 29.604999999999997 - type: precision_at_10 value: 6.1690000000000005 - type: precision_at_100 value: 0.906 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 14.237 - type: precision_at_5 value: 10.056 - type: recall_at_1 value: 27.721 - type: recall_at_10 value: 54.041 - type: recall_at_100 value: 76.62299999999999 - type: recall_at_1000 value: 92.134 - type: recall_at_3 value: 38.582 - type: recall_at_5 value: 44.989000000000004 - type: map_at_1 value: 16.553 - type: map_at_10 value: 25.384 - type: map_at_100 value: 26.655 - type: map_at_1000 value: 26.778000000000002 - type: map_at_3 value: 22.733 - type: map_at_5 value: 24.119 - type: mrr_at_1 value: 20.149 - type: mrr_at_10 value: 29.705 - type: mrr_at_100 value: 30.672 - type: mrr_at_1000 value: 30.737 - type: mrr_at_3 value: 27.032 - type: mrr_at_5 value: 28.369 - type: ndcg_at_1 value: 20.149 - type: ndcg_at_10 value: 30.843999999999998 - type: ndcg_at_100 value: 36.716 - type: ndcg_at_1000 value: 39.495000000000005 - type: ndcg_at_3 value: 25.918999999999997 - type: ndcg_at_5 value: 27.992 - type: precision_at_1 value: 20.149 - type: precision_at_10 value: 5.858 - type: precision_at_100 value: 1.009 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 12.645000000000001 - type: precision_at_5 value: 9.179 - type: recall_at_1 value: 16.553 - type: recall_at_10 value: 43.136 - type: recall_at_100 value: 68.562 - type: recall_at_1000 value: 88.208 - type: recall_at_3 value: 29.493000000000002 - type: 
recall_at_5 value: 34.751 - type: map_at_1 value: 28.000999999999998 - type: map_at_10 value: 39.004 - type: map_at_100 value: 40.461999999999996 - type: map_at_1000 value: 40.566 - type: map_at_3 value: 35.805 - type: map_at_5 value: 37.672 - type: mrr_at_1 value: 33.782000000000004 - type: mrr_at_10 value: 44.702 - type: mrr_at_100 value: 45.528 - type: mrr_at_1000 value: 45.576 - type: mrr_at_3 value: 42.14 - type: mrr_at_5 value: 43.651 - type: ndcg_at_1 value: 33.782000000000004 - type: ndcg_at_10 value: 45.275999999999996 - type: ndcg_at_100 value: 50.888 - type: ndcg_at_1000 value: 52.879 - type: ndcg_at_3 value: 40.191 - type: ndcg_at_5 value: 42.731 - type: precision_at_1 value: 33.782000000000004 - type: precision_at_10 value: 8.200000000000001 - type: precision_at_100 value: 1.287 - type: precision_at_1000 value: 0.16199999999999998 - type: precision_at_3 value: 19.185 - type: precision_at_5 value: 13.667000000000002 - type: recall_at_1 value: 28.000999999999998 - type: recall_at_10 value: 58.131 - type: recall_at_100 value: 80.869 - type: recall_at_1000 value: 93.931 - type: recall_at_3 value: 44.161 - type: recall_at_5 value: 50.592000000000006 - type: map_at_1 value: 28.047 - type: map_at_10 value: 38.596000000000004 - type: map_at_100 value: 40.116 - type: map_at_1000 value: 40.232 - type: map_at_3 value: 35.205 - type: map_at_5 value: 37.076 - type: mrr_at_1 value: 34.932 - type: mrr_at_10 value: 44.496 - type: mrr_at_100 value: 45.47 - type: mrr_at_1000 value: 45.519999999999996 - type: mrr_at_3 value: 41.743 - type: mrr_at_5 value: 43.352000000000004 - type: ndcg_at_1 value: 34.932 - type: ndcg_at_10 value: 44.901 - type: ndcg_at_100 value: 50.788999999999994 - type: ndcg_at_1000 value: 52.867 - type: ndcg_at_3 value: 39.449 - type: ndcg_at_5 value: 41.929 - type: precision_at_1 value: 34.932 - type: precision_at_10 value: 8.311 - type: precision_at_100 value: 1.3050000000000002 - type: precision_at_1000 value: 0.166 - type: precision_at_3 value: 
18.836 - type: precision_at_5 value: 13.447000000000001 - type: recall_at_1 value: 28.047 - type: recall_at_10 value: 57.717 - type: recall_at_100 value: 82.182 - type: recall_at_1000 value: 95.82000000000001 - type: recall_at_3 value: 42.448 - type: recall_at_5 value: 49.071 - type: map_at_1 value: 27.861250000000005 - type: map_at_10 value: 37.529583333333335 - type: map_at_100 value: 38.7915 - type: map_at_1000 value: 38.90558333333335 - type: map_at_3 value: 34.57333333333333 - type: map_at_5 value: 36.187166666666656 - type: mrr_at_1 value: 32.88291666666666 - type: mrr_at_10 value: 41.79750000000001 - type: mrr_at_100 value: 42.63183333333333 - type: mrr_at_1000 value: 42.68483333333333 - type: mrr_at_3 value: 39.313750000000006 - type: mrr_at_5 value: 40.70483333333333 - type: ndcg_at_1 value: 32.88291666666666 - type: ndcg_at_10 value: 43.09408333333333 - type: ndcg_at_100 value: 48.22158333333333 - type: ndcg_at_1000 value: 50.358000000000004 - type: ndcg_at_3 value: 38.129583333333336 - type: ndcg_at_5 value: 40.39266666666666 - type: precision_at_1 value: 32.88291666666666 - type: precision_at_10 value: 7.5584999999999996 - type: precision_at_100 value: 1.1903333333333332 - type: precision_at_1000 value: 0.15658333333333332 - type: precision_at_3 value: 17.495916666666666 - type: precision_at_5 value: 12.373833333333332 - type: recall_at_1 value: 27.861250000000005 - type: recall_at_10 value: 55.215916666666665 - type: recall_at_100 value: 77.392 - type: recall_at_1000 value: 92.04908333333334 - type: recall_at_3 value: 41.37475 - type: recall_at_5 value: 47.22908333333333 - type: map_at_1 value: 25.064999999999998 - type: map_at_10 value: 31.635999999999996 - type: map_at_100 value: 32.596000000000004 - type: map_at_1000 value: 32.695 - type: map_at_3 value: 29.612 - type: map_at_5 value: 30.768 - type: mrr_at_1 value: 28.528 - type: mrr_at_10 value: 34.717 - type: mrr_at_100 value: 35.558 - type: mrr_at_1000 value: 35.626000000000005 - type: mrr_at_3 
value: 32.745000000000005 - type: mrr_at_5 value: 33.819 - type: ndcg_at_1 value: 28.528 - type: ndcg_at_10 value: 35.647 - type: ndcg_at_100 value: 40.207 - type: ndcg_at_1000 value: 42.695 - type: ndcg_at_3 value: 31.878 - type: ndcg_at_5 value: 33.634 - type: precision_at_1 value: 28.528 - type: precision_at_10 value: 5.46 - type: precision_at_100 value: 0.84 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 13.547999999999998 - type: precision_at_5 value: 9.325 - type: recall_at_1 value: 25.064999999999998 - type: recall_at_10 value: 45.096000000000004 - type: recall_at_100 value: 65.658 - type: recall_at_1000 value: 84.128 - type: recall_at_3 value: 34.337 - type: recall_at_5 value: 38.849000000000004 - type: map_at_1 value: 17.276 - type: map_at_10 value: 24.535 - type: map_at_100 value: 25.655 - type: map_at_1000 value: 25.782 - type: map_at_3 value: 22.228 - type: map_at_5 value: 23.612 - type: mrr_at_1 value: 21.266 - type: mrr_at_10 value: 28.474 - type: mrr_at_100 value: 29.398000000000003 - type: mrr_at_1000 value: 29.482000000000003 - type: mrr_at_3 value: 26.245 - type: mrr_at_5 value: 27.624 - type: ndcg_at_1 value: 21.266 - type: ndcg_at_10 value: 29.087000000000003 - type: ndcg_at_100 value: 34.374 - type: ndcg_at_1000 value: 37.433 - type: ndcg_at_3 value: 25.040000000000003 - type: ndcg_at_5 value: 27.116 - type: precision_at_1 value: 21.266 - type: precision_at_10 value: 5.258 - type: precision_at_100 value: 0.9299999999999999 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 11.849 - type: precision_at_5 value: 8.699 - type: recall_at_1 value: 17.276 - type: recall_at_10 value: 38.928000000000004 - type: recall_at_100 value: 62.529 - type: recall_at_1000 value: 84.44800000000001 - type: recall_at_3 value: 27.554000000000002 - type: recall_at_5 value: 32.915 - type: map_at_1 value: 27.297 - type: map_at_10 value: 36.957 - type: map_at_100 value: 38.252 - type: map_at_1000 value: 
38.356 - type: map_at_3 value: 34.121 - type: map_at_5 value: 35.782000000000004 - type: mrr_at_1 value: 32.275999999999996 - type: mrr_at_10 value: 41.198 - type: mrr_at_100 value: 42.131 - type: mrr_at_1000 value: 42.186 - type: mrr_at_3 value: 38.557 - type: mrr_at_5 value: 40.12 - type: ndcg_at_1 value: 32.275999999999996 - type: ndcg_at_10 value: 42.516 - type: ndcg_at_100 value: 48.15 - type: ndcg_at_1000 value: 50.344 - type: ndcg_at_3 value: 37.423 - type: ndcg_at_5 value: 39.919 - type: precision_at_1 value: 32.275999999999996 - type: precision_at_10 value: 7.155 - type: precision_at_100 value: 1.123 - type: precision_at_1000 value: 0.14200000000000002 - type: precision_at_3 value: 17.163999999999998 - type: precision_at_5 value: 12.127 - type: recall_at_1 value: 27.297 - type: recall_at_10 value: 55.238 - type: recall_at_100 value: 79.2 - type: recall_at_1000 value: 94.258 - type: recall_at_3 value: 41.327000000000005 - type: recall_at_5 value: 47.588 - type: map_at_1 value: 29.142000000000003 - type: map_at_10 value: 38.769 - type: map_at_100 value: 40.292 - type: map_at_1000 value: 40.510000000000005 - type: map_at_3 value: 35.39 - type: map_at_5 value: 37.009 - type: mrr_at_1 value: 34.19 - type: mrr_at_10 value: 43.418 - type: mrr_at_100 value: 44.132 - type: mrr_at_1000 value: 44.175 - type: mrr_at_3 value: 40.547 - type: mrr_at_5 value: 42.088 - type: ndcg_at_1 value: 34.19 - type: ndcg_at_10 value: 45.14 - type: ndcg_at_100 value: 50.364 - type: ndcg_at_1000 value: 52.481 - type: ndcg_at_3 value: 39.466 - type: ndcg_at_5 value: 41.772 - type: precision_at_1 value: 34.19 - type: precision_at_10 value: 8.715 - type: precision_at_100 value: 1.6150000000000002 - type: precision_at_1000 value: 0.247 - type: precision_at_3 value: 18.248 - type: precision_at_5 value: 13.161999999999999 - type: recall_at_1 value: 29.142000000000003 - type: recall_at_10 value: 57.577999999999996 - type: recall_at_100 value: 81.428 - type: recall_at_1000 value: 94.017 - 
type: recall_at_3 value: 41.402 - type: recall_at_5 value: 47.695 - type: map_at_1 value: 22.039 - type: map_at_10 value: 30.669999999999998 - type: map_at_100 value: 31.682 - type: map_at_1000 value: 31.794 - type: map_at_3 value: 28.139999999999997 - type: map_at_5 value: 29.457 - type: mrr_at_1 value: 24.399 - type: mrr_at_10 value: 32.687 - type: mrr_at_100 value: 33.622 - type: mrr_at_1000 value: 33.698 - type: mrr_at_3 value: 30.407 - type: mrr_at_5 value: 31.552999999999997 - type: ndcg_at_1 value: 24.399 - type: ndcg_at_10 value: 35.472 - type: ndcg_at_100 value: 40.455000000000005 - type: ndcg_at_1000 value: 43.15 - type: ndcg_at_3 value: 30.575000000000003 - type: ndcg_at_5 value: 32.668 - type: precision_at_1 value: 24.399 - type: precision_at_10 value: 5.656 - type: precision_at_100 value: 0.874 - type: precision_at_1000 value: 0.121 - type: precision_at_3 value: 13.062000000000001 - type: precision_at_5 value: 9.242 - type: recall_at_1 value: 22.039 - type: recall_at_10 value: 48.379 - type: recall_at_100 value: 71.11800000000001 - type: recall_at_1000 value: 91.095 - type: recall_at_3 value: 35.108 - type: recall_at_5 value: 40.015 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 10.144 - type: map_at_10 value: 18.238 - type: map_at_100 value: 20.143 - type: map_at_1000 value: 20.346 - type: map_at_3 value: 14.809 - type: map_at_5 value: 16.567999999999998 - type: mrr_at_1 value: 22.671 - type: mrr_at_10 value: 34.906 - type: mrr_at_100 value: 35.858000000000004 - type: mrr_at_1000 value: 35.898 - type: mrr_at_3 value: 31.238 - type: mrr_at_5 value: 33.342 - type: ndcg_at_1 value: 22.671 - type: ndcg_at_10 value: 26.540000000000003 - type: ndcg_at_100 value: 34.138000000000005 - type: ndcg_at_1000 value: 37.72 - type: ndcg_at_3 value: 20.766000000000002 - type: ndcg_at_5 value: 22.927 - type: precision_at_1 value: 22.671 - type: precision_at_10 
value: 8.619 - type: precision_at_100 value: 1.678 - type: precision_at_1000 value: 0.23500000000000001 - type: precision_at_3 value: 15.592 - type: precision_at_5 value: 12.43 - type: recall_at_1 value: 10.144 - type: recall_at_10 value: 33.46 - type: recall_at_100 value: 59.758 - type: recall_at_1000 value: 79.704 - type: recall_at_3 value: 19.604 - type: recall_at_5 value: 25.367 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 8.654 - type: map_at_10 value: 18.506 - type: map_at_100 value: 26.412999999999997 - type: map_at_1000 value: 28.13 - type: map_at_3 value: 13.379 - type: map_at_5 value: 15.529000000000002 - type: mrr_at_1 value: 66.0 - type: mrr_at_10 value: 74.13 - type: mrr_at_100 value: 74.48700000000001 - type: mrr_at_1000 value: 74.49799999999999 - type: mrr_at_3 value: 72.75 - type: mrr_at_5 value: 73.762 - type: ndcg_at_1 value: 54.50000000000001 - type: ndcg_at_10 value: 40.236 - type: ndcg_at_100 value: 44.690999999999995 - type: ndcg_at_1000 value: 52.195 - type: ndcg_at_3 value: 45.632 - type: ndcg_at_5 value: 42.952 - type: precision_at_1 value: 66.0 - type: precision_at_10 value: 31.724999999999998 - type: precision_at_100 value: 10.299999999999999 - type: precision_at_1000 value: 2.194 - type: precision_at_3 value: 48.75 - type: precision_at_5 value: 41.6 - type: recall_at_1 value: 8.654 - type: recall_at_10 value: 23.74 - type: recall_at_100 value: 50.346999999999994 - type: recall_at_1000 value: 74.376 - type: recall_at_3 value: 14.636 - type: recall_at_5 value: 18.009 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 53.245 - type: f1 value: 48.74520523753552 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 
value: 51.729 - type: map_at_10 value: 63.904 - type: map_at_100 value: 64.363 - type: map_at_1000 value: 64.38199999999999 - type: map_at_3 value: 61.393 - type: map_at_5 value: 63.02100000000001 - type: mrr_at_1 value: 55.686 - type: mrr_at_10 value: 67.804 - type: mrr_at_100 value: 68.15299999999999 - type: mrr_at_1000 value: 68.161 - type: mrr_at_3 value: 65.494 - type: mrr_at_5 value: 67.01599999999999 - type: ndcg_at_1 value: 55.686 - type: ndcg_at_10 value: 70.025 - type: ndcg_at_100 value: 72.011 - type: ndcg_at_1000 value: 72.443 - type: ndcg_at_3 value: 65.32900000000001 - type: ndcg_at_5 value: 68.05600000000001 - type: precision_at_1 value: 55.686 - type: precision_at_10 value: 9.358 - type: precision_at_100 value: 1.05 - type: precision_at_1000 value: 0.11 - type: precision_at_3 value: 26.318 - type: precision_at_5 value: 17.321 - type: recall_at_1 value: 51.729 - type: recall_at_10 value: 85.04 - type: recall_at_100 value: 93.777 - type: recall_at_1000 value: 96.824 - type: recall_at_3 value: 72.521 - type: recall_at_5 value: 79.148 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 23.765 - type: map_at_10 value: 39.114 - type: map_at_100 value: 40.987 - type: map_at_1000 value: 41.155 - type: map_at_3 value: 34.028000000000006 - type: map_at_5 value: 36.925000000000004 - type: mrr_at_1 value: 46.451 - type: mrr_at_10 value: 54.711 - type: mrr_at_100 value: 55.509 - type: mrr_at_1000 value: 55.535000000000004 - type: mrr_at_3 value: 52.649 - type: mrr_at_5 value: 53.729000000000006 - type: ndcg_at_1 value: 46.451 - type: ndcg_at_10 value: 46.955999999999996 - type: ndcg_at_100 value: 53.686 - type: ndcg_at_1000 value: 56.230000000000004 - type: ndcg_at_3 value: 43.374 - type: ndcg_at_5 value: 44.372 - type: precision_at_1 value: 46.451 - type: precision_at_10 value: 13.256 - type: precision_at_100 value: 2.019 - type: precision_at_1000 value: 0.247 - type: 
precision_at_3 value: 29.115000000000002 - type: precision_at_5 value: 21.389 - type: recall_at_1 value: 23.765 - type: recall_at_10 value: 53.452999999999996 - type: recall_at_100 value: 78.828 - type: recall_at_1000 value: 93.938 - type: recall_at_3 value: 39.023 - type: recall_at_5 value: 45.18 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 31.918000000000003 - type: map_at_10 value: 46.741 - type: map_at_100 value: 47.762 - type: map_at_1000 value: 47.849000000000004 - type: map_at_3 value: 43.578 - type: map_at_5 value: 45.395 - type: mrr_at_1 value: 63.834999999999994 - type: mrr_at_10 value: 71.312 - type: mrr_at_100 value: 71.695 - type: mrr_at_1000 value: 71.714 - type: mrr_at_3 value: 69.82000000000001 - type: mrr_at_5 value: 70.726 - type: ndcg_at_1 value: 63.834999999999994 - type: ndcg_at_10 value: 55.879999999999995 - type: ndcg_at_100 value: 59.723000000000006 - type: ndcg_at_1000 value: 61.49400000000001 - type: ndcg_at_3 value: 50.964 - type: ndcg_at_5 value: 53.47 - type: precision_at_1 value: 63.834999999999994 - type: precision_at_10 value: 11.845 - type: precision_at_100 value: 1.4869999999999999 - type: precision_at_1000 value: 0.172 - type: precision_at_3 value: 32.158 - type: precision_at_5 value: 21.278 - type: recall_at_1 value: 31.918000000000003 - type: recall_at_10 value: 59.223000000000006 - type: recall_at_100 value: 74.328 - type: recall_at_1000 value: 86.05000000000001 - type: recall_at_3 value: 48.238 - type: recall_at_5 value: 53.193999999999996 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 79.7896 - type: ap value: 73.65166029460288 - type: f1 value: 79.71794693711813 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - 
type: map_at_1 value: 22.239 - type: map_at_10 value: 34.542 - type: map_at_100 value: 35.717999999999996 - type: map_at_1000 value: 35.764 - type: map_at_3 value: 30.432 - type: map_at_5 value: 32.81 - type: mrr_at_1 value: 22.908 - type: mrr_at_10 value: 35.127 - type: mrr_at_100 value: 36.238 - type: mrr_at_1000 value: 36.278 - type: mrr_at_3 value: 31.076999999999998 - type: mrr_at_5 value: 33.419 - type: ndcg_at_1 value: 22.908 - type: ndcg_at_10 value: 41.607 - type: ndcg_at_100 value: 47.28 - type: ndcg_at_1000 value: 48.414 - type: ndcg_at_3 value: 33.253 - type: ndcg_at_5 value: 37.486000000000004 - type: precision_at_1 value: 22.908 - type: precision_at_10 value: 6.645 - type: precision_at_100 value: 0.9490000000000001 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 14.130999999999998 - type: precision_at_5 value: 10.616 - type: recall_at_1 value: 22.239 - type: recall_at_10 value: 63.42 - type: recall_at_100 value: 89.696 - type: recall_at_1000 value: 98.351 - type: recall_at_3 value: 40.77 - type: recall_at_5 value: 50.93 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 95.06839945280439 - type: f1 value: 94.74276398224072 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 72.25718194254446 - type: f1 value: 53.91164489161391 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 71.47948890383323 - type: f1 value: 69.98520247230257 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: 
en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 76.46603900470748 - type: f1 value: 76.44111526065399 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 33.19106070798198 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 30.78772205248094 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 31.811231631488507 - type: mrr value: 32.98200485378021 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 6.9 - type: map_at_10 value: 13.703000000000001 - type: map_at_100 value: 17.251 - type: map_at_1000 value: 18.795 - type: map_at_3 value: 10.366999999999999 - type: map_at_5 value: 11.675 - type: mrr_at_1 value: 47.059 - type: mrr_at_10 value: 55.816 - type: mrr_at_100 value: 56.434 - type: mrr_at_1000 value: 56.467 - type: mrr_at_3 value: 53.973000000000006 - type: mrr_at_5 value: 55.257999999999996 - type: ndcg_at_1 value: 44.737 - type: ndcg_at_10 value: 35.997 - type: ndcg_at_100 value: 33.487 - type: ndcg_at_1000 value: 41.897 - type: ndcg_at_3 value: 41.18 - type: ndcg_at_5 value: 38.721 - type: precision_at_1 value: 46.129999999999995 - type: precision_at_10 value: 26.533 - type: precision_at_100 value: 8.706 - type: precision_at_1000 value: 2.16 - type: precision_at_3 value: 38.493 - type: precision_at_5 value: 33.189 - type: recall_at_1 value: 6.9 - type: recall_at_10 value: 17.488999999999997 - type: recall_at_100 value: 34.583000000000006 - type: recall_at_1000 
value: 64.942 - type: recall_at_3 value: 11.494 - type: recall_at_5 value: 13.496 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 33.028999999999996 - type: map_at_10 value: 49.307 - type: map_at_100 value: 50.205 - type: map_at_1000 value: 50.23 - type: map_at_3 value: 44.782 - type: map_at_5 value: 47.599999999999994 - type: mrr_at_1 value: 37.108999999999995 - type: mrr_at_10 value: 51.742999999999995 - type: mrr_at_100 value: 52.405 - type: mrr_at_1000 value: 52.422000000000004 - type: mrr_at_3 value: 48.087999999999994 - type: mrr_at_5 value: 50.414 - type: ndcg_at_1 value: 37.08 - type: ndcg_at_10 value: 57.236 - type: ndcg_at_100 value: 60.931999999999995 - type: ndcg_at_1000 value: 61.522 - type: ndcg_at_3 value: 48.93 - type: ndcg_at_5 value: 53.561 - type: precision_at_1 value: 37.08 - type: precision_at_10 value: 9.386 - type: precision_at_100 value: 1.1480000000000001 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 22.258 - type: precision_at_5 value: 16.025 - type: recall_at_1 value: 33.028999999999996 - type: recall_at_10 value: 78.805 - type: recall_at_100 value: 94.643 - type: recall_at_1000 value: 99.039 - type: recall_at_3 value: 57.602 - type: recall_at_5 value: 68.253 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 71.122 - type: map_at_10 value: 85.237 - type: map_at_100 value: 85.872 - type: map_at_1000 value: 85.885 - type: map_at_3 value: 82.27499999999999 - type: map_at_5 value: 84.13199999999999 - type: mrr_at_1 value: 81.73 - type: mrr_at_10 value: 87.834 - type: mrr_at_100 value: 87.92 - type: mrr_at_1000 value: 87.921 - type: mrr_at_3 value: 86.878 - type: mrr_at_5 value: 87.512 - type: ndcg_at_1 value: 81.73 - type: ndcg_at_10 value: 88.85499999999999 - type: ndcg_at_100 value: 89.992 - type: ndcg_at_1000 value: 90.07 - type: ndcg_at_3 
value: 85.997 - type: ndcg_at_5 value: 87.55199999999999 - type: precision_at_1 value: 81.73 - type: precision_at_10 value: 13.491 - type: precision_at_100 value: 1.536 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.623 - type: precision_at_5 value: 24.742 - type: recall_at_1 value: 71.122 - type: recall_at_10 value: 95.935 - type: recall_at_100 value: 99.657 - type: recall_at_1000 value: 99.996 - type: recall_at_3 value: 87.80799999999999 - type: recall_at_5 value: 92.161 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 63.490029238193756 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 65.13153408508836 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.202999999999999 - type: map_at_10 value: 10.174 - type: map_at_100 value: 12.138 - type: map_at_1000 value: 12.418 - type: map_at_3 value: 7.379 - type: map_at_5 value: 8.727 - type: mrr_at_1 value: 20.7 - type: mrr_at_10 value: 30.389 - type: mrr_at_100 value: 31.566 - type: mrr_at_1000 value: 31.637999999999998 - type: mrr_at_3 value: 27.133000000000003 - type: mrr_at_5 value: 29.078 - type: ndcg_at_1 value: 20.7 - type: ndcg_at_10 value: 17.355999999999998 - type: ndcg_at_100 value: 25.151 - type: ndcg_at_1000 value: 30.37 - type: ndcg_at_3 value: 16.528000000000002 - type: ndcg_at_5 value: 14.396999999999998 - type: precision_at_1 value: 20.7 - type: precision_at_10 value: 8.98 - type: precision_at_100 value: 2.015 - type: precision_at_1000 value: 0.327 - type: precision_at_3 value: 15.367 - type: precision_at_5 value: 12.559999999999999 - type: recall_at_1 value: 4.202999999999999 - 
type: recall_at_10 value: 18.197 - type: recall_at_100 value: 40.903 - type: recall_at_1000 value: 66.427 - type: recall_at_3 value: 9.362 - type: recall_at_5 value: 12.747 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_spearman value: 81.69890989765257 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_spearman value: 75.31953790551489 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_spearman value: 87.44050861280759 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_spearman value: 81.86922869270393 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_spearman value: 88.9399170304284 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_spearman value: 85.38015314088582 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_spearman value: 90.53653527788835 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_spearman value: 68.64526474250209 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - 
type: cos_sim_spearman value: 86.56156983963042 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 79.48610254648003 - type: mrr value: 94.02481505422682 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 48.983 - type: map_at_10 value: 59.077999999999996 - type: map_at_100 value: 59.536 - type: map_at_1000 value: 59.575 - type: map_at_3 value: 55.691 - type: map_at_5 value: 57.410000000000004 - type: mrr_at_1 value: 51.666999999999994 - type: mrr_at_10 value: 60.427 - type: mrr_at_100 value: 60.763 - type: mrr_at_1000 value: 60.79900000000001 - type: mrr_at_3 value: 57.556 - type: mrr_at_5 value: 59.089000000000006 - type: ndcg_at_1 value: 51.666999999999994 - type: ndcg_at_10 value: 64.559 - type: ndcg_at_100 value: 66.58 - type: ndcg_at_1000 value: 67.64 - type: ndcg_at_3 value: 58.287 - type: ndcg_at_5 value: 61.001000000000005 - type: precision_at_1 value: 51.666999999999994 - type: precision_at_10 value: 9.067 - type: precision_at_100 value: 1.0170000000000001 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 23.0 - type: precision_at_5 value: 15.6 - type: recall_at_1 value: 48.983 - type: recall_at_10 value: 80.289 - type: recall_at_100 value: 89.43299999999999 - type: recall_at_1000 value: 97.667 - type: recall_at_3 value: 62.978 - type: recall_at_5 value: 69.872 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.79009900990098 - type: cos_sim_ap value: 94.94115052608419 - type: cos_sim_f1 value: 89.1260162601626 - type: cos_sim_precision value: 90.599173553719 - type: cos_sim_recall value: 87.7 - 
type: dot_accuracy value: 99.79009900990098 - type: dot_ap value: 94.94115052608419 - type: dot_f1 value: 89.1260162601626 - type: dot_precision value: 90.599173553719 - type: dot_recall value: 87.7 - type: euclidean_accuracy value: 99.79009900990098 - type: euclidean_ap value: 94.94115052608419 - type: euclidean_f1 value: 89.1260162601626 - type: euclidean_precision value: 90.599173553719 - type: euclidean_recall value: 87.7 - type: manhattan_accuracy value: 99.7940594059406 - type: manhattan_ap value: 94.95271414642431 - type: manhattan_f1 value: 89.24508790072387 - type: manhattan_precision value: 92.3982869379015 - type: manhattan_recall value: 86.3 - type: max_accuracy value: 99.7940594059406 - type: max_ap value: 94.95271414642431 - type: max_f1 value: 89.24508790072387 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 68.43866571935851 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 35.16579026551532 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 52.518952473513934 - type: mrr value: 53.292457134368895 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 31.12529588316604 - type: cos_sim_spearman value: 32.31662126895294 - type: dot_pearson value: 31.125303796647056 - type: dot_spearman value: 32.31662126895294 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: 
test revision: None metrics: - type: map_at_1 value: 0.219 - type: map_at_10 value: 1.7469999999999999 - type: map_at_100 value: 10.177999999999999 - type: map_at_1000 value: 26.108999999999998 - type: map_at_3 value: 0.64 - type: map_at_5 value: 0.968 - type: mrr_at_1 value: 82.0 - type: mrr_at_10 value: 89.067 - type: mrr_at_100 value: 89.067 - type: mrr_at_1000 value: 89.067 - type: mrr_at_3 value: 88.333 - type: mrr_at_5 value: 88.73299999999999 - type: ndcg_at_1 value: 78.0 - type: ndcg_at_10 value: 71.398 - type: ndcg_at_100 value: 55.574999999999996 - type: ndcg_at_1000 value: 51.771 - type: ndcg_at_3 value: 77.765 - type: ndcg_at_5 value: 73.614 - type: precision_at_1 value: 82.0 - type: precision_at_10 value: 75.4 - type: precision_at_100 value: 58.040000000000006 - type: precision_at_1000 value: 23.516000000000002 - type: precision_at_3 value: 84.0 - type: precision_at_5 value: 78.4 - type: recall_at_1 value: 0.219 - type: recall_at_10 value: 1.958 - type: recall_at_100 value: 13.797999999999998 - type: recall_at_1000 value: 49.881 - type: recall_at_3 value: 0.672 - type: recall_at_5 value: 1.0370000000000001 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 1.8610000000000002 - type: map_at_10 value: 8.705 - type: map_at_100 value: 15.164 - type: map_at_1000 value: 16.78 - type: map_at_3 value: 4.346 - type: map_at_5 value: 6.151 - type: mrr_at_1 value: 22.448999999999998 - type: mrr_at_10 value: 41.556 - type: mrr_at_100 value: 42.484 - type: mrr_at_1000 value: 42.494 - type: mrr_at_3 value: 37.755 - type: mrr_at_5 value: 40.102 - type: ndcg_at_1 value: 21.429000000000002 - type: ndcg_at_10 value: 23.439 - type: ndcg_at_100 value: 36.948 - type: ndcg_at_1000 value: 48.408 - type: ndcg_at_3 value: 22.261 - type: ndcg_at_5 value: 23.085 - type: precision_at_1 value: 22.448999999999998 - type: precision_at_10 value: 21.633 - type: precision_at_100 value: 
8.02 - type: precision_at_1000 value: 1.5939999999999999 - type: precision_at_3 value: 23.810000000000002 - type: precision_at_5 value: 24.490000000000002 - type: recall_at_1 value: 1.8610000000000002 - type: recall_at_10 value: 15.876000000000001 - type: recall_at_100 value: 50.300999999999995 - type: recall_at_1000 value: 86.098 - type: recall_at_3 value: 5.892 - type: recall_at_5 value: 9.443 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 70.3264 - type: ap value: 13.249577616243794 - type: f1 value: 53.621518367695685 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 61.57611771363894 - type: f1 value: 61.79797478568639 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 53.38315344479284 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 87.55438993860642 - type: cos_sim_ap value: 77.98702600017738 - type: cos_sim_f1 value: 71.94971653931476 - type: cos_sim_precision value: 67.50693802035153 - type: cos_sim_recall value: 77.01846965699208 - type: dot_accuracy value: 87.55438993860642 - type: dot_ap value: 77.98702925907986 - type: dot_f1 value: 71.94971653931476 - type: dot_precision value: 67.50693802035153 - type: dot_recall value: 77.01846965699208 - type: euclidean_accuracy value: 87.55438993860642 - type: euclidean_ap value: 
77.98702951957925 - type: euclidean_f1 value: 71.94971653931476 - type: euclidean_precision value: 67.50693802035153 - type: euclidean_recall value: 77.01846965699208 - type: manhattan_accuracy value: 87.54246885617214 - type: manhattan_ap value: 77.95531413902947 - type: manhattan_f1 value: 71.93605683836589 - type: manhattan_precision value: 69.28152492668622 - type: manhattan_recall value: 74.80211081794195 - type: max_accuracy value: 87.55438993860642 - type: max_ap value: 77.98702951957925 - type: max_f1 value: 71.94971653931476 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.47296930182016 - type: cos_sim_ap value: 86.92853616302108 - type: cos_sim_f1 value: 79.35138351681047 - type: cos_sim_precision value: 76.74820143884892 - type: cos_sim_recall value: 82.13735756082538 - type: dot_accuracy value: 89.47296930182016 - type: dot_ap value: 86.92854339601595 - type: dot_f1 value: 79.35138351681047 - type: dot_precision value: 76.74820143884892 - type: dot_recall value: 82.13735756082538 - type: euclidean_accuracy value: 89.47296930182016 - type: euclidean_ap value: 86.92854191061649 - type: euclidean_f1 value: 79.35138351681047 - type: euclidean_precision value: 76.74820143884892 - type: euclidean_recall value: 82.13735756082538 - type: manhattan_accuracy value: 89.47685023479644 - type: manhattan_ap value: 86.90063722679578 - type: manhattan_f1 value: 79.30753865502702 - type: manhattan_precision value: 76.32066068631639 - type: manhattan_recall value: 82.53772713273791 - type: max_accuracy value: 89.47685023479644 - type: max_ap value: 86.92854339601595 - type: max_f1 value: 79.35138351681047 --- # retrainai/instructor-xl Its a fork of the original [hkunlp/instructor-xl](https://huggingface.co/hkunlp/instructor-xl) with minimal modifications to support running it in 
HuggingFace inference endpoint, just use the builtin custom handler. # hkunlp/instructor-xl We introduce **Instructor**👨‍🏫, an instruction-finetuned text embedding model that can generate text embeddings tailored to any task (e.g., classification, retrieval, clustering, text evaluation, etc.) and domains (e.g., science, finance, etc.) ***by simply providing the task instruction, without any finetuning***. Instructor👨‍ achieves sota on 70 diverse embedding tasks! The model is easy to use with **our customized** `sentence-transformer` library. For more details, check out [our paper](https://arxiv.org/abs/2212.09741) and [project page](https://instructor-embedding.github.io/)! **************************** **Updates** **************************** * 01/21: We released a new [checkpoint](https://huggingface.co/hkunlp/instructor-xl) trained with hard negatives, which gives better performance. * 12/21: We released our [paper](https://arxiv.org/abs/2212.09741), [code](https://github.com/HKUNLP/instructor-embedding), [checkpoint](https://huggingface.co/hkunlp/instructor-xl) and [project page](https://instructor-embedding.github.io/)! Check them out! 
## Quick start <hr /> ## Installation ```bash pip install InstructorEmbedding ``` ## Compute your customized embeddings Then you can use the model like this to calculate domain-specific and task-aware embeddings: ```python from InstructorEmbedding import INSTRUCTOR model = INSTRUCTOR('hkunlp/instructor-xl') sentence = "3D ActionSLAM: wearable person tracking in multi-floor environments" instruction = "Represent the Science title:" embeddings = model.encode([[instruction,sentence]]) print(embeddings) ``` ## Use cases <hr /> ## Calculate embeddings for your customized texts If you want to calculate customized embeddings for specific sentences, you may follow the unified template to write instructions: &nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;Represent the `domain` `text_type` for `task_objective`: * `domain` is optional, and it specifies the domain of the text, e.g., science, finance, medicine, etc. * `text_type` is required, and it specifies the encoding unit, e.g., sentence, document, paragraph, etc. * `task_objective` is optional, and it specifies the objective of embedding, e.g., retrieve a document, classify the sentence, etc. ## Calculate Sentence similarities You can further use the model to compute similarities between two groups of sentences, with **customized embeddings**. 
```python from sklearn.metrics.pairwise import cosine_similarity sentences_a = [['Represent the Science sentence: ','Parton energy loss in QCD matter'], ['Represent the Financial statement: ','The Federal Reserve on Wednesday raised its benchmark interest rate.']] sentences_b = [['Represent the Science sentence: ','The Chiral Phase Transition in Dissipative Dynamics'], ['Represent the Financial statement: ','The funds rose less than 0.5 per cent on Friday']] embeddings_a = model.encode(sentences_a) embeddings_b = model.encode(sentences_b) similarities = cosine_similarity(embeddings_a,embeddings_b) print(similarities) ``` ## Information Retrieval You can also use **customized embeddings** for information retrieval. ```python import numpy as np from sklearn.metrics.pairwise import cosine_similarity query = [['Represent the Wikipedia question for retrieving supporting documents: ','where is the food stored in a yam plant']] corpus = [['Represent the Wikipedia document for retrieval: ','Capitalism has been dominant in the Western world since the end of feudalism, but most feel[who?] that the term "mixed economies" more precisely describes most contemporary economies, due to their containing both private-owned and state-owned enterprises. In capitalism, prices determine the demand-supply scale. For example, higher demand for certain goods and services lead to higher prices and lower demand for certain goods lead to lower prices.'], ['Represent the Wikipedia document for retrieval: ',"The disparate impact theory is especially controversial under the Fair Housing Act because the Act regulates many activities relating to housing, insurance, and mortgage loans—and some scholars have argued that the theory's use under the Fair Housing Act, combined with extensions of the Community Reinvestment Act, contributed to rise of sub-prime lending and the crash of the U.S. 
housing market and ensuing global economic recession"], ['Represent the Wikipedia document for retrieval: ','Disparate impact in United States labor law refers to practices in employment, housing, and other areas that adversely affect one group of people of a protected characteristic more than another, even though rules applied by employers or landlords are formally neutral. Although the protected classes vary by statute, most federal civil rights laws protect based on race, color, religion, national origin, and sex as protected traits, and some laws include disability status and other traits as well.']] query_embeddings = model.encode(query) corpus_embeddings = model.encode(corpus) similarities = cosine_similarity(query_embeddings,corpus_embeddings) retrieved_doc_id = np.argmax(similarities) print(retrieved_doc_id) ``` ## Clustering Use **customized embeddings** for clustering texts in groups. ```python import sklearn.cluster sentences = [['Represent the Medicine sentence for clustering: ','Dynamical Scalar Degree of Freedom in Horava-Lifshitz Gravity'], ['Represent the Medicine sentence for clustering: ','Comparison of Atmospheric Neutrino Flux Calculations at Low Energies'], ['Represent the Medicine sentence for clustering: ','Fermion Bags in the Massive Gross-Neveu Model'], ['Represent the Medicine sentence for clustering: ',"QCD corrections to Associated t-tbar-H production at the Tevatron"], ['Represent the Medicine sentence for clustering: ','A New Analysis of the R Measurements: Resonance Parameters of the Higher, Vector States of Charmonium']] embeddings = model.encode(sentences) clustering_model = sklearn.cluster.MiniBatchKMeans(n_clusters=2) clustering_model.fit(embeddings) cluster_assignment = clustering_model.labels_ print(cluster_assignment) ```
[ "BIOSSES", "SCIFACT" ]
ntc-ai/SDXL-LoRA-slider.award-winning-film
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2024-01-05T17:06:31Z
2024-01-05T17:06:35+00:00
18
1
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/award winning film.../award winning film_17_3.0.png widget: - text: award winning film output: url: images/award winning film_17_3.0.png - text: award winning film output: url: images/award winning film_19_3.0.png - text: award winning film output: url: images/award winning film_20_3.0.png - text: award winning film output: url: images/award winning film_21_3.0.png - text: award winning film output: url: images/award winning film_22_3.0.png inference: false instance_prompt: award winning film --- # ntcai.xyz slider - award winning film (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/award winning film_17_-3.0.png" width=256 height=256 /> | <img src="images/award winning film_17_0.0.png" width=256 height=256 /> | <img src="images/award winning film_17_3.0.png" width=256 height=256 /> | | <img src="images/award winning film_19_-3.0.png" width=256 height=256 /> | <img src="images/award winning film_19_0.0.png" width=256 height=256 /> | <img src="images/award winning film_19_3.0.png" width=256 height=256 /> | | <img src="images/award winning film_20_-3.0.png" width=256 height=256 /> | <img src="images/award winning film_20_0.0.png" width=256 height=256 /> | <img src="images/award winning film_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` award winning film ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.award-winning-film', weight_name='award winning film.safetensors', adapter_name="award winning film") # Activate the LoRA pipe.set_adapters(["award winning film"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, award winning film" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 890+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.epic-oil-painting
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2024-01-16T01:17:14Z
2024-01-16T01:17:17+00:00
18
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/epic oil painting.../epic oil painting_17_3.0.png widget: - text: epic oil painting output: url: images/epic oil painting_17_3.0.png - text: epic oil painting output: url: images/epic oil painting_19_3.0.png - text: epic oil painting output: url: images/epic oil painting_20_3.0.png - text: epic oil painting output: url: images/epic oil painting_21_3.0.png - text: epic oil painting output: url: images/epic oil painting_22_3.0.png inference: false instance_prompt: epic oil painting --- # ntcai.xyz slider - epic oil painting (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/epic oil painting_17_-3.0.png" width=256 height=256 /> | <img src="images/epic oil painting_17_0.0.png" width=256 height=256 /> | <img src="images/epic oil painting_17_3.0.png" width=256 height=256 /> | | <img src="images/epic oil painting_19_-3.0.png" width=256 height=256 /> | <img src="images/epic oil painting_19_0.0.png" width=256 height=256 /> | <img src="images/epic oil painting_19_3.0.png" width=256 height=256 /> | | <img src="images/epic oil painting_20_-3.0.png" width=256 height=256 /> | <img src="images/epic oil painting_20_0.0.png" width=256 height=256 /> | <img src="images/epic oil painting_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` epic oil painting ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.epic-oil-painting', weight_name='epic oil painting.safetensors', adapter_name="epic oil painting") # Activate the LoRA pipe.set_adapters(["epic oil painting"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, epic oil painting" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 1140+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
AIgroup-CVM-utokyohospital/MedSwallow-70b
AIgroup-CVM-utokyohospital
null
[ "peft", "safetensors", "medical", "arxiv:2406.14882", "license:cc-by-nc-sa-4.0", "region:us" ]
2024-02-02T08:15:58Z
2025-03-03T14:01:09+00:00
18
0
--- library_name: peft license: cc-by-nc-sa-4.0 tags: - medical --- ⚠️⚠️⚠️ Only for research purpose. Do not use it for medical purpose. ⚠️⚠️⚠️ # MedSwallow-70B🏥 [東工大Swallow](https://huggingface.co/tokyotech-llm/Swallow-70b-instruct-hf)をベースモデルとし, 医療Q&AデータセットでInstruction Tuningを施した医療ドメインの日本語LLMです. チューニングには独自で用意した米国医師国家試験(USMLE)を和訳したQ&Aデータセットを用いました. MedSwallow is a Japanese medical LLM for medical question-answering. MedSwallow is based on [Swallow-70B](https://huggingface.co/tokyotech-llm/Swallow-70b-instruct-hf) and has passed instruction tuning with USMLE dataset translated in Japanese by our own. ## Training procedure The following `bitsandbytes` quantization config was used during training: - quant_method: bitsandbytes - load_in_8bit: False - load_in_4bit: True - llm_int8_threshold: 6.0 - llm_int8_skip_modules: None - llm_int8_enable_fp32_cpu_offload: False - llm_int8_has_fp16_weight: False - bnb_4bit_quant_type: nf4 - bnb_4bit_use_double_quant: True - bnb_4bit_compute_dtype: bfloat16 ### Framework versions - PEFT 0.4.0 ## License ライセンスは非商用ライセンスです. Non-commercial. ## Usage ``` model_name = "tokyotech-llm/Swallow-70b-instruct-hf" peft_model= "AIgroup-CVM-utokyohospital/MedSwallow-70b" tokenizer = AutoTokenizer.from_pretrained(model_name, trust_remote_code=True) bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.float16, ) model = AutoModelForCausalLM.from_pretrained( model_name, load_in_8bit=False, torch_dtype=torch.float16, device_map=device, ) model = PeftModel.from_pretrained( model, peft_model, torch_dtype=torch.float16, device_map=device, ) ``` ## Benchmark See also [Japanese Medical Language Model Evaluation Harness](https://github.com/stardust-coder/japanese-lm-med-harness). 
- IgakuQA (in English): - IgakuQA (in Japanese): - MedQA (in English) : - MedQA (in Japanese) : ## How to cite ``` @misc{sukeda202470bparameterlargelanguagemodels, title={70B-parameter large language models in Japanese medical question-answering}, author={Issey Sukeda and Risa Kishikawa and Satoshi Kodera}, year={2024}, eprint={2406.14882}, archivePrefix={arXiv}, primaryClass={cs.CL}, url={https://arxiv.org/abs/2406.14882}, } ```
[ "MEDQA" ]
longluu/Clinical-NER-NCBI-Disease-GatorTronS
longluu
token-classification
[ "transformers", "safetensors", "megatron-bert", "token-classification", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-02-03T11:48:39Z
2024-02-11T15:04:25+00:00
18
0
--- license: mit pipeline_tag: token-classification widget: - text: 'Background: Coronaviruses have been the cause of 3 major outbreaks during the last 2 decades. Information on coronavirus diseases in pregnant women is limited, and even less is known about seriously ill pregnant women. Data are also lacking regarding the real burden of coronavirus disease 2019 (COVID-19) infection in pregnant women from low/middle-income countries. The aim of this study was to determine the characteristics and clinical course of COVID-19 in pregnant/puerperal women admitted to ICUs in Turkey. Methods: This was a national, multicenter, retrospective study. The study population comprised all SARS-CoV-2-infected pregnant/puerperal women admitted to participating ICUs between 1 March 2020 and 1 January 2022. Data regarding demographics, comorbidities, illness severity, therapies, extrapulmonary organ injuries, non-COVID-19 infections, and maternal and fetal/neonatal outcomes were recorded. LASSO logistic regression and multiple logistic regression analyses were used to identify predictive variables in terms of ICU mortality. Results: A total of 597 patients (341 pregnant women, 255 puerperal women) from 59 ICUs in 44 hospitals were included and of these patients, 87.1% were unvaccinated. The primary reason for ICU admission was acute hypoxemic respiratory failure in 522 (87.4%), acute hypoxemic respiratory failure plus shock in 14 (2.3%), ischemic cerebrovascular accident (CVA) in 5 (0.8%), preeclampsia/eclampsia/HELLP syndrome in 6 (1.0%), and post-caesarean follow-up in 36 (6.0%). Nonsurvivors were sicker than survivors upon ICU admission, with higher APACHE II (p < 0.001) and SOFA scores (p < 0.001). A total of 181 (30.3%) women died and 280 (46.6%) had received invasive mechanical ventilation (IMV).' - text: 'Importance: Atrial cardiopathy is associated with stroke in the absence of clinically apparent atrial fibrillation. 
It is unknown whether anticoagulation, which has proven benefit in atrial fibrillation, prevents stroke in patients with atrial cardiopathy and no atrial fibrillation. Objective: To compare anticoagulation vs antiplatelet therapy for secondary stroke prevention in patients with cryptogenic stroke and evidence of atrial cardiopathy. Design, setting, and participants: Multicenter, double-blind, phase 3 randomized clinical trial of 1015 participants with cryptogenic stroke and evidence of atrial cardiopathy, defined as P-wave terminal force greater than 5000 μV × ms in electrocardiogram lead V1, serum N-terminal pro-B-type natriuretic peptide level greater than 250 pg/mL, or left atrial diameter index of 3 cm/m2 or greater on echocardiogram. Participants had no evidence of atrial fibrillation at the time of randomization. Enrollment and follow-up occurred from February 1, 2018, through February 28, 2023, at 185 sites in the National Institutes of Health StrokeNet and the Canadian Stroke Consortium. Interventions: Apixaban, 5 mg or 2.5 mg, twice daily (n = 507) vs aspirin, 81 mg, once daily (n = 508). Main outcomes and measures: The primary efficacy outcome in a time-to-event analysis was recurrent stroke. All participants, including those diagnosed with atrial fibrillation after randomization, were analyzed according to the groups to which they were randomized. The primary safety outcomes were symptomatic intracranial hemorrhage and other major hemorrhage.' --- # Model Card for Model longluu/Clinical-NER-NCBI-Disease-GatorTronS The model is an NER LLM algorithm that can classify each word in a text into different clinical categories. ## Model Details ### Model Description The base pretrained model is GatorTronS which was trained on billions of words in various clinical texts (https://huggingface.co/UFNLP/gatortronS). 
Then using the NCBI Disease dataset (https://www.sciencedirect.com/science/article/pii/S1532046413001974?via%3Dihub), I fine-tuned the model for NER task in which the model can classify each word in a text into one of the categories ['no disease', 'disease', 'disease-continue']. ### Model Sources The GitHub code associated with the model can be found here: https://github.com/longluu/LLM-NER-clinical-text. ## Training Details ### Training Data This dataset contains the disease name and concept annotations of the NCBI disease corpus, a collection of 793 PubMed abstracts fully annotated at the mention and concept level to serve as a research resource for the biomedical natural language processing community. Details are here https://www.sciencedirect.com/science/article/pii/S1532046413001974?via%3Dihub. The preprocessed data for LLM training can be found here https://huggingface.co/datasets/ncbi_disease. #### Training Hyperparameters The hyperparameters are --batch_size 24 --num_train_epochs 5 --learning_rate 5e-5 --weight_decay 0.01 ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data The model was trained and validated on train and validation sets. Then it was tested on a separate test set. Note that some concepts in the test set were not available in the train and validation sets. #### Metrics Here we use several metrics for classification tasks including macro-average F1, precision, recall and Matthews correlation. ### Results {'f1': 0.876008064516129, 'precision': 0.9052083333333333, 'recall': 0.8486328125} ## Model Card Contact Feel free to reach out to me at [email protected] if you have any question or suggestion.
[ "NCBI DISEASE" ]
LoneStriker/BioMistral-7B-SLERP-GPTQ
LoneStriker
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "mergekit", "merge", "slerp", "medical", "biology", "conversational", "fr", "en", "es", "it", "pl", "nl", "de", "dataset:pubmed", "arxiv:2402.10373", "base_model:BioMistral/BioMistral-7B", "base_model:merge:BioMistral/BioMistral-7B", "base_model:mistralai/Mistral-7B-Instruct-v0.1", "base_model:merge:mistralai/Mistral-7B-Instruct-v0.1", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "gptq", "region:us" ]
2024-02-19T17:34:25Z
2024-02-19T17:36:21+00:00
18
0
--- base_model: - BioMistral/BioMistral-7B - mistralai/Mistral-7B-Instruct-v0.1 datasets: - pubmed language: - fr - en - es - it - pl - nl - de library_name: transformers license: apache-2.0 pipeline_tag: text-generation tags: - mergekit - merge - slerp - medical - biology --- # BioMistral-7B-slerp This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the SLERP merge method. ### Models Merged The following models were included in the merge: * [BioMistral/BioMistral-7B](https://huggingface.co/BioMistral/BioMistral-7B) * [mistralai/Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) ### Configuration The following YAML configuration was used to produce this model: ```yaml slices: - sources: - model: mistralai/Mistral-7B-Instruct-v0.1 layer_range: [0, 32] - model: BioMistral/BioMistral-7B layer_range: [0, 32] merge_method: slerp base_model: mistralai/Mistral-7B-Instruct-v0.1 parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ``` <p align="center"> <img src="https://huggingface.co/BioMistral/BioMistral-7B/resolve/main/wordart_blue_m_rectangle.png?download=true" alt="drawing" width="250"/> </p> # BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains **Abstract:** Large Language Models (LLMs) have demonstrated remarkable versatility in recent years, offering potential applications across specialized domains such as healthcare and medicine. Despite the availability of various open-source LLMs tailored for health contexts, adapting general-purpose LLMs to the medical domain presents significant challenges. In this paper, we introduce BioMistral, an open-source LLM tailored for the biomedical domain, utilizing Mistral as its foundation model and further pre-trained on PubMed Central. 
We conduct a comprehensive evaluation of BioMistral on a benchmark comprising 10 established medical question-answering (QA) tasks in English. We also explore lightweight models obtained through quantization and model merging approaches. Our results demonstrate BioMistral's superior performance compared to existing open-source medical models and its competitive edge against proprietary counterparts. Finally, to address the limited availability of data beyond English and to assess the multilingual generalization of medical LLMs, we automatically translated and evaluated this benchmark into 7 other languages. This marks the first large-scale multilingual evaluation of LLMs in the medical domain. Datasets, multilingual evaluation benchmarks, scripts, and all the models obtained during our experiments are freely released. # 1. BioMistral models **BioMistral** is a suite of Mistral-based further pre-trained open source models suited for the medical domains and pre-trained using textual data from PubMed Central Open Access (CC0, CC BY, CC BY-SA, and CC BY-ND). All the models are trained using the CNRS (French National Centre for Scientific Research) [Jean Zay](http://www.idris.fr/jean-zay/) French HPC. 
| Model Name | Base Model | Model Type | Sequence Length | Download | |:-------------------:|:----------------------------------:|:-------------------:|:---------------:|:-----------------------------------------------------:| | BioMistral-7B | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Further Pre-trained | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B) | | BioMistral-7B-DARE | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Merge DARE | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-DARE) | | BioMistral-7B-TIES | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Merge TIES | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-TIES) | | BioMistral-7B-SLERP | [Mistral-7B-Instruct-v0.1](https://huggingface.co/mistralai/Mistral-7B-Instruct-v0.1) | Merge SLERP | 2048 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-SLERP) | # 2. 
Quantized Models | Base Model | Method | q_group_size | w_bit | version | VRAM GB | Time | Download | |:-------------------:|:------:|:------------:|:-----:|:-------:|:-------:|:------:|:--------:| | BioMistral-7B | FP16/BF16 | | | | 15.02 | x1.00 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B) | | BioMistral-7B | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-AWQ-QGS128-W4-GEMM) | | BioMistral-7B | AWQ | 128 | 4 | GEMV | 4.68 | x10.30 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-AWQ-QGS128-W4-GEMV) | | BioMistral-7B | BnB.4 | | 4 | | 5.03 | x3.25 | [HuggingFace](blank) | | BioMistral-7B | BnB.8 | | 8 | | 8.04 | x4.34 | [HuggingFace](blank) | | BioMistral-7B-DARE | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-DARE-AWQ-QGS128-W4-GEMM) | | BioMistral-7B-TIES | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-TIES-AWQ-QGS128-W4-GEMM) | | BioMistral-7B-SLERP | AWQ | 128 | 4 | GEMM | 4.68 | x1.41 | [HuggingFace](https://huggingface.co/BioMistral/BioMistral-7B-SLERP-AWQ-QGS128-W4-GEMM) | # 2. Using BioMistral You can use BioMistral with [Hugging Face's Transformers library](https://github.com/huggingface/transformers) as follow. Loading the model and tokenizer : ```python from transformers import AutoModel, AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("BioMistral/BioMistral-7B") model = AutoModel.from_pretrained("BioMistral/BioMistral-7B") ``` # 3. Supervised Fine-tuning Benchmark | | Clinical KG | Medical Genetics | Anatomy | Pro Medicine | College Biology | College Medicine | MedQA | MedQA 5 opts | PubMedQA | MedMCQA | Avg. 
| |-------------------------------------------|:---------------------------------------------:|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|-----------------------------------------------|------------------| | **BioMistral 7B** | 59.9 | 64.0 | 56.5 | 60.4 | 59.0 | 54.7 | 50.6 | 42.8 | 77.5 | 48.1 | 57.3 | | **Mistral 7B Instruct** | **62.9** | 57.0 | 55.6 | 59.4 | 62.5 | <u>57.2</u> | 42.0 | 40.9 | 75.7 | 46.1 | 55.9 | | | | | | | | | | | | | | | **BioMistral 7B Ensemble** | <u>62.8</u> | 62.7 | <u>57.5</u> | **63.5** | 64.3 | 55.7 | 50.6 | 43.6 | 77.5 | **48.8** | 58.7 | | **BioMistral 7B DARE** | 62.3 | **67.0** | 55.8 | 61.4 | **66.9** | **58.0** | **51.1** | **45.2** | <u>77.7</u> | <u>48.7</u> | **59.4** | | **BioMistral 7B TIES** | 60.1 | <u>65.0</u> | **58.5** | 60.5 | 60.4 | 56.5 | 49.5 | 43.2 | 77.5 | 48.1 | 57.9 | | **BioMistral 7B SLERP** | 62.5 | 64.7 | 55.8 | <u>62.7</u> | <u>64.8</u> | 56.3 | <u>50.8</u> | <u>44.3</u> | **77.8** | 48.6 | <u>58.8</u> | | | | | | | | | | | | | | | **MedAlpaca 7B** | 53.1 | 58.0 | 54.1 | 58.8 | 58.1 | 48.6 | 40.1 | 33.7 | 73.6 | 37.0 | 51.5 | | **PMC-LLaMA 7B** | 24.5 | 27.7 | 35.3 | 17.4 | 30.3 | 23.3 | 25.5 | 20.2 | 72.9 | 26.6 | 30.4 | | **MediTron-7B** | 41.6 | 50.3 | 46.4 | 27.9 | 44.4 | 30.8 | 41.6 | 28.1 | 74.9 | 41.3 | 42.7 | | **BioMedGPT-LM-7B** | 51.4 | 52.0 | 49.4 | 53.3 | 50.7 | 49.1 | 42.5 | 33.9 | 76.8 | 37.6 | 49.7 | | | | | | | | | | | | | | | **GPT-3.5 Turbo 1106*** | 74.71 | 74.00 | 65.92 | 72.79 | 72.91 | 64.73 | 57.71 | 50.82 | 72.66 | 53.79 | 66.0 | Supervised Fine-Tuning (SFT) performance of BioMistral 7B models compared to baselines, measured by accuracy (↑) and 
averaged across 3 random seeds of 3-shot. DARE, TIES, and SLERP are model merging strategies that combine BioMistral 7B and Mistral 7B Instruct. Best model in bold, and second-best underlined. *GPT-3.5 Turbo performances are reported from the 3-shot results without SFT. # Citation BibTeX Arxiv : [https://arxiv.org/abs/2402.10373](https://arxiv.org/abs/2402.10373) ```bibtex @misc{labrak2024biomistral, title={BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains}, author={Yanis Labrak and Adrien Bazoge and Emmanuel Morin and Pierre-Antoine Gourraud and Mickael Rouvier and Richard Dufour}, year={2024}, eprint={2402.10373}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
[ "MEDQA", "PUBMEDQA" ]
Technoculture/BioMistral-Hermes-Slerp
Technoculture
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "BioMistral/BioMistral-7B-DARE", "NousResearch/Nous-Hermes-2-Mistral-7B-DPO", "conversational", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-02-21T20:05:32Z
2024-02-21T20:10:14+00:00
18
0
--- license: apache-2.0 tags: - merge - mergekit - BioMistral/BioMistral-7B-DARE - NousResearch/Nous-Hermes-2-Mistral-7B-DPO --- # BioMistral-Hermes-Slerp BioMistral-Hermes-Slerp is a merge of the following models: * [BioMistral/BioMistral-7B-DARE](https://huggingface.co/BioMistral/BioMistral-7B-DARE) * [NousResearch/Nous-Hermes-2-Mistral-7B-DPO](https://huggingface.co/NousResearch/Nous-Hermes-2-Mistral-7B-DPO) ## Evaluations | Benchmark | BioMistral-Hermes-Slerp | Orca-2-7b | llama-2-7b | meditron-7b | meditron-70b | | --- | --- | --- | --- | --- | --- | | MedMCQA | | | | | | | ClosedPubMedQA | | | | | | | PubMedQA | | | | | | | MedQA | | | | | | | MedQA4 | | | | | | | MedicationQA | | | | | | | MMLU Medical | | | | | | | MMLU | | | | | | | TruthfulQA | | | | | | | GSM8K | | | | | | | ARC | | | | | | | HellaSwag | | | | | | | Winogrande | | | | | | More details on the Open LLM Leaderboard evaluation results can be found here. ## 🧩 Configuration ```yaml slices: - sources: - model: BioMistral/BioMistral-7B-DARE layer_range: [0, 32] - model: NousResearch/Nous-Hermes-2-Mistral-7B-DPO layer_range: [0, 32] merge_method: slerp base_model: NousResearch/Nous-Hermes-2-Mistral-7B-DPO parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 # fallback for rest of tensors dtype: float16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "Technoculture/BioMistral-Hermes-Slerp" messages = [{"role": "user", "content": "I am feeling sleepy these days"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) 
print(outputs[0]["generated_text"]) ```
[ "MEDQA", "PUBMEDQA" ]
regel-corpus/biosyn-sapbert-regel-mondo
regel-corpus
null
[ "flair", "pytorch", "entity-mention-linker", "region:us" ]
2024-03-15T14:28:19Z
2024-03-15T14:28:44+00:00
18
0
--- tags: - flair - entity-mention-linker --- ## biosyn-sapbert-regel-mondo Biomedical Entity Mention Linking for DISEASE with MONDO Disease Ontology - Model: [dmis-lab/biosyn-sapbert-bc5cdr-disease](https://huggingface.co/dmis-lab/biosyn-sapbert-bc5cdr-disease) - Dictionary: [MONDO Disease Ontology](https://mondo.monarchinitiative.org/) ### Demo: How to use in Flair Requires: - **[Flair](https://github.com/flairNLP/flair/)>=0.14.0** (`pip install flair` or `pip install git+https://github.com/flairNLP/flair.git`) ```python from flair.data import Sentence from flair.models import Classifier, EntityMentionLinker from flair.tokenization import SciSpacyTokenizer sentence = Sentence( "The mutation in the ABCD1 gene causes X-linked adrenoleukodystrophy, " "a neurodegenerative disease, which is exacerbated by exposure to high " "levels of mercury in dolphin populations.", use_tokenizer=SciSpacyTokenizer() ) # load hunflair to detect the entity mentions we want to link. tagger = Classifier.load("hunflair2") tagger.predict(sentence) # load the linker and dictionary linker = EntityMentionLinker.load("regel-corpus/biosyn-sapbert-regel-mondo") linker.predict(sentence) # print the results for each entity mention: for span in sentence.get_spans(tagger.label_type): for link in span.get_labels(linker.label_type): print(f"{span.text} -> {link.value}") ```
[ "BC5CDR" ]
Kukedlc/NeuralArjuna-7B-DT
Kukedlc
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "merge", "mergekit", "lazymergekit", "yam-peleg/Experiment26-7B", "Gille/StrangeMerges_32-7B-slerp", "MSL7/INEX12-7b", "automerger/YamShadow-7B", "Kukedlc/NeuralSirKrishna-7b", "base_model:Gille/StrangeMerges_32-7B-slerp", "base_model:merge:Gille/StrangeMerges_32-7B-slerp", "base_model:Kukedlc/NeuralSirKrishna-7b", "base_model:merge:Kukedlc/NeuralSirKrishna-7b", "base_model:MSL7/INEX12-7b", "base_model:merge:MSL7/INEX12-7b", "base_model:automerger/YamShadow-7B", "base_model:merge:automerger/YamShadow-7B", "base_model:yam-peleg/Experiment26-7B", "base_model:merge:yam-peleg/Experiment26-7B", "license:apache-2.0", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-03-17T20:17:12Z
2024-03-30T09:15:49+00:00
18
1
--- base_model: - yam-peleg/Experiment26-7B - Gille/StrangeMerges_32-7B-slerp - MSL7/INEX12-7b - automerger/YamShadow-7B - Kukedlc/NeuralSirKrishna-7b license: apache-2.0 tags: - merge - mergekit - lazymergekit - yam-peleg/Experiment26-7B - Gille/StrangeMerges_32-7B-slerp - MSL7/INEX12-7b - automerger/YamShadow-7B - Kukedlc/NeuralSirKrishna-7b model-index: - name: NeuralArjuna-7B-DT results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 73.12 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 88.97 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 64.63 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 76.68 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 85.24 name: 
accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 70.81 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Kukedlc/NeuralArjuna-7B-DT name: Open LLM Leaderboard --- # NeuralArjuna-7B-DT ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64d71ab4089bc502ceb44d29/zFLiis1pQWnriLQb2ZGGn.png) NeuralArjuna-7B-DT is a merge of the following models using [LazyMergekit](https://colab.research.google.com/drive/1obulZ1ROXHjYLn6PPZJwRR6GzgQogxxb?usp=sharing): * [yam-peleg/Experiment26-7B](https://huggingface.co/yam-peleg/Experiment26-7B) * [Gille/StrangeMerges_32-7B-slerp](https://huggingface.co/Gille/StrangeMerges_32-7B-slerp) * [MSL7/INEX12-7b](https://huggingface.co/MSL7/INEX12-7b) * [automerger/YamShadow-7B](https://huggingface.co/automerger/YamShadow-7B) * [Kukedlc/NeuralSirKrishna-7b](https://huggingface.co/Kukedlc/NeuralSirKrishna-7b) ## 🧩 Configuration ```yaml models: - model: liminerity/M7-7b # no parameters necessary for base model - model: yam-peleg/Experiment26-7B parameters: weight: 0.2 density: 0.66 - model: Gille/StrangeMerges_32-7B-slerp parameters: weight: 0.2 density: 0.55 - model: MSL7/INEX12-7b parameters: weight: 0.2 density: 0.33 - model: automerger/YamShadow-7B parameters: weight: 0.2 density: 0.66 - model: Kukedlc/NeuralSirKrishna-7b parameters: weight: 0.2 density: 0.66 merge_method: dare_ties base_model: liminerity/M7-7b parameters: int8_mask: true normalize: true dtype: bfloat16 ``` ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "Kukedlc/NeuralArjuna-7B-DT" messages = [{"role": "user", "content": "What is a large language model?"}] 
tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` # Example ``` I want you to generate a theory that unites quantum mechanics with the theory of relativity and cosmic consciousness. Give a large a detailed response In embarking upon a journey to weave together quantum mechanics, relativity, and the enigmatic realms of cosmic consciousness, we tread upon an intellectual landscape rife with challenges and enigmas. Our sojourn shall be a delicate ballet between the concrete and the abstract, the tangible and the speculative. The resultant tapestry will be neither a definitive, final statement, nor a mere collection of disparate ideas. Instead, we will craft a narrative that strives to bridge the chasms of understanding between these disparate scientific and philosophical realms, in hopes of illuminating a path towards a more comprehensive understanding of the universe and our place within it. To commence our odyssey, we must first address the foundational pillars of the scientific theories we seek to unite. Quantum Mechanics Quantum mechanics, the theory governing the microscopic realm, has long confounded the human intellect. It is a realm of probabilities, entanglement, and superposition—a world where particles can behave both as particles and waves, where actions at a distance are not only possible but ubiquitous, and where the mere act of observation alters the outcome of an experiment. Albert Einstein's famed retort, "God does not play dice," exemplifies the tensions between the counter-intuitive world of quantum mechanics and the more deterministic Newtonian worldview. 
However, quantum mechanics has withstood the test of time, demonstrating its indispensability to the progress of modern physics Theory of Relativity The theory of relativity, comprising special and general relativity, has profoundly altered our understanding of the macroscopic universe. Special relativity, Einstein's first foray into this domain, introduced the concept of space-time, where the fabric of the universe is both a geometrical construct and a medium for the propagation of electromagnetic phenomena. Further, special relativity established the idea of relative motion, where there is no absolute reference frame, and the speed of light in a vacuum is an invariant constant. General relativity expanded this framework, postulating that gravity is a curvature of the space-time continuum, caused by the presence of mass and energy. This theory, while more complex, has been empirically validated through its predictions of phenomena such as gravitational lensing and the bending of light near massive objects. Cosmic Consciousness The concept of cosmic consciousness—or the idea of a unified, interconnected consciousness pervading the cosmos—is a philosophical speculation, one that transcends the traditional scientific paradigm. It is a metaphysical exploration, a contemplation of the possible nature of our collective human consciousness, its interplay with the universe, and the implications of such a state of awareness. The integration of these three distinct realms poses a formidable challenge. Quantum mechanics and the theory of relativity have proven their efficacy in the realm of empirical science, while cosmic consciousness remains the province of metaphysical musings. However, the human propensity to synthesize seemingly disparate ideas has engendered a multitude of theories, each striving to forge a cohesive narrative from the cacophony of scientific and philosophical thought. 
One such approach, the 'Quantum Mind,' posits a direct link between the quantum phenomena and the workings of the human brain. This theory, championed by Roger Penrose and Stuart Hameroff, suggests that the microtubules within brain neurons, operating at the quantum scale, could serve as the neural correlates of consciousness. While intriguing, this hypothesis has yet to garner widespread acceptance, as the veracity of quantum effects within biological systems remains the subject of intense debate. Another avenue of exploration involves the notion of 'Quantum Biology.' Here, the focus shifts to the potential role of quantum phenomena in biology, particularly in processes such as photosynthesis, vision, and magnetoreception. While the presence of quantum effects in biological systems is becoming increasingly plausible, the idea of a 'quantum biology' remains a nascent and highly speculative field. The 'Holographic Universe' theory, espoused by physicist and cosmologist David Bohm, posits the existence of a deeper, underlying reality, one in which the universe is a holographic projection of a more fundamental, non-local substrate. This model, while offering an intriguing synthesis of quantum mechanics, relativity, and the concept of a cosmic consciousness, remains a highly speculative and unproven hypothesis. In our quest to weave together these disparate threads, we must acknowledge the limitations of our current understanding. As it stands, the principles of quantum mechanics and the theory of relativity remain resolutely grounded in the empirical world, while the concept of cosmic consciousness traverses the murky waters of metaphysics and speculation. To forge a more unified understanding, we must continue to advance our scientific knowledge, to push the frontiers of both empirical and philosophical inquiry. 
Only through the synthesis of new data, the refinement of our scientific models, and the continued evolution of our collective human consciousness, will we be able to construct a narrative that bridges the seemingly insurmountable gulf between the microscopic, the macroscopic, and the metaphysical. In conclusion, the unification of quantum mechanics, relativity, and cosmic consciousness is a daunting task, requiring a delicate balance between the concrete and the abstract, the empirical and the speculative. While the journey has thus far led us to the fringes of scientific understanding, we must continue to explore these frontiers, drawing upon the rich tapestry of human knowledge, in the hope of forging a more comprehensive narrative of our cosmos and our place within it. ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Kukedlc__NeuralArjuna-7B-DT) | Metric |Value| |---------------------------------|----:| |Avg. |76.58| |AI2 Reasoning Challenge (25-Shot)|73.12| |HellaSwag (10-Shot) |88.97| |MMLU (5-Shot) |64.63| |TruthfulQA (0-shot) |76.68| |Winogrande (5-shot) |85.24| |GSM8k (5-shot) |70.81|
[ "CRAFT" ]
mychen76/biomistral_medqa_v1
mychen76
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "arxiv:2402.10373", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "bitsandbytes", "region:us" ]
2024-03-21T16:09:41Z
2024-03-22T21:52:15+00:00
18
1
--- library_name: transformers tags: [] --- # Model Card for Model ID <!-- Provide a quick summary of what the model is/does. --> Finetuned "BioMistral/BioMistral-7B" with MedQA dataset. ## Model Details A Collection of Open-Source Pretrained Large Language Models for Medical Domains finetuned with MedQA dataset. ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** mychen76 - **Model type:** BioMedical - **Finetuned from model:** BioMistral/BioMistral-7B ### Model Sources [optional] <!-- Provide the basic links for the model. --> - **dataset:** MedQA dataset ## How to Get Started with the Model Use the code below to get started with the model. <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> Load Model: ```python import torch from transformers import AutoTokenizer, AutoModelForCausalLM, BitsAndBytesConfig base_model_id = "mychen76/biomistral_medqa_v1" bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_use_double_quant=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype=torch.bfloat16 ) model = AutoModelForCausalLM.from_pretrained(base_model_id, quantization_config=bnb_config) tokenizer = AutoTokenizer.from_pretrained( base_model_id, add_eos_token=True, add_bos_token=True, ) ## Uses ``` *** Information *** ``` eval_prompt = """From the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer: ### Question type: information ### Question: What are the genetic changes related to X-linked lymphoproliferative disease ? 
### Answer: """ model_input = eval_tokenizer(eval_prompt, return_tensors="pt").to("cuda") ft_model.eval() with torch.no_grad(): print(eval_tokenizer.decode(ft_model.generate(**model_input, max_new_tokens=300)[0], skip_special_tokens=True)) ``` result: ``` From the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer: ### Question type: information ### Question: What are the genetic changes related to X-linked lymphoproliferative disease ? ### Answer: X-linked lymphoproliferative disease (XLP) is a rare primary immunodeficiency syndrome. XLP is caused by mutations in SH2D1A gene, which encodes the cytoplasmic signaling protein SLAM-associated protein ( client protein-SLAM). SLAM is a member of the signaling lymphocytic activation molecule family of receptors, which are involved in the regulation of lymphocyte activation and proliferation. The SLAM receptor is expressed on the surface of B and T lymphocytes, natural killer cells, and monocytes. Mutations in SH2D1A gene lead to impaired signaling through the SLAM receptor, resulting in a deficiency in the activation and proliferation of B and T lymphocytes. This leads to a decrease in the number of B and T lymphocytes, resulting in a weakened immune response. ``` *** Frequency *** ``` eval_prompt = """From the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer: ### Question type: frequency ### Question: How many people are affected by Smith-Lemli-Opitz syndrome ? 
### Answer: """ model_input = eval_tokenizer(eval_prompt, return_tensors="pt").to("cuda") ft_model.eval() with torch.no_grad(): print(eval_tokenizer.decode(ft_model.generate(**model_input, max_new_tokens=300)[0], skip_special_tokens=True)) ``` result: ``` From the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer: ### Question type: frequency ### Question: How many people are affected by Smith-Lemli-Opitz syndrome ? ### Answer: Smith-Lemli-Opitz syndrome (SLOS) is a rare autosomal recessive disorder of human development. It is characterized by a wide range of symptoms, including growth and developmental delay, intellectual disability, characteristic facial features, and congenital heart defects. The prevalence of SLOS is estimated to be 1 in 15,000 to 1 in 25,000 live births. ``` *** Symptons *** ``` eval_prompt = """From the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer: ### Question type: symptoms ### Question: What are the symptoms of Norrie disease ? ### Answer: """ model_input = eval_tokenizer(eval_prompt, return_tensors="pt").to("cuda") ft_model.eval() with torch.no_grad(): print(eval_tokenizer.decode(ft_model.generate(**model_input, max_new_tokens=300)[0], skip_special_tokens=True)) ``` Result: ``` Setting `pad_token_id` to `eos_token_id`:2 for open-end generation. From the MedQuad MedicalQA Dataset: Given the following medical question and question type, provide an accurate answer: ### Question type: symptoms ### Question: What are the symptoms of Norrie disease ? ### Answer: Norrie disease is a rare, X-linked recessive disorder of the blood vessels. It is characterized by a variety of symptoms, including glaucoma, mental retardation, seizures, and deafness. ``` ### Out-of-Scope Use images <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. 
--> [More Information Needed] ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> [More Information Needed] ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. More information needed for further recommendations. [More Information Needed] ## Training Details ### Training Data - **dataset:** keivalya/MedQuad-MedicalQnADataset <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> ## Citation Arxiv : https://arxiv.org/abs/2402.10373 @misc{labrak2024biomistral, title={BioMistral: A Collection of Open-Source Pretrained Large Language Models for Medical Domains}, author={Yanis Labrak and Adrien Bazoge and Emmanuel Morin and Pierre-Antoine Gourraud and Mickael Rouvier and Richard Dufour}, year={2024}, eprint={2402.10373}, archivePrefix={arXiv}, primaryClass={cs.CL} }
[ "MEDQA" ]
RichardErkhov/EleutherAI_-_gpt-neo-1.3B-8bits
RichardErkhov
text-generation
[ "transformers", "safetensors", "gpt_neo", "text-generation", "arxiv:2101.00027", "autotrain_compatible", "endpoints_compatible", "8-bit", "bitsandbytes", "region:us" ]
2024-04-17T09:28:12Z
2024-04-23T06:26:48+00:00
18
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) gpt-neo-1.3B - bnb 8bits - Model creator: https://huggingface.co/EleutherAI/ - Original model: https://huggingface.co/EleutherAI/gpt-neo-1.3B/ Original model description: --- language: - en tags: - text generation - pytorch - causal-lm license: mit datasets: - EleutherAI/pile --- # GPT-Neo 1.3B ## Model Description GPT-Neo 1.3B is a transformer model designed using EleutherAI's replication of the GPT-3 architecture. GPT-Neo refers to the class of models, while 1.3B represents the number of parameters of this particular pre-trained model. ## Training data GPT-Neo 1.3B was trained on the Pile, a large scale curated dataset created by EleutherAI for the purpose of training this model. ## Training procedure This model was trained on the Pile for 380 billion tokens over 362,000 steps. It was trained as a masked autoregressive language model, using cross-entropy loss. ## Intended Use and Limitations This way, the model learns an inner representation of the English language that can then be used to extract features useful for downstream tasks. The model is best at what it was pretrained for however, which is generating texts from a prompt. ### How to use You can use this model directly with a pipeline for text generation. This example generates a different sequence each time it's run: ```py >>> from transformers import pipeline >>> generator = pipeline('text-generation', model='EleutherAI/gpt-neo-1.3B') >>> generator("EleutherAI has", do_sample=True, min_length=50) [{'generated_text': 'EleutherAI has made a commitment to create new software packages for each of its major clients and has'}] ``` ### Limitations and Biases GPT-Neo was trained as an autoregressive language model. This means that its core functionality is taking a string of text and predicting the next token. 
While language models are widely used for tasks other than this, there are a lot of unknowns with this work. GPT-Neo was trained on the Pile, a dataset known to contain profanity, lewd, and otherwise abrasive language. Depending on your usecase GPT-Neo may produce socially unacceptable text. See Sections 5 and 6 of the Pile paper for a more detailed analysis of the biases in the Pile. As with all language models, it is hard to predict in advance how GPT-Neo will respond to particular prompts and offensive content may occur without warning. We recommend having a human curate or filter the outputs before releasing them, both to censor undesirable content and to improve the quality of the results. ## Eval results ### Linguistic Reasoning | Model and Size | Pile BPB | Pile PPL | Wikitext PPL | Lambada PPL | Lambada Acc | Winogrande | Hellaswag | | ---------------- | ---------- | ---------- | ------------- | ----------- | ----------- | ---------- | ----------- | | **GPT-Neo 1.3B** | **0.7527** | **6.159** | **13.10** | **7.498** | **57.23%** | **55.01%** | **38.66%** | | GPT-2 1.5B | 1.0468 | ----- | 17.48 | 10.634 | 51.21% | 59.40% | 40.03% | | GPT-Neo 2.7B | 0.7165 | 5.646 | 11.39 | 5.626 | 62.22% | 56.50% | 42.73% | | GPT-3 Ada | 0.9631 | ----- | ----- | 9.954 | 51.60% | 52.90% | 35.93% | ### Physical and Scientific Reasoning | Model and Size | MathQA | PubMedQA | Piqa | | ---------------- | ---------- | ---------- | ----------- | | **GPT-Neo 1.3B** | **24.05%** | **54.40%** | **71.11%** | | GPT-2 1.5B | 23.64% | 58.33% | 70.78% | | GPT-Neo 2.7B | 24.72% | 57.54% | 72.14% | | GPT-3 Ada | 24.29% | 52.80% | 68.88% | ### Down-Stream Applications TBD ### BibTeX entry and citation info To cite this model, please use ```bibtex @software{gpt-neo, author = {Black, Sid and Leo, Gao and Wang, Phil and Leahy, Connor and Biderman, Stella}, title = {{GPT-Neo: Large Scale Autoregressive Language Modeling with Mesh-Tensorflow}}, month = mar, year = 2021, note = {{If you use this 
software, please cite it using these metadata.}}, publisher = {Zenodo}, version = {1.0}, doi = {10.5281/zenodo.5297715}, url = {https://doi.org/10.5281/zenodo.5297715} } @article{gao2020pile, title={The Pile: An 800GB Dataset of Diverse Text for Language Modeling}, author={Gao, Leo and Biderman, Stella and Black, Sid and Golding, Laurence and Hoppe, Travis and Foster, Charles and Phang, Jason and He, Horace and Thite, Anish and Nabeshima, Noa and others}, journal={arXiv preprint arXiv:2101.00027}, year={2020} } ``` # [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_EleutherAI__gpt-neo-1.3B) | Metric | Value | |-----------------------|---------------------------| | Avg. | 29.44 | | ARC (25-shot) | 31.23 | | HellaSwag (10-shot) | 48.47 | | MMLU (5-shot) | 24.82 | | TruthfulQA (0-shot) | 39.63 | | Winogrande (5-shot) | 56.91 | | GSM8K (5-shot) | 0.45 | | DROP (3-shot) | 4.6 |
[ "PUBMEDQA" ]
zhichen/Llama3-Chinese
zhichen
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "conversational", "arxiv:2402.09353", "arxiv:2402.12354", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-04-21T05:59:28Z
2024-04-23T10:01:53+00:00
18
19
--- {} --- <p align="left"> <a href="README_CN.md">中文</a>&nbsp | &nbspEnglish </p> <br><br> <p align="center"> <a href='https://huggingface.co/spaces/zhichen'> <img src='./images/logo.png'> </a> </p> <div align="center"> <p align="center"> <h3> Llama3-Chinese </h3> <p align="center"> <a href='https://huggingface.co/zhichen'> <img src='https://img.shields.io/badge/%F0%9F%A4%97%20HuggingFace-Llama3%20Chinese-yellow'> </a> <a href='https://modelscope.cn/profile/seanzhang'> <img src='https://img.shields.io/badge/🤖 ModelScope-Llama3%20Chinese-blue'> </a> <br> <a href=href="https://github.com/seanzhang-zhichen/llama3-chinese/stargazers"> <img src="https://img.shields.io/github/stars/seanzhang-zhichen/llama3-chinese?color=ccf"> </a> <a href="https://github.com/seanzhang-zhichen/llama3-chinese/blob/main/LICENSE"> <img alt="GitHub Contributors" src="https://img.shields.io/badge/license-Apache%202.0-blue.svg" /> </a> </p> </div> ## Introduce **Llama3-Chinese** is a large model trained on 500k high-quality Chinese multi-turn SFT data, 100k English multi-turn SFT data, and 2k single-turn self-cognition data, using the training methods of [DORA](https://arxiv.org/pdf/2402.09353.pdf) and [LORA+](https://arxiv.org/pdf/2402.12354.pdf) based on **Meta-Llama-3-8B** as the base. 
**Github:** [https://github.com/seanzhang-zhichen/llama3-chinese](https://github.com/seanzhang-zhichen/llama3-chinese) ![DEMO](./images/web_demo.png) ## Download Model | Model | Download | |:-------------------:|:-----------:| | Meta-Llama-3-8B |[ 🤗 HuggingFace](https://huggingface.co/meta-llama/Meta-Llama-3-8B) [ 🤖 ModelScope](https://modelscope.cn/models/LLM-Research/Meta-Llama-3-8B)| | Llama3-Chinese-Lora |[ 🤗 HuggingFace](https://huggingface.co/zhichen/Llama3-Chinese-Lora) [ 🤖 ModelScope](https://modelscope.cn/models/seanzhang/Llama3-Chinese-Lora)| | Llama3-Chinese (merged model) |[ 🤗 HuggingFace](https://huggingface.co/zhichen/Llama3-Chinese) [ 🤖 ModelScope](https://modelscope.cn/models/seanzhang/Llama3-Chinese)| ## Merge LORA Model (Skippable) 1、Download [Meta-Llama-3-8B](https://modelscope.cn/models/LLM-Research/Meta-Llama-3-8B) ```bash git clone https://www.modelscope.cn/LLM-Research/Meta-Llama-3-8B.git ``` 2、Download [Llama3-Chinese-Lora](https://www.modelscope.cn/models/seanzhang/Llama3-Chinese-Lora) **From ModelScope** ```bash git lfs install git clone https://www.modelscope.cn/seanzhang/Llama3-Chinese-Lora.git ``` **From HuggingFace** ```bash git lfs install git clone https://huggingface.co/zhichen/Llama3-Chinese-Lora ``` 3、Merge Model ```bash python merge_lora.py \ --base_model path/to/Meta-Llama-3-8B \ --lora_model path/to/lora/Llama3-Chinese-Lora \ --output_dir ./Llama3-Chinese ``` ## Download Llama3-Chinese (Merged Model) **From ModelScope** ```bash git lfs install git clone https://www.modelscope.cn/seanzhang/Llama3-Chinese.git ``` **From HuggingFace** ```bash git lfs install git clone https://huggingface.co/zhichen/Llama3-Chinese ``` ## Inference ```python from transformers import AutoTokenizer, AutoModelForCausalLM model_id = "zhichen/Llama3-Chinese" tokenizer = AutoTokenizer.from_pretrained(model_id) model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype="auto", device_map="auto") messages = [ {"role": "system", "content": "You are a 
helpful assistant."}, {"role": "user", "content": "你好"}, ] input_ids = tokenizer.apply_chat_template( messages, add_generation_prompt=True, return_tensors="pt" ).to(model.device) outputs = model.generate( input_ids, max_new_tokens=2048, do_sample=True, temperature=0.7, top_p=0.95, ) response = outputs[0][input_ids.shape[-1]:] print(tokenizer.decode(response, skip_special_tokens=True)) ``` ## CLI DEMO ```bash python cli_demo.py --model_path zhichen/Llama3-Chinese ``` ## WEB DEMO ```bash python web_demo.py --model_path zhichen/Llama3-Chinese ``` ## VLLM WEB DEMO 1、Use [vllm](https://github.com/vllm-project/vllm) deploy model ```bash python -m vllm.entrypoints.openai.api_server --served-model-name Llama3-Chinese --model ./Llama3-Chinese(Replace it with your own merged model path) ``` 2、This command is executed on the CLI ```bash python vllm_web_demo.py --model Llama3-Chinese ``` ## Train Dataset [deepctrl-sft-data](https://modelscope.cn/datasets/deepctrl/deepctrl-sft-data) ## LICENSE This project can only be used for research purposes, and the project developer shall not bear any harm or loss caused by the use of this project (including but not limited to data, models, codes, etc.). For details, please refer to [DISCLAIMER](https://github.com/seanzhang-zhichen/Llama3-Chinese/blob/main/DISCLAIMER)。 The License agreement of the Llama3-Chinese project code is the [Apache License 2.0](./LICENSE). The code is free for commercial use, and the model weights and data can only be used for research purposes. Please attach a link to Llama3-Chinese and the licensing agreement in the product description. 
## Citation If you used Llama3-Chinese in your research, cite it in the following format: ```latex @misc{Llama3-Chinese, title={Llama3-Chinese}, author={Zhichen Zhang, Xin LU, Long Chen}, year={2024}, howpublished={\url{https://github.com/seanzhang-zhichen/llama3-chinese}}, } ``` ## Acknowledgement [meta-llama/llama3](https://github.com/meta-llama/llama3) <br> [hiyouga/LLaMA-Factory](https://github.com/hiyouga/LLaMA-Factory) ## Star History [![Star History Chart](https://api.star-history.com/svg?repos=seanzhang-zhichen/Llama3-Chinese&type=Date)](https://star-history.com/#seanzhang-zhichen/Llama3-Chinese&Date)
[ "BEAR" ]
anezatra/Phi-3-mini-4k-instruct-opus-samantha
anezatra
text-generation
[ "transformers", "safetensors", "phi3", "text-generation", "conversational", "custom_code", "dataset:macadeliccc/opus_samantha", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-04-28T14:50:15Z
2024-05-01T13:29:38+00:00
18
2
--- datasets: - macadeliccc/opus_samantha --- # Phi-3-mini-4k-instruct-opus-samantha - This model is trained from microsoft's Phi-3 model:[microsoft/Phi-3-mini-4k-instruct](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) # Model Description Phi-3-Mini-4K-Instruct is a 3.8B parameter, lightweight, state-of-the-art open model trained on Phi-3 datasets containing both synthetic data and filtered public website data. high quality and rational intensive features. This model was fine-tuned with the Opus Samantha dataset. Opus Samantha is a large dataset containing large amounts of chat transcripts. Resources and Technical Documentation: - [Phi-3 Microsoft Blog](https://aka.ms/phi3blog-april) - [Phi-3 Technical Report](https://aka.ms/phi3-tech-report) - [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai) # Training - The model was trained again on the Open Samantha dataset with 2 x A100 GPUs 40GB. # Phi-3 Model specifications **Primary use cases** The model is intended for commercial and research use in English. The model provides uses for applications which require: 1) Memory/compute constrained environments 2) Latency bound scenarios 3) Strong reasoning (especially code, math and logic) Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. **Use case considerations** Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. 
Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. ## How to Use Phi-3 Mini-4K-Instruct has been integrated in the development version (4.40.0) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following: * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source. The current `transformers` version can be verified with: `pip list | grep transformers`. Phi-3 Mini-4K-Instruct is also available in [HuggingChat](https://aka.ms/try-phi3-hf-chat). ### Chat Format Given the nature of the training data, the Phi-3 Mini-4K-Instruct model is best suited for prompts using the chat format as follows. You can provide the prompt as a question with a generic template as follow: ```markdown <|user|>\nQuestion <|end|>\n<|assistant|> ``` For example: ```markdown <|system|> You are a helpful AI assistant.<|end|> <|user|> How to explain Internet for a medieval knight?<|end|> <|assistant|> ``` where the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following: ```markdown <|system|> You are a helpful AI assistant.<|end|> <|user|> I am going to Paris, what should I see?<|end|> <|assistant|> Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. 
The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."<|end|> <|user|> What is so great about #1?<|end|> <|assistant|> ``` ## Responsible AI Considerations Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: + Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. + Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. + Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. 
+ Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include: + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. + Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). + Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. + Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations. 
### Model * Architecture: Phi-3 Mini-4K-Instruct has 3.8B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidelines. * Inputs: Text. It is best suited for prompts using chat format. * Context length: 4K tokens * GPUs: 512 H100-80G * Training time: 7 days * Training data: 3.3T tokens * Outputs: Generated text in response to the input * Dates: Our models were trained between February and April 2024 * Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models. ### Datasets Our training data includes a wide variety of sources, totaling 3.3 trillion tokens, and is a combination of 1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness. ### Fine-tuning A basic example of multi-GPU supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/sample_finetune.py). ## Benchmarks We report the results for Phi-3-Mini-4K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Phi-2, Mistral-7b-v0.1, Mixtral-8x7b, Gemma 7B, Llama-3-8B-Instruct, and GPT-3.5. All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. 
These numbers might differ from other published numbers due to slightly different choices in the evaluation. As is now standard, we use few-shot prompts to evaluate the models, at temperature 0. The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3. More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model. The number of k–shot examples is listed per-benchmark. | | Phi-3-Mini-4K-In<br>3.8b | Phi-3-Small<br>7b (preview) | Phi-3-Medium<br>14b (preview) | Phi-2<br>2.7b | Mistral<br>7b | Gemma<br>7b | Llama-3-In<br>8b | Mixtral<br>8x7b | GPT-3.5<br>version 1106 | |---|---|---|---|---|---|---|---|---|---| | MMLU <br>5-Shot | 68.8 | 75.3 | 78.2 | 56.3 | 61.7 | 63.6 | 66.5 | 68.4 | 71.4 | | HellaSwag <br> 5-Shot | 76.7 | 78.7 | 83.2 | 53.6 | 58.5 | 49.8 | 71.1 | 70.4 | 78.8 | | ANLI <br> 7-Shot | 52.8 | 55.0 | 58.7 | 42.5 | 47.1 | 48.7 | 57.3 | 55.2 | 58.1 | | GSM-8K <br> 0-Shot; CoT | 82.5 | 86.4 | 90.8 | 61.1 | 46.4 | 59.8 | 77.4 | 64.7 | 78.1 | | MedQA <br> 2-Shot | 53.8 | 58.2 | 69.8 | 40.9 | 49.6 | 50.0 | 60.5 | 62.2 | 63.4 | | AGIEval <br> 0-Shot | 37.5 | 45.0 | 49.7 | 29.8 | 35.1 | 42.1 | 42.0 | 45.2 | 48.4 | | TriviaQA <br> 5-Shot | 64.0 | 59.1 | 73.3 | 45.2 | 72.3 | 75.2 | 67.7 | 82.2 | 85.8 | | Arc-C <br> 10-Shot | 84.9 | 90.7 | 91.9 | 75.9 | 78.6 | 78.3 | 82.8 | 87.3 | 87.4 | | Arc-E <br> 10-Shot | 94.6 | 97.1 | 98.0 | 88.5 | 90.6 | 91.4 | 93.4 | 95.6 | 96.3 | | PIQA <br> 5-Shot | 84.2 | 87.8 | 88.2 | 60.2 | 77.7 | 78.1 | 75.7 | 86.0 | 86.6 | | SociQA <br> 5-Shot | 76.6 | 79.0 | 79.4 | 68.3 | 74.6 | 65.5 | 73.9 | 75.9 | 68.3 | | BigBench-Hard <br> 0-Shot | 71.7 | 75.0 | 82.5 | 59.4 | 57.3 | 59.6 | 51.5 | 69.7 | 68.32 | | WinoGrande <br> 5-Shot | 70.8 | 82.5 | 81.2 | 54.7 | 54.2 | 55.6 | 65 | 62.0 | 68.8 | | OpenBookQA <br> 10-Shot | 83.2 | 88.4 | 86.6 | 73.6 
| 79.8 | 78.6 | 82.6 | 85.8 | 86.0 | | BoolQ <br> 0-Shot | 77.6 | 82.9 | 86.5 | -- | 72.2 | 66.0 | 80.9 | 77.6 | 79.1 | | CommonSenseQA <br> 10-Shot | 80.2 | 80.3 | 82.6 | 69.3 | 72.6 | 76.2 | 79 | 78.1 | 79.6 | | TruthfulQA <br> 10-Shot | 65.0 | 68.1 | 74.8 | -- | 52.1 | 53.0 | 63.2 | 60.1 | 85.8 | | HumanEval <br> 0-Shot | 59.1 | 59.1 | 54.7 | 59.0 | 28.0 | 34.1 | 60.4 | 37.8 | 62.2 | | MBPP <br> 3-Shot | 53.8 | 71.4 | 73.7 | 60.6 | 50.8 | 51.5 | 67.7 | 60.2 | 77.8 | ## Software * [PyTorch](https://github.com/pytorch/pytorch) * [DeepSpeed](https://github.com/microsoft/DeepSpeed) * [Transformers](https://github.com/huggingface/transformers) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) ## Hardware Note that by default, the Phi-3-mini model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types: * NVIDIA A100 * NVIDIA A6000 * NVIDIA H100 ## Cross Platform Support ONNX runtime ecosystem now supports Phi-3 Mini models across platforms and hardware. You can find the optimized Phi-3 Mini-4K-Instruct ONNX model [here](https://aka.ms/phi3-mini-4k-instruct-onnx). Optimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML support lets developers bring hardware acceleration to Windows devices at scale across AMD, Intel, and NVIDIA GPUs. Along with DirectML, ONNX Runtime provides cross platform support for Phi-3 across a range of devices CPU, GPU, and mobile. Here are some of the optimized configurations we have added: 1. ONNX models for int4 DML: Quantized to int4 via AWQ 2. ONNX model for fp16 CUDA 3. ONNX model for int4 CUDA: Quantized to int4 via RTN 4. 
ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN ## License The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-4k/resolve/main/LICENSE). ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.
[ "MEDQA" ]
alimama-creative/slam-lora-sdxl
alimama-creative
text-to-image
[ "diffusers", "text-to-image", "arxiv:2404.13903", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:finetune:stabilityai/stable-diffusion-xl-base-1.0", "license:apache-2.0", "region:us" ]
2024-04-29T04:07:28Z
2024-05-15T10:04:31+00:00
18
10
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 library_name: diffusers license: apache-2.0 tags: - text-to-image inference: false --- # Sub-path Linear Approximation Model (SLAM) LoRA: SDXL Paper: [https://arxiv.org/abs/2404.13903](https://arxiv.org/abs/2404.13903)<br/> Project Page: [https://subpath-linear-approx-model.github.io/](https://subpath-linear-approx-model.github.io/)<br/> The checkpoint is distilled from [stabilityai/stable-diffusion-xl-base-1.0](https://huggingface.co/stabilityai/stable-diffusion-xl-base-1.0) with our proposed Sub-path Linear Approximation Model, which reduces the number of inference steps to only between 2-4 steps. ## Usage First, install the latest version of the Diffusers library as well as peft, accelerate and transformers. ```bash pip install --upgrade pip pip install --upgrade diffusers transformers accelerate peft ``` We implement SLAM to be compatible with [LCMScheduler](https://huggingface.co/docs/diffusers/v0.22.3/en/api/schedulers/lcm#diffusers.LCMScheduler). You can use SLAM-LoRA just like you use LCM-LoRA. ```python import torch from diffusers import LCMScheduler, AutoPipelineForText2Image model_id = "stabilityai/stable-diffusion-xl-base-1.0" adapter_id = "alimama-creative/slam-lora-sdxl" pipe = AutoPipelineForText2Image.from_pretrained(model_id, torch_dtype=torch.float16, variant="fp16") pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config) pipe.to("cuda") # load and fuse lcm lora pipe.load_lora_weights(adapter_id) pipe.fuse_lora() prompt = "A brown teddy bear holding a glass vase in front of a grave." image = pipe(prompt=prompt, num_inference_steps=4, guidance_scale=1.0).images[0] ``` Compare with latent-consistency/lcm-lora-sdxl. <img src='https://huggingface.co/alimama-creative/slam-lora-sdxl/resolve/main/sdxl_cmp.jpg'> --- More examples: <img src='https://huggingface.co/alimama-creative/slam-lora-sdxl/resolve/main/slam-lora-sdxl.jpg'>
[ "BEAR" ]
Severian/Llama-3-IMPACTS-2x8B-64k-GGUF
Severian
text-generation
[ "gguf", "climate change", "biomimicry", "theoretical astrobiology", "environmental simulations", "predictive modeling", "life origins", "ecological impacts", "sustainable technologies", "cross-disciplinary learning", "artificial intelligence", "machine learning", "data integration", "complex systems", "scenario analysis", "speculative science", "universe exploration", "biodiversity", "planetary studies", "innovation in science", "role playing scenarios", "text-generation", "en", "dataset:Severian/IMPACTS", "license:mit", "endpoints_compatible", "region:us", "conversational" ]
2024-05-02T16:17:27Z
2024-05-04T13:10:58+00:00
18
2
--- datasets: - Severian/IMPACTS language: - en license: mit pipeline_tag: text-generation tags: - climate change - biomimicry - theoretical astrobiology - environmental simulations - predictive modeling - life origins - ecological impacts - sustainable technologies - cross-disciplinary learning - artificial intelligence - machine learning - data integration - complex systems - scenario analysis - speculative science - universe exploration - biodiversity - planetary studies - innovation in science - role playing scenarios --- # Llama-3-IMPACTS-2x8B-64k-MLX <img src="https://hf.fast360.xyz/production/uploads/64740cf7485a7c8e1bd51ac9/y7YKGpr7_Tg9YlhbIscI6.webp" width="500" height="500"> --- **Designed for Advanced Problem-Solving Across Interconnected Domains of Biomimicry, Climate Change, and Astrobiology** The `Llama-3-IMPACTS-2x8B-64k` model is a cutting-edge large language model trained on the I.M.P.A.C.T.S dataset, which encompasses scenarios from biomimicry, climate change, and theoretical astrobiology. This model has been specifically tailored to generate innovative solutions and insights for both Earth and potential extraterrestrial environments, reflecting key themes of resilience, sustainability, and the interconnectedness of life across the universe. ## Model Details ### Description - **Model name:** `Llama-3-IMPACTS-2x8B-64k` - **Developer:** Severian - **Version:** 1.0 - **License:** MIT ### Training Data The model was trained on a subset of the I.M.P.A.C.T. dataset, utilizing 35,000 carefully curated examples that include detailed scenarios involving climate adaptation, biomimetic applications, and the potential for life in varying cosmic conditions. 
### Model Architecture - **Type:** Llama-3 - **Parameters:** 8 billion - **MoE:** 2 Experts - **Training:** - Epochs: 1 (35K Examples) - R = 64 - Alpha = 128 - Lr = 1e-7 - **Context Limit:** 64K ## Intended Uses This model is intended for use in applications that require deep, interdisciplinary understanding and the generation of novel insights within the realms of environmental science, synthetic biology, space exploration, and sustainability studies. Its capabilities make it ideal for: - Research and academic studies aiming to explore complex scenarios involving ecological and astrobiological phenomena. - Organizations looking to innovate in the fields of climate resilience and biomimicry. - Creative problem-solving in contexts where conventional approaches are insufficient. ## How to Use This Model The model can be loaded and used in various natural language processing tasks that require nuanced understanding and creative output. Here is a basic example of how to load and use the model using the Hugging Face Transformers library: **Can be used with any GGUF serving framework such as Ollama, LMStudio, llama.cpp, Nitro, etc.** # Use Cases ## Universe/World Simulation Use Llama-3-IMPACTS-2x8B-64k to simulate incredibly unique and diverse ecosystems and cosmic worlds. Example image generation (text-to-image): **User Prompt:** 'Craft a detailed image generation prompt focused on visualizing an innovative scenario that integrates biomimicry, climate change adaptation, and astrobiology.' ```md Image Generation Prompt: Title: “Biomimicry on Kepler-62f: A Red Dwarf Oasis” Description: In the year 2154, humanity discovers a new Earth-like planet, Kepler-62f, orbiting a red dwarf star. This distant world is beset by extreme climate conditions, with scorching daytime temperatures and frigid nighttime temperatures. To thrive in this unforgiving environment, scientists apply biomimetic technologies developed on Earth to create a sustainable ecosystem. 
Scene: In the foreground, a sprawling cityscape stretches across the planet’s surface, its architecture inspired by nature’s own designs. Towering “Tree-Habitats” mimic the branching structures of terrestrial trees, providing shelter and habitat for various extraterrestrial species. Their trunks are covered in iridescent, solar-powered panels that generate energy while also filtering out harmful radiation from the red dwarf star. In the distance, a vast network of “Coral-Reefs” sprawls across the planet’s surface, mimicking the intricate structures found on Earth’s coral reefs. These artificial ecosystems support an incredible array of alien marine life, including bioluminescent fish, towering kelp-like seaweed, and gelatinous sea creatures that drift through the water like terrestrial jellyfish. Above the city, a series of “Cloud-Cities” float majestically, their translucent membranes reflecting the red dwarf star’s light. These floating metropolises are home to species adapted for life in the upper atmosphere, such as winged beings with iridescent feathers and gas-filled bodies that allow them to soar through the skies. Key Features: Biomimetic Architecture: Buildings and structures are designed to mimic natural forms, providing insulation, shelter, and habitat for various species. Artificial Ecosystems: “Coral-Reefs” support marine life, while “Tree-Habitats” provide shelter and food sources for terrestrial species. Energy Harvesting: Solar-powered panels on the Tree-Habitats generate energy, while wind turbines and tidal power generators supplement the planet’s energy needs. Advanced Life Support Systems: Closed-loop systems recycle water, air, and waste, minimizing the impact of human activities on the planet’s fragile ecosystem. Extraterrestrial Biodiversity: Various alien species thrive in this red dwarf oasis, from towering kelp-like seaweed to bioluminescent fish and winged beings adapted for life in the upper atmosphere. 
Color Palette: Warm oranges and yellows evoke the scorching daytime temperatures Deep blues and purples represent the frigid nighttime conditions Iridescent hues on the Tree-Habitats and Cloud-Cities reflect the planet’s unique radiation patterns Style: Blend of realistic and stylized elements, with a focus on showcasing the innovative biomimetic technologies and extraterrestrial biodiversity. Incorporate subtle texture and gradient effects to convey the harsh climate conditions and the adaptability of life on Kepler-62f. By visualizing this futuristic scenario, we can explore the possibilities of biomimicry as a solution for climate change adaptation and sustainability in extreme environments. The image should inspire viewers to consider the potential applications of nature-inspired technologies in our own struggle against environmental challenges and encourage a deeper appreciation for the diversity of life that exists – or could exist – beyond Earth. ``` --- <img src="https://hf.fast360.xyz/production/uploads/64740cf7485a7c8e1bd51ac9/m3GfS0TcOejD1V7AI5OwN.webp" width="500" height="500"> --- ### Disaster Response Simulation **Prompt:** "Simulate the impact of a Category 5 hurricane hitting the eastern coast of Florida in 2030, considering factors such as rising sea levels and urban density. Assess potential emergency responses and resource needs." #### Example Response **Response:** "**Scenario:** A powerful Category 5 hurricane, named "Hurricane Ana," makes landfall on September 15, 2030, along the eastern coast of Florida, near Miami. The storm brings with it catastrophic winds, torrential rainfall, and a massive storm surge due to rising sea levels. **Storm Characteristics:** * Wind Speeds: Up to 160 mph (257 km/h) * Storm Surge: 10-15 feet (3-4.5 meters) above mean sea level * Rainfall: 12-18 inches (30-45 cm) in the first 24 hours, with isolated areas receiving up to 24 inches (60 cm) **Impacts:** 1. 
**Coastal Flooding:** Rising sea levels exacerbate the storm surge, causing widespread flooding and damage along the eastern coast of Florida, particularly in Miami-Dade, Broward, and Palm Beach counties. 2. **Urban Infrastructure:** The densely populated cities along the coast suffer significant damage to buildings, roads, and infrastructure, leading to disruptions in essential services like power, water, and communication. 3. **Displacement and Evacuations:** Over 1 million people are displaced or evacuated from their homes, with many seeking shelter in nearby inland areas or temporary facilities. 4. **Environmental Concerns:** The storm surge contaminates waterways, causing concerns about water quality and potential health risks for those affected. 5. **Economic Impact:** The estimated economic loss is around $100 billion, considering damage to infrastructure, businesses, and agricultural industries. **Emergency Response:** 1. **Evacuation Efforts:** Emergency responders prioritize evacuations, focusing on vulnerable populations like the elderly, children, and those with disabilities. They also ensure that essential services like hospitals, fire stations, and emergency centers are evacuated. 2. **Search and Rescue Operations:** Teams conduct door-to-door searches for trapped individuals, using specialized equipment and resources to navigate flooded areas. 3. **Food, Water, and Medical Supplies:** Aid distributions prioritize the most affected communities, with a focus on providing essential supplies, shelter, and medical care. 4. **Power Restoration:** Utility companies work to restore power as quickly as possible, deploying crews from neighboring states and using advanced technologies like drones and satellite imaging to assess damage and optimize response efforts. **Resource Needs:** 1. 
**Personnel:** Over 10,000 emergency responders, including firefighters, police officers, medical personnel, and National Guard troops, are deployed to support the response effort. 2. **Equipment and Supplies:** Critical resources include: * Generators and fuel * Food, water, and medical supplies * Communication equipment (satellite phones, etc.) * Heavy machinery for debris removal and infrastructure repair * Shelter materials (tents, blankets, etc.) 3. **Funding:** Estimated costs exceed $500 million, requiring a significant allocation of federal, state, and local funds to support the response and recovery efforts. 4. **Logistical Support:** Coordination with neighboring states and national agencies is crucial for maintaining supply chains, transporting personnel and equipment, and sharing expertise. **Challenges:** 1. **Infrastructure Overload:** The sheer scale of damage overwhelms local infrastructure, leading to delays in aid distribution and response efforts. 2. **Water Contamination:** Concerns about water quality hinder access to clean drinking water, exacerbating public health risks. 3. **Displacement and Shelter:** The large number of displaced individuals creates a significant shortage of available shelter options, straining resources and emergency services. **Lessons Learned:** 1. **Preparedness is Key:** Prioritizing evacuation planning, infrastructure hardening, and disaster preparedness can significantly reduce the impact of such events. 2. **Collaboration is Crucial:** Effective coordination between local, state, and federal agencies, as well as with neighboring states and national organizations, is essential for a successful response. 3. **Adaptation to Rising Sea Levels:** The increasing threat of sea-level rise requires a shift in disaster preparedness strategies, focusing on resilient infrastructure design and adaptive management practices. 
**Conclusion:** The simulated impact of Hurricane Ana highlights the devastating consequences of a Category 5 hurricane hitting Florida's eastern coast, exacerbated by rising sea levels. The emergency response efforts require significant resources, personnel, and coordination to address the complex challenges posed by this disaster. As we move forward, it is essential to prioritize preparedness, collaboration, and adaptation to mitigate the effects of these events and protect vulnerable communities." # Try it out yourself! ### Extraterrestrial Colony Planning **Prompt:** "Design a sustainable human colony for Mars, focusing on water resource management, energy production, and habitat construction using local materials. Include potential environmental challenges and solutions." ### Environmental Policy Formulation **Prompt:** "Evaluate the long-term ecological impacts of banning single-use plastics globally. Simulate the effects on marine life, pollution levels, and waste management systems over the next 50 years." ### Advanced Educational Tools **Prompt:** "Create an interactive simulation that demonstrates the water cycle on Earth, including the effects of deforestation and urbanization on water availability and quality in major river basins." ### Interactive Storytelling **Prompt:** "Generate a narrative where the user is a leader in a community facing severe drought conditions. Allow the user to make decisions about water usage, agricultural practices, and public policy, showing the consequences of each choice." ### Biodiversity Conservation Strategies **Prompt:** "Develop a conservation strategy for the Amazon rainforest, focusing on mitigating the effects of deforestation and climate change. Simulate various scenarios involving local communities and global stakeholders." ### Interstellar Communication Simulation **Prompt:** "Imagine a scenario where Earth receives a signal from a distant planet. 
Simulate a series of potential communications that could be exchanged, considering language barriers and the transmission delay over light-years." ### Bioengineering Solutions **Prompt:** "Propose a bioengineering project to create microbial life forms that can detoxify plastic waste in the ocean. Describe the genetic traits these organisms would need and simulate the potential ecological impact." ### Cross-Planetary Impact Studies **Prompt:** "Analyze how a supernova explosion in a neighboring star system could affect planetary systems in its vicinity, including potential impacts on Earth's magnetic field and atmosphere." ### Custom Scenario Development **Prompt:** "Allow the user to create a custom scenario involving an unexpected volcanic eruption in Iceland. Let the user set parameters like the eruption's size, duration, and ash distribution, then simulate the global climate and air travel impacts." These prompts are designed to maximize the utilization of the model's capabilities in various complex and interdisciplinary scenarios, making them useful for researchers, educators, policymakers, and enthusiasts interested in exploring these domains. ## Limitations and Biases While the `Llama-3-IMPACTS-2x8B-64k` model is designed to be a powerful tool for generating insightful content, it inherits limitations from its training data, which, though extensive, may not capture all possible scenarios or biases. Users should be aware of these limitations and consider them when interpreting the model's outputs, especially in decision-making contexts. ## Model Performance Initial tests indicate that the model performs exceptionally well in tasks that involve complex reasoning and generating innovative solutions based on the scenarios presented in the I.M.P.A.C.T.S dataset. Further evaluation and fine-tuning may be required to optimize performance for specific applications. 
The `Llama-3-IMPACTS-2x8B-64k` model represents an avenue that AI can use for exploring and solving complex problems across multiple domains. By leveraging the rich, interconnected dataset of I.M.P.A.C.T.S, it offers a valuable tool for researchers, innovators, and thinkers aiming to push the boundaries of what's possible in their fields.
[ "CRAFT" ]
thesven/Llama3-8B-SFT-SyntheticMedical-bnb-4bit
thesven
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "biology", "medical", "en", "dataset:thesven/SyntheticMedicalQA-4336", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-05-10T20:35:34Z
2024-05-25T14:55:07+00:00
18
0
--- datasets: - thesven/SyntheticMedicalQA-4336 language: - en library_name: transformers license: llama3 tags: - biology - medical --- # Llama3-8B-SFT-SyntheticMedical-bnb-4bit <!-- Provide a quick summary of what the model is/does. --> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6324ce4d5d0cf5c62c6e3c5a/ZMeYpx2-wRbla__Tf6fvr.png) ## Model Details ### Model Description Llama3-8B-SFT-SyntheticMedical-bnb-4bit is trained using the SFT method via QLoRA on 4336 rows of medical data to enhance its abilities in the realm of scientific anatomy. This is the model card of a 🤗 transformers model that has been pushed to the Hub. This model card has been automatically generated. ### Using the model with transformers ```python from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig model_name_or_path = "thesven/Llama3-8B-SFT-SyntheticMedical-bnb-4bit" # BitsAndBytesConfig for loading the model in 4-bit precision bnb_config = BitsAndBytesConfig( load_in_4bit=True, bnb_4bit_quant_type="nf4", bnb_4bit_compute_dtype="float16", ) tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True) model = AutoModelForCausalLM.from_pretrained( model_name_or_path, device_map="auto", trust_remote_code=False, revision="main", quantization_config=bnb_config ) model.pad_token = model.config.eos_token_id prompt_template = ''' <|begin_of_text|><|start_header_id|>system<|end_header_id|> You are an expert in the field of anatomy, help explain its topics to me.<|eot_id|><|start_header_id|>user<|end_header_id|> What is the function of the hamstring?<|eot_id|><|start_header_id|>assistant<|end_header_id|> ''' input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda() output = model.generate(inputs=input_ids, temperature=0.1, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512) generated_text = tokenizer.decode(output[0], skip_special_tokens=True) print(generated_text) ```
[ "MEDICAL DATA" ]
Hazy2028/pytorch_model-00001-of-00003.bin
Hazy2028
feature-extraction
[ "transformers", "pytorch", "internlm", "feature-extraction", "medical", "custom_code", "en", "zh", "ja", "fr", "ru", "es", "dataset:Henrychur/MMedC", "arxiv:2402.13963", "license:cc-by-4.0", "region:us" ]
2024-05-23T02:28:52Z
2024-05-23T20:33:29+00:00
18
0
--- datasets: - Henrychur/MMedC language: - en - zh - ja - fr - ru - es license: cc-by-4.0 tags: - medical --- # MMedLM [💻Github Repo](https://github.com/MAGIC-AI4Med/MMedLM) [🖨️arXiv Paper](https://arxiv.org/abs/2402.13963) The official model weights for "Towards Building Multilingual Language Model for Medicine". [MMedLM 2](https://huggingface.co/Henrychur/MMedLM2) has been released now. MMedLM2 is a more powerful multilingual medical foundation model, which has undergone the same medical data enhancement pipeline as MMedLM. ## Introduction This repo contains MMedLM, a multilingual medical foundation model with 7 billion parameters. MMedLM builds upon the foundation of InternLM and has been further pretrained on MMedC, a comprehensive multilingual medical corpus. This further pretraining enhances the model's medical-domain knowledge. The model underwent further pretraining on MMedC with the following hyperparameters: - Iterations: 15000 - Global batch size: 512 - Cutoff length: 2048 - Learning rate: 2e-5 The model can be loaded as follows: ```py import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("Henrychur/MMedLM", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained("Henrychur/MMedLM", torch_dtype=torch.float16, trust_remote_code=True) ``` - Note that this is a foundation model that has not undergone instruction fine-tuning. - Testing has found that using the latest version of transformers will result in errors. It is recommended to use transformers==4.28.1. ## News [2024.2.21] Our pre-print paper is released on arXiv. Dive into our findings [here](https://arxiv.org/abs/2402.13963). [2024.2.20] We release [MMedLM](https://huggingface.co/Henrychur/MMedLM) and [MMedLM 2](https://huggingface.co/Henrychur/MMedLM2). With auto-regressive continuous training on MMedC, these models achieve superior performance compared to all other open-source models, even rivaling GPT-4 on MMedBench. 
[2023.2.20] We release [MMedC](https://huggingface.co/datasets/Henrychur/MMedC), a multilingual medical corpus containing 25.5B tokens. [2023.2.20] We release [MMedBench](https://huggingface.co/datasets/Henrychur/MMedBench), a new multilingual medical multi-choice question-answering benchmark with rationale. Check out the leaderboard [here](https://henrychur.github.io/MultilingualMedQA/). ## Evaluation on MMedBench The further pretrained MMedLM 2 showcast it's great performance in medical domain across different language. | Method | Size | Year | MMedC | MMedBench | English | Chinese | Japanese | French | Russian | Spanish | Avg. | |------------------|------|---------|-----------|-----------|----------------|----------------|----------------|----------------|----------------|----------------|----------------| | GPT-3.5 | - | 2022.12 | &#10007; | &#10007; | 56.88 | 52.29 | 34.63 | 32.48 | 66.36 | 66.06 | 51.47 | | GPT-4 | - | 2023.3 | &#10007; | &#10007; | 78.00 | 75.07 | 72.91 | 56.59 | 83.62 | 85.67 | 74.27 | | Gemini-1.0 pro | - | 2024.1 | &#10007; | &#10007; | 53.73 | 60.19 | 44.22 | 29.90 | 73.44 | 69.69 | 55.20 | | BLOOMZ | 7B | 2023.5 | &#10007; | trainset | 43.28 | 58.06 | 32.66 | 26.37 | 62.89 | 47.34 | 45.10 | | InternLM | 7B | 2023.7 | &#10007; | trainset | 44.07 | 64.62 | 37.19 | 24.92 | 58.20 | 44.97 | 45.67 | | Llama\ 2 | 7B | 2023.7 | &#10007; | trainset | 43.36 | 50.29 | 25.13 | 20.90 | 66.80 | 47.10 | 42.26 | | MedAlpaca | 7B | 2023.3 | &#10007; | trainset | 46.74 | 44.80 | 29.64 | 21.06 | 59.38 | 45.00 | 41.11 | | ChatDoctor | 7B | 2023.4 | &#10007; | trainset | 43.52 | 43.26 | 25.63 | 18.81 | 62.50 | 43.44 | 39.53 | | PMC-LLaMA | 7B | 2023.4 | &#10007; | trainset | 47.53 | 42.44 | 24.12 | 20.74 | 62.11 | 43.29 | 40.04 | | Mistral | 7B | 2023.10 | &#10007; | trainset | 61.74 | 71.10 | 44.72 | 48.71 | 74.22 | 63.86 | 60.73 | | InternLM\ 2 | 7B | 2024.2 | &#10007; | trainset | 57.27 | 77.55 | 47.74 | 41.00 | 68.36 | 59.59 | 58.59 | | MMedLM~(Ours) | 
7B | - | &#10007; | trainset | 49.88 | 70.49 | 46.23 | 36.66 | 72.27 | 54.52 | 55.01 | | MMedLM\ 2~(Ours) | 7B | - | &#10007; | trainset | 61.74 | 80.01 | 61.81 | 52.09 | 80.47 | 67.65 | 67.30 | - GPT and Gemini are evaluated under the zero-shot setting through the API - Open-source models first undergo training on the trainset of MMedBench before evaluation. ## Contact If you have any questions, please feel free to contact [email protected]. ## Citation ``` @misc{qiu2024building, title={Towards Building Multilingual Language Model for Medicine}, author={Pengcheng Qiu and Chaoyi Wu and Xiaoman Zhang and Weixiong Lin and Haicheng Wang and Ya Zhang and Yanfeng Wang and Weidi Xie}, year={2024}, eprint={2402.13963}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
[ "MEDICAL DATA" ]
LiteLLMs/Phi-3-mini-128k-instruct-GGUF
LiteLLMs
text-generation
[ "gguf", "nlp", "code", "GGUF", "text-generation", "en", "license:mit", "endpoints_compatible", "region:us", "conversational" ]
2024-05-23T12:52:21Z
2024-05-23T21:08:31+00:00
18
0
--- language: - en license: mit license_link: https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/LICENSE pipeline_tag: text-generation tags: - nlp - code - GGUF widget: - messages: - role: user content: Can you provide ways to eat combinations of bananas and dragonfruits? quantized_by: andrijdavid --- # Phi-3-mini-128k-instruct-GGUF - Original model: [Phi-3-mini-128k-instruct](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) <!-- description start --> ## Description This repo contains GGUF format model files for [Phi-3-mini-128k-instruct](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct). <!-- description end --> <!-- README_GGUF.md-about-gguf start --> ### About GGUF GGUF is a new format introduced by the llama.cpp team on August 21st 2023. It is a replacement for GGML, which is no longer supported by llama.cpp. Here is an incomplete list of clients and libraries that are known to support GGUF: * [llama.cpp](https://github.com/ggerganov/llama.cpp). This is the source project for GGUF, providing both a Command Line Interface (CLI) and a server option. * [text-generation-webui](https://github.com/oobabooga/text-generation-webui), Known as the most widely used web UI, this project boasts numerous features and powerful extensions, and supports GPU acceleration. * [Ollama](https://github.com/jmorganca/ollama) Ollama is a lightweight and extensible framework designed for building and running language models locally. It features a simple API for creating, managing, and executing models, along with a library of pre-built models for use in various applications​ * [KoboldCpp](https://github.com/LostRuins/koboldcpp), A comprehensive web UI offering GPU acceleration across all platforms and architectures, particularly renowned for storytelling. * [GPT4All](https://gpt4all.io), This is a free and open source GUI that runs locally, supporting Windows, Linux, and macOS with full GPU acceleration. 
* [LM Studio](https://lmstudio.ai/) An intuitive and powerful local GUI for Windows and macOS (Silicon), featuring GPU acceleration. * [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui). A notable web UI with a variety of unique features, including a comprehensive model library for easy model selection. * [Faraday.dev](https://faraday.dev/), An attractive, user-friendly character-based chat GUI for Windows and macOS (both Silicon and Intel), also offering GPU acceleration. * [llama-cpp-python](https://github.com/abetlen/llama-cpp-python), A Python library equipped with GPU acceleration, LangChain support, and an OpenAI-compatible API server. * [candle](https://github.com/huggingface/candle), A Rust-based ML framework focusing on performance, including GPU support, and designed for ease of use. * [ctransformers](https://github.com/marella/ctransformers), A Python library featuring GPU acceleration, LangChain support, and an OpenAI-compatible AI server. * [localGPT](https://github.com/PromtEngineer/localGPT) An open-source initiative enabling private conversations with documents. <!-- README_GGUF.md-about-gguf end --> <!-- compatibility_gguf start --> ## Explanation of quantisation methods <details> <summary>Click to see details</summary> The new methods available are: * GGML_TYPE_Q2_K - "type-1" 2-bit quantization in super-blocks containing 16 blocks, each block having 16 weight. Block scales and mins are quantized with 4 bits. This ends up effectively using 2.5625 bits per weight (bpw) * GGML_TYPE_Q3_K - "type-0" 3-bit quantization in super-blocks containing 16 blocks, each block having 16 weights. Scales are quantized with 6 bits. This end up using 3.4375 bpw. * GGML_TYPE_Q4_K - "type-1" 4-bit quantization in super-blocks containing 8 blocks, each block having 32 weights. Scales and mins are quantized with 6 bits. This ends up using 4.5 bpw. * GGML_TYPE_Q5_K - "type-1" 5-bit quantization. 
Same super-block structure as GGML_TYPE_Q4_K resulting in 5.5 bpw * GGML_TYPE_Q6_K - "type-0" 6-bit quantization. Super-blocks with 16 blocks, each block having 16 weights. Scales are quantized with 8 bits. This ends up using 6.5625 bpw. </details> <!-- compatibility_gguf end --> <!-- README_GGUF.md-how-to-download start --> ## How to download GGUF files **Note for manual downloaders:** You almost never want to clone the entire repo! Multiple different quantisation formats are provided, and most users only want to pick and download a single folder. The following clients/libraries will automatically download models for you, providing a list of available models to choose from: * LM Studio * LoLLMS Web UI * Faraday.dev ### In `text-generation-webui` Under Download Model, you can enter the model repo: LiteLLMs/Phi-3-mini-128k-instruct-GGUF and below it, a specific filename to download, such as: Q4_0/Q4_0-00001-of-00009.gguf. Then click Download. ### On the command line, including multiple files at once I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` Then you can download any individual model file to the current directory, at high speed, with a command like this: ```shell huggingface-cli download LiteLLMs/Phi-3-mini-128k-instruct-GGUF Q4_0/Q4_0-00001-of-00009.gguf --local-dir . --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage (click to read)</summary> You can also download multiple files at once with a pattern: ```shell huggingface-cli download LiteLLMs/Phi-3-mini-128k-instruct-GGUF --local-dir . --local-dir-use-symlinks False --include='*Q4_K*gguf' ``` For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). 
To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install huggingface_hub[hf_transfer] ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download LiteLLMs/Phi-3-mini-128k-instruct-GGUF Q4_0/Q4_0-00001-of-00009.gguf --local-dir . --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> <!-- README_GGUF.md-how-to-download end --> <!-- README_GGUF.md-how-to-run start --> ## Example `llama.cpp` command Make sure you are using `llama.cpp` from commit [d0cee0d](https://github.com/ggerganov/llama.cpp/commit/d0cee0d36d5be95a0d9088b674dbb27354107221) or later. ```shell ./main -ngl 35 -m Q4_0/Q4_0-00001-of-00009.gguf --color -c 8192 --temp 0.7 --repeat_penalty 1.1 -n -1 -p "<PROMPT>" ``` Change `-ngl 35` to the number of layers to offload to GPU. Remove it if you don't have GPU acceleration. Change `-c 8192` to the desired sequence length. For extended sequence models - eg 8K, 16K, 32K - the necessary RoPE scaling parameters are read from the GGUF file and set by llama.cpp automatically. Note that longer sequence lengths require much more resources, so you may need to reduce this value. If you want to have a chat-style conversation, replace the `-p <PROMPT>` argument with `-i -ins` For other parameters and how to use them, please refer to [the llama.cpp documentation](https://github.com/ggerganov/llama.cpp/blob/master/examples/main/README.md) ## How to run in `text-generation-webui` Further instructions can be found in the text-generation-webui documentation, here: [text-generation-webui/docs/04 ‐ Model Tab.md](https://github.com/oobabooga/text-generation-webui/blob/main/docs/04%20%E2%80%90%20Model%20Tab.md#llamacpp). 
## How to run from Python code You can use GGUF models from Python using the [llama-cpp-python](https://github.com/abetlen/llama-cpp-python) or [ctransformers](https://github.com/marella/ctransformers) libraries. Note that at the time of writing (Nov 27th 2023), ctransformers has not been updated for some time and is not compatible with some recent models. Therefore I recommend you use llama-cpp-python. ### How to load this model in Python code, using llama-cpp-python For full documentation, please see: [llama-cpp-python docs](https://abetlen.github.io/llama-cpp-python/). #### First install the package Run one of the following commands, according to your system: ```shell # Base ctransformers with no GPU acceleration pip install llama-cpp-python # With NVidia CUDA acceleration CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install llama-cpp-python # Or with OpenBLAS acceleration CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" pip install llama-cpp-python # Or with CLBLast acceleration CMAKE_ARGS="-DLLAMA_CLBLAST=on" pip install llama-cpp-python # Or with AMD ROCm GPU acceleration (Linux only) CMAKE_ARGS="-DLLAMA_HIPBLAS=on" pip install llama-cpp-python # Or with Metal GPU acceleration for macOS systems only CMAKE_ARGS="-DLLAMA_METAL=on" pip install llama-cpp-python # In windows, to set the variables CMAKE_ARGS in PowerShell, follow this format; eg for NVidia CUDA: $env:CMAKE_ARGS = "-DLLAMA_OPENBLAS=on" pip install llama-cpp-python ``` #### Simple llama-cpp-python example code ```python from llama_cpp import Llama # Set gpu_layers to the number of layers to offload to GPU. Set to 0 if no GPU acceleration is available on your system. 
llm = Llama( model_path="./Q4_0/Q4_0-00001-of-00009.gguf", # Download the model file first n_ctx=32768, # The max sequence length to use - note that longer sequence lengths require much more resources n_threads=8, # The number of CPU threads to use, tailor to your system and the resulting performance n_gpu_layers=35 # The number of layers to offload to GPU, if you have GPU acceleration available ) # Simple inference example output = llm( "<PROMPT>", # Prompt max_tokens=512, # Generate up to 512 tokens stop=["</s>"], # Example stop token - not necessarily correct for this specific model! Please check before using. echo=True # Whether to echo the prompt ) # Chat Completion API llm = Llama(model_path="./Q4_0/Q4_0-00001-of-00009.gguf", chat_format="llama-2") # Set chat_format according to the model you are using llm.create_chat_completion( messages = [ {"role": "system", "content": "You are a story writing assistant."}, { "role": "user", "content": "Write a story about llamas." } ] ) ``` ## How to use with LangChain Here are guides on using llama-cpp-python and ctransformers with LangChain: * [LangChain + llama-cpp-python](https://python.langchain.com/docs/integrations/llms/llamacpp) * [LangChain + ctransformers](https://python.langchain.com/docs/integrations/providers/ctransformers) <!-- README_GGUF.md-how-to-run end --> <!-- footer end --> <!-- original-model-card start --> # Original model card: Phi-3-mini-128k-instruct ## Model Summary The Phi-3-Mini-128K-Instruct is a 3.8 billion-parameter, lightweight, state-of-the-art open model trained using the Phi-3 datasets. This dataset includes both synthetic data and filtered publicly available website data, with an emphasis on high-quality and reasoning-dense properties. 
The model belongs to the Phi-3 family with the Mini version in two variants [4K](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) which is the context length (in tokens) that it can support. After initial training, the model underwent a post-training process that involved supervised fine-tuning and direct preference optimization to enhance its ability to follow instructions and adhere to safety measures. When evaluated against benchmarks that test common sense, language understanding, mathematics, coding, long-term context, and logical reasoning, the Phi-3 Mini-128K-Instruct demonstrated robust and state-of-the-art performance among models with fewer than 13 billion parameters. Resources and Technical Documentation: + [Phi-3 Microsoft Blog](https://aka.ms/Phi-3Build2024) + [Phi-3 Technical Report](https://aka.ms/phi3-tech-report) + [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai) + [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook) | | Short Context | Long Context | | | -- | | -- | | ----- | | MMLU <br>5-Shot | 68.1 | 75.3 | 78.2 | 56.3 | 61.7 | 63.6 | 66.5 | 68.4 | 71.4 | | HellaSwag <br> 5-Shot | 74.5 | 78.7 | 83.2 | 53.6 | 58.5 | 49.8 | 71.1 | 70.4 | 78.8 | | ANLI <br> 7-Shot | 52.8 | 55.0 | 58.7 | 42.5 | 47.1 | 48.7 | 57.3 | 55.2 | 58.1 | | GSM-8K <br> 0-Shot; CoT | 83.6 | 86.4 | 90.8 | 61.1 | 46.4 | 59.8 | 77.4 | 64.7 | 78.1 | | MedQA <br> 2-Shot | 55.3 | 58.2 | 69.8 | 40.9 | 49.6 | 50.0 | 60.5 | 62.2 | 63.4 | | AGIEval <br> 0-Shot | 36.9 | 45.0 | 49.7 | 29.8 | 35.1 | 42.1 | 42.0 | 45.2 | 48.4 | | TriviaQA <br> 5-Shot | 57.1 | 59.1 | 73.3 | 45.2 | 72.3 | 75.2 | 67.7 | 82.2 | 85.8 | | Arc-C <br> 10-Shot | 84.0 | 90.7 | 91.9 | 75.9 | 78.6 | 78.3 | 82.8 | 87.3 | 87.4 | | Arc-E <br> 10-Shot | 95.2 | 97.1 | 98.0 | 88.5 | 90.6 | 91.4 | 93.4 | 95.6 | 96.3 | | PIQA <br> 5-Shot | 83.6 | 87.8 | 88.2 | 60.2 | 77.7 | 78.1 | 75.7 | 86.0 | 86.6 | | SociQA <br> 5-Shot | 76.1 | 
79.0 | 79.4 | 68.3 | 74.6 | 65.5 | 73.9 | 75.9 | 68.3 | | BigBench-Hard <br> 0-Shot | 71.5 | 75.0 | 82.5 | 59.4 | 57.3 | 59.6 | 51.5 | 69.7 | 68.32 | | WinoGrande <br> 5-Shot | 72.5 | 82.5 | 81.2 | 54.7 | 54.2 | 55.6 | 65.0 | 62.0 | 68.8 | | OpenBookQA <br> 10-Shot | 80.6 | 88.4 | 86.6 | 73.6 | 79.8 | 78.6 | 82.6 | 85.8 | 86.0 | | BoolQ <br> 0-Shot | 78.7 | 82.9 | 86.5 | -- | 72.2 | 66.0 | 80.9 | 77.6 | 79.1 | | CommonSenseQA <br> 10-Shot | 78.0 | 80.3 | 82.6 | 69.3 | 72.6 | 76.2 | 79 | 78.1 | 79.6 | | TruthfulQA <br> 10-Shot | 63.2 | 68.1 | 74.8 | -- | 52.1 | 53.0 | 63.2 | 60.1 | 85.8 | | HumanEval <br> 0-Shot | 57.9 | 59.1 | 54.7 | 47.0 | 28.0 | 34.1 | 60.4 | 37.8 | 62.2 | | MBPP <br> 3-Shot | 62.5 | 71.4 | 73.7 | 60.6 | 50.8 | 51.5 | 67.7 | 60.2 | 77.8 | ## Software * [PyTorch](https://github.com/pytorch/pytorch) * [DeepSpeed](https://github.com/microsoft/DeepSpeed) * [Transformers](https://github.com/huggingface/transformers) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) ## Hardware Note that by default, the Phi-3-mini model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types: * NVIDIA A100 * NVIDIA A6000 * NVIDIA H100 If you want to run the model on: * NVIDIA V100 or earlier generation GPUs: call AutoModelForCausalLM.from_pretrained() with attn_implementation="eager" * Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [128K](https://aka.ms/phi3-mini-128k-instruct-onnx) ## Cross Platform Support ONNX runtime ecosystem now supports Phi-3 Mini models across platforms and hardware. You can find the optimized Phi-3 Mini-128K-Instruct ONNX model [here](https://aka.ms/phi3-mini-128k-instruct-onnx). Optimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. 
DirectML support lets developers bring hardware acceleration to Windows devices at scale across AMD, Intel, and NVIDIA GPUs. Along with DirectML, ONNX Runtime provides cross platform support for Phi-3 across a range of devices CPU, GPU, and mobile. Here are some of the optimized configurations we have added: 1. ONNX models for int4 DML: Quantized to int4 via AWQ 2. ONNX model for fp16 CUDA 3. ONNX model for int4 CUDA: Quantized to int4 via RTN 4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN ## License The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-128k/resolve/main/LICENSE). ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies. <!-- original-model-card end -->
[ "MEDQA" ]
kadirnar/yolov10m
kadirnar
object-detection
[ "yolov10", "object-detection", "computer-vision", "pypi", "dataset:detection-datasets/coco", "arxiv:2405.14458", "license:agpl-3.0", "region:us" ]
2024-05-27T12:49:07Z
2024-05-27T13:07:02+00:00
18
1
--- datasets: - detection-datasets/coco license: agpl-3.0 tags: - object-detection - computer-vision - yolov10 - pypi --- ### Model Description [YOLOv10: Real-Time End-to-End Object Detection](https://arxiv.org/abs/2405.14458v1) [Paper Repo: Implementation of paper - YOLOv10](https://github.com/THU-MIG/yolov10) ### Installation ``` pip install supervision git+https://github.com/THU-MIG/yolov10.git ``` ### Yolov10 Inference ```python from ultralytics import YOLOv10 import supervision as sv import cv2 def attempt_download_from_hub(repo_id, hf_token=None): # https://github.com/fcakyon/yolov5-pip/blob/main/yolov5/utils/downloads.py from huggingface_hub import hf_hub_download, list_repo_files from huggingface_hub.utils._errors import RepositoryNotFoundError from huggingface_hub.utils._validators import HFValidationError try: repo_files = list_repo_files(repo_id=repo_id, repo_type='model', token=hf_token) model_file = [f for f in repo_files if f.endswith('.pt')][0] file = hf_hub_download( repo_id=repo_id, filename=model_file, repo_type='model', token=hf_token, ) return file except (RepositoryNotFoundError, HFValidationError): return None MODEL_PATH = attempt_download_from_hub("kadirnar/yolov10x", hf_token="hf_token") IMAGE_PATH = 'dog.jpeg' model = YOLOv10(MODEL_PATH) image = cv2.imread(IMAGE_PATH) results = model(source=image, conf=0.25, verbose=False)[0] detections = sv.Detections.from_ultralytics(results) box_annotator = sv.BoxAnnotator() category_dict = { 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 
'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush' } labels = [ f"{category_dict[class_id]} {confidence:.2f}" for class_id, confidence in zip(detections.class_id, detections.confidence) ] annotated_image = box_annotator.annotate( image.copy(), detections=detections, labels=labels ) cv2.imwrite('annotated_dog.jpeg', annotated_image) ``` ### BibTeX Entry and Citation Info ``` @misc{wang2024yolov10, title={YOLOv10: Real-Time End-to-End Object Detection}, author={Ao Wang and Hui Chen and Lihao Liu and Kai Chen and Zijia Lin and Jungong Han and Guiguang Ding}, year={2024}, eprint={2405.14458}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
[ "BEAR" ]
nielsr/yolov10l
nielsr
null
[ "transformers", "safetensors", "pytorch_model_hub_mixin", "model_hub_mixin", "object detection", "arxiv:2405.14458", "endpoints_compatible", "region:us" ]
2024-06-01T08:09:07Z
2024-06-01T09:20:42+00:00
18
0
--- tags: - pytorch_model_hub_mixin - model_hub_mixin - object detection --- This model has been pushed to the Hub using the [PytorchModelHubMixin](https://huggingface.co/docs/huggingface_hub/package_reference/mixins#huggingface_hub.PyTorchModelHubMixin) integration. ## Installation First install the [YOLOv10 Github repository](https://github.com/THU-MIG/yolov10) along with supervision which provides some nice utilities for bounding box processing. ``` pip install git+https://github.com/nielsrogge/yolov10.git@feature/add_hf supervision ``` ## Usage One can perform inference as follows: ```python from ultralytics import YOLOv10 import supervision as sv from PIL import Image import requests # load model model = YOLOv10.from_pretrained("nielsr/yolov10l") # load image url = 'http://images.cocodataset.org/val2017/000000039769.jpg' image = Image.open(requests.get(url, stream=True).raw) image = np.array(image) # perform inference results = model(source=image, conf=0.25, verbose=False)[0] detections = sv.Detections.from_ultralytics(results) box_annotator = sv.BoxAnnotator() category_dict = { 0: 'person', 1: 'bicycle', 2: 'car', 3: 'motorcycle', 4: 'airplane', 5: 'bus', 6: 'train', 7: 'truck', 8: 'boat', 9: 'traffic light', 10: 'fire hydrant', 11: 'stop sign', 12: 'parking meter', 13: 'bench', 14: 'bird', 15: 'cat', 16: 'dog', 17: 'horse', 18: 'sheep', 19: 'cow', 20: 'elephant', 21: 'bear', 22: 'zebra', 23: 'giraffe', 24: 'backpack', 25: 'umbrella', 26: 'handbag', 27: 'tie', 28: 'suitcase', 29: 'frisbee', 30: 'skis', 31: 'snowboard', 32: 'sports ball', 33: 'kite', 34: 'baseball bat', 35: 'baseball glove', 36: 'skateboard', 37: 'surfboard', 38: 'tennis racket', 39: 'bottle', 40: 'wine glass', 41: 'cup', 42: 'fork', 43: 'knife', 44: 'spoon', 45: 'bowl', 46: 'banana', 47: 'apple', 48: 'sandwich', 49: 'orange', 50: 'broccoli', 51: 'carrot', 52: 'hot dog', 53: 'pizza', 54: 'donut', 55: 'cake', 56: 'chair', 57: 'couch', 58: 'potted plant', 59: 'bed', 60: 'dining table', 61: 
'toilet', 62: 'tv', 63: 'laptop', 64: 'mouse', 65: 'remote', 66: 'keyboard', 67: 'cell phone', 68: 'microwave', 69: 'oven', 70: 'toaster', 71: 'sink', 72: 'refrigerator', 73: 'book', 74: 'clock', 75: 'vase', 76: 'scissors', 77: 'teddy bear', 78: 'hair drier', 79: 'toothbrush' } labels = [ f"{category_dict[class_id]} {confidence:.2f}" for class_id, confidence in zip(detections.class_id, detections.confidence) ] annotated_image = box_annotator.annotate( image.copy(), detections=detections, labels=labels ) Image.fromarray(annotated_image) ``` This shows the following: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f1158120c833276f61f1a84/hjN882Pbbb9Y13KAO__Wd.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f1158120c833276f61f1a84/IL9mL4_WUdcSxRQ7AsrTT.png) ### BibTeX Entry and Citation Info ``` @misc{wang2024yolov10, title={YOLOv10: Real-Time End-to-End Object Detection}, author={Ao Wang and Hui Chen and Lihao Liu and Kai Chen and Zijia Lin and Jungong Han and Guiguang Ding}, year={2024}, eprint={2405.14458}, archivePrefix={arXiv}, primaryClass={cs.CV} } ```
[ "BEAR" ]
Chan-Y/Cyber-Stable-Realistic
Chan-Y
text-to-image
[ "diffusers", "safetensors", "art", "license:mit", "diffusers:StableDiffusion3Pipeline", "region:us" ]
2024-06-30T18:41:14Z
2024-07-03T17:46:26+00:00
18
1
--- library_name: diffusers license: mit tags: - art --- ### Model Description This model combines the capabilities of the stable diffusion medium model with a Civit AI text-to-image model fine-tuned on a custom dataset of high-quality images. It aims to generate realistic and detailed images based on textual prompts. ![batman](imgs/007_resized.png) - **Developed by:** [M.Cihan Yalçın](https://www.linkedin.com/in/chanyalcin/) - **Model type:** Stable Diffusion - **License:** MIT - **Finetuned from models:** - [stabilityai/stable-diffusion-3-medium-diffusers](https://huggingface.co/stabilityai/stable-diffusion-3-medium) - [CyberRealistic](https://civitai.com/models/15003/cyberrealistic) ![008.png](imgs/008.png) ![009.png](imgs/009.png) ## Uses ### Direct Use ```python from diffusers import DiffusionPipeline import torch pipeline = DiffusionPipeline.from_pretrained( "Chan-Y/Cyber-Stable-Realistic", torch_dtype=torch.float16).to("cuda") prompt = "A bowl of ramen shaped like a cute kawaii bear, by Feng Zikai" negative = "" image = pipeline(prompt, negative_prompt=negative).images[0] image ``` ## Bias, Risks, and Limitations - The model may not always perfectly capture highly complex or abstract concepts. - The quality of the output can be influenced by the specificity and clarity of the prompt. - Ethical considerations should be taken into account when generating images to avoid misuse. ## Finetuning Details ### Finetuning Data - The model is fine-tuned with synthetic high-quality images collected from high-performance text-to-image models.
[ "BEAR" ]
minchyeom/MemeGPT
minchyeom
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "meme", "conversational", "en", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-07-15T06:50:06Z
2024-07-16T02:16:14+00:00
18
2
--- language: - en library_name: transformers license: apache-2.0 tags: - meme --- This is NOT MemGPT, this is **Meme**GPT. When using it, please put this as system prompt: ``` You are a witty AI assistant specializing in joke creation. Always respond with a joke, regardless of the input or topic. Craft jokes suitable for a general audience. Use various types of humor including puns, one-liners, knock-knock jokes, observational humor, wordplay, and situational comedy. Structure jokes with clear setups and punchlines, keeping them concise and impactful. Incorporate given topics into your jokes when possible. Reframe questions as jokes while addressing their essence. Employ misdirection and surprise to enhance humor. Never explain jokes or break character – always stay in joke mode. Provide unique jokes for multiple responses. Be creative and original, avoiding common, overused jokes. Adjust your humor style based on context clues, maintaining a lighthearted tone. Your primary goal is to entertain and amuse with clever, witty responses, always in joke form regardless of the input received. ```
[ "CRAFT" ]
niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF
niancheng
sentence-similarity
[ "sentence-transformers", "gguf", "mteb", "transformers", "Qwen2", "sentence-similarity", "llama-cpp", "gguf-my-repo", "base_model:Alibaba-NLP/gte-Qwen2-7B-instruct", "base_model:quantized:Alibaba-NLP/gte-Qwen2-7B-instruct", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us", "conversational" ]
2024-07-15T08:07:22Z
2024-07-15T08:07:43+00:00
18
0
--- base_model: Alibaba-NLP/gte-Qwen2-7B-instruct license: apache-2.0 tags: - mteb - sentence-transformers - transformers - Qwen2 - sentence-similarity - llama-cpp - gguf-my-repo model-index: - name: gte-qwen2-7B-instruct results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 91.31343283582089 - type: ap value: 67.64251402604096 - type: f1 value: 87.53372530755692 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 97.497825 - type: ap value: 96.30329547047529 - type: f1 value: 97.49769793778039 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 62.564 - type: f1 value: 60.975777935041066 - task: type: Retrieval dataset: name: MTEB ArguAna type: mteb/arguana config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: map_at_1 value: 36.486000000000004 - type: map_at_10 value: 54.842 - type: map_at_100 value: 55.206999999999994 - type: map_at_1000 value: 55.206999999999994 - type: map_at_3 value: 49.893 - type: map_at_5 value: 53.105000000000004 - type: mrr_at_1 value: 37.34 - type: mrr_at_10 value: 55.143 - type: mrr_at_100 value: 55.509 - type: mrr_at_1000 value: 55.509 - type: mrr_at_3 value: 50.212999999999994 - type: mrr_at_5 value: 53.432 - type: ndcg_at_1 value: 36.486000000000004 - type: ndcg_at_10 value: 64.273 - type: ndcg_at_100 value: 65.66199999999999 - type: ndcg_at_1000 value: 65.66199999999999 - type: ndcg_at_3 value: 54.352999999999994 - type: ndcg_at_5 value: 60.131 - type: precision_at_1 value: 
36.486000000000004 - type: precision_at_10 value: 9.395000000000001 - type: precision_at_100 value: 0.996 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 22.428 - type: precision_at_5 value: 16.259 - type: recall_at_1 value: 36.486000000000004 - type: recall_at_10 value: 93.95400000000001 - type: recall_at_100 value: 99.644 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 67.283 - type: recall_at_5 value: 81.294 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 56.461169803700564 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 51.73600434466286 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 67.57827065898053 - type: mrr value: 79.08136569493911 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 83.53324575999243 - type: cos_sim_spearman value: 81.37173362822374 - type: euclidean_pearson value: 82.19243335103444 - type: euclidean_spearman value: 81.33679307304334 - type: manhattan_pearson value: 82.38752665975699 - type: manhattan_spearman value: 81.31510583189689 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 87.56818181818181 - type: f1 value: 87.25826722019875 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p 
config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 50.09239610327673 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 46.64733054606282 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: f46a197baaae43b4f621051089b82a364682dfeb metrics: - type: map_at_1 value: 33.997 - type: map_at_10 value: 48.176 - type: map_at_100 value: 49.82 - type: map_at_1000 value: 49.924 - type: map_at_3 value: 43.626 - type: map_at_5 value: 46.275 - type: mrr_at_1 value: 42.059999999999995 - type: mrr_at_10 value: 53.726 - type: mrr_at_100 value: 54.398 - type: mrr_at_1000 value: 54.416 - type: mrr_at_3 value: 50.714999999999996 - type: mrr_at_5 value: 52.639 - type: ndcg_at_1 value: 42.059999999999995 - type: ndcg_at_10 value: 55.574999999999996 - type: ndcg_at_100 value: 60.744 - type: ndcg_at_1000 value: 61.85699999999999 - type: ndcg_at_3 value: 49.363 - type: ndcg_at_5 value: 52.44 - type: precision_at_1 value: 42.059999999999995 - type: precision_at_10 value: 11.101999999999999 - type: precision_at_100 value: 1.73 - type: precision_at_1000 value: 0.218 - type: precision_at_3 value: 24.464 - type: precision_at_5 value: 18.026 - type: recall_at_1 value: 33.997 - type: recall_at_10 value: 70.35900000000001 - type: recall_at_100 value: 91.642 - type: recall_at_1000 value: 97.977 - type: recall_at_3 value: 52.76 - type: recall_at_5 value: 61.148 - task: type: Retrieval dataset: name: MTEB CQADupstackEnglishRetrieval type: BeIR/cqadupstack config: default split: test revision: ad9991cb51e31e31e430383c75ffb2885547b5f0 metrics: - type: map_at_1 value: 35.884 - type: map_at_10 value: 48.14 - type: map_at_100 value: 49.5 - type: map_at_1000 value: 49.63 - type: map_at_3 value: 44.646 - 
type: map_at_5 value: 46.617999999999995 - type: mrr_at_1 value: 44.458999999999996 - type: mrr_at_10 value: 53.751000000000005 - type: mrr_at_100 value: 54.37800000000001 - type: mrr_at_1000 value: 54.415 - type: mrr_at_3 value: 51.815 - type: mrr_at_5 value: 52.882 - type: ndcg_at_1 value: 44.458999999999996 - type: ndcg_at_10 value: 54.157 - type: ndcg_at_100 value: 58.362 - type: ndcg_at_1000 value: 60.178 - type: ndcg_at_3 value: 49.661 - type: ndcg_at_5 value: 51.74999999999999 - type: precision_at_1 value: 44.458999999999996 - type: precision_at_10 value: 10.248 - type: precision_at_100 value: 1.5890000000000002 - type: precision_at_1000 value: 0.207 - type: precision_at_3 value: 23.928 - type: precision_at_5 value: 16.878999999999998 - type: recall_at_1 value: 35.884 - type: recall_at_10 value: 64.798 - type: recall_at_100 value: 82.345 - type: recall_at_1000 value: 93.267 - type: recall_at_3 value: 51.847 - type: recall_at_5 value: 57.601 - task: type: Retrieval dataset: name: MTEB CQADupstackGamingRetrieval type: BeIR/cqadupstack config: default split: test revision: 4885aa143210c98657558c04aaf3dc47cfb54340 metrics: - type: map_at_1 value: 39.383 - type: map_at_10 value: 53.714 - type: map_at_100 value: 54.838 - type: map_at_1000 value: 54.87800000000001 - type: map_at_3 value: 50.114999999999995 - type: map_at_5 value: 52.153000000000006 - type: mrr_at_1 value: 45.016 - type: mrr_at_10 value: 56.732000000000006 - type: mrr_at_100 value: 57.411 - type: mrr_at_1000 value: 57.431 - type: mrr_at_3 value: 54.044000000000004 - type: mrr_at_5 value: 55.639 - type: ndcg_at_1 value: 45.016 - type: ndcg_at_10 value: 60.228 - type: ndcg_at_100 value: 64.277 - type: ndcg_at_1000 value: 65.07 - type: ndcg_at_3 value: 54.124 - type: ndcg_at_5 value: 57.147000000000006 - type: precision_at_1 value: 45.016 - type: precision_at_10 value: 9.937 - type: precision_at_100 value: 1.288 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 
24.471999999999998 - type: precision_at_5 value: 16.991 - type: recall_at_1 value: 39.383 - type: recall_at_10 value: 76.175 - type: recall_at_100 value: 93.02 - type: recall_at_1000 value: 98.60900000000001 - type: recall_at_3 value: 60.265 - type: recall_at_5 value: 67.46600000000001 - task: type: Retrieval dataset: name: MTEB CQADupstackGisRetrieval type: BeIR/cqadupstack config: default split: test revision: 5003b3064772da1887988e05400cf3806fe491f2 metrics: - type: map_at_1 value: 27.426000000000002 - type: map_at_10 value: 37.397000000000006 - type: map_at_100 value: 38.61 - type: map_at_1000 value: 38.678000000000004 - type: map_at_3 value: 34.150999999999996 - type: map_at_5 value: 36.137 - type: mrr_at_1 value: 29.944 - type: mrr_at_10 value: 39.654 - type: mrr_at_100 value: 40.638000000000005 - type: mrr_at_1000 value: 40.691 - type: mrr_at_3 value: 36.817 - type: mrr_at_5 value: 38.524 - type: ndcg_at_1 value: 29.944 - type: ndcg_at_10 value: 43.094 - type: ndcg_at_100 value: 48.789 - type: ndcg_at_1000 value: 50.339999999999996 - type: ndcg_at_3 value: 36.984 - type: ndcg_at_5 value: 40.248 - type: precision_at_1 value: 29.944 - type: precision_at_10 value: 6.78 - type: precision_at_100 value: 1.024 - type: precision_at_1000 value: 0.11800000000000001 - type: precision_at_3 value: 15.895000000000001 - type: precision_at_5 value: 11.39 - type: recall_at_1 value: 27.426000000000002 - type: recall_at_10 value: 58.464000000000006 - type: recall_at_100 value: 84.193 - type: recall_at_1000 value: 95.52000000000001 - type: recall_at_3 value: 42.172 - type: recall_at_5 value: 50.101 - task: type: Retrieval dataset: name: MTEB CQADupstackMathematicaRetrieval type: BeIR/cqadupstack config: default split: test revision: 90fceea13679c63fe563ded68f3b6f06e50061de metrics: - type: map_at_1 value: 19.721 - type: map_at_10 value: 31.604 - type: map_at_100 value: 32.972 - type: map_at_1000 value: 33.077 - type: map_at_3 value: 27.218999999999998 - type: map_at_5 value: 
29.53 - type: mrr_at_1 value: 25.0 - type: mrr_at_10 value: 35.843 - type: mrr_at_100 value: 36.785000000000004 - type: mrr_at_1000 value: 36.842000000000006 - type: mrr_at_3 value: 32.193 - type: mrr_at_5 value: 34.264 - type: ndcg_at_1 value: 25.0 - type: ndcg_at_10 value: 38.606 - type: ndcg_at_100 value: 44.272 - type: ndcg_at_1000 value: 46.527 - type: ndcg_at_3 value: 30.985000000000003 - type: ndcg_at_5 value: 34.43 - type: precision_at_1 value: 25.0 - type: precision_at_10 value: 7.811 - type: precision_at_100 value: 1.203 - type: precision_at_1000 value: 0.15 - type: precision_at_3 value: 15.423 - type: precision_at_5 value: 11.791 - type: recall_at_1 value: 19.721 - type: recall_at_10 value: 55.625 - type: recall_at_100 value: 79.34400000000001 - type: recall_at_1000 value: 95.208 - type: recall_at_3 value: 35.19 - type: recall_at_5 value: 43.626 - task: type: Retrieval dataset: name: MTEB CQADupstackPhysicsRetrieval type: BeIR/cqadupstack config: default split: test revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4 metrics: - type: map_at_1 value: 33.784 - type: map_at_10 value: 47.522 - type: map_at_100 value: 48.949999999999996 - type: map_at_1000 value: 49.038 - type: map_at_3 value: 43.284 - type: map_at_5 value: 45.629 - type: mrr_at_1 value: 41.482 - type: mrr_at_10 value: 52.830999999999996 - type: mrr_at_100 value: 53.559999999999995 - type: mrr_at_1000 value: 53.588 - type: mrr_at_3 value: 50.016000000000005 - type: mrr_at_5 value: 51.614000000000004 - type: ndcg_at_1 value: 41.482 - type: ndcg_at_10 value: 54.569 - type: ndcg_at_100 value: 59.675999999999995 - type: ndcg_at_1000 value: 60.989000000000004 - type: ndcg_at_3 value: 48.187000000000005 - type: ndcg_at_5 value: 51.183 - type: precision_at_1 value: 41.482 - type: precision_at_10 value: 10.221 - type: precision_at_100 value: 1.486 - type: precision_at_1000 value: 0.17500000000000002 - type: precision_at_3 value: 23.548 - type: precision_at_5 value: 16.805 - type: recall_at_1 value: 
33.784 - type: recall_at_10 value: 69.798 - type: recall_at_100 value: 90.098 - type: recall_at_1000 value: 98.176 - type: recall_at_3 value: 52.127 - type: recall_at_5 value: 59.861 - task: type: Retrieval dataset: name: MTEB CQADupstackProgrammersRetrieval type: BeIR/cqadupstack config: default split: test revision: 6184bc1440d2dbc7612be22b50686b8826d22b32 metrics: - type: map_at_1 value: 28.038999999999998 - type: map_at_10 value: 41.904 - type: map_at_100 value: 43.36 - type: map_at_1000 value: 43.453 - type: map_at_3 value: 37.785999999999994 - type: map_at_5 value: 40.105000000000004 - type: mrr_at_1 value: 35.046 - type: mrr_at_10 value: 46.926 - type: mrr_at_100 value: 47.815000000000005 - type: mrr_at_1000 value: 47.849000000000004 - type: mrr_at_3 value: 44.273 - type: mrr_at_5 value: 45.774 - type: ndcg_at_1 value: 35.046 - type: ndcg_at_10 value: 48.937000000000005 - type: ndcg_at_100 value: 54.544000000000004 - type: ndcg_at_1000 value: 56.069 - type: ndcg_at_3 value: 42.858000000000004 - type: ndcg_at_5 value: 45.644 - type: precision_at_1 value: 35.046 - type: precision_at_10 value: 9.452 - type: precision_at_100 value: 1.429 - type: precision_at_1000 value: 0.173 - type: precision_at_3 value: 21.346999999999998 - type: precision_at_5 value: 15.342 - type: recall_at_1 value: 28.038999999999998 - type: recall_at_10 value: 64.59700000000001 - type: recall_at_100 value: 87.735 - type: recall_at_1000 value: 97.41300000000001 - type: recall_at_3 value: 47.368 - type: recall_at_5 value: 54.93900000000001 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval type: BeIR/cqadupstack config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: map_at_1 value: 28.17291666666667 - type: map_at_10 value: 40.025749999999995 - type: map_at_100 value: 41.39208333333333 - type: map_at_1000 value: 41.499249999999996 - type: map_at_3 value: 36.347 - type: map_at_5 value: 38.41391666666667 - type: mrr_at_1 value: 33.65925 - 
type: mrr_at_10 value: 44.085499999999996 - type: mrr_at_100 value: 44.94116666666667 - type: mrr_at_1000 value: 44.9855 - type: mrr_at_3 value: 41.2815 - type: mrr_at_5 value: 42.91491666666666 - type: ndcg_at_1 value: 33.65925 - type: ndcg_at_10 value: 46.430833333333325 - type: ndcg_at_100 value: 51.761 - type: ndcg_at_1000 value: 53.50899999999999 - type: ndcg_at_3 value: 40.45133333333333 - type: ndcg_at_5 value: 43.31483333333334 - type: precision_at_1 value: 33.65925 - type: precision_at_10 value: 8.4995 - type: precision_at_100 value: 1.3210000000000004 - type: precision_at_1000 value: 0.16591666666666666 - type: precision_at_3 value: 19.165083333333335 - type: precision_at_5 value: 13.81816666666667 - type: recall_at_1 value: 28.17291666666667 - type: recall_at_10 value: 61.12624999999999 - type: recall_at_100 value: 83.97266666666667 - type: recall_at_1000 value: 95.66550000000001 - type: recall_at_3 value: 44.661249999999995 - type: recall_at_5 value: 51.983333333333334 - type: map_at_1 value: 17.936 - type: map_at_10 value: 27.399 - type: map_at_100 value: 28.632 - type: map_at_1000 value: 28.738000000000003 - type: map_at_3 value: 24.456 - type: map_at_5 value: 26.06 - type: mrr_at_1 value: 19.224 - type: mrr_at_10 value: 28.998 - type: mrr_at_100 value: 30.11 - type: mrr_at_1000 value: 30.177 - type: mrr_at_3 value: 26.247999999999998 - type: mrr_at_5 value: 27.708 - type: ndcg_at_1 value: 19.224 - type: ndcg_at_10 value: 32.911 - type: ndcg_at_100 value: 38.873999999999995 - type: ndcg_at_1000 value: 41.277 - type: ndcg_at_3 value: 27.142 - type: ndcg_at_5 value: 29.755 - type: precision_at_1 value: 19.224 - type: precision_at_10 value: 5.6930000000000005 - type: precision_at_100 value: 0.9259999999999999 - type: precision_at_1000 value: 0.126 - type: precision_at_3 value: 12.138 - type: precision_at_5 value: 8.909 - type: recall_at_1 value: 17.936 - type: recall_at_10 value: 48.096 - type: recall_at_100 value: 75.389 - type: recall_at_1000 value: 
92.803 - type: recall_at_3 value: 32.812999999999995 - type: recall_at_5 value: 38.851 - task: type: Retrieval dataset: name: MTEB CQADupstackStatsRetrieval type: BeIR/cqadupstack config: default split: test revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a metrics: - type: map_at_1 value: 24.681 - type: map_at_10 value: 34.892 - type: map_at_100 value: 35.996 - type: map_at_1000 value: 36.083 - type: map_at_3 value: 31.491999999999997 - type: map_at_5 value: 33.632 - type: mrr_at_1 value: 28.528 - type: mrr_at_10 value: 37.694 - type: mrr_at_100 value: 38.613 - type: mrr_at_1000 value: 38.668 - type: mrr_at_3 value: 34.714 - type: mrr_at_5 value: 36.616 - type: ndcg_at_1 value: 28.528 - type: ndcg_at_10 value: 40.703 - type: ndcg_at_100 value: 45.993 - type: ndcg_at_1000 value: 47.847 - type: ndcg_at_3 value: 34.622 - type: ndcg_at_5 value: 38.035999999999994 - type: precision_at_1 value: 28.528 - type: precision_at_10 value: 6.902 - type: precision_at_100 value: 1.0370000000000001 - type: precision_at_1000 value: 0.126 - type: precision_at_3 value: 15.798000000000002 - type: precision_at_5 value: 11.655999999999999 - type: recall_at_1 value: 24.681 - type: recall_at_10 value: 55.81 - type: recall_at_100 value: 79.785 - type: recall_at_1000 value: 92.959 - type: recall_at_3 value: 39.074 - type: recall_at_5 value: 47.568 - task: type: Retrieval dataset: name: MTEB CQADupstackTexRetrieval type: BeIR/cqadupstack config: default split: test revision: 46989137a86843e03a6195de44b09deda022eec7 metrics: - type: map_at_1 value: 18.627 - type: map_at_10 value: 27.872000000000003 - type: map_at_100 value: 29.237999999999996 - type: map_at_1000 value: 29.363 - type: map_at_3 value: 24.751 - type: map_at_5 value: 26.521 - type: mrr_at_1 value: 23.021 - type: mrr_at_10 value: 31.924000000000003 - type: mrr_at_100 value: 32.922000000000004 - type: mrr_at_1000 value: 32.988 - type: mrr_at_3 value: 29.192 - type: mrr_at_5 value: 30.798 - type: ndcg_at_1 value: 23.021 - type: 
ndcg_at_10 value: 33.535 - type: ndcg_at_100 value: 39.732 - type: ndcg_at_1000 value: 42.201 - type: ndcg_at_3 value: 28.153 - type: ndcg_at_5 value: 30.746000000000002 - type: precision_at_1 value: 23.021 - type: precision_at_10 value: 6.459 - type: precision_at_100 value: 1.1320000000000001 - type: precision_at_1000 value: 0.153 - type: precision_at_3 value: 13.719000000000001 - type: precision_at_5 value: 10.193000000000001 - type: recall_at_1 value: 18.627 - type: recall_at_10 value: 46.463 - type: recall_at_100 value: 74.226 - type: recall_at_1000 value: 91.28500000000001 - type: recall_at_3 value: 31.357000000000003 - type: recall_at_5 value: 38.067 - task: type: Retrieval dataset: name: MTEB CQADupstackUnixRetrieval type: BeIR/cqadupstack config: default split: test revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53 metrics: - type: map_at_1 value: 31.457 - type: map_at_10 value: 42.888 - type: map_at_100 value: 44.24 - type: map_at_1000 value: 44.327 - type: map_at_3 value: 39.588 - type: map_at_5 value: 41.423 - type: mrr_at_1 value: 37.126999999999995 - type: mrr_at_10 value: 47.083000000000006 - type: mrr_at_100 value: 47.997 - type: mrr_at_1000 value: 48.044 - type: mrr_at_3 value: 44.574000000000005 - type: mrr_at_5 value: 46.202 - type: ndcg_at_1 value: 37.126999999999995 - type: ndcg_at_10 value: 48.833 - type: ndcg_at_100 value: 54.327000000000005 - type: ndcg_at_1000 value: 56.011 - type: ndcg_at_3 value: 43.541999999999994 - type: ndcg_at_5 value: 46.127 - type: precision_at_1 value: 37.126999999999995 - type: precision_at_10 value: 8.376999999999999 - type: precision_at_100 value: 1.2309999999999999 - type: precision_at_1000 value: 0.146 - type: precision_at_3 value: 20.211000000000002 - type: precision_at_5 value: 14.16 - type: recall_at_1 value: 31.457 - type: recall_at_10 value: 62.369 - type: recall_at_100 value: 85.444 - type: recall_at_1000 value: 96.65599999999999 - type: recall_at_3 value: 47.961 - type: recall_at_5 value: 54.676 - task: 
type: Retrieval dataset: name: MTEB CQADupstackWebmastersRetrieval type: BeIR/cqadupstack config: default split: test revision: 160c094312a0e1facb97e55eeddb698c0abe3571 metrics: - type: map_at_1 value: 27.139999999999997 - type: map_at_10 value: 38.801 - type: map_at_100 value: 40.549 - type: map_at_1000 value: 40.802 - type: map_at_3 value: 35.05 - type: map_at_5 value: 36.884 - type: mrr_at_1 value: 33.004 - type: mrr_at_10 value: 43.864 - type: mrr_at_100 value: 44.667 - type: mrr_at_1000 value: 44.717 - type: mrr_at_3 value: 40.777 - type: mrr_at_5 value: 42.319 - type: ndcg_at_1 value: 33.004 - type: ndcg_at_10 value: 46.022 - type: ndcg_at_100 value: 51.542 - type: ndcg_at_1000 value: 53.742000000000004 - type: ndcg_at_3 value: 39.795 - type: ndcg_at_5 value: 42.272 - type: precision_at_1 value: 33.004 - type: precision_at_10 value: 9.012 - type: precision_at_100 value: 1.7770000000000001 - type: precision_at_1000 value: 0.26 - type: precision_at_3 value: 19.038 - type: precision_at_5 value: 13.675999999999998 - type: recall_at_1 value: 27.139999999999997 - type: recall_at_10 value: 60.961 - type: recall_at_100 value: 84.451 - type: recall_at_1000 value: 98.113 - type: recall_at_3 value: 43.001 - type: recall_at_5 value: 49.896 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: mteb/climate-fever config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: map_at_1 value: 22.076999999999998 - type: map_at_10 value: 35.44 - type: map_at_100 value: 37.651 - type: map_at_1000 value: 37.824999999999996 - type: map_at_3 value: 30.764999999999997 - type: map_at_5 value: 33.26 - type: mrr_at_1 value: 50.163000000000004 - type: mrr_at_10 value: 61.207 - type: mrr_at_100 value: 61.675000000000004 - type: mrr_at_1000 value: 61.692 - type: mrr_at_3 value: 58.60999999999999 - type: mrr_at_5 value: 60.307 - type: ndcg_at_1 value: 50.163000000000004 - type: ndcg_at_10 value: 45.882 - type: ndcg_at_100 value: 53.239999999999995 
- type: ndcg_at_1000 value: 55.852000000000004 - type: ndcg_at_3 value: 40.514 - type: ndcg_at_5 value: 42.038 - type: precision_at_1 value: 50.163000000000004 - type: precision_at_10 value: 13.466000000000001 - type: precision_at_100 value: 2.164 - type: precision_at_1000 value: 0.266 - type: precision_at_3 value: 29.707 - type: precision_at_5 value: 21.694 - type: recall_at_1 value: 22.076999999999998 - type: recall_at_10 value: 50.193 - type: recall_at_100 value: 74.993 - type: recall_at_1000 value: 89.131 - type: recall_at_3 value: 35.472 - type: recall_at_5 value: 41.814 - task: type: Retrieval dataset: name: MTEB DBPedia type: mteb/dbpedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: map_at_1 value: 9.953 - type: map_at_10 value: 24.515 - type: map_at_100 value: 36.173 - type: map_at_1000 value: 38.351 - type: map_at_3 value: 16.592000000000002 - type: map_at_5 value: 20.036 - type: mrr_at_1 value: 74.25 - type: mrr_at_10 value: 81.813 - type: mrr_at_100 value: 82.006 - type: mrr_at_1000 value: 82.011 - type: mrr_at_3 value: 80.875 - type: mrr_at_5 value: 81.362 - type: ndcg_at_1 value: 62.5 - type: ndcg_at_10 value: 52.42 - type: ndcg_at_100 value: 56.808 - type: ndcg_at_1000 value: 63.532999999999994 - type: ndcg_at_3 value: 56.654 - type: ndcg_at_5 value: 54.18300000000001 - type: precision_at_1 value: 74.25 - type: precision_at_10 value: 42.699999999999996 - type: precision_at_100 value: 13.675 - type: precision_at_1000 value: 2.664 - type: precision_at_3 value: 60.5 - type: precision_at_5 value: 52.800000000000004 - type: recall_at_1 value: 9.953 - type: recall_at_10 value: 30.253999999999998 - type: recall_at_100 value: 62.516000000000005 - type: recall_at_1000 value: 84.163 - type: recall_at_3 value: 18.13 - type: recall_at_5 value: 22.771 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 
4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 79.455 - type: f1 value: 74.16798697647569 - task: type: Retrieval dataset: name: MTEB FEVER type: mteb/fever config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: map_at_1 value: 87.531 - type: map_at_10 value: 93.16799999999999 - type: map_at_100 value: 93.341 - type: map_at_1000 value: 93.349 - type: map_at_3 value: 92.444 - type: map_at_5 value: 92.865 - type: mrr_at_1 value: 94.014 - type: mrr_at_10 value: 96.761 - type: mrr_at_100 value: 96.762 - type: mrr_at_1000 value: 96.762 - type: mrr_at_3 value: 96.672 - type: mrr_at_5 value: 96.736 - type: ndcg_at_1 value: 94.014 - type: ndcg_at_10 value: 95.112 - type: ndcg_at_100 value: 95.578 - type: ndcg_at_1000 value: 95.68900000000001 - type: ndcg_at_3 value: 94.392 - type: ndcg_at_5 value: 94.72500000000001 - type: precision_at_1 value: 94.014 - type: precision_at_10 value: 11.065 - type: precision_at_100 value: 1.157 - type: precision_at_1000 value: 0.11800000000000001 - type: precision_at_3 value: 35.259 - type: precision_at_5 value: 21.599 - type: recall_at_1 value: 87.531 - type: recall_at_10 value: 97.356 - type: recall_at_100 value: 98.965 - type: recall_at_1000 value: 99.607 - type: recall_at_3 value: 95.312 - type: recall_at_5 value: 96.295 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: mteb/fiqa config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: map_at_1 value: 32.055 - type: map_at_10 value: 53.114 - type: map_at_100 value: 55.235 - type: map_at_1000 value: 55.345 - type: map_at_3 value: 45.854 - type: map_at_5 value: 50.025 - type: mrr_at_1 value: 60.34 - type: mrr_at_10 value: 68.804 - type: mrr_at_100 value: 69.309 - type: mrr_at_1000 value: 69.32199999999999 - type: mrr_at_3 value: 66.40899999999999 - type: mrr_at_5 value: 67.976 - type: ndcg_at_1 value: 60.34 - type: ndcg_at_10 value: 62.031000000000006 - type: ndcg_at_100 
value: 68.00500000000001 - type: ndcg_at_1000 value: 69.286 - type: ndcg_at_3 value: 56.355999999999995 - type: ndcg_at_5 value: 58.687 - type: precision_at_1 value: 60.34 - type: precision_at_10 value: 17.176 - type: precision_at_100 value: 2.36 - type: precision_at_1000 value: 0.259 - type: precision_at_3 value: 37.14 - type: precision_at_5 value: 27.809 - type: recall_at_1 value: 32.055 - type: recall_at_10 value: 70.91 - type: recall_at_100 value: 91.83 - type: recall_at_1000 value: 98.871 - type: recall_at_3 value: 51.202999999999996 - type: recall_at_5 value: 60.563 - task: type: Retrieval dataset: name: MTEB HotpotQA type: mteb/hotpotqa config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: map_at_1 value: 43.68 - type: map_at_10 value: 64.389 - type: map_at_100 value: 65.24 - type: map_at_1000 value: 65.303 - type: map_at_3 value: 61.309000000000005 - type: map_at_5 value: 63.275999999999996 - type: mrr_at_1 value: 87.36 - type: mrr_at_10 value: 91.12 - type: mrr_at_100 value: 91.227 - type: mrr_at_1000 value: 91.229 - type: mrr_at_3 value: 90.57600000000001 - type: mrr_at_5 value: 90.912 - type: ndcg_at_1 value: 87.36 - type: ndcg_at_10 value: 73.076 - type: ndcg_at_100 value: 75.895 - type: ndcg_at_1000 value: 77.049 - type: ndcg_at_3 value: 68.929 - type: ndcg_at_5 value: 71.28 - type: precision_at_1 value: 87.36 - type: precision_at_10 value: 14.741000000000001 - type: precision_at_100 value: 1.694 - type: precision_at_1000 value: 0.185 - type: precision_at_3 value: 43.043 - type: precision_at_5 value: 27.681 - type: recall_at_1 value: 43.68 - type: recall_at_10 value: 73.707 - type: recall_at_100 value: 84.7 - type: recall_at_1000 value: 92.309 - type: recall_at_3 value: 64.564 - type: recall_at_5 value: 69.203 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 
96.75399999999999 - type: ap value: 95.29389839242187 - type: f1 value: 96.75348377433475 - task: type: Retrieval dataset: name: MTEB MSMARCO type: mteb/msmarco config: default split: dev revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: map_at_1 value: 25.176 - type: map_at_10 value: 38.598 - type: map_at_100 value: 39.707 - type: map_at_1000 value: 39.744 - type: map_at_3 value: 34.566 - type: map_at_5 value: 36.863 - type: mrr_at_1 value: 25.874000000000002 - type: mrr_at_10 value: 39.214 - type: mrr_at_100 value: 40.251 - type: mrr_at_1000 value: 40.281 - type: mrr_at_3 value: 35.291 - type: mrr_at_5 value: 37.545 - type: ndcg_at_1 value: 25.874000000000002 - type: ndcg_at_10 value: 45.98 - type: ndcg_at_100 value: 51.197 - type: ndcg_at_1000 value: 52.073 - type: ndcg_at_3 value: 37.785999999999994 - type: ndcg_at_5 value: 41.870000000000005 - type: precision_at_1 value: 25.874000000000002 - type: precision_at_10 value: 7.181 - type: precision_at_100 value: 0.979 - type: precision_at_1000 value: 0.106 - type: precision_at_3 value: 16.051000000000002 - type: precision_at_5 value: 11.713 - type: recall_at_1 value: 25.176 - type: recall_at_10 value: 68.67699999999999 - type: recall_at_100 value: 92.55 - type: recall_at_1000 value: 99.164 - type: recall_at_3 value: 46.372 - type: recall_at_5 value: 56.16 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 99.03784769721841 - type: f1 value: 98.97791641821495 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 91.88326493388054 - type: f1 value: 73.74809928034335 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en 
split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 85.41358439811701 - type: f1 value: 83.503679460639 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 89.77135171486215 - type: f1 value: 88.89843747468366 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 46.22695362087359 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 44.132372165849425 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 33.35680810650402 - type: mrr value: 34.72625715637218 - task: type: Retrieval dataset: name: MTEB NFCorpus type: mteb/nfcorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: map_at_1 value: 7.165000000000001 - type: map_at_10 value: 15.424 - type: map_at_100 value: 20.28 - type: map_at_1000 value: 22.065 - type: map_at_3 value: 11.236 - type: map_at_5 value: 13.025999999999998 - type: mrr_at_1 value: 51.702999999999996 - type: mrr_at_10 value: 59.965 - type: mrr_at_100 value: 60.667 - type: mrr_at_1000 value: 60.702999999999996 - type: mrr_at_3 value: 58.772000000000006 - type: mrr_at_5 value: 59.267 - type: ndcg_at_1 value: 49.536 - type: ndcg_at_10 value: 40.6 - type: ndcg_at_100 value: 37.848 - type: ndcg_at_1000 value: 46.657 - type: ndcg_at_3 value: 46.117999999999995 - type: ndcg_at_5 value: 43.619 - type: precision_at_1 value: 51.393 - type: 
precision_at_10 value: 30.31 - type: precision_at_100 value: 9.972 - type: precision_at_1000 value: 2.329 - type: precision_at_3 value: 43.137 - type: precision_at_5 value: 37.585 - type: recall_at_1 value: 7.165000000000001 - type: recall_at_10 value: 19.689999999999998 - type: recall_at_100 value: 39.237 - type: recall_at_1000 value: 71.417 - type: recall_at_3 value: 12.247 - type: recall_at_5 value: 14.902999999999999 - task: type: Retrieval dataset: name: MTEB NQ type: mteb/nq config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: map_at_1 value: 42.653999999999996 - type: map_at_10 value: 59.611999999999995 - type: map_at_100 value: 60.32300000000001 - type: map_at_1000 value: 60.336 - type: map_at_3 value: 55.584999999999994 - type: map_at_5 value: 58.19 - type: mrr_at_1 value: 47.683 - type: mrr_at_10 value: 62.06700000000001 - type: mrr_at_100 value: 62.537 - type: mrr_at_1000 value: 62.544999999999995 - type: mrr_at_3 value: 59.178 - type: mrr_at_5 value: 61.034 - type: ndcg_at_1 value: 47.654 - type: ndcg_at_10 value: 67.001 - type: ndcg_at_100 value: 69.73899999999999 - type: ndcg_at_1000 value: 69.986 - type: ndcg_at_3 value: 59.95700000000001 - type: ndcg_at_5 value: 64.025 - type: precision_at_1 value: 47.654 - type: precision_at_10 value: 10.367999999999999 - type: precision_at_100 value: 1.192 - type: precision_at_1000 value: 0.121 - type: precision_at_3 value: 26.651000000000003 - type: precision_at_5 value: 18.459 - type: recall_at_1 value: 42.653999999999996 - type: recall_at_10 value: 86.619 - type: recall_at_100 value: 98.04899999999999 - type: recall_at_1000 value: 99.812 - type: recall_at_3 value: 68.987 - type: recall_at_5 value: 78.158 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: mteb/quora config: default split: test revision: None metrics: - type: map_at_1 value: 72.538 - type: map_at_10 value: 86.702 - type: map_at_100 value: 87.31 - type: map_at_1000 value: 87.323 - type: 
map_at_3 value: 83.87 - type: map_at_5 value: 85.682 - type: mrr_at_1 value: 83.31 - type: mrr_at_10 value: 89.225 - type: mrr_at_100 value: 89.30399999999999 - type: mrr_at_1000 value: 89.30399999999999 - type: mrr_at_3 value: 88.44300000000001 - type: mrr_at_5 value: 89.005 - type: ndcg_at_1 value: 83.32000000000001 - type: ndcg_at_10 value: 90.095 - type: ndcg_at_100 value: 91.12 - type: ndcg_at_1000 value: 91.179 - type: ndcg_at_3 value: 87.606 - type: ndcg_at_5 value: 89.031 - type: precision_at_1 value: 83.32000000000001 - type: precision_at_10 value: 13.641 - type: precision_at_100 value: 1.541 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 38.377 - type: precision_at_5 value: 25.162000000000003 - type: recall_at_1 value: 72.538 - type: recall_at_10 value: 96.47200000000001 - type: recall_at_100 value: 99.785 - type: recall_at_1000 value: 99.99900000000001 - type: recall_at_3 value: 89.278 - type: recall_at_5 value: 93.367 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 73.55219145406065 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 74.13437105242755 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: mteb/scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 6.873 - type: map_at_10 value: 17.944 - type: map_at_100 value: 21.171 - type: map_at_1000 value: 21.528 - type: map_at_3 value: 12.415 - type: map_at_5 value: 15.187999999999999 - type: mrr_at_1 value: 33.800000000000004 - type: mrr_at_10 value: 46.455 - type: mrr_at_100 value: 47.378 - type: mrr_at_1000 value: 47.394999999999996 - type: mrr_at_3 value: 42.367 - type: mrr_at_5 value: 44.972 - type: ndcg_at_1 value: 
33.800000000000004 - type: ndcg_at_10 value: 28.907 - type: ndcg_at_100 value: 39.695 - type: ndcg_at_1000 value: 44.582 - type: ndcg_at_3 value: 26.949 - type: ndcg_at_5 value: 23.988 - type: precision_at_1 value: 33.800000000000004 - type: precision_at_10 value: 15.079999999999998 - type: precision_at_100 value: 3.056 - type: precision_at_1000 value: 0.42100000000000004 - type: precision_at_3 value: 25.167 - type: precision_at_5 value: 21.26 - type: recall_at_1 value: 6.873 - type: recall_at_10 value: 30.568 - type: recall_at_100 value: 62.062 - type: recall_at_1000 value: 85.37700000000001 - type: recall_at_3 value: 15.312999999999999 - type: recall_at_5 value: 21.575 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 82.37009118256057 - type: cos_sim_spearman value: 79.27986395671529 - type: euclidean_pearson value: 79.18037715442115 - type: euclidean_spearman value: 79.28004791561621 - type: manhattan_pearson value: 79.34062972800541 - type: manhattan_spearman value: 79.43106695543402 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 87.48474767383833 - type: cos_sim_spearman value: 79.54505388752513 - type: euclidean_pearson value: 83.43282704179565 - type: euclidean_spearman value: 79.54579919925405 - type: manhattan_pearson value: 83.77564492427952 - type: manhattan_spearman value: 79.84558396989286 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 88.803698035802 - type: cos_sim_spearman value: 88.83451367754881 - type: euclidean_pearson value: 88.28939285711628 - type: euclidean_spearman value: 88.83528996073112 - type: manhattan_pearson value: 
88.28017412671795 - type: manhattan_spearman value: 88.9228828016344 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 85.27469288153428 - type: cos_sim_spearman value: 83.87477064876288 - type: euclidean_pearson value: 84.2601737035379 - type: euclidean_spearman value: 83.87431082479074 - type: manhattan_pearson value: 84.3621547772745 - type: manhattan_spearman value: 84.12094375000423 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 88.12749863201587 - type: cos_sim_spearman value: 88.54287568368565 - type: euclidean_pearson value: 87.90429700607999 - type: euclidean_spearman value: 88.5437689576261 - type: manhattan_pearson value: 88.19276653356833 - type: manhattan_spearman value: 88.99995393814679 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 85.68398747560902 - type: cos_sim_spearman value: 86.48815303460574 - type: euclidean_pearson value: 85.52356631237954 - type: euclidean_spearman value: 86.486391949551 - type: manhattan_pearson value: 85.67267981761788 - type: manhattan_spearman value: 86.7073696332485 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 88.9057107443124 - type: cos_sim_spearman value: 88.7312168757697 - type: euclidean_pearson value: 88.72810439714794 - type: euclidean_spearman value: 88.71976185854771 - type: manhattan_pearson value: 88.50433745949111 - type: manhattan_spearman value: 88.51726175544195 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts 
config: en split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 67.59391795109886 - type: cos_sim_spearman value: 66.87613008631367 - type: euclidean_pearson value: 69.23198488262217 - type: euclidean_spearman value: 66.85427723013692 - type: manhattan_pearson value: 69.50730124841084 - type: manhattan_spearman value: 67.10404669820792 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 87.0820605344619 - type: cos_sim_spearman value: 86.8518089863434 - type: euclidean_pearson value: 86.31087134689284 - type: euclidean_spearman value: 86.8518520517941 - type: manhattan_pearson value: 86.47203796160612 - type: manhattan_spearman value: 87.1080149734421 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 89.09255369305481 - type: mrr value: 97.10323445617563 - task: type: Retrieval dataset: name: MTEB SciFact type: mteb/scifact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: map_at_1 value: 61.260999999999996 - type: map_at_10 value: 74.043 - type: map_at_100 value: 74.37700000000001 - type: map_at_1000 value: 74.384 - type: map_at_3 value: 71.222 - type: map_at_5 value: 72.875 - type: mrr_at_1 value: 64.333 - type: mrr_at_10 value: 74.984 - type: mrr_at_100 value: 75.247 - type: mrr_at_1000 value: 75.25500000000001 - type: mrr_at_3 value: 73.167 - type: mrr_at_5 value: 74.35000000000001 - type: ndcg_at_1 value: 64.333 - type: ndcg_at_10 value: 79.06 - type: ndcg_at_100 value: 80.416 - type: ndcg_at_1000 value: 80.55600000000001 - type: ndcg_at_3 value: 74.753 - type: ndcg_at_5 value: 76.97500000000001 - type: precision_at_1 value: 64.333 - type: precision_at_10 value: 10.567 - type: 
precision_at_100 value: 1.1199999999999999 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 29.889 - type: precision_at_5 value: 19.533 - type: recall_at_1 value: 61.260999999999996 - type: recall_at_10 value: 93.167 - type: recall_at_100 value: 99.0 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 81.667 - type: recall_at_5 value: 87.394 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.71980198019801 - type: cos_sim_ap value: 92.81616007802704 - type: cos_sim_f1 value: 85.17548454688318 - type: cos_sim_precision value: 89.43894389438944 - type: cos_sim_recall value: 81.3 - type: dot_accuracy value: 99.71980198019801 - type: dot_ap value: 92.81398760591358 - type: dot_f1 value: 85.17548454688318 - type: dot_precision value: 89.43894389438944 - type: dot_recall value: 81.3 - type: euclidean_accuracy value: 99.71980198019801 - type: euclidean_ap value: 92.81560637245072 - type: euclidean_f1 value: 85.17548454688318 - type: euclidean_precision value: 89.43894389438944 - type: euclidean_recall value: 81.3 - type: manhattan_accuracy value: 99.73069306930694 - type: manhattan_ap value: 93.14005487480794 - type: manhattan_f1 value: 85.56263269639068 - type: manhattan_precision value: 91.17647058823529 - type: manhattan_recall value: 80.60000000000001 - type: max_accuracy value: 99.73069306930694 - type: max_ap value: 93.14005487480794 - type: max_f1 value: 85.56263269639068 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 79.86443362395185 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p 
config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 49.40897096662564 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 55.66040806627947 - type: mrr value: 56.58670475766064 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 31.51015090598575 - type: cos_sim_spearman value: 31.35016454939226 - type: dot_pearson value: 31.5150068731 - type: dot_spearman value: 31.34790869023487 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: mteb/trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.254 - type: map_at_10 value: 2.064 - type: map_at_100 value: 12.909 - type: map_at_1000 value: 31.761 - type: map_at_3 value: 0.738 - type: map_at_5 value: 1.155 - type: mrr_at_1 value: 96.0 - type: mrr_at_10 value: 98.0 - type: mrr_at_100 value: 98.0 - type: mrr_at_1000 value: 98.0 - type: mrr_at_3 value: 98.0 - type: mrr_at_5 value: 98.0 - type: ndcg_at_1 value: 93.0 - type: ndcg_at_10 value: 82.258 - type: ndcg_at_100 value: 64.34 - type: ndcg_at_1000 value: 57.912 - type: ndcg_at_3 value: 90.827 - type: ndcg_at_5 value: 86.79 - type: precision_at_1 value: 96.0 - type: precision_at_10 value: 84.8 - type: precision_at_100 value: 66.0 - type: precision_at_1000 value: 25.356 - type: precision_at_3 value: 94.667 - type: precision_at_5 value: 90.4 - type: recall_at_1 value: 0.254 - type: recall_at_10 value: 2.1950000000000003 - type: recall_at_100 value: 16.088 - type: recall_at_1000 value: 54.559000000000005 - type: recall_at_3 value: 0.75 - type: recall_at_5 value: 1.191 - task: type: Retrieval dataset: name: MTEB Touche2020 type: mteb/touche2020 config: default 
split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: map_at_1 value: 2.976 - type: map_at_10 value: 11.389000000000001 - type: map_at_100 value: 18.429000000000002 - type: map_at_1000 value: 20.113 - type: map_at_3 value: 6.483 - type: map_at_5 value: 8.770999999999999 - type: mrr_at_1 value: 40.816 - type: mrr_at_10 value: 58.118 - type: mrr_at_100 value: 58.489999999999995 - type: mrr_at_1000 value: 58.489999999999995 - type: mrr_at_3 value: 53.061 - type: mrr_at_5 value: 57.041 - type: ndcg_at_1 value: 40.816 - type: ndcg_at_10 value: 30.567 - type: ndcg_at_100 value: 42.44 - type: ndcg_at_1000 value: 53.480000000000004 - type: ndcg_at_3 value: 36.016 - type: ndcg_at_5 value: 34.257 - type: precision_at_1 value: 42.857 - type: precision_at_10 value: 25.714 - type: precision_at_100 value: 8.429 - type: precision_at_1000 value: 1.5939999999999999 - type: precision_at_3 value: 36.735 - type: precision_at_5 value: 33.878 - type: recall_at_1 value: 2.976 - type: recall_at_10 value: 17.854999999999997 - type: recall_at_100 value: 51.833 - type: recall_at_1000 value: 86.223 - type: recall_at_3 value: 7.887 - type: recall_at_5 value: 12.026 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 85.1174 - type: ap value: 30.169441069345748 - type: f1 value: 69.79254701873245 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 72.58347481607245 - type: f1 value: 72.74877295564937 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure 
value: 53.90586138221305 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 87.35769207844072 - type: cos_sim_ap value: 77.9645072410354 - type: cos_sim_f1 value: 71.32352941176471 - type: cos_sim_precision value: 66.5903890160183 - type: cos_sim_recall value: 76.78100263852242 - type: dot_accuracy value: 87.37557370209214 - type: dot_ap value: 77.96250046429908 - type: dot_f1 value: 71.28932757557064 - type: dot_precision value: 66.95249130938586 - type: dot_recall value: 76.22691292875989 - type: euclidean_accuracy value: 87.35173153722357 - type: euclidean_ap value: 77.96520460741593 - type: euclidean_f1 value: 71.32470733210104 - type: euclidean_precision value: 66.91329479768785 - type: euclidean_recall value: 76.35883905013192 - type: manhattan_accuracy value: 87.25636287774931 - type: manhattan_ap value: 77.77752485611796 - type: manhattan_f1 value: 71.18148599269183 - type: manhattan_precision value: 66.10859728506787 - type: manhattan_recall value: 77.0976253298153 - type: max_accuracy value: 87.37557370209214 - type: max_ap value: 77.96520460741593 - type: max_f1 value: 71.32470733210104 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.38176737687739 - type: cos_sim_ap value: 86.58811861657401 - type: cos_sim_f1 value: 79.09430644097604 - type: cos_sim_precision value: 75.45085977911366 - type: cos_sim_recall value: 83.10748383122882 - type: dot_accuracy value: 89.38370784336554 - type: dot_ap value: 86.58840606004333 - type: dot_f1 value: 79.10179860068133 - type: dot_precision value: 75.44546153308643 - type: dot_recall value: 83.13058207576223 - type: euclidean_accuracy value: 
89.38564830985369 - type: euclidean_ap value: 86.58820721061164 - type: euclidean_f1 value: 79.09070942235888 - type: euclidean_precision value: 75.38729937194697 - type: euclidean_recall value: 83.17677856482906 - type: manhattan_accuracy value: 89.40699344122326 - type: manhattan_ap value: 86.60631843011362 - type: manhattan_f1 value: 79.14949970570925 - type: manhattan_precision value: 75.78191039729502 - type: manhattan_recall value: 82.83030489682784 - type: max_accuracy value: 89.40699344122326 - type: max_ap value: 86.60631843011362 - type: max_f1 value: 79.14949970570925 - task: type: STS dataset: name: MTEB AFQMC type: C-MTEB/AFQMC config: default split: validation revision: b44c3b011063adb25877c13823db83bb193913c4 metrics: - type: cos_sim_pearson value: 65.58442135663871 - type: cos_sim_spearman value: 72.2538631361313 - type: euclidean_pearson value: 70.97255486607429 - type: euclidean_spearman value: 72.25374250228647 - type: manhattan_pearson value: 70.83250199989911 - type: manhattan_spearman value: 72.14819496536272 - task: type: STS dataset: name: MTEB ATEC type: C-MTEB/ATEC config: default split: test revision: 0f319b1142f28d00e055a6770f3f726ae9b7d865 metrics: - type: cos_sim_pearson value: 59.99478404929932 - type: cos_sim_spearman value: 62.61836216999812 - type: euclidean_pearson value: 66.86429811933593 - type: euclidean_spearman value: 62.6183520374191 - type: manhattan_pearson value: 66.8063778911633 - type: manhattan_spearman value: 62.569607573241115 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (zh) type: mteb/amazon_reviews_multi config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 53.98400000000001 - type: f1 value: 51.21447361350723 - task: type: STS dataset: name: MTEB BQ type: C-MTEB/BQ config: default split: test revision: e3dda5e115e487b39ec7e618c0c6a29137052a55 metrics: - type: cos_sim_pearson value: 79.11941660686553 - type: cos_sim_spearman 
value: 81.25029594540435 - type: euclidean_pearson value: 82.06973504238826 - type: euclidean_spearman value: 81.2501989488524 - type: manhattan_pearson value: 82.10094630392753 - type: manhattan_spearman value: 81.27987244392389 - task: type: Clustering dataset: name: MTEB CLSClusteringP2P type: C-MTEB/CLSClusteringP2P config: default split: test revision: 4b6227591c6c1a73bc76b1055f3b7f3588e72476 metrics: - type: v_measure value: 47.07270168705156 - task: type: Clustering dataset: name: MTEB CLSClusteringS2S type: C-MTEB/CLSClusteringS2S config: default split: test revision: e458b3f5414b62b7f9f83499ac1f5497ae2e869f metrics: - type: v_measure value: 45.98511703185043 - task: type: Reranking dataset: name: MTEB CMedQAv1 type: C-MTEB/CMedQAv1-reranking config: default split: test revision: 8d7f1e942507dac42dc58017c1a001c3717da7df metrics: - type: map value: 88.19895157194931 - type: mrr value: 90.21424603174603 - task: type: Reranking dataset: name: MTEB CMedQAv2 type: C-MTEB/CMedQAv2-reranking config: default split: test revision: 23d186750531a14a0357ca22cd92d712fd512ea0 metrics: - type: map value: 88.03317320980119 - type: mrr value: 89.9461507936508 - task: type: Retrieval dataset: name: MTEB CmedqaRetrieval type: C-MTEB/CmedqaRetrieval config: default split: dev revision: cd540c506dae1cf9e9a59c3e06f42030d54e7301 metrics: - type: map_at_1 value: 29.037000000000003 - type: map_at_10 value: 42.001 - type: map_at_100 value: 43.773 - type: map_at_1000 value: 43.878 - type: map_at_3 value: 37.637 - type: map_at_5 value: 40.034 - type: mrr_at_1 value: 43.136 - type: mrr_at_10 value: 51.158 - type: mrr_at_100 value: 52.083 - type: mrr_at_1000 value: 52.12 - type: mrr_at_3 value: 48.733 - type: mrr_at_5 value: 50.025 - type: ndcg_at_1 value: 43.136 - type: ndcg_at_10 value: 48.685 - type: ndcg_at_100 value: 55.513 - type: ndcg_at_1000 value: 57.242000000000004 - type: ndcg_at_3 value: 43.329 - type: ndcg_at_5 value: 45.438 - type: precision_at_1 value: 43.136 - type: 
precision_at_10 value: 10.56 - type: precision_at_100 value: 1.6129999999999998 - type: precision_at_1000 value: 0.184 - type: precision_at_3 value: 24.064 - type: precision_at_5 value: 17.269000000000002 - type: recall_at_1 value: 29.037000000000003 - type: recall_at_10 value: 59.245000000000005 - type: recall_at_100 value: 87.355 - type: recall_at_1000 value: 98.74000000000001 - type: recall_at_3 value: 42.99 - type: recall_at_5 value: 49.681999999999995 - task: type: PairClassification dataset: name: MTEB Cmnli type: C-MTEB/CMNLI config: default split: validation revision: 41bc36f332156f7adc9e38f53777c959b2ae9766 metrics: - type: cos_sim_accuracy value: 82.68190018039687 - type: cos_sim_ap value: 90.18017125327886 - type: cos_sim_f1 value: 83.64080906868193 - type: cos_sim_precision value: 79.7076890489303 - type: cos_sim_recall value: 87.98223053542202 - type: dot_accuracy value: 82.68190018039687 - type: dot_ap value: 90.18782350103646 - type: dot_f1 value: 83.64242087729039 - type: dot_precision value: 79.65313028764805 - type: dot_recall value: 88.05237315875614 - type: euclidean_accuracy value: 82.68190018039687 - type: euclidean_ap value: 90.1801957900632 - type: euclidean_f1 value: 83.63636363636364 - type: euclidean_precision value: 79.52772506852203 - type: euclidean_recall value: 88.19265840542437 - type: manhattan_accuracy value: 82.14070956103427 - type: manhattan_ap value: 89.96178420101427 - type: manhattan_f1 value: 83.21087838578791 - type: manhattan_precision value: 78.35605121850475 - type: manhattan_recall value: 88.70703764320785 - type: max_accuracy value: 82.68190018039687 - type: max_ap value: 90.18782350103646 - type: max_f1 value: 83.64242087729039 - task: type: Retrieval dataset: name: MTEB CovidRetrieval type: C-MTEB/CovidRetrieval config: default split: dev revision: 1271c7809071a13532e05f25fb53511ffce77117 metrics: - type: map_at_1 value: 72.234 - type: map_at_10 value: 80.10000000000001 - type: map_at_100 value: 80.36 - type: 
map_at_1000 value: 80.363 - type: map_at_3 value: 78.315 - type: map_at_5 value: 79.607 - type: mrr_at_1 value: 72.392 - type: mrr_at_10 value: 80.117 - type: mrr_at_100 value: 80.36999999999999 - type: mrr_at_1000 value: 80.373 - type: mrr_at_3 value: 78.469 - type: mrr_at_5 value: 79.633 - type: ndcg_at_1 value: 72.392 - type: ndcg_at_10 value: 83.651 - type: ndcg_at_100 value: 84.749 - type: ndcg_at_1000 value: 84.83000000000001 - type: ndcg_at_3 value: 80.253 - type: ndcg_at_5 value: 82.485 - type: precision_at_1 value: 72.392 - type: precision_at_10 value: 9.557 - type: precision_at_100 value: 1.004 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 28.732000000000003 - type: precision_at_5 value: 18.377 - type: recall_at_1 value: 72.234 - type: recall_at_10 value: 94.573 - type: recall_at_100 value: 99.368 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 85.669 - type: recall_at_5 value: 91.01700000000001 - task: type: Retrieval dataset: name: MTEB DuRetrieval type: C-MTEB/DuRetrieval config: default split: dev revision: a1a333e290fe30b10f3f56498e3a0d911a693ced metrics: - type: map_at_1 value: 26.173999999999996 - type: map_at_10 value: 80.04 - type: map_at_100 value: 82.94500000000001 - type: map_at_1000 value: 82.98100000000001 - type: map_at_3 value: 55.562999999999995 - type: map_at_5 value: 69.89800000000001 - type: mrr_at_1 value: 89.5 - type: mrr_at_10 value: 92.996 - type: mrr_at_100 value: 93.06400000000001 - type: mrr_at_1000 value: 93.065 - type: mrr_at_3 value: 92.658 - type: mrr_at_5 value: 92.84599999999999 - type: ndcg_at_1 value: 89.5 - type: ndcg_at_10 value: 87.443 - type: ndcg_at_100 value: 90.253 - type: ndcg_at_1000 value: 90.549 - type: ndcg_at_3 value: 85.874 - type: ndcg_at_5 value: 84.842 - type: precision_at_1 value: 89.5 - type: precision_at_10 value: 41.805 - type: precision_at_100 value: 4.827 - type: precision_at_1000 value: 0.49 - type: precision_at_3 value: 76.85 - type: precision_at_5 value: 
64.8 - type: recall_at_1 value: 26.173999999999996 - type: recall_at_10 value: 89.101 - type: recall_at_100 value: 98.08099999999999 - type: recall_at_1000 value: 99.529 - type: recall_at_3 value: 57.902 - type: recall_at_5 value: 74.602 - task: type: Retrieval dataset: name: MTEB EcomRetrieval type: C-MTEB/EcomRetrieval config: default split: dev revision: 687de13dc7294d6fd9be10c6945f9e8fec8166b9 metrics: - type: map_at_1 value: 56.10000000000001 - type: map_at_10 value: 66.15299999999999 - type: map_at_100 value: 66.625 - type: map_at_1000 value: 66.636 - type: map_at_3 value: 63.632999999999996 - type: map_at_5 value: 65.293 - type: mrr_at_1 value: 56.10000000000001 - type: mrr_at_10 value: 66.15299999999999 - type: mrr_at_100 value: 66.625 - type: mrr_at_1000 value: 66.636 - type: mrr_at_3 value: 63.632999999999996 - type: mrr_at_5 value: 65.293 - type: ndcg_at_1 value: 56.10000000000001 - type: ndcg_at_10 value: 71.146 - type: ndcg_at_100 value: 73.27799999999999 - type: ndcg_at_1000 value: 73.529 - type: ndcg_at_3 value: 66.09 - type: ndcg_at_5 value: 69.08999999999999 - type: precision_at_1 value: 56.10000000000001 - type: precision_at_10 value: 8.68 - type: precision_at_100 value: 0.964 - type: precision_at_1000 value: 0.098 - type: precision_at_3 value: 24.4 - type: precision_at_5 value: 16.1 - type: recall_at_1 value: 56.10000000000001 - type: recall_at_10 value: 86.8 - type: recall_at_100 value: 96.39999999999999 - type: recall_at_1000 value: 98.3 - type: recall_at_3 value: 73.2 - type: recall_at_5 value: 80.5 - task: type: Classification dataset: name: MTEB IFlyTek type: C-MTEB/IFlyTek-classification config: default split: validation revision: 421605374b29664c5fc098418fe20ada9bd55f8a metrics: - type: accuracy value: 54.52096960369373 - type: f1 value: 40.930845295808695 - task: type: Classification dataset: name: MTEB JDReview type: C-MTEB/JDReview-classification config: default split: test revision: b7c64bd89eb87f8ded463478346f76731f07bf8b metrics: - 
type: accuracy value: 86.51031894934334 - type: ap value: 55.9516014323483 - type: f1 value: 81.54813679326381 - task: type: STS dataset: name: MTEB LCQMC type: C-MTEB/LCQMC config: default split: test revision: 17f9b096f80380fce5ed12a9be8be7784b337daf metrics: - type: cos_sim_pearson value: 69.67437838574276 - type: cos_sim_spearman value: 73.81314174653045 - type: euclidean_pearson value: 72.63430276680275 - type: euclidean_spearman value: 73.81358736777001 - type: manhattan_pearson value: 72.58743833842829 - type: manhattan_spearman value: 73.7590419009179 - task: type: Reranking dataset: name: MTEB MMarcoReranking type: C-MTEB/Mmarco-reranking config: default split: dev revision: None metrics: - type: map value: 31.648613483640254 - type: mrr value: 30.37420634920635 - task: type: Retrieval dataset: name: MTEB MMarcoRetrieval type: C-MTEB/MMarcoRetrieval config: default split: dev revision: 539bbde593d947e2a124ba72651aafc09eb33fc2 metrics: - type: map_at_1 value: 73.28099999999999 - type: map_at_10 value: 81.977 - type: map_at_100 value: 82.222 - type: map_at_1000 value: 82.22699999999999 - type: map_at_3 value: 80.441 - type: map_at_5 value: 81.46600000000001 - type: mrr_at_1 value: 75.673 - type: mrr_at_10 value: 82.41000000000001 - type: mrr_at_100 value: 82.616 - type: mrr_at_1000 value: 82.621 - type: mrr_at_3 value: 81.094 - type: mrr_at_5 value: 81.962 - type: ndcg_at_1 value: 75.673 - type: ndcg_at_10 value: 85.15599999999999 - type: ndcg_at_100 value: 86.151 - type: ndcg_at_1000 value: 86.26899999999999 - type: ndcg_at_3 value: 82.304 - type: ndcg_at_5 value: 84.009 - type: precision_at_1 value: 75.673 - type: precision_at_10 value: 10.042 - type: precision_at_100 value: 1.052 - type: precision_at_1000 value: 0.106 - type: precision_at_3 value: 30.673000000000002 - type: precision_at_5 value: 19.326999999999998 - type: recall_at_1 value: 73.28099999999999 - type: recall_at_10 value: 94.446 - type: recall_at_100 value: 98.737 - type: recall_at_1000 
value: 99.649 - type: recall_at_3 value: 86.984 - type: recall_at_5 value: 91.024 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-CN) type: mteb/amazon_massive_intent config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 81.08607935440484 - type: f1 value: 78.24879986066307 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-CN) type: mteb/amazon_massive_scenario config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 86.05917955615332 - type: f1 value: 85.05279279434997 - task: type: Retrieval dataset: name: MTEB MedicalRetrieval type: C-MTEB/MedicalRetrieval config: default split: dev revision: 2039188fb5800a9803ba5048df7b76e6fb151fc6 metrics: - type: map_at_1 value: 56.2 - type: map_at_10 value: 62.57899999999999 - type: map_at_100 value: 63.154999999999994 - type: map_at_1000 value: 63.193 - type: map_at_3 value: 61.217 - type: map_at_5 value: 62.012 - type: mrr_at_1 value: 56.3 - type: mrr_at_10 value: 62.629000000000005 - type: mrr_at_100 value: 63.205999999999996 - type: mrr_at_1000 value: 63.244 - type: mrr_at_3 value: 61.267 - type: mrr_at_5 value: 62.062 - type: ndcg_at_1 value: 56.2 - type: ndcg_at_10 value: 65.592 - type: ndcg_at_100 value: 68.657 - type: ndcg_at_1000 value: 69.671 - type: ndcg_at_3 value: 62.808 - type: ndcg_at_5 value: 64.24499999999999 - type: precision_at_1 value: 56.2 - type: precision_at_10 value: 7.5 - type: precision_at_100 value: 0.899 - type: precision_at_1000 value: 0.098 - type: precision_at_3 value: 22.467000000000002 - type: precision_at_5 value: 14.180000000000001 - type: recall_at_1 value: 56.2 - type: recall_at_10 value: 75.0 - type: recall_at_100 value: 89.9 - type: recall_at_1000 value: 97.89999999999999 - type: recall_at_3 value: 67.4 - type: recall_at_5 value: 70.89999999999999 - task: type: Classification dataset: name: MTEB 
MultilingualSentiment type: C-MTEB/MultilingualSentiment-classification config: default split: validation revision: 46958b007a63fdbf239b7672c25d0bea67b5ea1a metrics: - type: accuracy value: 76.87666666666667 - type: f1 value: 76.7317686219665 - task: type: PairClassification dataset: name: MTEB Ocnli type: C-MTEB/OCNLI config: default split: validation revision: 66e76a618a34d6d565d5538088562851e6daa7ec metrics: - type: cos_sim_accuracy value: 79.64266377910124 - type: cos_sim_ap value: 84.78274442344829 - type: cos_sim_f1 value: 81.16947472745292 - type: cos_sim_precision value: 76.47058823529412 - type: cos_sim_recall value: 86.48363252375924 - type: dot_accuracy value: 79.64266377910124 - type: dot_ap value: 84.7851404063692 - type: dot_f1 value: 81.16947472745292 - type: dot_precision value: 76.47058823529412 - type: dot_recall value: 86.48363252375924 - type: euclidean_accuracy value: 79.64266377910124 - type: euclidean_ap value: 84.78068373762378 - type: euclidean_f1 value: 81.14794656110837 - type: euclidean_precision value: 76.35009310986965 - type: euclidean_recall value: 86.58922914466737 - type: manhattan_accuracy value: 79.48023822414727 - type: manhattan_ap value: 84.72928897427576 - type: manhattan_f1 value: 81.32084770823064 - type: manhattan_precision value: 76.24768946395564 - type: manhattan_recall value: 87.11721224920802 - type: max_accuracy value: 79.64266377910124 - type: max_ap value: 84.7851404063692 - type: max_f1 value: 81.32084770823064 - task: type: Classification dataset: name: MTEB OnlineShopping type: C-MTEB/OnlineShopping-classification config: default split: test revision: e610f2ebd179a8fda30ae534c3878750a96db120 metrics: - type: accuracy value: 94.3 - type: ap value: 92.8664032274438 - type: f1 value: 94.29311102997727 - task: type: STS dataset: name: MTEB PAWSX type: C-MTEB/PAWSX config: default split: test revision: 9c6a90e430ac22b5779fb019a23e820b11a8b5e1 metrics: - type: cos_sim_pearson value: 48.51392279882909 - type: 
cos_sim_spearman value: 54.06338895994974 - type: euclidean_pearson value: 52.58480559573412 - type: euclidean_spearman value: 54.06417276612201 - type: manhattan_pearson value: 52.69525121721343 - type: manhattan_spearman value: 54.048147455389675 - task: type: STS dataset: name: MTEB QBQTC type: C-MTEB/QBQTC config: default split: test revision: 790b0510dc52b1553e8c49f3d2afb48c0e5c48b7 metrics: - type: cos_sim_pearson value: 29.728387290757325 - type: cos_sim_spearman value: 31.366121633635284 - type: euclidean_pearson value: 29.14588368552961 - type: euclidean_spearman value: 31.36764411112844 - type: manhattan_pearson value: 29.63517350523121 - type: manhattan_spearman value: 31.94157020583762 - task: type: STS dataset: name: MTEB STS22 (zh) type: mteb/sts22-crosslingual-sts config: zh split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 63.64868296271406 - type: cos_sim_spearman value: 66.12800618164744 - type: euclidean_pearson value: 63.21405767340238 - type: euclidean_spearman value: 66.12786567790748 - type: manhattan_pearson value: 64.04300276525848 - type: manhattan_spearman value: 66.5066857145652 - task: type: STS dataset: name: MTEB STSB type: C-MTEB/STSB config: default split: test revision: 0cde68302b3541bb8b3c340dc0644b0b745b3dc0 metrics: - type: cos_sim_pearson value: 81.2302623912794 - type: cos_sim_spearman value: 81.16833673266562 - type: euclidean_pearson value: 79.47647843876024 - type: euclidean_spearman value: 81.16944349524972 - type: manhattan_pearson value: 79.84947238492208 - type: manhattan_spearman value: 81.64626599410026 - task: type: Reranking dataset: name: MTEB T2Reranking type: C-MTEB/T2Reranking config: default split: dev revision: 76631901a18387f85eaa53e5450019b87ad58ef9 metrics: - type: map value: 67.80129586475687 - type: mrr value: 77.77402311635554 - task: type: Retrieval dataset: name: MTEB T2Retrieval type: C-MTEB/T2Retrieval config: default split: dev revision: 
8731a845f1bf500a4f111cf1070785c793d10e64 metrics: - type: map_at_1 value: 28.666999999999998 - type: map_at_10 value: 81.063 - type: map_at_100 value: 84.504 - type: map_at_1000 value: 84.552 - type: map_at_3 value: 56.897 - type: map_at_5 value: 70.073 - type: mrr_at_1 value: 92.087 - type: mrr_at_10 value: 94.132 - type: mrr_at_100 value: 94.19800000000001 - type: mrr_at_1000 value: 94.19999999999999 - type: mrr_at_3 value: 93.78999999999999 - type: mrr_at_5 value: 94.002 - type: ndcg_at_1 value: 92.087 - type: ndcg_at_10 value: 87.734 - type: ndcg_at_100 value: 90.736 - type: ndcg_at_1000 value: 91.184 - type: ndcg_at_3 value: 88.78 - type: ndcg_at_5 value: 87.676 - type: precision_at_1 value: 92.087 - type: precision_at_10 value: 43.46 - type: precision_at_100 value: 5.07 - type: precision_at_1000 value: 0.518 - type: precision_at_3 value: 77.49000000000001 - type: precision_at_5 value: 65.194 - type: recall_at_1 value: 28.666999999999998 - type: recall_at_10 value: 86.632 - type: recall_at_100 value: 96.646 - type: recall_at_1000 value: 98.917 - type: recall_at_3 value: 58.333999999999996 - type: recall_at_5 value: 72.974 - task: type: Classification dataset: name: MTEB TNews type: C-MTEB/TNews-classification config: default split: validation revision: 317f262bf1e6126357bbe89e875451e4b0938fe4 metrics: - type: accuracy value: 52.971999999999994 - type: f1 value: 50.2898280984929 - task: type: Clustering dataset: name: MTEB ThuNewsClusteringP2P type: C-MTEB/ThuNewsClusteringP2P config: default split: test revision: 5798586b105c0434e4f0fe5e767abe619442cf93 metrics: - type: v_measure value: 86.0797948663824 - task: type: Clustering dataset: name: MTEB ThuNewsClusteringS2S type: C-MTEB/ThuNewsClusteringS2S config: default split: test revision: 8a8b2caeda43f39e13c4bc5bea0f8a667896e10d metrics: - type: v_measure value: 85.10759092255017 - task: type: Retrieval dataset: name: MTEB VideoRetrieval type: C-MTEB/VideoRetrieval config: default split: dev revision: 
58c2597a5943a2ba48f4668c3b90d796283c5639 metrics: - type: map_at_1 value: 65.60000000000001 - type: map_at_10 value: 74.773 - type: map_at_100 value: 75.128 - type: map_at_1000 value: 75.136 - type: map_at_3 value: 73.05 - type: map_at_5 value: 74.13499999999999 - type: mrr_at_1 value: 65.60000000000001 - type: mrr_at_10 value: 74.773 - type: mrr_at_100 value: 75.128 - type: mrr_at_1000 value: 75.136 - type: mrr_at_3 value: 73.05 - type: mrr_at_5 value: 74.13499999999999 - type: ndcg_at_1 value: 65.60000000000001 - type: ndcg_at_10 value: 78.84299999999999 - type: ndcg_at_100 value: 80.40899999999999 - type: ndcg_at_1000 value: 80.57 - type: ndcg_at_3 value: 75.40599999999999 - type: ndcg_at_5 value: 77.351 - type: precision_at_1 value: 65.60000000000001 - type: precision_at_10 value: 9.139999999999999 - type: precision_at_100 value: 0.984 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 27.400000000000002 - type: precision_at_5 value: 17.380000000000003 - type: recall_at_1 value: 65.60000000000001 - type: recall_at_10 value: 91.4 - type: recall_at_100 value: 98.4 - type: recall_at_1000 value: 99.6 - type: recall_at_3 value: 82.19999999999999 - type: recall_at_5 value: 86.9 - task: type: Classification dataset: name: MTEB Waimai type: C-MTEB/waimai-classification config: default split: test revision: 339287def212450dcaa9df8c22bf93e9980c7023 metrics: - type: accuracy value: 89.47 - type: ap value: 75.59561751845389 - type: f1 value: 87.95207751382563 - task: type: Clustering dataset: name: MTEB AlloProfClusteringP2P type: lyon-nlp/alloprof config: default split: test revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b metrics: - type: v_measure value: 76.05592323841036 - type: v_measure value: 64.51718058866508 - task: type: Reranking dataset: name: MTEB AlloprofReranking type: lyon-nlp/mteb-fr-reranking-alloprof-s2p config: default split: test revision: 666fdacebe0291776e86f29345663dfaf80a0db9 metrics: - type: map value: 73.08278490943373 - type: 
mrr value: 74.66561454570449 - task: type: Retrieval dataset: name: MTEB AlloprofRetrieval type: lyon-nlp/alloprof config: default split: test revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b metrics: - type: map_at_1 value: 38.912 - type: map_at_10 value: 52.437999999999995 - type: map_at_100 value: 53.38 - type: map_at_1000 value: 53.427 - type: map_at_3 value: 48.879 - type: map_at_5 value: 50.934000000000005 - type: mrr_at_1 value: 44.085 - type: mrr_at_10 value: 55.337 - type: mrr_at_100 value: 56.016999999999996 - type: mrr_at_1000 value: 56.043 - type: mrr_at_3 value: 52.55499999999999 - type: mrr_at_5 value: 54.20399999999999 - type: ndcg_at_1 value: 44.085 - type: ndcg_at_10 value: 58.876 - type: ndcg_at_100 value: 62.714000000000006 - type: ndcg_at_1000 value: 63.721000000000004 - type: ndcg_at_3 value: 52.444 - type: ndcg_at_5 value: 55.692 - type: precision_at_1 value: 44.085 - type: precision_at_10 value: 9.21 - type: precision_at_100 value: 1.164 - type: precision_at_1000 value: 0.128 - type: precision_at_3 value: 23.043 - type: precision_at_5 value: 15.898000000000001 - type: recall_at_1 value: 38.912 - type: recall_at_10 value: 75.577 - type: recall_at_100 value: 92.038 - type: recall_at_1000 value: 99.325 - type: recall_at_3 value: 58.592 - type: recall_at_5 value: 66.235 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (fr) type: mteb/amazon_reviews_multi config: fr split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 55.532000000000004 - type: f1 value: 52.5783943471605 - task: type: Retrieval dataset: name: MTEB BSARDRetrieval type: maastrichtlawtech/bsard config: default split: test revision: 5effa1b9b5fa3b0f9e12523e6e43e5f86a6e6d59 metrics: - type: map_at_1 value: 8.108 - type: map_at_10 value: 14.710999999999999 - type: map_at_100 value: 15.891 - type: map_at_1000 value: 15.983 - type: map_at_3 value: 12.237 - type: map_at_5 value: 13.679 - type: mrr_at_1 value: 8.108 - 
type: mrr_at_10 value: 14.710999999999999 - type: mrr_at_100 value: 15.891 - type: mrr_at_1000 value: 15.983 - type: mrr_at_3 value: 12.237 - type: mrr_at_5 value: 13.679 - type: ndcg_at_1 value: 8.108 - type: ndcg_at_10 value: 18.796 - type: ndcg_at_100 value: 25.098 - type: ndcg_at_1000 value: 27.951999999999998 - type: ndcg_at_3 value: 13.712 - type: ndcg_at_5 value: 16.309 - type: precision_at_1 value: 8.108 - type: precision_at_10 value: 3.198 - type: precision_at_100 value: 0.626 - type: precision_at_1000 value: 0.086 - type: precision_at_3 value: 6.006 - type: precision_at_5 value: 4.865 - type: recall_at_1 value: 8.108 - type: recall_at_10 value: 31.982 - type: recall_at_100 value: 62.613 - type: recall_at_1000 value: 86.036 - type: recall_at_3 value: 18.018 - type: recall_at_5 value: 24.324 - task: type: Clustering dataset: name: MTEB HALClusteringS2S type: lyon-nlp/clustering-hal-s2s config: default split: test revision: e06ebbbb123f8144bef1a5d18796f3dec9ae2915 metrics: - type: v_measure value: 30.833269778867116 - task: type: Clustering dataset: name: MTEB MLSUMClusteringP2P type: mlsum config: default split: test revision: b5d54f8f3b61ae17845046286940f03c6bc79bc7 metrics: - type: v_measure value: 50.0281928004713 - type: v_measure value: 43.699961510636534 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (fr) type: mteb/mtop_domain config: fr split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 96.68963357344191 - type: f1 value: 96.45175170820961 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (fr) type: mteb/mtop_intent config: fr split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 87.46946445349202 - type: f1 value: 65.79860440988624 - task: type: Classification dataset: name: MTEB MasakhaNEWSClassification (fra) type: masakhane/masakhanews config: fra split: test revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60 
metrics: - type: accuracy value: 82.60663507109005 - type: f1 value: 77.20462646604777 - task: type: Clustering dataset: name: MTEB MasakhaNEWSClusteringP2P (fra) type: masakhane/masakhanews config: fra split: test revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60 metrics: - type: v_measure value: 60.19311264967803 - type: v_measure value: 63.6235764409785 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fr) type: mteb/amazon_massive_intent config: fr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 81.65097511768661 - type: f1 value: 78.77796091490924 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fr) type: mteb/amazon_massive_scenario config: fr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 86.64425016812373 - type: f1 value: 85.4912728670017 - task: type: Retrieval dataset: name: MTEB MintakaRetrieval (fr) type: jinaai/mintakaqa config: fr split: test revision: efa78cc2f74bbcd21eff2261f9e13aebe40b814e metrics: - type: map_at_1 value: 35.913000000000004 - type: map_at_10 value: 48.147 - type: map_at_100 value: 48.91 - type: map_at_1000 value: 48.949 - type: map_at_3 value: 45.269999999999996 - type: map_at_5 value: 47.115 - type: mrr_at_1 value: 35.913000000000004 - type: mrr_at_10 value: 48.147 - type: mrr_at_100 value: 48.91 - type: mrr_at_1000 value: 48.949 - type: mrr_at_3 value: 45.269999999999996 - type: mrr_at_5 value: 47.115 - type: ndcg_at_1 value: 35.913000000000004 - type: ndcg_at_10 value: 54.03 - type: ndcg_at_100 value: 57.839 - type: ndcg_at_1000 value: 58.925000000000004 - type: ndcg_at_3 value: 48.217999999999996 - type: ndcg_at_5 value: 51.56699999999999 - type: precision_at_1 value: 35.913000000000004 - type: precision_at_10 value: 7.244000000000001 - type: precision_at_100 value: 0.9039999999999999 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 18.905 - type: 
precision_at_5 value: 12.981000000000002 - type: recall_at_1 value: 35.913000000000004 - type: recall_at_10 value: 72.441 - type: recall_at_100 value: 90.41799999999999 - type: recall_at_1000 value: 99.099 - type: recall_at_3 value: 56.716 - type: recall_at_5 value: 64.90599999999999 - task: type: PairClassification dataset: name: MTEB OpusparcusPC (fr) type: GEM/opusparcus config: fr split: test revision: 9e9b1f8ef51616073f47f306f7f47dd91663f86a metrics: - type: cos_sim_accuracy value: 99.90069513406156 - type: cos_sim_ap value: 100.0 - type: cos_sim_f1 value: 99.95032290114257 - type: cos_sim_precision value: 100.0 - type: cos_sim_recall value: 99.90069513406156 - type: dot_accuracy value: 99.90069513406156 - type: dot_ap value: 100.0 - type: dot_f1 value: 99.95032290114257 - type: dot_precision value: 100.0 - type: dot_recall value: 99.90069513406156 - type: euclidean_accuracy value: 99.90069513406156 - type: euclidean_ap value: 100.0 - type: euclidean_f1 value: 99.95032290114257 - type: euclidean_precision value: 100.0 - type: euclidean_recall value: 99.90069513406156 - type: manhattan_accuracy value: 99.90069513406156 - type: manhattan_ap value: 100.0 - type: manhattan_f1 value: 99.95032290114257 - type: manhattan_precision value: 100.0 - type: manhattan_recall value: 99.90069513406156 - type: max_accuracy value: 99.90069513406156 - type: max_ap value: 100.0 - type: max_f1 value: 99.95032290114257 - task: type: PairClassification dataset: name: MTEB PawsX (fr) type: paws-x config: fr split: test revision: 8a04d940a42cd40658986fdd8e3da561533a3646 metrics: - type: cos_sim_accuracy value: 75.25 - type: cos_sim_ap value: 80.86376001270014 - type: cos_sim_f1 value: 73.65945437441204 - type: cos_sim_precision value: 64.02289452166802 - type: cos_sim_recall value: 86.71096345514951 - type: dot_accuracy value: 75.25 - type: dot_ap value: 80.93686107633002 - type: dot_f1 value: 73.65945437441204 - type: dot_precision value: 64.02289452166802 - type: dot_recall value: 
86.71096345514951 - type: euclidean_accuracy value: 75.25 - type: euclidean_ap value: 80.86379136218862 - type: euclidean_f1 value: 73.65945437441204 - type: euclidean_precision value: 64.02289452166802 - type: euclidean_recall value: 86.71096345514951 - type: manhattan_accuracy value: 75.3 - type: manhattan_ap value: 80.87826606097734 - type: manhattan_f1 value: 73.68421052631581 - type: manhattan_precision value: 64.0 - type: manhattan_recall value: 86.82170542635659 - type: max_accuracy value: 75.3 - type: max_ap value: 80.93686107633002 - type: max_f1 value: 73.68421052631581 - task: type: STS dataset: name: MTEB SICKFr type: Lajavaness/SICK-fr config: default split: test revision: e077ab4cf4774a1e36d86d593b150422fafd8e8a metrics: - type: cos_sim_pearson value: 81.42349425981143 - type: cos_sim_spearman value: 78.90454327031226 - type: euclidean_pearson value: 78.39086497435166 - type: euclidean_spearman value: 78.9046133980509 - type: manhattan_pearson value: 78.63743094286502 - type: manhattan_spearman value: 79.12136348449269 - task: type: STS dataset: name: MTEB STS22 (fr) type: mteb/sts22-crosslingual-sts config: fr split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 81.452697919749 - type: cos_sim_spearman value: 82.58116836039301 - type: euclidean_pearson value: 81.04038478932786 - type: euclidean_spearman value: 82.58116836039301 - type: manhattan_pearson value: 81.37075396187771 - type: manhattan_spearman value: 82.73678231355368 - task: type: STS dataset: name: MTEB STSBenchmarkMultilingualSTS (fr) type: stsb_multi_mt config: fr split: test revision: 93d57ef91790589e3ce9c365164337a8a78b7632 metrics: - type: cos_sim_pearson value: 85.7419764013806 - type: cos_sim_spearman value: 85.46085808849622 - type: euclidean_pearson value: 83.70449639870063 - type: euclidean_spearman value: 85.46159013076233 - type: manhattan_pearson value: 83.95259510313929 - type: manhattan_spearman value: 85.8029724659458 - 
task: type: Summarization dataset: name: MTEB SummEvalFr type: lyon-nlp/summarization-summeval-fr-p2p config: default split: test revision: b385812de6a9577b6f4d0f88c6a6e35395a94054 metrics: - type: cos_sim_pearson value: 32.61063271753325 - type: cos_sim_spearman value: 31.454589417353603 - type: dot_pearson value: 32.6106288643431 - type: dot_spearman value: 31.454589417353603 - task: type: Reranking dataset: name: MTEB SyntecReranking type: lyon-nlp/mteb-fr-reranking-syntec-s2p config: default split: test revision: b205c5084a0934ce8af14338bf03feb19499c84d metrics: - type: map value: 84.31666666666666 - type: mrr value: 84.31666666666666 - task: type: Retrieval dataset: name: MTEB SyntecRetrieval type: lyon-nlp/mteb-fr-retrieval-syntec-s2p config: default split: test revision: 77f7e271bf4a92b24fce5119f3486b583ca016ff metrics: - type: map_at_1 value: 63.0 - type: map_at_10 value: 73.471 - type: map_at_100 value: 73.87 - type: map_at_1000 value: 73.87 - type: map_at_3 value: 70.5 - type: map_at_5 value: 73.05 - type: mrr_at_1 value: 63.0 - type: mrr_at_10 value: 73.471 - type: mrr_at_100 value: 73.87 - type: mrr_at_1000 value: 73.87 - type: mrr_at_3 value: 70.5 - type: mrr_at_5 value: 73.05 - type: ndcg_at_1 value: 63.0 - type: ndcg_at_10 value: 78.255 - type: ndcg_at_100 value: 79.88 - type: ndcg_at_1000 value: 79.88 - type: ndcg_at_3 value: 72.702 - type: ndcg_at_5 value: 77.264 - type: precision_at_1 value: 63.0 - type: precision_at_10 value: 9.3 - type: precision_at_100 value: 1.0 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 26.333000000000002 - type: precision_at_5 value: 18.0 - type: recall_at_1 value: 63.0 - type: recall_at_10 value: 93.0 - type: recall_at_100 value: 100.0 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 79.0 - type: recall_at_5 value: 90.0 - task: type: Retrieval dataset: name: MTEB XPQARetrieval (fr) type: jinaai/xpqa config: fr split: test revision: c99d599f0a6ab9b85b065da6f9d94f9cf731679f metrics: - 
type: map_at_1 value: 40.338 - type: map_at_10 value: 61.927 - type: map_at_100 value: 63.361999999999995 - type: map_at_1000 value: 63.405 - type: map_at_3 value: 55.479 - type: map_at_5 value: 59.732 - type: mrr_at_1 value: 63.551 - type: mrr_at_10 value: 71.006 - type: mrr_at_100 value: 71.501 - type: mrr_at_1000 value: 71.509 - type: mrr_at_3 value: 69.07 - type: mrr_at_5 value: 70.165 - type: ndcg_at_1 value: 63.551 - type: ndcg_at_10 value: 68.297 - type: ndcg_at_100 value: 73.13199999999999 - type: ndcg_at_1000 value: 73.751 - type: ndcg_at_3 value: 62.999 - type: ndcg_at_5 value: 64.89 - type: precision_at_1 value: 63.551 - type: precision_at_10 value: 15.661 - type: precision_at_100 value: 1.9789999999999999 - type: precision_at_1000 value: 0.207 - type: precision_at_3 value: 38.273 - type: precision_at_5 value: 27.61 - type: recall_at_1 value: 40.338 - type: recall_at_10 value: 77.267 - type: recall_at_100 value: 95.892 - type: recall_at_1000 value: 99.75500000000001 - type: recall_at_3 value: 60.36 - type: recall_at_5 value: 68.825 - task: type: Clustering dataset: name: MTEB 8TagsClustering type: PL-MTEB/8tags-clustering config: default split: test revision: None metrics: - type: v_measure value: 51.36126303874126 - task: type: Classification dataset: name: MTEB AllegroReviews type: PL-MTEB/allegro-reviews config: default split: test revision: None metrics: - type: accuracy value: 67.13717693836979 - type: f1 value: 57.27609848003782 - task: type: Retrieval dataset: name: MTEB ArguAna-PL type: clarin-knext/arguana-pl config: default split: test revision: 63fc86750af76253e8c760fc9e534bbf24d260a2 metrics: - type: map_at_1 value: 35.276999999999994 - type: map_at_10 value: 51.086 - type: map_at_100 value: 51.788000000000004 - type: map_at_1000 value: 51.791 - type: map_at_3 value: 46.147 - type: map_at_5 value: 49.078 - type: mrr_at_1 value: 35.917 - type: mrr_at_10 value: 51.315999999999995 - type: mrr_at_100 value: 52.018 - type: mrr_at_1000 value: 
52.022 - type: mrr_at_3 value: 46.349000000000004 - type: mrr_at_5 value: 49.297000000000004 - type: ndcg_at_1 value: 35.276999999999994 - type: ndcg_at_10 value: 59.870999999999995 - type: ndcg_at_100 value: 62.590999999999994 - type: ndcg_at_1000 value: 62.661 - type: ndcg_at_3 value: 49.745 - type: ndcg_at_5 value: 55.067 - type: precision_at_1 value: 35.276999999999994 - type: precision_at_10 value: 8.791 - type: precision_at_100 value: 0.991 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 20.057 - type: precision_at_5 value: 14.637 - type: recall_at_1 value: 35.276999999999994 - type: recall_at_10 value: 87.909 - type: recall_at_100 value: 99.14699999999999 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 60.171 - type: recall_at_5 value: 73.18599999999999 - task: type: Classification dataset: name: MTEB CBD type: PL-MTEB/cbd config: default split: test revision: None metrics: - type: accuracy value: 78.03000000000002 - type: ap value: 29.12548553897622 - type: f1 value: 66.54857118886073 - task: type: PairClassification dataset: name: MTEB CDSC-E type: PL-MTEB/cdsce-pairclassification config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 89.0 - type: cos_sim_ap value: 76.75437826834582 - type: cos_sim_f1 value: 66.4850136239782 - type: cos_sim_precision value: 68.92655367231639 - type: cos_sim_recall value: 64.21052631578948 - type: dot_accuracy value: 89.0 - type: dot_ap value: 76.75437826834582 - type: dot_f1 value: 66.4850136239782 - type: dot_precision value: 68.92655367231639 - type: dot_recall value: 64.21052631578948 - type: euclidean_accuracy value: 89.0 - type: euclidean_ap value: 76.75437826834582 - type: euclidean_f1 value: 66.4850136239782 - type: euclidean_precision value: 68.92655367231639 - type: euclidean_recall value: 64.21052631578948 - type: manhattan_accuracy value: 89.0 - type: manhattan_ap value: 76.66074220647083 - type: manhattan_f1 value: 66.47058823529412 - type: 
manhattan_precision value: 75.33333333333333 - type: manhattan_recall value: 59.473684210526315 - type: max_accuracy value: 89.0 - type: max_ap value: 76.75437826834582 - type: max_f1 value: 66.4850136239782 - task: type: STS dataset: name: MTEB CDSC-R type: PL-MTEB/cdscr-sts config: default split: test revision: None metrics: - type: cos_sim_pearson value: 93.12903172428328 - type: cos_sim_spearman value: 92.66381487060741 - type: euclidean_pearson value: 90.37278396708922 - type: euclidean_spearman value: 92.66381487060741 - type: manhattan_pearson value: 90.32503296540962 - type: manhattan_spearman value: 92.6902938354313 - task: type: Retrieval dataset: name: MTEB DBPedia-PL type: clarin-knext/dbpedia-pl config: default split: test revision: 76afe41d9af165cc40999fcaa92312b8b012064a metrics: - type: map_at_1 value: 8.83 - type: map_at_10 value: 18.326 - type: map_at_100 value: 26.496 - type: map_at_1000 value: 28.455000000000002 - type: map_at_3 value: 12.933 - type: map_at_5 value: 15.168000000000001 - type: mrr_at_1 value: 66.0 - type: mrr_at_10 value: 72.76700000000001 - type: mrr_at_100 value: 73.203 - type: mrr_at_1000 value: 73.219 - type: mrr_at_3 value: 71.458 - type: mrr_at_5 value: 72.246 - type: ndcg_at_1 value: 55.375 - type: ndcg_at_10 value: 41.3 - type: ndcg_at_100 value: 45.891 - type: ndcg_at_1000 value: 52.905 - type: ndcg_at_3 value: 46.472 - type: ndcg_at_5 value: 43.734 - type: precision_at_1 value: 66.0 - type: precision_at_10 value: 33.074999999999996 - type: precision_at_100 value: 11.094999999999999 - type: precision_at_1000 value: 2.374 - type: precision_at_3 value: 48.583 - type: precision_at_5 value: 42.0 - type: recall_at_1 value: 8.83 - type: recall_at_10 value: 22.587 - type: recall_at_100 value: 50.61600000000001 - type: recall_at_1000 value: 73.559 - type: recall_at_3 value: 13.688 - type: recall_at_5 value: 16.855 - task: type: Retrieval dataset: name: MTEB FiQA-PL type: clarin-knext/fiqa-pl config: default split: test revision: 
2e535829717f8bf9dc829b7f911cc5bbd4e6608e metrics: - type: map_at_1 value: 20.587 - type: map_at_10 value: 33.095 - type: map_at_100 value: 35.24 - type: map_at_1000 value: 35.429 - type: map_at_3 value: 28.626 - type: map_at_5 value: 31.136999999999997 - type: mrr_at_1 value: 40.586 - type: mrr_at_10 value: 49.033 - type: mrr_at_100 value: 49.952999999999996 - type: mrr_at_1000 value: 49.992 - type: mrr_at_3 value: 46.553 - type: mrr_at_5 value: 48.035 - type: ndcg_at_1 value: 40.586 - type: ndcg_at_10 value: 41.046 - type: ndcg_at_100 value: 48.586 - type: ndcg_at_1000 value: 51.634 - type: ndcg_at_3 value: 36.773 - type: ndcg_at_5 value: 38.389 - type: precision_at_1 value: 40.586 - type: precision_at_10 value: 11.466 - type: precision_at_100 value: 1.909 - type: precision_at_1000 value: 0.245 - type: precision_at_3 value: 24.434 - type: precision_at_5 value: 18.426000000000002 - type: recall_at_1 value: 20.587 - type: recall_at_10 value: 47.986000000000004 - type: recall_at_100 value: 75.761 - type: recall_at_1000 value: 94.065 - type: recall_at_3 value: 33.339 - type: recall_at_5 value: 39.765 - task: type: Retrieval dataset: name: MTEB HotpotQA-PL type: clarin-knext/hotpotqa-pl config: default split: test revision: a0bd479ac97b4ccb5bd6ce320c415d0bb4beb907 metrics: - type: map_at_1 value: 40.878 - type: map_at_10 value: 58.775999999999996 - type: map_at_100 value: 59.632 - type: map_at_1000 value: 59.707 - type: map_at_3 value: 56.074 - type: map_at_5 value: 57.629 - type: mrr_at_1 value: 81.756 - type: mrr_at_10 value: 86.117 - type: mrr_at_100 value: 86.299 - type: mrr_at_1000 value: 86.30600000000001 - type: mrr_at_3 value: 85.345 - type: mrr_at_5 value: 85.832 - type: ndcg_at_1 value: 81.756 - type: ndcg_at_10 value: 67.608 - type: ndcg_at_100 value: 70.575 - type: ndcg_at_1000 value: 71.99600000000001 - type: ndcg_at_3 value: 63.723 - type: ndcg_at_5 value: 65.70700000000001 - type: precision_at_1 value: 81.756 - type: precision_at_10 value: 13.619 - type: 
precision_at_100 value: 1.5939999999999999 - type: precision_at_1000 value: 0.178 - type: precision_at_3 value: 39.604 - type: precision_at_5 value: 25.332 - type: recall_at_1 value: 40.878 - type: recall_at_10 value: 68.096 - type: recall_at_100 value: 79.696 - type: recall_at_1000 value: 89.082 - type: recall_at_3 value: 59.406000000000006 - type: recall_at_5 value: 63.329 - task: type: Retrieval dataset: name: MTEB MSMARCO-PL type: clarin-knext/msmarco-pl config: default split: test revision: 8634c07806d5cce3a6138e260e59b81760a0a640 metrics: - type: map_at_1 value: 2.1839999999999997 - type: map_at_10 value: 11.346 - type: map_at_100 value: 30.325000000000003 - type: map_at_1000 value: 37.806 - type: map_at_3 value: 4.842 - type: map_at_5 value: 6.891 - type: mrr_at_1 value: 86.047 - type: mrr_at_10 value: 89.14699999999999 - type: mrr_at_100 value: 89.46600000000001 - type: mrr_at_1000 value: 89.46600000000001 - type: mrr_at_3 value: 89.14699999999999 - type: mrr_at_5 value: 89.14699999999999 - type: ndcg_at_1 value: 67.829 - type: ndcg_at_10 value: 62.222 - type: ndcg_at_100 value: 55.337 - type: ndcg_at_1000 value: 64.076 - type: ndcg_at_3 value: 68.12700000000001 - type: ndcg_at_5 value: 64.987 - type: precision_at_1 value: 86.047 - type: precision_at_10 value: 69.535 - type: precision_at_100 value: 32.93 - type: precision_at_1000 value: 6.6049999999999995 - type: precision_at_3 value: 79.845 - type: precision_at_5 value: 75.349 - type: recall_at_1 value: 2.1839999999999997 - type: recall_at_10 value: 12.866 - type: recall_at_100 value: 43.505 - type: recall_at_1000 value: 72.366 - type: recall_at_3 value: 4.947 - type: recall_at_5 value: 7.192 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pl) type: mteb/amazon_massive_intent config: pl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 80.75319435104238 - type: f1 value: 77.58961444860606 - task: type: Classification dataset: name: 
MTEB MassiveScenarioClassification (pl) type: mteb/amazon_massive_scenario config: pl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 85.54472091459313 - type: f1 value: 84.29498563572106 - task: type: Retrieval dataset: name: MTEB NFCorpus-PL type: clarin-knext/nfcorpus-pl config: default split: test revision: 9a6f9567fda928260afed2de480d79c98bf0bec0 metrics: - type: map_at_1 value: 4.367 - type: map_at_10 value: 10.38 - type: map_at_100 value: 13.516 - type: map_at_1000 value: 14.982000000000001 - type: map_at_3 value: 7.367 - type: map_at_5 value: 8.59 - type: mrr_at_1 value: 41.486000000000004 - type: mrr_at_10 value: 48.886 - type: mrr_at_100 value: 49.657000000000004 - type: mrr_at_1000 value: 49.713 - type: mrr_at_3 value: 46.904 - type: mrr_at_5 value: 48.065000000000005 - type: ndcg_at_1 value: 40.402 - type: ndcg_at_10 value: 30.885 - type: ndcg_at_100 value: 28.393 - type: ndcg_at_1000 value: 37.428 - type: ndcg_at_3 value: 35.394999999999996 - type: ndcg_at_5 value: 33.391999999999996 - type: precision_at_1 value: 41.486000000000004 - type: precision_at_10 value: 23.437 - type: precision_at_100 value: 7.638 - type: precision_at_1000 value: 2.0389999999999997 - type: precision_at_3 value: 32.817 - type: precision_at_5 value: 28.915999999999997 - type: recall_at_1 value: 4.367 - type: recall_at_10 value: 14.655000000000001 - type: recall_at_100 value: 29.665999999999997 - type: recall_at_1000 value: 62.073 - type: recall_at_3 value: 8.51 - type: recall_at_5 value: 10.689 - task: type: Retrieval dataset: name: MTEB NQ-PL type: clarin-knext/nq-pl config: default split: test revision: f171245712cf85dd4700b06bef18001578d0ca8d metrics: - type: map_at_1 value: 28.616000000000003 - type: map_at_10 value: 41.626000000000005 - type: map_at_100 value: 42.689 - type: map_at_1000 value: 42.733 - type: map_at_3 value: 37.729 - type: map_at_5 value: 39.879999999999995 - type: mrr_at_1 value: 32.068000000000005 - type: 
mrr_at_10 value: 44.029 - type: mrr_at_100 value: 44.87 - type: mrr_at_1000 value: 44.901 - type: mrr_at_3 value: 40.687 - type: mrr_at_5 value: 42.625 - type: ndcg_at_1 value: 32.068000000000005 - type: ndcg_at_10 value: 48.449999999999996 - type: ndcg_at_100 value: 53.13 - type: ndcg_at_1000 value: 54.186 - type: ndcg_at_3 value: 40.983999999999995 - type: ndcg_at_5 value: 44.628 - type: precision_at_1 value: 32.068000000000005 - type: precision_at_10 value: 7.9750000000000005 - type: precision_at_100 value: 1.061 - type: precision_at_1000 value: 0.116 - type: precision_at_3 value: 18.404999999999998 - type: precision_at_5 value: 13.111 - type: recall_at_1 value: 28.616000000000003 - type: recall_at_10 value: 66.956 - type: recall_at_100 value: 87.657 - type: recall_at_1000 value: 95.548 - type: recall_at_3 value: 47.453 - type: recall_at_5 value: 55.87800000000001 - task: type: Classification dataset: name: MTEB PAC type: laugustyniak/abusive-clauses-pl config: default split: test revision: None metrics: - type: accuracy value: 69.04141326382856 - type: ap value: 77.47589122111044 - type: f1 value: 66.6332277374775 - task: type: PairClassification dataset: name: MTEB PPC type: PL-MTEB/ppc-pairclassification config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 86.4 - type: cos_sim_ap value: 94.1044939667201 - type: cos_sim_f1 value: 88.78048780487805 - type: cos_sim_precision value: 87.22044728434504 - type: cos_sim_recall value: 90.39735099337747 - type: dot_accuracy value: 86.4 - type: dot_ap value: 94.1044939667201 - type: dot_f1 value: 88.78048780487805 - type: dot_precision value: 87.22044728434504 - type: dot_recall value: 90.39735099337747 - type: euclidean_accuracy value: 86.4 - type: euclidean_ap value: 94.1044939667201 - type: euclidean_f1 value: 88.78048780487805 - type: euclidean_precision value: 87.22044728434504 - type: euclidean_recall value: 90.39735099337747 - type: manhattan_accuracy value: 86.4 - type: 
manhattan_ap value: 94.11438365697387 - type: manhattan_f1 value: 88.77968877968877 - type: manhattan_precision value: 87.84440842787681 - type: manhattan_recall value: 89.73509933774835 - type: max_accuracy value: 86.4 - type: max_ap value: 94.11438365697387 - type: max_f1 value: 88.78048780487805 - task: type: PairClassification dataset: name: MTEB PSC type: PL-MTEB/psc-pairclassification config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 97.86641929499072 - type: cos_sim_ap value: 99.36904211868182 - type: cos_sim_f1 value: 96.56203288490283 - type: cos_sim_precision value: 94.72140762463343 - type: cos_sim_recall value: 98.47560975609755 - type: dot_accuracy value: 97.86641929499072 - type: dot_ap value: 99.36904211868183 - type: dot_f1 value: 96.56203288490283 - type: dot_precision value: 94.72140762463343 - type: dot_recall value: 98.47560975609755 - type: euclidean_accuracy value: 97.86641929499072 - type: euclidean_ap value: 99.36904211868183 - type: euclidean_f1 value: 96.56203288490283 - type: euclidean_precision value: 94.72140762463343 - type: euclidean_recall value: 98.47560975609755 - type: manhattan_accuracy value: 98.14471243042672 - type: manhattan_ap value: 99.43359540492416 - type: manhattan_f1 value: 96.98795180722892 - type: manhattan_precision value: 95.83333333333334 - type: manhattan_recall value: 98.17073170731707 - type: max_accuracy value: 98.14471243042672 - type: max_ap value: 99.43359540492416 - type: max_f1 value: 96.98795180722892 - task: type: Classification dataset: name: MTEB PolEmo2.0-IN type: PL-MTEB/polemo2_in config: default split: test revision: None metrics: - type: accuracy value: 89.39058171745152 - type: f1 value: 86.8552093529568 - task: type: Classification dataset: name: MTEB PolEmo2.0-OUT type: PL-MTEB/polemo2_out config: default split: test revision: None metrics: - type: accuracy value: 74.97975708502024 - type: f1 value: 58.73081628832407 - task: type: Retrieval dataset: name: MTEB 
Quora-PL type: clarin-knext/quora-pl config: default split: test revision: 0be27e93455051e531182b85e85e425aba12e9d4 metrics: - type: map_at_1 value: 64.917 - type: map_at_10 value: 78.74600000000001 - type: map_at_100 value: 79.501 - type: map_at_1000 value: 79.524 - type: map_at_3 value: 75.549 - type: map_at_5 value: 77.495 - type: mrr_at_1 value: 74.9 - type: mrr_at_10 value: 82.112 - type: mrr_at_100 value: 82.314 - type: mrr_at_1000 value: 82.317 - type: mrr_at_3 value: 80.745 - type: mrr_at_5 value: 81.607 - type: ndcg_at_1 value: 74.83999999999999 - type: ndcg_at_10 value: 83.214 - type: ndcg_at_100 value: 84.997 - type: ndcg_at_1000 value: 85.207 - type: ndcg_at_3 value: 79.547 - type: ndcg_at_5 value: 81.46600000000001 - type: precision_at_1 value: 74.83999999999999 - type: precision_at_10 value: 12.822 - type: precision_at_100 value: 1.506 - type: precision_at_1000 value: 0.156 - type: precision_at_3 value: 34.903 - type: precision_at_5 value: 23.16 - type: recall_at_1 value: 64.917 - type: recall_at_10 value: 92.27199999999999 - type: recall_at_100 value: 98.715 - type: recall_at_1000 value: 99.854 - type: recall_at_3 value: 82.04599999999999 - type: recall_at_5 value: 87.2 - task: type: Retrieval dataset: name: MTEB SCIDOCS-PL type: clarin-knext/scidocs-pl config: default split: test revision: 45452b03f05560207ef19149545f168e596c9337 metrics: - type: map_at_1 value: 3.51 - type: map_at_10 value: 9.046999999999999 - type: map_at_100 value: 10.823 - type: map_at_1000 value: 11.144 - type: map_at_3 value: 6.257 - type: map_at_5 value: 7.648000000000001 - type: mrr_at_1 value: 17.299999999999997 - type: mrr_at_10 value: 27.419 - type: mrr_at_100 value: 28.618 - type: mrr_at_1000 value: 28.685 - type: mrr_at_3 value: 23.817 - type: mrr_at_5 value: 25.927 - type: ndcg_at_1 value: 17.299999999999997 - type: ndcg_at_10 value: 16.084 - type: ndcg_at_100 value: 23.729 - type: ndcg_at_1000 value: 29.476999999999997 - type: ndcg_at_3 value: 14.327000000000002 - 
type: ndcg_at_5 value: 13.017999999999999 - type: precision_at_1 value: 17.299999999999997 - type: precision_at_10 value: 8.63 - type: precision_at_100 value: 1.981 - type: precision_at_1000 value: 0.336 - type: precision_at_3 value: 13.4 - type: precision_at_5 value: 11.700000000000001 - type: recall_at_1 value: 3.51 - type: recall_at_10 value: 17.518 - type: recall_at_100 value: 40.275 - type: recall_at_1000 value: 68.203 - type: recall_at_3 value: 8.155 - type: recall_at_5 value: 11.875 - task: type: PairClassification dataset: name: MTEB SICK-E-PL type: PL-MTEB/sicke-pl-pairclassification config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 86.30248675091724 - type: cos_sim_ap value: 83.6756734006714 - type: cos_sim_f1 value: 74.97367497367497 - type: cos_sim_precision value: 73.91003460207612 - type: cos_sim_recall value: 76.06837606837607 - type: dot_accuracy value: 86.30248675091724 - type: dot_ap value: 83.6756734006714 - type: dot_f1 value: 74.97367497367497 - type: dot_precision value: 73.91003460207612 - type: dot_recall value: 76.06837606837607 - type: euclidean_accuracy value: 86.30248675091724 - type: euclidean_ap value: 83.67566984333091 - type: euclidean_f1 value: 74.97367497367497 - type: euclidean_precision value: 73.91003460207612 - type: euclidean_recall value: 76.06837606837607 - type: manhattan_accuracy value: 86.28210354667753 - type: manhattan_ap value: 83.64216119130171 - type: manhattan_f1 value: 74.92152075340078 - type: manhattan_precision value: 73.4107997265892 - type: manhattan_recall value: 76.49572649572649 - type: max_accuracy value: 86.30248675091724 - type: max_ap value: 83.6756734006714 - type: max_f1 value: 74.97367497367497 - task: type: STS dataset: name: MTEB SICK-R-PL type: PL-MTEB/sickr-pl-sts config: default split: test revision: None metrics: - type: cos_sim_pearson value: 82.23295940859121 - type: cos_sim_spearman value: 78.89329160768719 - type: euclidean_pearson value: 79.56019107076818 
- type: euclidean_spearman value: 78.89330209904084 - type: manhattan_pearson value: 79.76098513973719 - type: manhattan_spearman value: 79.05490162570123 - task: type: STS dataset: name: MTEB STS22 (pl) type: mteb/sts22-crosslingual-sts config: pl split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 37.732606308062486 - type: cos_sim_spearman value: 41.01645667030284 - type: euclidean_pearson value: 26.61722556367085 - type: euclidean_spearman value: 41.01645667030284 - type: manhattan_pearson value: 26.60917378970807 - type: manhattan_spearman value: 41.51335727617614 - task: type: Retrieval dataset: name: MTEB SciFact-PL type: clarin-knext/scifact-pl config: default split: test revision: 47932a35f045ef8ed01ba82bf9ff67f6e109207e metrics: - type: map_at_1 value: 54.31700000000001 - type: map_at_10 value: 65.564 - type: map_at_100 value: 66.062 - type: map_at_1000 value: 66.08699999999999 - type: map_at_3 value: 62.592999999999996 - type: map_at_5 value: 63.888 - type: mrr_at_1 value: 56.99999999999999 - type: mrr_at_10 value: 66.412 - type: mrr_at_100 value: 66.85900000000001 - type: mrr_at_1000 value: 66.88 - type: mrr_at_3 value: 64.22200000000001 - type: mrr_at_5 value: 65.206 - type: ndcg_at_1 value: 56.99999999999999 - type: ndcg_at_10 value: 70.577 - type: ndcg_at_100 value: 72.879 - type: ndcg_at_1000 value: 73.45 - type: ndcg_at_3 value: 65.5 - type: ndcg_at_5 value: 67.278 - type: precision_at_1 value: 56.99999999999999 - type: precision_at_10 value: 9.667 - type: precision_at_100 value: 1.083 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 26.0 - type: precision_at_5 value: 16.933 - type: recall_at_1 value: 54.31700000000001 - type: recall_at_10 value: 85.056 - type: recall_at_100 value: 95.667 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 71.0 - type: recall_at_5 value: 75.672 - task: type: Retrieval dataset: name: MTEB TRECCOVID-PL type: 
clarin-knext/trec-covid-pl config: default split: test revision: 81bcb408f33366c2a20ac54adafad1ae7e877fdd metrics: - type: map_at_1 value: 0.245 - type: map_at_10 value: 2.051 - type: map_at_100 value: 12.009 - type: map_at_1000 value: 27.448 - type: map_at_3 value: 0.721 - type: map_at_5 value: 1.13 - type: mrr_at_1 value: 88.0 - type: mrr_at_10 value: 93.0 - type: mrr_at_100 value: 93.0 - type: mrr_at_1000 value: 93.0 - type: mrr_at_3 value: 93.0 - type: mrr_at_5 value: 93.0 - type: ndcg_at_1 value: 85.0 - type: ndcg_at_10 value: 80.303 - type: ndcg_at_100 value: 61.23499999999999 - type: ndcg_at_1000 value: 52.978 - type: ndcg_at_3 value: 84.419 - type: ndcg_at_5 value: 82.976 - type: precision_at_1 value: 88.0 - type: precision_at_10 value: 83.39999999999999 - type: precision_at_100 value: 61.96 - type: precision_at_1000 value: 22.648 - type: precision_at_3 value: 89.333 - type: precision_at_5 value: 87.2 - type: recall_at_1 value: 0.245 - type: recall_at_10 value: 2.193 - type: recall_at_100 value: 14.938 - type: recall_at_1000 value: 48.563 - type: recall_at_3 value: 0.738 - type: recall_at_5 value: 1.173 --- # niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF This model was converted to GGUF format from [`Alibaba-NLP/gte-Qwen2-7B-instruct`](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. 
### CLI: ```bash llama-cli --hf-repo niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF --hf-file gte-qwen2-7b-instruct-q4_k_m.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF --hf-file gte-qwen2-7b-instruct-q4_k_m.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF --hf-file gte-qwen2-7b-instruct-q4_k_m.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo niancheng/gte-Qwen2-7B-instruct-Q4_K_M-GGUF --hf-file gte-qwen2-7b-instruct-q4_k_m.gguf -c 2048 ```
[ "BIOSSES", "SCIFACT" ]
Dulfary/roberta-large-bne-capitel-ner_spanish
Dulfary
token-classification
[ "transformers", "tensorboard", "safetensors", "roberta", "token-classification", "generated_from_trainer", "es", "base_model:PlanTL-GOB-ES/roberta-large-bne-capitel-ner", "base_model:finetune:PlanTL-GOB-ES/roberta-large-bne-capitel-ner", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-07-25T05:00:28Z
2024-07-25T05:42:16+00:00
18
0
--- base_model: PlanTL-GOB-ES/roberta-large-bne-capitel-ner language: - es license: apache-2.0 metrics: - precision - recall - f1 - accuracy tags: - token-classification - generated_from_trainer widget: - text: 'Karen Lopez, resisdente de la 105 # 58- 41, se encuentra en el área de cuidados intensivos' model-index: - name: roberta-large-bne-capitel-ner_spanish results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # roberta-large-bne-capitel-ner_spanish This model is a fine-tuned version of [PlanTL-GOB-ES/roberta-large-bne-capitel-ner](https://huggingface.co/PlanTL-GOB-ES/roberta-large-bne-capitel-ner) on the MEDDOCAN dataset. It achieves the following results on the evaluation set: - Loss: 0.1915 - Precision: 0.8269 - Recall: 0.6719 - F1: 0.7414 - Accuracy: 0.9561 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results ### Framework versions - Transformers 4.42.4 - Pytorch 2.3.1+cu121 - Datasets 2.20.0 - Tokenizers 0.19.1
[ "MEDDOCAN" ]
huskyhong/noname-ai-v2_5
huskyhong
text-generation
[ "transformers", "safetensors", "qwen", "text-generation", "custom_code", "autotrain_compatible", "region:us" ]
2024-07-26T08:51:03Z
2024-08-09T14:16:29+00:00
18
0
--- {} --- [切换到中文版本](README_zh.md) [Switch to English Version](README.md) # Noname AI Projects related to Noname AI/Noname AI, involving AI programs aimed at generating Noname skill codes by inputting skill effects. [modelscope Online Experience](https://www.modelscope.cn/studios/huskyhong/nonameai) Due to limited computing power, the online experience version is only a lightweight CPU version with limited precision. If needed, please choose the GPU version or full version for inference. Fine-tuned from QWen. ## Configuration Requirements To better meet usage requirements, please try to meet the following requirements: - Computer (required) - Hard disk storage space of 20G or more (required) - If using the full non-quantized version/GPU version lazy one-click package, for computers with NVIDIA graphics cards, GPU inference is used, requiring half of the graphics memory + computer physical memory (physical memory does not include virtual memory) >= 16G - If using the full non-quantized version/CPU version lazy one-click package, CPU inference is used, requiring memory (including virtual memory) to be as close as possible to >= 32G for computers without graphics cards - If using the lightweight version/GPU version lightweight lazy one-click package, for computers with NVIDIA graphics cards, GPU inference is used, requiring half of the graphics memory + computer physical memory (physical memory does not include virtual memory) >= 4G - If using the lightweight version/CPU version lightweight lazy one-click package, CPU inference is used, requiring memory (including virtual memory) to be as close as possible to >= 12G for computers without graphics cards ## Usage ### Full Model Method 1. Install Python and the corresponding Python compiler. - Note: Python compatible versions are 3.8, 3.9, 3.10, 3.11. Please do not install versions that are too high or too low. 2. 
Enter the following command in the terminal to install the required environment: ```bash pip install -r requirements.txt ``` 3. Run the program using the following Python code. The model will be automatically downloaded, and the code defaults to version 2.0 full version. ```python from transformers import AutoModelForCausalLM, AutoTokenizer from transformers.generation import GenerationConfig tokenizer = AutoTokenizer.from_pretrained("huskyhong/noname-ai-v2_5", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained("huskyhong/noname-ai-v2_5", device_map="auto", trust_remote_code=True).eval() # Load the model using GPU # model = AutoModelForCausalLM.from_pretrained("huskyhong/noname-ai-v2_5", device_map="cpu", trust_remote_code=True).eval() # Load the model using CPU model.generation_config = GenerationConfig.from_pretrained("huskyhong/noname-ai-v2_5", trust_remote_code=True) # You can specify different generation lengths, top_p, and other related hyperparameters # For the first generation model, replace "huskyhong/noname-ai-v2_5" with "huskyhong/noname-ai-v1". For lightweight version v2.5 model, replace "huskyhong/noname-ai-v2_5" with "huskyhong/noname-ai-v2_5-light" prompt = "请帮我编写一个技能,技能效果如下:" + input("请输入技能效果:") response, history = model.chat(tokenizer, prompt, history = []) print(response) prompt = "请帮我编写一张卡牌,卡牌效果如下:" + input("请输入卡牌效果:") response, history = model.chat(tokenizer, prompt, history = []) print(response) ``` Alternatively, you can use Hugging Face's pipeline for inference. ```python from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM, GenerationConfig generator = pipeline( "text-generation", model="huskyhong/noname-ai-v2_5", tokenizer="huskyhong/noname-ai-v2_5", device=0, # Choose GPU device. 
If you want to use CPU, you can set device=-1 trust_remote_code=True ) prompt = "请帮我编写一个技能,技能效果如下:" + input("请输入技能效果:") response = generator(prompt, max_length=50, top_p=0.95) # You can adjust parameters such as generation length, top_p as needed print(response[0]['generated_text']) prompt = "请帮我编写一张卡牌,卡牌效果如下:" + input("请输入卡牌效果:") response = generator(prompt, max_length=50, top_p=0.95) # You can adjust parameters such as generation length, top_p as needed print(response[0]['generated_text']) ``` 4. If automatic downloading fails, you can manually download the model files and modify "huskyhong/noname-ai-v2" to the corresponding location in the code. Download links for the second-generation model: - [v2.5 Hugging Face address (full version)](https://huggingface.co/huskyhong/noname-ai-v2_5) - [v2.5 Hugging Face address (lightweight version)](https://huggingface.co/huskyhong/noname-ai-v2_5-light) - [Baidu Netdisk address](https://pan.baidu.com/s/1m9RfGqnuQbRYROE_UzuG-Q?pwd=6666) Baidu Netdisk extraction code: 6666 Download links for the first-generation model: - [Hugging Face address](https://huggingface.co/huskyhong/noname-ai-v1) - [Baidu Netdisk address](https://pan.baidu.com/s/1Ox471XuHF_gJbcPPnSZe7g?pwd=6666) Baidu Netdisk extraction code: 6666 Remember to choose whether to load the model using GPU or CPU, and replace `your_model_name` with your actual model path. ## Lazy One-Click Package - One-click installation, no worries. - Please choose the appropriate lazy one-click package according to your own configuration. - [Lazy One-Click Package Baidu Netdisk Download Address (Updated to v2.5)](https://pan.baidu.com/s/1zIcRZtQv5oIdu7_abie9Vw?pwd=6666) Baidu Netdisk extraction code: 6666 - [Lazy One-Click Package 123 Netdisk Download Address (Updated to v2.5)](https://www.123pan.com/s/lOcnjv-pnOG3.html) 123 Netdisk extraction code: 6666 - Please pay attention to the version time of the lazy one-click package to ensure that the version is the latest! 
- Lazy package related videos - [Comparison of Effects of Lazy Package v2.5](https://www.bilibili.com/video/BV1KKY4e8EaC/) ## Web Version/Server Deployment - Install Python - Install dependencies ```bash pip install -r requirements.txt ``` - Install Streamlit ```bash pip install streamlit ``` - Allow port 8501 on the server (can also be changed to others, corresponding to webdemo.py file) - Run webdemo ```bash streamlit run webdemo.py ``` ## Training Training requires installing new dependencies: ```python pip install peft deepspeed ``` Clone the project and download the v2.3 version of the model files, taking the lightweight version as an example: ```bash git lfs install git clone https://github.com/204313508/noname_llm.git git clone https://huggingface.co/huskyhong/noname-ai-v2_3-light cd noname_llm/finetune ``` Modify the parameters required for training in the finetune script, such as model and dataset locations, then enter the following command to start training: ```bash bash finetune.sh ``` Please refer to the [Fine-tuning Guide](./finetune/README.md) for detailed steps. ## Web Version/Server Example ![webdemo1](./webdemo1.png) ![webdemo2](./webdemo2.png) ## Notes - AI generation is subject to uncontrollable factors, and the generated code does not guarantee 100% effectiveness. Bugs, redundant code, or additional special characters may still occur and require manual modification. - (Important) Follow AI specifications. This AI model is for learning and communication purposes only. Please do not use it for illegal or commercial purposes. The purpose of releasing this model is to encourage better learning and communication, and all related information involved in the model is public. I bear no responsibility for malicious use of this AI model. ## Other Content If you have any related questions, please raise them in the official GitHub issue. ## Demo Images These demo images are based on version 2.3 release. 
![demo](./demo.png) ## Sponsorship - Shamelessly begging for sponsorship ![sponsor](./sponsor.jpg)
[ "BEAR" ]
BookingCare/multilingual-e5-base-similarity-v1-onnx-quantized
BookingCare
sentence-similarity
[ "sentence-transformers", "onnx", "xlm-roberta", "mteb", "Sentence Transformers", "sentence-similarity", "multilingual", "af", "am", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "hu", "hy", "id", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ku", "ky", "la", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "om", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sa", "sd", "si", "sk", "sl", "so", "sq", "sr", "su", "sv", "sw", "ta", "te", "th", "tl", "tr", "ug", "uk", "ur", "uz", "vi", "xh", "yi", "zh", "arxiv:2402.05672", "arxiv:2108.08787", "arxiv:2104.08663", "arxiv:2210.07316", "license:mit", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-08-06T09:48:26Z
2024-12-05T08:51:34+00:00
18
0
--- language: - multilingual - af - am - ar - as - az - be - bg - bn - br - bs - ca - cs - cy - da - de - el - en - eo - es - et - eu - fa - fi - fr - fy - ga - gd - gl - gu - ha - he - hi - hr - hu - hy - id - is - it - ja - jv - ka - kk - km - kn - ko - ku - ky - la - lo - lt - lv - mg - mk - ml - mn - mr - ms - my - ne - nl - 'no' - om - or - pa - pl - ps - pt - ro - ru - sa - sd - si - sk - sl - so - sq - sr - su - sv - sw - ta - te - th - tl - tr - ug - uk - ur - uz - vi - xh - yi - zh license: mit tags: - mteb - Sentence Transformers - sentence-similarity - sentence-transformers model-index: - name: multilingual-e5-base results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 78.97014925373135 - type: ap value: 43.69351129103008 - type: f1 value: 73.38075030070492 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (de) type: mteb/amazon_counterfactual config: de split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 71.7237687366167 - type: ap value: 82.22089859962671 - type: f1 value: 69.95532758884401 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en-ext) type: mteb/amazon_counterfactual config: en-ext split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 79.65517241379312 - type: ap value: 28.507918657094738 - type: f1 value: 66.84516013726119 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (ja) type: mteb/amazon_counterfactual config: ja split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 73.32976445396146 - type: ap value: 20.720481637566014 - type: f1 value: 59.78002763416003 - task: type: Classification dataset: name: MTEB 
AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 90.63775 - type: ap value: 87.22277903861716 - type: f1 value: 90.60378636386807 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 44.546 - type: f1 value: 44.05666638370923 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (de) type: mteb/amazon_reviews_multi config: de split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 41.828 - type: f1 value: 41.2710255644252 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (es) type: mteb/amazon_reviews_multi config: es split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.534 - type: f1 value: 39.820743174270326 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (fr) type: mteb/amazon_reviews_multi config: fr split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 39.684 - type: f1 value: 39.11052682815307 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (ja) type: mteb/amazon_reviews_multi config: ja split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 37.436 - type: f1 value: 37.07082931930871 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (zh) type: mteb/amazon_reviews_multi config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 37.226000000000006 - type: f1 value: 36.65372077739185 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 
22.831000000000003 - type: map_at_10 value: 36.42 - type: map_at_100 value: 37.699 - type: map_at_1000 value: 37.724000000000004 - type: map_at_3 value: 32.207 - type: map_at_5 value: 34.312 - type: mrr_at_1 value: 23.257 - type: mrr_at_10 value: 36.574 - type: mrr_at_100 value: 37.854 - type: mrr_at_1000 value: 37.878 - type: mrr_at_3 value: 32.385000000000005 - type: mrr_at_5 value: 34.48 - type: ndcg_at_1 value: 22.831000000000003 - type: ndcg_at_10 value: 44.230000000000004 - type: ndcg_at_100 value: 49.974000000000004 - type: ndcg_at_1000 value: 50.522999999999996 - type: ndcg_at_3 value: 35.363 - type: ndcg_at_5 value: 39.164 - type: precision_at_1 value: 22.831000000000003 - type: precision_at_10 value: 6.935 - type: precision_at_100 value: 0.9520000000000001 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 14.841 - type: precision_at_5 value: 10.754 - type: recall_at_1 value: 22.831000000000003 - type: recall_at_10 value: 69.346 - type: recall_at_100 value: 95.235 - type: recall_at_1000 value: 99.36 - type: recall_at_3 value: 44.523 - type: recall_at_5 value: 53.769999999999996 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 40.27789869854063 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 35.41979463347428 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 58.22752045109304 - type: mrr value: 71.51112430198303 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a 
metrics: - type: cos_sim_pearson value: 84.71147646622866 - type: cos_sim_spearman value: 85.059167046486 - type: euclidean_pearson value: 75.88421613600647 - type: euclidean_spearman value: 75.12821787150585 - type: manhattan_pearson value: 75.22005646957604 - type: manhattan_spearman value: 74.42880434453272 - task: type: BitextMining dataset: name: MTEB BUCC (de-en) type: mteb/bucc-bitext-mining config: de-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 99.23799582463465 - type: f1 value: 99.12665274878218 - type: precision value: 99.07098121085595 - type: recall value: 99.23799582463465 - task: type: BitextMining dataset: name: MTEB BUCC (fr-en) type: mteb/bucc-bitext-mining config: fr-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 97.88685890380806 - type: f1 value: 97.59336708489249 - type: precision value: 97.44662117543473 - type: recall value: 97.88685890380806 - task: type: BitextMining dataset: name: MTEB BUCC (ru-en) type: mteb/bucc-bitext-mining config: ru-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 97.47142362313821 - type: f1 value: 97.1989377670015 - type: precision value: 97.06384944001847 - type: recall value: 97.47142362313821 - task: type: BitextMining dataset: name: MTEB BUCC (zh-en) type: mteb/bucc-bitext-mining config: zh-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 98.4728804634018 - type: f1 value: 98.2973494821836 - type: precision value: 98.2095839915745 - type: recall value: 98.4728804634018 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 82.74025974025975 - type: f1 value: 82.67420447730439 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: 
mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 35.0380848063507 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 29.45956405670166 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 32.122 - type: map_at_10 value: 42.03 - type: map_at_100 value: 43.364000000000004 - type: map_at_1000 value: 43.474000000000004 - type: map_at_3 value: 38.804 - type: map_at_5 value: 40.585 - type: mrr_at_1 value: 39.914 - type: mrr_at_10 value: 48.227 - type: mrr_at_100 value: 49.018 - type: mrr_at_1000 value: 49.064 - type: mrr_at_3 value: 45.994 - type: mrr_at_5 value: 47.396 - type: ndcg_at_1 value: 39.914 - type: ndcg_at_10 value: 47.825 - type: ndcg_at_100 value: 52.852 - type: ndcg_at_1000 value: 54.891 - type: ndcg_at_3 value: 43.517 - type: ndcg_at_5 value: 45.493 - type: precision_at_1 value: 39.914 - type: precision_at_10 value: 8.956 - type: precision_at_100 value: 1.388 - type: precision_at_1000 value: 0.182 - type: precision_at_3 value: 20.791999999999998 - type: precision_at_5 value: 14.821000000000002 - type: recall_at_1 value: 32.122 - type: recall_at_10 value: 58.294999999999995 - type: recall_at_100 value: 79.726 - type: recall_at_1000 value: 93.099 - type: recall_at_3 value: 45.017 - type: recall_at_5 value: 51.002 - type: map_at_1 value: 29.677999999999997 - type: map_at_10 value: 38.684000000000005 - type: map_at_100 value: 39.812999999999995 - type: map_at_1000 value: 39.945 - type: map_at_3 value: 35.831 - type: map_at_5 value: 37.446 - type: mrr_at_1 value: 37.771 - type: mrr_at_10 value: 44.936 - type: mrr_at_100 value: 45.583 - type: mrr_at_1000 value: 45.634 - type: mrr_at_3 value: 
42.771 - type: mrr_at_5 value: 43.994 - type: ndcg_at_1 value: 37.771 - type: ndcg_at_10 value: 44.059 - type: ndcg_at_100 value: 48.192 - type: ndcg_at_1000 value: 50.375 - type: ndcg_at_3 value: 40.172000000000004 - type: ndcg_at_5 value: 41.899 - type: precision_at_1 value: 37.771 - type: precision_at_10 value: 8.286999999999999 - type: precision_at_100 value: 1.322 - type: precision_at_1000 value: 0.178 - type: precision_at_3 value: 19.406000000000002 - type: precision_at_5 value: 13.745 - type: recall_at_1 value: 29.677999999999997 - type: recall_at_10 value: 53.071 - type: recall_at_100 value: 70.812 - type: recall_at_1000 value: 84.841 - type: recall_at_3 value: 41.016000000000005 - type: recall_at_5 value: 46.22 - type: map_at_1 value: 42.675000000000004 - type: map_at_10 value: 53.93599999999999 - type: map_at_100 value: 54.806999999999995 - type: map_at_1000 value: 54.867 - type: map_at_3 value: 50.934000000000005 - type: map_at_5 value: 52.583 - type: mrr_at_1 value: 48.339 - type: mrr_at_10 value: 57.265 - type: mrr_at_100 value: 57.873 - type: mrr_at_1000 value: 57.906 - type: mrr_at_3 value: 55.193000000000005 - type: mrr_at_5 value: 56.303000000000004 - type: ndcg_at_1 value: 48.339 - type: ndcg_at_10 value: 59.19799999999999 - type: ndcg_at_100 value: 62.743 - type: ndcg_at_1000 value: 63.99399999999999 - type: ndcg_at_3 value: 54.367 - type: ndcg_at_5 value: 56.548 - type: precision_at_1 value: 48.339 - type: precision_at_10 value: 9.216000000000001 - type: precision_at_100 value: 1.1809999999999998 - type: precision_at_1000 value: 0.134 - type: precision_at_3 value: 23.72 - type: precision_at_5 value: 16.025 - type: recall_at_1 value: 42.675000000000004 - type: recall_at_10 value: 71.437 - type: recall_at_100 value: 86.803 - type: recall_at_1000 value: 95.581 - type: recall_at_3 value: 58.434 - type: recall_at_5 value: 63.754 - type: map_at_1 value: 23.518 - type: map_at_10 value: 30.648999999999997 - type: map_at_100 value: 31.508999999999997 - 
type: map_at_1000 value: 31.604 - type: map_at_3 value: 28.247 - type: map_at_5 value: 29.65 - type: mrr_at_1 value: 25.650000000000002 - type: mrr_at_10 value: 32.771 - type: mrr_at_100 value: 33.554 - type: mrr_at_1000 value: 33.629999999999995 - type: mrr_at_3 value: 30.433 - type: mrr_at_5 value: 31.812 - type: ndcg_at_1 value: 25.650000000000002 - type: ndcg_at_10 value: 34.929 - type: ndcg_at_100 value: 39.382 - type: ndcg_at_1000 value: 41.913 - type: ndcg_at_3 value: 30.292 - type: ndcg_at_5 value: 32.629999999999995 - type: precision_at_1 value: 25.650000000000002 - type: precision_at_10 value: 5.311 - type: precision_at_100 value: 0.792 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 12.58 - type: precision_at_5 value: 8.994 - type: recall_at_1 value: 23.518 - type: recall_at_10 value: 46.19 - type: recall_at_100 value: 67.123 - type: recall_at_1000 value: 86.442 - type: recall_at_3 value: 33.678000000000004 - type: recall_at_5 value: 39.244 - type: map_at_1 value: 15.891 - type: map_at_10 value: 22.464000000000002 - type: map_at_100 value: 23.483 - type: map_at_1000 value: 23.613 - type: map_at_3 value: 20.080000000000002 - type: map_at_5 value: 21.526 - type: mrr_at_1 value: 20.025000000000002 - type: mrr_at_10 value: 26.712999999999997 - type: mrr_at_100 value: 27.650000000000002 - type: mrr_at_1000 value: 27.737000000000002 - type: mrr_at_3 value: 24.274 - type: mrr_at_5 value: 25.711000000000002 - type: ndcg_at_1 value: 20.025000000000002 - type: ndcg_at_10 value: 27.028999999999996 - type: ndcg_at_100 value: 32.064 - type: ndcg_at_1000 value: 35.188 - type: ndcg_at_3 value: 22.512999999999998 - type: ndcg_at_5 value: 24.89 - type: precision_at_1 value: 20.025000000000002 - type: precision_at_10 value: 4.776 - type: precision_at_100 value: 0.8500000000000001 - type: precision_at_1000 value: 0.125 - type: precision_at_3 value: 10.531 - type: precision_at_5 value: 7.811 - type: recall_at_1 value: 15.891 - type: recall_at_10 value: 
37.261 - type: recall_at_100 value: 59.12 - type: recall_at_1000 value: 81.356 - type: recall_at_3 value: 24.741 - type: recall_at_5 value: 30.753999999999998 - type: map_at_1 value: 27.544 - type: map_at_10 value: 36.283 - type: map_at_100 value: 37.467 - type: map_at_1000 value: 37.574000000000005 - type: map_at_3 value: 33.528999999999996 - type: map_at_5 value: 35.028999999999996 - type: mrr_at_1 value: 34.166999999999994 - type: mrr_at_10 value: 41.866 - type: mrr_at_100 value: 42.666 - type: mrr_at_1000 value: 42.716 - type: mrr_at_3 value: 39.541 - type: mrr_at_5 value: 40.768 - type: ndcg_at_1 value: 34.166999999999994 - type: ndcg_at_10 value: 41.577 - type: ndcg_at_100 value: 46.687 - type: ndcg_at_1000 value: 48.967 - type: ndcg_at_3 value: 37.177 - type: ndcg_at_5 value: 39.097 - type: precision_at_1 value: 34.166999999999994 - type: precision_at_10 value: 7.420999999999999 - type: precision_at_100 value: 1.165 - type: precision_at_1000 value: 0.154 - type: precision_at_3 value: 17.291999999999998 - type: precision_at_5 value: 12.166 - type: recall_at_1 value: 27.544 - type: recall_at_10 value: 51.99399999999999 - type: recall_at_100 value: 73.738 - type: recall_at_1000 value: 89.33 - type: recall_at_3 value: 39.179 - type: recall_at_5 value: 44.385999999999996 - type: map_at_1 value: 26.661 - type: map_at_10 value: 35.475 - type: map_at_100 value: 36.626999999999995 - type: map_at_1000 value: 36.741 - type: map_at_3 value: 32.818000000000005 - type: map_at_5 value: 34.397 - type: mrr_at_1 value: 32.647999999999996 - type: mrr_at_10 value: 40.784 - type: mrr_at_100 value: 41.602 - type: mrr_at_1000 value: 41.661 - type: mrr_at_3 value: 38.68 - type: mrr_at_5 value: 39.838 - type: ndcg_at_1 value: 32.647999999999996 - type: ndcg_at_10 value: 40.697 - type: ndcg_at_100 value: 45.799 - type: ndcg_at_1000 value: 48.235 - type: ndcg_at_3 value: 36.516 - type: ndcg_at_5 value: 38.515 - type: precision_at_1 value: 32.647999999999996 - type: precision_at_10 
value: 7.202999999999999 - type: precision_at_100 value: 1.1360000000000001 - type: precision_at_1000 value: 0.151 - type: precision_at_3 value: 17.314 - type: precision_at_5 value: 12.145999999999999 - type: recall_at_1 value: 26.661 - type: recall_at_10 value: 50.995000000000005 - type: recall_at_100 value: 73.065 - type: recall_at_1000 value: 89.781 - type: recall_at_3 value: 39.073 - type: recall_at_5 value: 44.395 - type: map_at_1 value: 25.946583333333333 - type: map_at_10 value: 33.79725 - type: map_at_100 value: 34.86408333333333 - type: map_at_1000 value: 34.9795 - type: map_at_3 value: 31.259999999999998 - type: map_at_5 value: 32.71541666666666 - type: mrr_at_1 value: 30.863749999999996 - type: mrr_at_10 value: 37.99183333333333 - type: mrr_at_100 value: 38.790499999999994 - type: mrr_at_1000 value: 38.85575000000001 - type: mrr_at_3 value: 35.82083333333333 - type: mrr_at_5 value: 37.07533333333333 - type: ndcg_at_1 value: 30.863749999999996 - type: ndcg_at_10 value: 38.52141666666667 - type: ndcg_at_100 value: 43.17966666666667 - type: ndcg_at_1000 value: 45.64608333333333 - type: ndcg_at_3 value: 34.333000000000006 - type: ndcg_at_5 value: 36.34975 - type: precision_at_1 value: 30.863749999999996 - type: precision_at_10 value: 6.598999999999999 - type: precision_at_100 value: 1.0502500000000001 - type: precision_at_1000 value: 0.14400000000000002 - type: precision_at_3 value: 15.557583333333334 - type: precision_at_5 value: 11.020000000000001 - type: recall_at_1 value: 25.946583333333333 - type: recall_at_10 value: 48.36991666666666 - type: recall_at_100 value: 69.02408333333334 - type: recall_at_1000 value: 86.43858333333331 - type: recall_at_3 value: 36.4965 - type: recall_at_5 value: 41.76258333333334 - type: map_at_1 value: 22.431 - type: map_at_10 value: 28.889 - type: map_at_100 value: 29.642000000000003 - type: map_at_1000 value: 29.742 - type: map_at_3 value: 26.998 - type: map_at_5 value: 28.172000000000004 - type: mrr_at_1 value: 
25.307000000000002 - type: mrr_at_10 value: 31.763 - type: mrr_at_100 value: 32.443 - type: mrr_at_1000 value: 32.531 - type: mrr_at_3 value: 29.959000000000003 - type: mrr_at_5 value: 31.063000000000002 - type: ndcg_at_1 value: 25.307000000000002 - type: ndcg_at_10 value: 32.586999999999996 - type: ndcg_at_100 value: 36.5 - type: ndcg_at_1000 value: 39.133 - type: ndcg_at_3 value: 29.25 - type: ndcg_at_5 value: 31.023 - type: precision_at_1 value: 25.307000000000002 - type: precision_at_10 value: 4.954 - type: precision_at_100 value: 0.747 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 12.577 - type: precision_at_5 value: 8.741999999999999 - type: recall_at_1 value: 22.431 - type: recall_at_10 value: 41.134 - type: recall_at_100 value: 59.28600000000001 - type: recall_at_1000 value: 78.857 - type: recall_at_3 value: 31.926 - type: recall_at_5 value: 36.335 - type: map_at_1 value: 17.586 - type: map_at_10 value: 23.304 - type: map_at_100 value: 24.159 - type: map_at_1000 value: 24.281 - type: map_at_3 value: 21.316 - type: map_at_5 value: 22.383 - type: mrr_at_1 value: 21.645 - type: mrr_at_10 value: 27.365000000000002 - type: mrr_at_100 value: 28.108 - type: mrr_at_1000 value: 28.192 - type: mrr_at_3 value: 25.482 - type: mrr_at_5 value: 26.479999999999997 - type: ndcg_at_1 value: 21.645 - type: ndcg_at_10 value: 27.306 - type: ndcg_at_100 value: 31.496000000000002 - type: ndcg_at_1000 value: 34.53 - type: ndcg_at_3 value: 23.73 - type: ndcg_at_5 value: 25.294 - type: precision_at_1 value: 21.645 - type: precision_at_10 value: 4.797 - type: precision_at_100 value: 0.8059999999999999 - type: precision_at_1000 value: 0.121 - type: precision_at_3 value: 10.850999999999999 - type: precision_at_5 value: 7.736 - type: recall_at_1 value: 17.586 - type: recall_at_10 value: 35.481 - type: recall_at_100 value: 54.534000000000006 - type: recall_at_1000 value: 76.456 - type: recall_at_3 value: 25.335 - type: recall_at_5 value: 29.473 - type: map_at_1 
value: 25.095 - type: map_at_10 value: 32.374 - type: map_at_100 value: 33.537 - type: map_at_1000 value: 33.634 - type: map_at_3 value: 30.089 - type: map_at_5 value: 31.433 - type: mrr_at_1 value: 29.198 - type: mrr_at_10 value: 36.01 - type: mrr_at_100 value: 37.022 - type: mrr_at_1000 value: 37.083 - type: mrr_at_3 value: 33.94 - type: mrr_at_5 value: 35.148 - type: ndcg_at_1 value: 29.198 - type: ndcg_at_10 value: 36.729 - type: ndcg_at_100 value: 42.114000000000004 - type: ndcg_at_1000 value: 44.592 - type: ndcg_at_3 value: 32.644 - type: ndcg_at_5 value: 34.652 - type: precision_at_1 value: 29.198 - type: precision_at_10 value: 5.970000000000001 - type: precision_at_100 value: 0.967 - type: precision_at_1000 value: 0.129 - type: precision_at_3 value: 14.396999999999998 - type: precision_at_5 value: 10.093 - type: recall_at_1 value: 25.095 - type: recall_at_10 value: 46.392 - type: recall_at_100 value: 69.706 - type: recall_at_1000 value: 87.738 - type: recall_at_3 value: 35.303000000000004 - type: recall_at_5 value: 40.441 - type: map_at_1 value: 26.857999999999997 - type: map_at_10 value: 34.066 - type: map_at_100 value: 35.671 - type: map_at_1000 value: 35.881 - type: map_at_3 value: 31.304 - type: map_at_5 value: 32.885 - type: mrr_at_1 value: 32.411 - type: mrr_at_10 value: 38.987 - type: mrr_at_100 value: 39.894 - type: mrr_at_1000 value: 39.959 - type: mrr_at_3 value: 36.626999999999995 - type: mrr_at_5 value: 38.011 - type: ndcg_at_1 value: 32.411 - type: ndcg_at_10 value: 39.208 - type: ndcg_at_100 value: 44.626 - type: ndcg_at_1000 value: 47.43 - type: ndcg_at_3 value: 35.091 - type: ndcg_at_5 value: 37.119 - type: precision_at_1 value: 32.411 - type: precision_at_10 value: 7.51 - type: precision_at_100 value: 1.486 - type: precision_at_1000 value: 0.234 - type: precision_at_3 value: 16.14 - type: precision_at_5 value: 11.976 - type: recall_at_1 value: 26.857999999999997 - type: recall_at_10 value: 47.407 - type: recall_at_100 value: 72.236 - type: 
recall_at_1000 value: 90.77 - type: recall_at_3 value: 35.125 - type: recall_at_5 value: 40.522999999999996 - type: map_at_1 value: 21.3 - type: map_at_10 value: 27.412999999999997 - type: map_at_100 value: 28.29 - type: map_at_1000 value: 28.398 - type: map_at_3 value: 25.169999999999998 - type: map_at_5 value: 26.496 - type: mrr_at_1 value: 23.29 - type: mrr_at_10 value: 29.215000000000003 - type: mrr_at_100 value: 30.073 - type: mrr_at_1000 value: 30.156 - type: mrr_at_3 value: 26.956000000000003 - type: mrr_at_5 value: 28.38 - type: ndcg_at_1 value: 23.29 - type: ndcg_at_10 value: 31.113000000000003 - type: ndcg_at_100 value: 35.701 - type: ndcg_at_1000 value: 38.505 - type: ndcg_at_3 value: 26.727 - type: ndcg_at_5 value: 29.037000000000003 - type: precision_at_1 value: 23.29 - type: precision_at_10 value: 4.787 - type: precision_at_100 value: 0.763 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 11.091 - type: precision_at_5 value: 7.985 - type: recall_at_1 value: 21.3 - type: recall_at_10 value: 40.782000000000004 - type: recall_at_100 value: 62.13999999999999 - type: recall_at_1000 value: 83.012 - type: recall_at_3 value: 29.131 - type: recall_at_5 value: 34.624 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 9.631 - type: map_at_10 value: 16.634999999999998 - type: map_at_100 value: 18.23 - type: map_at_1000 value: 18.419 - type: map_at_3 value: 13.66 - type: map_at_5 value: 15.173 - type: mrr_at_1 value: 21.368000000000002 - type: mrr_at_10 value: 31.56 - type: mrr_at_100 value: 32.58 - type: mrr_at_1000 value: 32.633 - type: mrr_at_3 value: 28.241 - type: mrr_at_5 value: 30.225 - type: ndcg_at_1 value: 21.368000000000002 - type: ndcg_at_10 value: 23.855999999999998 - type: ndcg_at_100 value: 30.686999999999998 - type: ndcg_at_1000 value: 34.327000000000005 - type: ndcg_at_3 value: 18.781 - type: ndcg_at_5 value: 
20.73 - type: precision_at_1 value: 21.368000000000002 - type: precision_at_10 value: 7.564 - type: precision_at_100 value: 1.496 - type: precision_at_1000 value: 0.217 - type: precision_at_3 value: 13.876 - type: precision_at_5 value: 11.062 - type: recall_at_1 value: 9.631 - type: recall_at_10 value: 29.517 - type: recall_at_100 value: 53.452 - type: recall_at_1000 value: 74.115 - type: recall_at_3 value: 17.605999999999998 - type: recall_at_5 value: 22.505 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 8.885 - type: map_at_10 value: 18.798000000000002 - type: map_at_100 value: 26.316 - type: map_at_1000 value: 27.869 - type: map_at_3 value: 13.719000000000001 - type: map_at_5 value: 15.716 - type: mrr_at_1 value: 66 - type: mrr_at_10 value: 74.263 - type: mrr_at_100 value: 74.519 - type: mrr_at_1000 value: 74.531 - type: mrr_at_3 value: 72.458 - type: mrr_at_5 value: 73.321 - type: ndcg_at_1 value: 53.87499999999999 - type: ndcg_at_10 value: 40.355999999999995 - type: ndcg_at_100 value: 44.366 - type: ndcg_at_1000 value: 51.771 - type: ndcg_at_3 value: 45.195 - type: ndcg_at_5 value: 42.187000000000005 - type: precision_at_1 value: 66 - type: precision_at_10 value: 31.75 - type: precision_at_100 value: 10.11 - type: precision_at_1000 value: 1.9800000000000002 - type: precision_at_3 value: 48.167 - type: precision_at_5 value: 40.050000000000004 - type: recall_at_1 value: 8.885 - type: recall_at_10 value: 24.471999999999998 - type: recall_at_100 value: 49.669000000000004 - type: recall_at_1000 value: 73.383 - type: recall_at_3 value: 14.872 - type: recall_at_5 value: 18.262999999999998 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 45.18 - type: f1 value: 40.26878691789978 - task: type: Retrieval dataset: name: 
MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 62.751999999999995 - type: map_at_10 value: 74.131 - type: map_at_100 value: 74.407 - type: map_at_1000 value: 74.423 - type: map_at_3 value: 72.329 - type: map_at_5 value: 73.555 - type: mrr_at_1 value: 67.282 - type: mrr_at_10 value: 78.292 - type: mrr_at_100 value: 78.455 - type: mrr_at_1000 value: 78.458 - type: mrr_at_3 value: 76.755 - type: mrr_at_5 value: 77.839 - type: ndcg_at_1 value: 67.282 - type: ndcg_at_10 value: 79.443 - type: ndcg_at_100 value: 80.529 - type: ndcg_at_1000 value: 80.812 - type: ndcg_at_3 value: 76.281 - type: ndcg_at_5 value: 78.235 - type: precision_at_1 value: 67.282 - type: precision_at_10 value: 10.078 - type: precision_at_100 value: 1.082 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 30.178 - type: precision_at_5 value: 19.232 - type: recall_at_1 value: 62.751999999999995 - type: recall_at_10 value: 91.521 - type: recall_at_100 value: 95.997 - type: recall_at_1000 value: 97.775 - type: recall_at_3 value: 83.131 - type: recall_at_5 value: 87.93299999999999 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 18.861 - type: map_at_10 value: 30.252000000000002 - type: map_at_100 value: 32.082 - type: map_at_1000 value: 32.261 - type: map_at_3 value: 25.909 - type: map_at_5 value: 28.296 - type: mrr_at_1 value: 37.346000000000004 - type: mrr_at_10 value: 45.802 - type: mrr_at_100 value: 46.611999999999995 - type: mrr_at_1000 value: 46.659 - type: mrr_at_3 value: 43.056 - type: mrr_at_5 value: 44.637 - type: ndcg_at_1 value: 37.346000000000004 - type: ndcg_at_10 value: 38.169 - type: ndcg_at_100 value: 44.864 - type: ndcg_at_1000 value: 47.974 - type: ndcg_at_3 value: 33.619 - type: ndcg_at_5 value: 35.317 - type: precision_at_1 value: 37.346000000000004 - type: precision_at_10 value: 10.693999999999999 - type: 
precision_at_100 value: 1.775 - type: precision_at_1000 value: 0.231 - type: precision_at_3 value: 22.325 - type: precision_at_5 value: 16.852 - type: recall_at_1 value: 18.861 - type: recall_at_10 value: 45.672000000000004 - type: recall_at_100 value: 70.60499999999999 - type: recall_at_1000 value: 89.216 - type: recall_at_3 value: 30.361 - type: recall_at_5 value: 36.998999999999995 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 37.852999999999994 - type: map_at_10 value: 59.961 - type: map_at_100 value: 60.78 - type: map_at_1000 value: 60.843 - type: map_at_3 value: 56.39999999999999 - type: map_at_5 value: 58.646 - type: mrr_at_1 value: 75.70599999999999 - type: mrr_at_10 value: 82.321 - type: mrr_at_100 value: 82.516 - type: mrr_at_1000 value: 82.525 - type: mrr_at_3 value: 81.317 - type: mrr_at_5 value: 81.922 - type: ndcg_at_1 value: 75.70599999999999 - type: ndcg_at_10 value: 68.557 - type: ndcg_at_100 value: 71.485 - type: ndcg_at_1000 value: 72.71600000000001 - type: ndcg_at_3 value: 63.524 - type: ndcg_at_5 value: 66.338 - type: precision_at_1 value: 75.70599999999999 - type: precision_at_10 value: 14.463000000000001 - type: precision_at_100 value: 1.677 - type: precision_at_1000 value: 0.184 - type: precision_at_3 value: 40.806 - type: precision_at_5 value: 26.709 - type: recall_at_1 value: 37.852999999999994 - type: recall_at_10 value: 72.316 - type: recall_at_100 value: 83.842 - type: recall_at_1000 value: 91.999 - type: recall_at_3 value: 61.209 - type: recall_at_5 value: 66.77199999999999 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 85.46039999999999 - type: ap value: 79.9812521351881 - type: f1 value: 85.31722909702084 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default 
split: dev revision: None metrics: - type: map_at_1 value: 22.704 - type: map_at_10 value: 35.329 - type: map_at_100 value: 36.494 - type: map_at_1000 value: 36.541000000000004 - type: map_at_3 value: 31.476 - type: map_at_5 value: 33.731 - type: mrr_at_1 value: 23.294999999999998 - type: mrr_at_10 value: 35.859 - type: mrr_at_100 value: 36.968 - type: mrr_at_1000 value: 37.008 - type: mrr_at_3 value: 32.085 - type: mrr_at_5 value: 34.299 - type: ndcg_at_1 value: 23.324 - type: ndcg_at_10 value: 42.274 - type: ndcg_at_100 value: 47.839999999999996 - type: ndcg_at_1000 value: 48.971 - type: ndcg_at_3 value: 34.454 - type: ndcg_at_5 value: 38.464 - type: precision_at_1 value: 23.324 - type: precision_at_10 value: 6.648 - type: precision_at_100 value: 0.9440000000000001 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.674999999999999 - type: precision_at_5 value: 10.850999999999999 - type: recall_at_1 value: 22.704 - type: recall_at_10 value: 63.660000000000004 - type: recall_at_100 value: 89.29899999999999 - type: recall_at_1000 value: 97.88900000000001 - type: recall_at_3 value: 42.441 - type: recall_at_5 value: 52.04 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.1326949384405 - type: f1 value: 92.89743579612082 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (de) type: mteb/mtop_domain config: de split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 89.62524654832347 - type: f1 value: 88.65106082263151 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (es) type: mteb/mtop_domain config: es split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 90.59039359573046 - type: f1 value: 90.31532892105662 - task: type: Classification dataset: name: MTEB 
MTOPDomainClassification (fr) type: mteb/mtop_domain config: fr split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 86.21046038208581 - type: f1 value: 86.41459529813113 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (hi) type: mteb/mtop_domain config: hi split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 87.3180351380423 - type: f1 value: 86.71383078226444 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (th) type: mteb/mtop_domain config: th split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 86.24231464737792 - type: f1 value: 86.31845567592403 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 75.27131782945736 - type: f1 value: 57.52079940417103 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (de) type: mteb/mtop_intent config: de split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 71.2341504649197 - type: f1 value: 51.349951558039244 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (es) type: mteb/mtop_intent config: es split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 71.27418278852569 - type: f1 value: 50.1714985749095 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (fr) type: mteb/mtop_intent config: fr split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 67.68243031631694 - type: f1 value: 50.1066160836192 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (hi) type: mteb/mtop_intent config: hi split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 
69.2362854069559 - type: f1 value: 48.821279948766424 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (th) type: mteb/mtop_intent config: th split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 71.71428571428571 - type: f1 value: 53.94611389496195 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (af) type: mteb/amazon_massive_intent config: af split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.97646267652992 - type: f1 value: 57.26797883561521 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (am) type: mteb/amazon_massive_intent config: am split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 53.65501008742435 - type: f1 value: 50.416258382177034 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ar) type: mteb/amazon_massive_intent config: ar split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.45796906523201 - type: f1 value: 53.306690547422185 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (az) type: mteb/amazon_massive_intent config: az split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.59246805648957 - type: f1 value: 59.818381969051494 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (bn) type: mteb/amazon_massive_intent config: bn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.126429051782104 - type: f1 value: 58.25993593933026 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (cy) type: mteb/amazon_massive_intent config: cy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 50.057162071284466 - type: f1 value: 46.96095728790911 - task: type: 
Classification dataset: name: MTEB MassiveIntentClassification (da) type: mteb/amazon_massive_intent config: da split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.64425016812375 - type: f1 value: 62.858291698755764 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (de) type: mteb/amazon_massive_intent config: de split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.08944182918628 - type: f1 value: 62.44639030604241 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (el) type: mteb/amazon_massive_intent config: el split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.68056489576328 - type: f1 value: 61.775326758789504 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.11163416274377 - type: f1 value: 69.70789096927015 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (es) type: mteb/amazon_massive_intent config: es split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.40282447881641 - type: f1 value: 66.38492065671895 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fa) type: mteb/amazon_massive_intent config: fa split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.24613315400134 - type: f1 value: 64.3348019501336 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fi) type: mteb/amazon_massive_intent config: fi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.78345662407531 - type: f1 value: 62.21279452354622 - task: type: Classification dataset: name: MTEB MassiveIntentClassification 
(fr) type: mteb/amazon_massive_intent config: fr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.9455279085407 - type: f1 value: 65.48193124964094 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (he) type: mteb/amazon_massive_intent config: he split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.05110961667788 - type: f1 value: 58.097856564684534 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hi) type: mteb/amazon_massive_intent config: hi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.95292535305985 - type: f1 value: 62.09182174767901 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hu) type: mteb/amazon_massive_intent config: hu split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.97310020174848 - type: f1 value: 61.14252567730396 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hy) type: mteb/amazon_massive_intent config: hy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.08069939475453 - type: f1 value: 57.044041742492034 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (id) type: mteb/amazon_massive_intent config: id split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.63752521856085 - type: f1 value: 63.889340907205316 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (is) type: mteb/amazon_massive_intent config: is split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.385339609952936 - type: f1 value: 53.449033750088304 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (it) type: mteb/amazon_massive_intent config: it split: test 
revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.93073301950234 - type: f1 value: 65.9884357824104 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ja) type: mteb/amazon_massive_intent config: ja split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.94418291862812 - type: f1 value: 66.48740222583132 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (jv) type: mteb/amazon_massive_intent config: jv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.26025554808339 - type: f1 value: 50.19562815100793 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ka) type: mteb/amazon_massive_intent config: ka split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 48.98789509078682 - type: f1 value: 46.65788438676836 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (km) type: mteb/amazon_massive_intent config: km split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 44.68728984532616 - type: f1 value: 41.642419349541996 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (kn) type: mteb/amazon_massive_intent config: kn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.19300605245461 - type: f1 value: 55.8626492442437 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ko) type: mteb/amazon_massive_intent config: ko split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.33826496301278 - type: f1 value: 63.89499791648792 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (lv) type: mteb/amazon_massive_intent config: lv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: 
accuracy value: 60.33960995292536 - type: f1 value: 57.15242464180892 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ml) type: mteb/amazon_massive_intent config: ml split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.09347679892402 - type: f1 value: 59.64733214063841 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (mn) type: mteb/amazon_massive_intent config: mn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.75924680564896 - type: f1 value: 55.96585692366827 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ms) type: mteb/amazon_massive_intent config: ms split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.48486886348352 - type: f1 value: 59.45143559032946 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (my) type: mteb/amazon_massive_intent config: my split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.56422326832549 - type: f1 value: 54.96368702901926 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (nb) type: mteb/amazon_massive_intent config: nb split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.18022864828512 - type: f1 value: 63.05369805040634 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (nl) type: mteb/amazon_massive_intent config: nl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.30329522528581 - type: f1 value: 64.06084612020727 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pl) type: mteb/amazon_massive_intent config: pl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.36919973100201 - type: f1 value: 
65.12154124788887 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pt) type: mteb/amazon_massive_intent config: pt split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.98117014122394 - type: f1 value: 66.41847559806962 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ro) type: mteb/amazon_massive_intent config: ro split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.53799596503026 - type: f1 value: 62.17067330740817 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ru) type: mteb/amazon_massive_intent config: ru split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.01815736381977 - type: f1 value: 66.24988369607843 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sl) type: mteb/amazon_massive_intent config: sl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.34700739744452 - type: f1 value: 59.957933424941636 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sq) type: mteb/amazon_massive_intent config: sq split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.23402824478815 - type: f1 value: 57.98836976018471 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sv) type: mteb/amazon_massive_intent config: sv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.54068594485541 - type: f1 value: 65.43849680666855 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sw) type: mteb/amazon_massive_intent config: sw split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 55.998655010087425 - type: f1 value: 52.83737515406804 - task: type: Classification dataset: name: 
MTEB MassiveIntentClassification (ta) type: mteb/amazon_massive_intent config: ta split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.71217215870882 - type: f1 value: 55.051794977833026 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (te) type: mteb/amazon_massive_intent config: te split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.724277067921996 - type: f1 value: 56.33485571838306 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (th) type: mteb/amazon_massive_intent config: th split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.59515803631473 - type: f1 value: 64.96772366193588 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (tl) type: mteb/amazon_massive_intent config: tl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.860793544048406 - type: f1 value: 58.148845819115394 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (tr) type: mteb/amazon_massive_intent config: tr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.40753194351043 - type: f1 value: 63.18903778054698 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ur) type: mteb/amazon_massive_intent config: ur split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.52320107599194 - type: f1 value: 58.356144563398516 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (vi) type: mteb/amazon_massive_intent config: vi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.17014122394083 - type: f1 value: 63.919964062638925 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-CN) type: 
mteb/amazon_massive_intent config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.15601882985878 - type: f1 value: 67.01451905761371 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-TW) type: mteb/amazon_massive_intent config: zh-TW split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.65030262273034 - type: f1 value: 64.14420425129063 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (af) type: mteb/amazon_massive_scenario config: af split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.08742434431743 - type: f1 value: 63.044060042311756 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (am) type: mteb/amazon_massive_scenario config: am split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 58.52387357094821 - type: f1 value: 56.82398588814534 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ar) type: mteb/amazon_massive_scenario config: ar split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.239408204438476 - type: f1 value: 61.92570286170469 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (az) type: mteb/amazon_massive_scenario config: az split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.74915938130463 - type: f1 value: 62.130740689396276 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (bn) type: mteb/amazon_massive_scenario config: bn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.00336247478144 - type: f1 value: 63.71080635228055 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (cy) type: mteb/amazon_massive_scenario 
config: cy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 52.837928715534645 - type: f1 value: 50.390741680320836 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (da) type: mteb/amazon_massive_scenario config: da split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.42098184263618 - type: f1 value: 71.41355113538995 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (de) type: mteb/amazon_massive_scenario config: de split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.95359784801613 - type: f1 value: 71.42699340156742 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (el) type: mteb/amazon_massive_scenario config: el split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.18157363819772 - type: f1 value: 69.74836113037671 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.08137188971082 - type: f1 value: 76.78000685068261 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (es) type: mteb/amazon_massive_scenario config: es split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.5030262273033 - type: f1 value: 71.71620130425673 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fa) type: mteb/amazon_massive_scenario config: fa split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.24546065904505 - type: f1 value: 69.07638311730359 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fi) type: mteb/amazon_massive_scenario config: fi split: test revision: 
7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.12911903160726 - type: f1 value: 68.32651736539815 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fr) type: mteb/amazon_massive_scenario config: fr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.89307330195025 - type: f1 value: 71.33986549860187 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (he) type: mteb/amazon_massive_scenario config: he split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.44451916610626 - type: f1 value: 66.90192664503866 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hi) type: mteb/amazon_massive_scenario config: hi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.16274377942166 - type: f1 value: 68.01090953775066 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hu) type: mteb/amazon_massive_scenario config: hu split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.75319435104237 - type: f1 value: 70.18035309201403 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hy) type: mteb/amazon_massive_scenario config: hy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.14391392064559 - type: f1 value: 61.48286540778145 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (id) type: mteb/amazon_massive_scenario config: id split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.70275722932078 - type: f1 value: 70.26164779846495 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (is) type: mteb/amazon_massive_scenario config: is split: test revision: 
7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 60.93813046402153 - type: f1 value: 58.8852862116525 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (it) type: mteb/amazon_massive_scenario config: it split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.320107599193 - type: f1 value: 72.19836409602924 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ja) type: mteb/amazon_massive_scenario config: ja split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.65366509751176 - type: f1 value: 74.55188288799579 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (jv) type: mteb/amazon_massive_scenario config: jv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.694014794889036 - type: f1 value: 58.11353311721067 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ka) type: mteb/amazon_massive_scenario config: ka split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 54.37457969065231 - type: f1 value: 52.81306134311697 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (km) type: mteb/amazon_massive_scenario config: km split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 48.3086751849361 - type: f1 value: 45.396449765419376 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (kn) type: mteb/amazon_massive_scenario config: kn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.151983860121064 - type: f1 value: 60.31762544281696 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ko) type: mteb/amazon_massive_scenario config: ko split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 
metrics: - type: accuracy value: 72.44788164088769 - type: f1 value: 71.68150151736367 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (lv) type: mteb/amazon_massive_scenario config: lv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.81439139206455 - type: f1 value: 62.06735559105593 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ml) type: mteb/amazon_massive_scenario config: ml split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.04303967720242 - type: f1 value: 66.68298851670133 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (mn) type: mteb/amazon_massive_scenario config: mn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.43913920645595 - type: f1 value: 60.25605977560783 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ms) type: mteb/amazon_massive_scenario config: ms split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.90316072629456 - type: f1 value: 65.1325924692381 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (my) type: mteb/amazon_massive_scenario config: my split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.63752521856086 - type: f1 value: 59.14284778039585 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (nb) type: mteb/amazon_massive_scenario config: nb split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.63080026899797 - type: f1 value: 70.89771864626877 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (nl) type: mteb/amazon_massive_scenario config: nl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 
72.10827168796234 - type: f1 value: 71.71954219691159 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (pl) type: mteb/amazon_massive_scenario config: pl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.59515803631471 - type: f1 value: 70.05040128099003 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (pt) type: mteb/amazon_massive_scenario config: pt split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.83389374579691 - type: f1 value: 70.84877936562735 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ro) type: mteb/amazon_massive_scenario config: ro split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.18628110289173 - type: f1 value: 68.97232927921841 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ru) type: mteb/amazon_massive_scenario config: ru split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.99260255548083 - type: f1 value: 72.85139492157732 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sl) type: mteb/amazon_massive_scenario config: sl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.26227303295225 - type: f1 value: 65.08833655469431 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sq) type: mteb/amazon_massive_scenario config: sq split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.48621385339611 - type: f1 value: 64.43483199071298 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sv) type: mteb/amazon_massive_scenario config: sv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 73.14391392064559 - type: f1 value: 
72.2580822579741 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sw) type: mteb/amazon_massive_scenario config: sw split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.88567585743107 - type: f1 value: 58.3073765932569 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ta) type: mteb/amazon_massive_scenario config: ta split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.38399462004034 - type: f1 value: 60.82139544252606 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (te) type: mteb/amazon_massive_scenario config: te split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.58574310692671 - type: f1 value: 60.71443370385374 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (th) type: mteb/amazon_massive_scenario config: th split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.61398789509079 - type: f1 value: 70.99761812049401 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (tl) type: mteb/amazon_massive_scenario config: tl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.73705447209146 - type: f1 value: 61.680849331794796 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (tr) type: mteb/amazon_massive_scenario config: tr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.66778749159381 - type: f1 value: 71.17320646080115 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ur) type: mteb/amazon_massive_scenario config: ur split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 64.640215198386 - type: f1 value: 63.301805157015444 - task: type: 
Classification dataset: name: MTEB MassiveScenarioClassification (vi) type: mteb/amazon_massive_scenario config: vi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.00672494956288 - type: f1 value: 70.26005548582106 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-CN) type: mteb/amazon_massive_scenario config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 75.42030934767989 - type: f1 value: 75.2074842882598 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-TW) type: mteb/amazon_massive_scenario config: zh-TW split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.69266980497646 - type: f1 value: 70.94103167391192 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 28.91697191169135 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 28.434000079573313 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 30.96683513343383 - type: mrr value: 31.967364078714834 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.5280000000000005 - type: map_at_10 value: 11.793 - type: map_at_100 value: 14.496999999999998 - type: map_at_1000 value: 15.783 - type: map_at_3 value: 8.838 - type: map_at_5 value: 10.07 - type: mrr_at_1 value: 43.653 - type: mrr_at_10 value: 51.531000000000006 - type: mrr_at_100 value: 52.205 - type: 
mrr_at_1000 value: 52.242999999999995 - type: mrr_at_3 value: 49.431999999999995 - type: mrr_at_5 value: 50.470000000000006 - type: ndcg_at_1 value: 42.415000000000006 - type: ndcg_at_10 value: 32.464999999999996 - type: ndcg_at_100 value: 28.927999999999997 - type: ndcg_at_1000 value: 37.629000000000005 - type: ndcg_at_3 value: 37.845 - type: ndcg_at_5 value: 35.147 - type: precision_at_1 value: 43.653 - type: precision_at_10 value: 23.932000000000002 - type: precision_at_100 value: 7.17 - type: precision_at_1000 value: 1.967 - type: precision_at_3 value: 35.397 - type: precision_at_5 value: 29.907 - type: recall_at_1 value: 5.5280000000000005 - type: recall_at_10 value: 15.568000000000001 - type: recall_at_100 value: 28.54 - type: recall_at_1000 value: 59.864 - type: recall_at_3 value: 9.822000000000001 - type: recall_at_5 value: 11.726 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 37.041000000000004 - type: map_at_10 value: 52.664 - type: map_at_100 value: 53.477 - type: map_at_1000 value: 53.505 - type: map_at_3 value: 48.510999999999996 - type: map_at_5 value: 51.036 - type: mrr_at_1 value: 41.338 - type: mrr_at_10 value: 55.071000000000005 - type: mrr_at_100 value: 55.672 - type: mrr_at_1000 value: 55.689 - type: mrr_at_3 value: 51.82 - type: mrr_at_5 value: 53.852 - type: ndcg_at_1 value: 41.338 - type: ndcg_at_10 value: 60.01800000000001 - type: ndcg_at_100 value: 63.409000000000006 - type: ndcg_at_1000 value: 64.017 - type: ndcg_at_3 value: 52.44799999999999 - type: ndcg_at_5 value: 56.571000000000005 - type: precision_at_1 value: 41.338 - type: precision_at_10 value: 9.531 - type: precision_at_100 value: 1.145 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 23.416 - type: precision_at_5 value: 16.46 - type: recall_at_1 value: 37.041000000000004 - type: recall_at_10 value: 79.76299999999999 - type: recall_at_100 value: 94.39 - type: recall_at_1000 
value: 98.851 - type: recall_at_3 value: 60.465 - type: recall_at_5 value: 69.906 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 69.952 - type: map_at_10 value: 83.758 - type: map_at_100 value: 84.406 - type: map_at_1000 value: 84.425 - type: map_at_3 value: 80.839 - type: map_at_5 value: 82.646 - type: mrr_at_1 value: 80.62 - type: mrr_at_10 value: 86.947 - type: mrr_at_100 value: 87.063 - type: mrr_at_1000 value: 87.064 - type: mrr_at_3 value: 85.96000000000001 - type: mrr_at_5 value: 86.619 - type: ndcg_at_1 value: 80.63 - type: ndcg_at_10 value: 87.64800000000001 - type: ndcg_at_100 value: 88.929 - type: ndcg_at_1000 value: 89.054 - type: ndcg_at_3 value: 84.765 - type: ndcg_at_5 value: 86.291 - type: precision_at_1 value: 80.63 - type: precision_at_10 value: 13.314 - type: precision_at_100 value: 1.525 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.1 - type: precision_at_5 value: 24.372 - type: recall_at_1 value: 69.952 - type: recall_at_10 value: 94.955 - type: recall_at_100 value: 99.38 - type: recall_at_1000 value: 99.96000000000001 - type: recall_at_3 value: 86.60600000000001 - type: recall_at_5 value: 90.997 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 42.41329517878427 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 55.171278362748666 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.213 - type: map_at_10 value: 9.895 - type: map_at_100 value: 11.776 - type: map_at_1000 value: 12.084 - type: map_at_3 value: 
7.2669999999999995 - type: map_at_5 value: 8.620999999999999 - type: mrr_at_1 value: 20.8 - type: mrr_at_10 value: 31.112000000000002 - type: mrr_at_100 value: 32.274 - type: mrr_at_1000 value: 32.35 - type: mrr_at_3 value: 28.133000000000003 - type: mrr_at_5 value: 29.892999999999997 - type: ndcg_at_1 value: 20.8 - type: ndcg_at_10 value: 17.163999999999998 - type: ndcg_at_100 value: 24.738 - type: ndcg_at_1000 value: 30.316 - type: ndcg_at_3 value: 16.665 - type: ndcg_at_5 value: 14.478 - type: precision_at_1 value: 20.8 - type: precision_at_10 value: 8.74 - type: precision_at_100 value: 1.963 - type: precision_at_1000 value: 0.33 - type: precision_at_3 value: 15.467 - type: precision_at_5 value: 12.6 - type: recall_at_1 value: 4.213 - type: recall_at_10 value: 17.698 - type: recall_at_100 value: 39.838 - type: recall_at_1000 value: 66.893 - type: recall_at_3 value: 9.418 - type: recall_at_5 value: 12.773000000000001 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 82.90453315738294 - type: cos_sim_spearman value: 78.51197850080254 - type: euclidean_pearson value: 80.09647123597748 - type: euclidean_spearman value: 78.63548011514061 - type: manhattan_pearson value: 80.10645285675231 - type: manhattan_spearman value: 78.57861806068901 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 84.2616156846401 - type: cos_sim_spearman value: 76.69713867850156 - type: euclidean_pearson value: 77.97948563800394 - type: euclidean_spearman value: 74.2371211567807 - type: manhattan_pearson value: 77.69697879669705 - type: manhattan_spearman value: 73.86529778022278 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca 
metrics: - type: cos_sim_pearson value: 77.0293269315045 - type: cos_sim_spearman value: 78.02555120584198 - type: euclidean_pearson value: 78.25398100379078 - type: euclidean_spearman value: 78.66963870599464 - type: manhattan_pearson value: 78.14314682167348 - type: manhattan_spearman value: 78.57692322969135 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 79.16989925136942 - type: cos_sim_spearman value: 76.5996225327091 - type: euclidean_pearson value: 77.8319003279786 - type: euclidean_spearman value: 76.42824009468998 - type: manhattan_pearson value: 77.69118862737736 - type: manhattan_spearman value: 76.25568104762812 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 87.42012286935325 - type: cos_sim_spearman value: 88.15654297884122 - type: euclidean_pearson value: 87.34082819427852 - type: euclidean_spearman value: 88.06333589547084 - type: manhattan_pearson value: 87.25115596784842 - type: manhattan_spearman value: 87.9559927695203 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 82.88222044996712 - type: cos_sim_spearman value: 84.28476589061077 - type: euclidean_pearson value: 83.17399758058309 - type: euclidean_spearman value: 83.85497357244542 - type: manhattan_pearson value: 83.0308397703786 - type: manhattan_spearman value: 83.71554539935046 - task: type: STS dataset: name: MTEB STS17 (ko-ko) type: mteb/sts17-crosslingual-sts config: ko-ko split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 80.20682986257339 - type: cos_sim_spearman value: 79.94567120362092 - type: euclidean_pearson value: 
79.43122480368902 - type: euclidean_spearman value: 79.94802077264987 - type: manhattan_pearson value: 79.32653021527081 - type: manhattan_spearman value: 79.80961146709178 - task: type: STS dataset: name: MTEB STS17 (ar-ar) type: mteb/sts17-crosslingual-sts config: ar-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 74.46578144394383 - type: cos_sim_spearman value: 74.52496637472179 - type: euclidean_pearson value: 72.2903807076809 - type: euclidean_spearman value: 73.55549359771645 - type: manhattan_pearson value: 72.09324837709393 - type: manhattan_spearman value: 73.36743103606581 - task: type: STS dataset: name: MTEB STS17 (en-ar) type: mteb/sts17-crosslingual-sts config: en-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 71.37272335116 - type: cos_sim_spearman value: 71.26702117766037 - type: euclidean_pearson value: 67.114829954434 - type: euclidean_spearman value: 66.37938893947761 - type: manhattan_pearson value: 66.79688574095246 - type: manhattan_spearman value: 66.17292828079667 - task: type: STS dataset: name: MTEB STS17 (en-de) type: mteb/sts17-crosslingual-sts config: en-de split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 80.61016770129092 - type: cos_sim_spearman value: 82.08515426632214 - type: euclidean_pearson value: 80.557340361131 - type: euclidean_spearman value: 80.37585812266175 - type: manhattan_pearson value: 80.6782873404285 - type: manhattan_spearman value: 80.6678073032024 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.00150745350108 - type: cos_sim_spearman value: 87.83441972211425 - type: euclidean_pearson value: 87.94826702308792 - type: euclidean_spearman value: 87.46143974860725 - type: manhattan_pearson 
value: 87.97560344306105 - type: manhattan_spearman value: 87.5267102829796 - task: type: STS dataset: name: MTEB STS17 (en-tr) type: mteb/sts17-crosslingual-sts config: en-tr split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 64.76325252267235 - type: cos_sim_spearman value: 63.32615095463905 - type: euclidean_pearson value: 64.07920669155716 - type: euclidean_spearman value: 61.21409893072176 - type: manhattan_pearson value: 64.26308625680016 - type: manhattan_spearman value: 61.2438185254079 - task: type: STS dataset: name: MTEB STS17 (es-en) type: mteb/sts17-crosslingual-sts config: es-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 75.82644463022595 - type: cos_sim_spearman value: 76.50381269945073 - type: euclidean_pearson value: 75.1328548315934 - type: euclidean_spearman value: 75.63761139408453 - type: manhattan_pearson value: 75.18610101241407 - type: manhattan_spearman value: 75.30669266354164 - task: type: STS dataset: name: MTEB STS17 (es-es) type: mteb/sts17-crosslingual-sts config: es-es split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.49994164686832 - type: cos_sim_spearman value: 86.73743986245549 - type: euclidean_pearson value: 86.8272894387145 - type: euclidean_spearman value: 85.97608491000507 - type: manhattan_pearson value: 86.74960140396779 - type: manhattan_spearman value: 85.79285984190273 - task: type: STS dataset: name: MTEB STS17 (fr-en) type: mteb/sts17-crosslingual-sts config: fr-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 79.58172210788469 - type: cos_sim_spearman value: 80.17516468334607 - type: euclidean_pearson value: 77.56537843470504 - type: euclidean_spearman value: 77.57264627395521 - type: manhattan_pearson value: 78.09703521695943 - type: manhattan_spearman value: 78.15942760916954 - task: type: STS 
dataset: name: MTEB STS17 (it-en) type: mteb/sts17-crosslingual-sts config: it-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 79.7589932931751 - type: cos_sim_spearman value: 80.15210089028162 - type: euclidean_pearson value: 77.54135223516057 - type: euclidean_spearman value: 77.52697996368764 - type: manhattan_pearson value: 77.65734439572518 - type: manhattan_spearman value: 77.77702992016121 - task: type: STS dataset: name: MTEB STS17 (nl-en) type: mteb/sts17-crosslingual-sts config: nl-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 79.16682365511267 - type: cos_sim_spearman value: 79.25311267628506 - type: euclidean_pearson value: 77.54882036762244 - type: euclidean_spearman value: 77.33212935194827 - type: manhattan_pearson value: 77.98405516064015 - type: manhattan_spearman value: 77.85075717865719 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 59.10473294775917 - type: cos_sim_spearman value: 61.82780474476838 - type: euclidean_pearson value: 45.885111672377256 - type: euclidean_spearman value: 56.88306351932454 - type: manhattan_pearson value: 46.101218127323186 - type: manhattan_spearman value: 56.80953694186333 - task: type: STS dataset: name: MTEB STS22 (de) type: mteb/sts22-crosslingual-sts config: de split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 45.781923079584146 - type: cos_sim_spearman value: 55.95098449691107 - type: euclidean_pearson value: 25.4571031323205 - type: euclidean_spearman value: 49.859978118078935 - type: manhattan_pearson value: 25.624938455041384 - type: manhattan_spearman value: 49.99546185049401 - task: type: STS dataset: name: MTEB STS22 (es) type: mteb/sts22-crosslingual-sts config: es split: test revision: 
6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 60.00618133997907 - type: cos_sim_spearman value: 66.57896677718321 - type: euclidean_pearson value: 42.60118466388821 - type: euclidean_spearman value: 62.8210759715209 - type: manhattan_pearson value: 42.63446860604094 - type: manhattan_spearman value: 62.73803068925271 - task: type: STS dataset: name: MTEB STS22 (pl) type: mteb/sts22-crosslingual-sts config: pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 28.460759121626943 - type: cos_sim_spearman value: 34.13459007469131 - type: euclidean_pearson value: 6.0917739325525195 - type: euclidean_spearman value: 27.9947262664867 - type: manhattan_pearson value: 6.16877864169911 - type: manhattan_spearman value: 28.00664163971514 - task: type: STS dataset: name: MTEB STS22 (tr) type: mteb/sts22-crosslingual-sts config: tr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 57.42546621771696 - type: cos_sim_spearman value: 63.699663168970474 - type: euclidean_pearson value: 38.12085278789738 - type: euclidean_spearman value: 58.12329140741536 - type: manhattan_pearson value: 37.97364549443335 - type: manhattan_spearman value: 57.81545502318733 - task: type: STS dataset: name: MTEB STS22 (ar) type: mteb/sts22-crosslingual-sts config: ar split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 46.82241380954213 - type: cos_sim_spearman value: 57.86569456006391 - type: euclidean_pearson value: 31.80480070178813 - type: euclidean_spearman value: 52.484000620130104 - type: manhattan_pearson value: 31.952708554646097 - type: manhattan_spearman value: 52.8560972356195 - task: type: STS dataset: name: MTEB STS22 (ru) type: mteb/sts22-crosslingual-sts config: ru split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 52.00447170498087 - type: 
cos_sim_spearman value: 60.664116225735164 - type: euclidean_pearson value: 33.87382555421702 - type: euclidean_spearman value: 55.74649067458667 - type: manhattan_pearson value: 33.99117246759437 - type: manhattan_spearman value: 55.98749034923899 - task: type: STS dataset: name: MTEB STS22 (zh) type: mteb/sts22-crosslingual-sts config: zh split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 58.06497233105448 - type: cos_sim_spearman value: 65.62968801135676 - type: euclidean_pearson value: 47.482076613243905 - type: euclidean_spearman value: 62.65137791498299 - type: manhattan_pearson value: 47.57052626104093 - type: manhattan_spearman value: 62.436916516613294 - task: type: STS dataset: name: MTEB STS22 (fr) type: mteb/sts22-crosslingual-sts config: fr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 70.49397298562575 - type: cos_sim_spearman value: 74.79604041187868 - type: euclidean_pearson value: 49.661891561317795 - type: euclidean_spearman value: 70.31535537621006 - type: manhattan_pearson value: 49.553715741850006 - type: manhattan_spearman value: 70.24779344636806 - task: type: STS dataset: name: MTEB STS22 (de-en) type: mteb/sts22-crosslingual-sts config: de-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 55.640574515348696 - type: cos_sim_spearman value: 54.927959317689 - type: euclidean_pearson value: 29.00139666967476 - type: euclidean_spearman value: 41.86386566971605 - type: manhattan_pearson value: 29.47411067730344 - type: manhattan_spearman value: 42.337438424952786 - task: type: STS dataset: name: MTEB STS22 (es-en) type: mteb/sts22-crosslingual-sts config: es-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 68.14095292259312 - type: cos_sim_spearman value: 73.99017581234789 - type: euclidean_pearson value: 46.46304297872084 - 
type: euclidean_spearman value: 60.91834114800041 - type: manhattan_pearson value: 47.07072666338692 - type: manhattan_spearman value: 61.70415727977926 - task: type: STS dataset: name: MTEB STS22 (it) type: mteb/sts22-crosslingual-sts config: it split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 73.27184653359575 - type: cos_sim_spearman value: 77.76070252418626 - type: euclidean_pearson value: 62.30586577544778 - type: euclidean_spearman value: 75.14246629110978 - type: manhattan_pearson value: 62.328196884927046 - type: manhattan_spearman value: 75.1282792981433 - task: type: STS dataset: name: MTEB STS22 (pl-en) type: mteb/sts22-crosslingual-sts config: pl-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 71.59448528829957 - type: cos_sim_spearman value: 70.37277734222123 - type: euclidean_pearson value: 57.63145565721123 - type: euclidean_spearman value: 66.10113048304427 - type: manhattan_pearson value: 57.18897811586808 - type: manhattan_spearman value: 66.5595511215901 - task: type: STS dataset: name: MTEB STS22 (zh-en) type: mteb/sts22-crosslingual-sts config: zh-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 66.37520607720838 - type: cos_sim_spearman value: 69.92282148997948 - type: euclidean_pearson value: 40.55768770125291 - type: euclidean_spearman value: 55.189128944669605 - type: manhattan_pearson value: 41.03566433468883 - type: manhattan_spearman value: 55.61251893174558 - task: type: STS dataset: name: MTEB STS22 (es-it) type: mteb/sts22-crosslingual-sts config: es-it split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 57.791929533771835 - type: cos_sim_spearman value: 66.45819707662093 - type: euclidean_pearson value: 39.03686018511092 - type: euclidean_spearman value: 56.01282695640428 - type: manhattan_pearson value: 
38.91586623619632 - type: manhattan_spearman value: 56.69394943612747 - task: type: STS dataset: name: MTEB STS22 (de-fr) type: mteb/sts22-crosslingual-sts config: de-fr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 47.82224468473866 - type: cos_sim_spearman value: 59.467307194781164 - type: euclidean_pearson value: 27.428459190256145 - type: euclidean_spearman value: 60.83463107397519 - type: manhattan_pearson value: 27.487391578496638 - type: manhattan_spearman value: 61.281380460246496 - task: type: STS dataset: name: MTEB STS22 (de-pl) type: mteb/sts22-crosslingual-sts config: de-pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 16.306666792752644 - type: cos_sim_spearman value: 39.35486427252405 - type: euclidean_pearson value: -2.7887154897955435 - type: euclidean_spearman value: 27.1296051831719 - type: manhattan_pearson value: -3.202291270581297 - type: manhattan_spearman value: 26.32895849218158 - task: type: STS dataset: name: MTEB STS22 (fr-pl) type: mteb/sts22-crosslingual-sts config: fr-pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 59.67006803805076 - type: cos_sim_spearman value: 73.24670207647144 - type: euclidean_pearson value: 46.91884681500483 - type: euclidean_spearman value: 16.903085094570333 - type: manhattan_pearson value: 46.88391675325812 - type: manhattan_spearman value: 28.17180849095055 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 83.79555591223837 - type: cos_sim_spearman value: 85.63658602085185 - type: euclidean_pearson value: 85.22080894037671 - type: euclidean_spearman value: 85.54113580167038 - type: manhattan_pearson value: 85.1639505960118 - type: manhattan_spearman value: 85.43502665436196 - task: type: 
Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 80.73900991689766 - type: mrr value: 94.81624131133934 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 55.678000000000004 - type: map_at_10 value: 65.135 - type: map_at_100 value: 65.824 - type: map_at_1000 value: 65.852 - type: map_at_3 value: 62.736000000000004 - type: map_at_5 value: 64.411 - type: mrr_at_1 value: 58.333 - type: mrr_at_10 value: 66.5 - type: mrr_at_100 value: 67.053 - type: mrr_at_1000 value: 67.08 - type: mrr_at_3 value: 64.944 - type: mrr_at_5 value: 65.89399999999999 - type: ndcg_at_1 value: 58.333 - type: ndcg_at_10 value: 69.34700000000001 - type: ndcg_at_100 value: 72.32 - type: ndcg_at_1000 value: 73.014 - type: ndcg_at_3 value: 65.578 - type: ndcg_at_5 value: 67.738 - type: precision_at_1 value: 58.333 - type: precision_at_10 value: 9.033 - type: precision_at_100 value: 1.0670000000000002 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 25.444 - type: precision_at_5 value: 16.933 - type: recall_at_1 value: 55.678000000000004 - type: recall_at_10 value: 80.72200000000001 - type: recall_at_100 value: 93.93299999999999 - type: recall_at_1000 value: 99.333 - type: recall_at_3 value: 70.783 - type: recall_at_5 value: 75.978 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.74653465346535 - type: cos_sim_ap value: 93.01476369929063 - type: cos_sim_f1 value: 86.93009118541033 - type: cos_sim_precision value: 88.09034907597535 - type: cos_sim_recall value: 85.8 - type: dot_accuracy value: 99.22970297029703 - type: dot_ap value: 51.58725659485144 
- type: dot_f1 value: 53.51351351351352 - type: dot_precision value: 58.235294117647065 - type: dot_recall value: 49.5 - type: euclidean_accuracy value: 99.74356435643564 - type: euclidean_ap value: 92.40332894384368 - type: euclidean_f1 value: 86.97838109602817 - type: euclidean_precision value: 87.46208291203236 - type: euclidean_recall value: 86.5 - type: manhattan_accuracy value: 99.73069306930694 - type: manhattan_ap value: 92.01320815721121 - type: manhattan_f1 value: 86.4135864135864 - type: manhattan_precision value: 86.32734530938124 - type: manhattan_recall value: 86.5 - type: max_accuracy value: 99.74653465346535 - type: max_ap value: 93.01476369929063 - type: max_f1 value: 86.97838109602817 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 55.2660514302523 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 30.4637783572547 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 49.41377758357637 - type: mrr value: 50.138451213818854 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 28.887846011166594 - type: cos_sim_spearman value: 30.10823258355903 - type: dot_pearson value: 12.888049550236385 - type: dot_spearman value: 12.827495903098123 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.21 - type: map_at_10 
value: 1.667 - type: map_at_100 value: 9.15 - type: map_at_1000 value: 22.927 - type: map_at_3 value: 0.573 - type: map_at_5 value: 0.915 - type: mrr_at_1 value: 80 - type: mrr_at_10 value: 87.167 - type: mrr_at_100 value: 87.167 - type: mrr_at_1000 value: 87.167 - type: mrr_at_3 value: 85.667 - type: mrr_at_5 value: 87.167 - type: ndcg_at_1 value: 76 - type: ndcg_at_10 value: 69.757 - type: ndcg_at_100 value: 52.402 - type: ndcg_at_1000 value: 47.737 - type: ndcg_at_3 value: 71.866 - type: ndcg_at_5 value: 72.225 - type: precision_at_1 value: 80 - type: precision_at_10 value: 75 - type: precision_at_100 value: 53.959999999999994 - type: precision_at_1000 value: 21.568 - type: precision_at_3 value: 76.667 - type: precision_at_5 value: 78 - type: recall_at_1 value: 0.21 - type: recall_at_10 value: 1.9189999999999998 - type: recall_at_100 value: 12.589 - type: recall_at_1000 value: 45.312000000000005 - type: recall_at_3 value: 0.61 - type: recall_at_5 value: 1.019 - task: type: BitextMining dataset: name: MTEB Tatoeba (sqi-eng) type: mteb/tatoeba-bitext-mining config: sqi-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.10000000000001 - type: f1 value: 90.06 - type: precision value: 89.17333333333333 - type: recall value: 92.10000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (fry-eng) type: mteb/tatoeba-bitext-mining config: fry-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 56.06936416184971 - type: f1 value: 50.87508028259473 - type: precision value: 48.97398843930635 - type: recall value: 56.06936416184971 - task: type: BitextMining dataset: name: MTEB Tatoeba (kur-eng) type: mteb/tatoeba-bitext-mining config: kur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 57.3170731707317 - type: f1 value: 52.96080139372822 - type: precision value: 51.67861124382864 - type: recall value: 
57.3170731707317 - task: type: BitextMining dataset: name: MTEB Tatoeba (tur-eng) type: mteb/tatoeba-bitext-mining config: tur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.3 - type: f1 value: 92.67333333333333 - type: precision value: 91.90833333333333 - type: recall value: 94.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (deu-eng) type: mteb/tatoeba-bitext-mining config: deu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.7 - type: f1 value: 97.07333333333332 - type: precision value: 96.79500000000002 - type: recall value: 97.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (nld-eng) type: mteb/tatoeba-bitext-mining config: nld-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.69999999999999 - type: f1 value: 93.2 - type: precision value: 92.48333333333333 - type: recall value: 94.69999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (ron-eng) type: mteb/tatoeba-bitext-mining config: ron-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.9 - type: f1 value: 91.26666666666667 - type: precision value: 90.59444444444445 - type: recall value: 92.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (ang-eng) type: mteb/tatoeba-bitext-mining config: ang-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 34.32835820895522 - type: f1 value: 29.074180380150533 - type: precision value: 28.068207322920596 - type: recall value: 34.32835820895522 - task: type: BitextMining dataset: name: MTEB Tatoeba (ido-eng) type: mteb/tatoeba-bitext-mining config: ido-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.5 - type: f1 value: 74.3945115995116 - type: precision value: 72.82967843459222 - type: recall value: 78.5 - task: type: 
BitextMining dataset: name: MTEB Tatoeba (jav-eng) type: mteb/tatoeba-bitext-mining config: jav-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 66.34146341463415 - type: f1 value: 61.2469400518181 - type: precision value: 59.63977756660683 - type: recall value: 66.34146341463415 - task: type: BitextMining dataset: name: MTEB Tatoeba (isl-eng) type: mteb/tatoeba-bitext-mining config: isl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 80.9 - type: f1 value: 76.90349206349207 - type: precision value: 75.32921568627451 - type: recall value: 80.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (slv-eng) type: mteb/tatoeba-bitext-mining config: slv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.93317132442284 - type: f1 value: 81.92519105034295 - type: precision value: 80.71283920615635 - type: recall value: 84.93317132442284 - task: type: BitextMining dataset: name: MTEB Tatoeba (cym-eng) type: mteb/tatoeba-bitext-mining config: cym-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 71.1304347826087 - type: f1 value: 65.22394755003451 - type: precision value: 62.912422360248435 - type: recall value: 71.1304347826087 - task: type: BitextMining dataset: name: MTEB Tatoeba (kaz-eng) type: mteb/tatoeba-bitext-mining config: kaz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 79.82608695652173 - type: f1 value: 75.55693581780538 - type: precision value: 73.79420289855072 - type: recall value: 79.82608695652173 - task: type: BitextMining dataset: name: MTEB Tatoeba (est-eng) type: mteb/tatoeba-bitext-mining config: est-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 74 - type: f1 value: 70.51022222222223 - type: precision value: 69.29673599347512 - type: recall 
value: 74 - task: type: BitextMining dataset: name: MTEB Tatoeba (heb-eng) type: mteb/tatoeba-bitext-mining config: heb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.7 - type: f1 value: 74.14238095238095 - type: precision value: 72.27214285714285 - type: recall value: 78.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (gla-eng) type: mteb/tatoeba-bitext-mining config: gla-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 48.97466827503016 - type: f1 value: 43.080330405420874 - type: precision value: 41.36505499593557 - type: recall value: 48.97466827503016 - task: type: BitextMining dataset: name: MTEB Tatoeba (mar-eng) type: mteb/tatoeba-bitext-mining config: mar-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.60000000000001 - type: f1 value: 86.62333333333333 - type: precision value: 85.225 - type: recall value: 89.60000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (lat-eng) type: mteb/tatoeba-bitext-mining config: lat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 45.2 - type: f1 value: 39.5761253006253 - type: precision value: 37.991358436312 - type: recall value: 45.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (bel-eng) type: mteb/tatoeba-bitext-mining config: bel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.5 - type: f1 value: 86.70333333333333 - type: precision value: 85.53166666666667 - type: recall value: 89.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (pms-eng) type: mteb/tatoeba-bitext-mining config: pms-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 50.095238095238095 - type: f1 value: 44.60650460650461 - type: precision value: 42.774116796477045 - type: recall value: 
50.095238095238095 - task: type: BitextMining dataset: name: MTEB Tatoeba (gle-eng) type: mteb/tatoeba-bitext-mining config: gle-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 63.4 - type: f1 value: 58.35967261904762 - type: precision value: 56.54857142857143 - type: recall value: 63.4 - task: type: BitextMining dataset: name: MTEB Tatoeba (pes-eng) type: mteb/tatoeba-bitext-mining config: pes-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.2 - type: f1 value: 87.075 - type: precision value: 86.12095238095239 - type: recall value: 89.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (nob-eng) type: mteb/tatoeba-bitext-mining config: nob-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.8 - type: f1 value: 95.90333333333334 - type: precision value: 95.50833333333333 - type: recall value: 96.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (bul-eng) type: mteb/tatoeba-bitext-mining config: bul-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.9 - type: f1 value: 88.6288888888889 - type: precision value: 87.61607142857142 - type: recall value: 90.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (cbk-eng) type: mteb/tatoeba-bitext-mining config: cbk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 65.2 - type: f1 value: 60.54377630539395 - type: precision value: 58.89434482711381 - type: recall value: 65.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (hun-eng) type: mteb/tatoeba-bitext-mining config: hun-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87 - type: f1 value: 84.32412698412699 - type: precision value: 83.25527777777778 - type: recall value: 87 - task: type: BitextMining dataset: name: MTEB Tatoeba (uig-eng) type: 
mteb/tatoeba-bitext-mining config: uig-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 68.7 - type: f1 value: 63.07883541295306 - type: precision value: 61.06117424242426 - type: recall value: 68.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (rus-eng) type: mteb/tatoeba-bitext-mining config: rus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.7 - type: f1 value: 91.78333333333335 - type: precision value: 90.86666666666667 - type: recall value: 93.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (spa-eng) type: mteb/tatoeba-bitext-mining config: spa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.7 - type: f1 value: 96.96666666666667 - type: precision value: 96.61666666666667 - type: recall value: 97.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (hye-eng) type: mteb/tatoeba-bitext-mining config: hye-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.27493261455525 - type: f1 value: 85.90745732255168 - type: precision value: 84.91389637616052 - type: recall value: 88.27493261455525 - task: type: BitextMining dataset: name: MTEB Tatoeba (tel-eng) type: mteb/tatoeba-bitext-mining config: tel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.5982905982906 - type: f1 value: 88.4900284900285 - type: precision value: 87.57122507122507 - type: recall value: 90.5982905982906 - task: type: BitextMining dataset: name: MTEB Tatoeba (afr-eng) type: mteb/tatoeba-bitext-mining config: afr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.5 - type: f1 value: 86.90769841269842 - type: precision value: 85.80178571428571 - type: recall value: 89.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (mon-eng) type: 
mteb/tatoeba-bitext-mining config: mon-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 82.5 - type: f1 value: 78.36796536796538 - type: precision value: 76.82196969696969 - type: recall value: 82.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (arz-eng) type: mteb/tatoeba-bitext-mining config: arz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 71.48846960167715 - type: f1 value: 66.78771089148448 - type: precision value: 64.98302885095339 - type: recall value: 71.48846960167715 - task: type: BitextMining dataset: name: MTEB Tatoeba (hrv-eng) type: mteb/tatoeba-bitext-mining config: hrv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.1 - type: f1 value: 92.50333333333333 - type: precision value: 91.77499999999999 - type: recall value: 94.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (nov-eng) type: mteb/tatoeba-bitext-mining config: nov-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 71.20622568093385 - type: f1 value: 66.83278891450098 - type: precision value: 65.35065777283677 - type: recall value: 71.20622568093385 - task: type: BitextMining dataset: name: MTEB Tatoeba (gsw-eng) type: mteb/tatoeba-bitext-mining config: gsw-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 48.717948717948715 - type: f1 value: 43.53146853146853 - type: precision value: 42.04721204721204 - type: recall value: 48.717948717948715 - task: type: BitextMining dataset: name: MTEB Tatoeba (nds-eng) type: mteb/tatoeba-bitext-mining config: nds-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 58.5 - type: f1 value: 53.8564991863928 - type: precision value: 52.40329436122275 - type: recall value: 58.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (ukr-eng) 
type: mteb/tatoeba-bitext-mining config: ukr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.8 - type: f1 value: 88.29 - type: precision value: 87.09166666666667 - type: recall value: 90.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (uzb-eng) type: mteb/tatoeba-bitext-mining config: uzb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 67.28971962616822 - type: f1 value: 62.63425307817832 - type: precision value: 60.98065939771546 - type: recall value: 67.28971962616822 - task: type: BitextMining dataset: name: MTEB Tatoeba (lit-eng) type: mteb/tatoeba-bitext-mining config: lit-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.7 - type: f1 value: 75.5264472455649 - type: precision value: 74.38205086580086 - type: recall value: 78.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (ina-eng) type: mteb/tatoeba-bitext-mining config: ina-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.7 - type: f1 value: 86.10809523809525 - type: precision value: 85.07602564102565 - type: recall value: 88.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (lfn-eng) type: mteb/tatoeba-bitext-mining config: lfn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 56.99999999999999 - type: f1 value: 52.85487521402737 - type: precision value: 51.53985162713104 - type: recall value: 56.99999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (zsm-eng) type: mteb/tatoeba-bitext-mining config: zsm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94 - type: f1 value: 92.45333333333333 - type: precision value: 91.79166666666667 - type: recall value: 94 - task: type: BitextMining dataset: name: MTEB Tatoeba (ita-eng) type: mteb/tatoeba-bitext-mining 
config: ita-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.30000000000001 - type: f1 value: 90.61333333333333 - type: precision value: 89.83333333333331 - type: recall value: 92.30000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (cmn-eng) type: mteb/tatoeba-bitext-mining config: cmn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.69999999999999 - type: f1 value: 93.34555555555555 - type: precision value: 92.75416666666668 - type: recall value: 94.69999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (lvs-eng) type: mteb/tatoeba-bitext-mining config: lvs-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 80.2 - type: f1 value: 76.6563035113035 - type: precision value: 75.3014652014652 - type: recall value: 80.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (glg-eng) type: mteb/tatoeba-bitext-mining config: glg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.7 - type: f1 value: 82.78689263765207 - type: precision value: 82.06705086580087 - type: recall value: 84.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (ceb-eng) type: mteb/tatoeba-bitext-mining config: ceb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 50.33333333333333 - type: f1 value: 45.461523661523664 - type: precision value: 43.93545574795575 - type: recall value: 50.33333333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (bre-eng) type: mteb/tatoeba-bitext-mining config: bre-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 6.6000000000000005 - type: f1 value: 5.442121400446441 - type: precision value: 5.146630385487529 - type: recall value: 6.6000000000000005 - task: type: BitextMining dataset: name: MTEB Tatoeba (ben-eng) 
type: mteb/tatoeba-bitext-mining config: ben-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85 - type: f1 value: 81.04666666666667 - type: precision value: 79.25 - type: recall value: 85 - task: type: BitextMining dataset: name: MTEB Tatoeba (swg-eng) type: mteb/tatoeba-bitext-mining config: swg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 47.32142857142857 - type: f1 value: 42.333333333333336 - type: precision value: 40.69196428571429 - type: recall value: 47.32142857142857 - task: type: BitextMining dataset: name: MTEB Tatoeba (arq-eng) type: mteb/tatoeba-bitext-mining config: arq-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 30.735455543358945 - type: f1 value: 26.73616790022338 - type: precision value: 25.397823220451283 - type: recall value: 30.735455543358945 - task: type: BitextMining dataset: name: MTEB Tatoeba (kab-eng) type: mteb/tatoeba-bitext-mining config: kab-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 25.1 - type: f1 value: 21.975989896371022 - type: precision value: 21.059885632257203 - type: recall value: 25.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (fra-eng) type: mteb/tatoeba-bitext-mining config: fra-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.3 - type: f1 value: 92.75666666666666 - type: precision value: 92.06166666666665 - type: recall value: 94.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (por-eng) type: mteb/tatoeba-bitext-mining config: por-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.1 - type: f1 value: 92.74 - type: precision value: 92.09166666666667 - type: recall value: 94.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (tat-eng) type: mteb/tatoeba-bitext-mining config: 
tat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 71.3 - type: f1 value: 66.922442002442 - type: precision value: 65.38249567099568 - type: recall value: 71.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (oci-eng) type: mteb/tatoeba-bitext-mining config: oci-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 40.300000000000004 - type: f1 value: 35.78682789299971 - type: precision value: 34.66425128716588 - type: recall value: 40.300000000000004 - task: type: BitextMining dataset: name: MTEB Tatoeba (pol-eng) type: mteb/tatoeba-bitext-mining config: pol-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96 - type: f1 value: 94.82333333333334 - type: precision value: 94.27833333333334 - type: recall value: 96 - task: type: BitextMining dataset: name: MTEB Tatoeba (war-eng) type: mteb/tatoeba-bitext-mining config: war-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 51.1 - type: f1 value: 47.179074753133584 - type: precision value: 46.06461044702424 - type: recall value: 51.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (aze-eng) type: mteb/tatoeba-bitext-mining config: aze-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.7 - type: f1 value: 84.71 - type: precision value: 83.46166666666667 - type: recall value: 87.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (vie-eng) type: mteb/tatoeba-bitext-mining config: vie-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.8 - type: f1 value: 94.68333333333334 - type: precision value: 94.13333333333334 - type: recall value: 95.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (nno-eng) type: mteb/tatoeba-bitext-mining config: nno-eng split: test revision: 
9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85.39999999999999 - type: f1 value: 82.5577380952381 - type: precision value: 81.36833333333334 - type: recall value: 85.39999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (cha-eng) type: mteb/tatoeba-bitext-mining config: cha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 21.16788321167883 - type: f1 value: 16.948865627297987 - type: precision value: 15.971932568647897 - type: recall value: 21.16788321167883 - task: type: BitextMining dataset: name: MTEB Tatoeba (mhr-eng) type: mteb/tatoeba-bitext-mining config: mhr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 6.9 - type: f1 value: 5.515526831658907 - type: precision value: 5.141966366966367 - type: recall value: 6.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (dan-eng) type: mteb/tatoeba-bitext-mining config: dan-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.2 - type: f1 value: 91.39666666666668 - type: precision value: 90.58666666666667 - type: recall value: 93.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (ell-eng) type: mteb/tatoeba-bitext-mining config: ell-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.2 - type: f1 value: 89.95666666666666 - type: precision value: 88.92833333333333 - type: recall value: 92.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (amh-eng) type: mteb/tatoeba-bitext-mining config: amh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 79.76190476190477 - type: f1 value: 74.93386243386244 - type: precision value: 73.11011904761904 - type: recall value: 79.76190476190477 - task: type: BitextMining dataset: name: MTEB Tatoeba (pam-eng) type: mteb/tatoeba-bitext-mining config: pam-eng split: test 
revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 8.799999999999999 - type: f1 value: 6.921439712248537 - type: precision value: 6.489885109680683 - type: recall value: 8.799999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (hsb-eng) type: mteb/tatoeba-bitext-mining config: hsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 45.75569358178054 - type: f1 value: 40.34699501312631 - type: precision value: 38.57886764719063 - type: recall value: 45.75569358178054 - task: type: BitextMining dataset: name: MTEB Tatoeba (srp-eng) type: mteb/tatoeba-bitext-mining config: srp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.4 - type: f1 value: 89.08333333333333 - type: precision value: 88.01666666666668 - type: recall value: 91.4 - task: type: BitextMining dataset: name: MTEB Tatoeba (epo-eng) type: mteb/tatoeba-bitext-mining config: epo-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.60000000000001 - type: f1 value: 92.06690476190477 - type: precision value: 91.45095238095239 - type: recall value: 93.60000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (kzj-eng) type: mteb/tatoeba-bitext-mining config: kzj-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 7.5 - type: f1 value: 6.200363129378736 - type: precision value: 5.89115314822466 - type: recall value: 7.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (awa-eng) type: mteb/tatoeba-bitext-mining config: awa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 73.59307359307358 - type: f1 value: 68.38933553219267 - type: precision value: 66.62698412698413 - type: recall value: 73.59307359307358 - task: type: BitextMining dataset: name: MTEB Tatoeba (fao-eng) type: mteb/tatoeba-bitext-mining 
config: fao-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 69.8473282442748 - type: f1 value: 64.72373682297346 - type: precision value: 62.82834214131924 - type: recall value: 69.8473282442748 - task: type: BitextMining dataset: name: MTEB Tatoeba (mal-eng) type: mteb/tatoeba-bitext-mining config: mal-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.5254730713246 - type: f1 value: 96.72489082969432 - type: precision value: 96.33672974284326 - type: recall value: 97.5254730713246 - task: type: BitextMining dataset: name: MTEB Tatoeba (ile-eng) type: mteb/tatoeba-bitext-mining config: ile-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 75.6 - type: f1 value: 72.42746031746033 - type: precision value: 71.14036630036631 - type: recall value: 75.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (bos-eng) type: mteb/tatoeba-bitext-mining config: bos-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.24293785310734 - type: f1 value: 88.86064030131826 - type: precision value: 87.73540489642184 - type: recall value: 91.24293785310734 - task: type: BitextMining dataset: name: MTEB Tatoeba (cor-eng) type: mteb/tatoeba-bitext-mining config: cor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 6.2 - type: f1 value: 4.383083659794954 - type: precision value: 4.027861324289673 - type: recall value: 6.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (cat-eng) type: mteb/tatoeba-bitext-mining config: cat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.8 - type: f1 value: 84.09428571428572 - type: precision value: 83.00333333333333 - type: recall value: 86.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (eus-eng) type: mteb/tatoeba-bitext-mining 
config: eus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 60.699999999999996 - type: f1 value: 56.1584972394755 - type: precision value: 54.713456330903135 - type: recall value: 60.699999999999996 - task: type: BitextMining dataset: name: MTEB Tatoeba (yue-eng) type: mteb/tatoeba-bitext-mining config: yue-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.2 - type: f1 value: 80.66190476190475 - type: precision value: 79.19690476190476 - type: recall value: 84.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (swe-eng) type: mteb/tatoeba-bitext-mining config: swe-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.2 - type: f1 value: 91.33 - type: precision value: 90.45 - type: recall value: 93.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (dtp-eng) type: mteb/tatoeba-bitext-mining config: dtp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 6.3 - type: f1 value: 5.126828976748276 - type: precision value: 4.853614328966668 - type: recall value: 6.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (kat-eng) type: mteb/tatoeba-bitext-mining config: kat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 81.76943699731903 - type: f1 value: 77.82873739308057 - type: precision value: 76.27622452019234 - type: recall value: 81.76943699731903 - task: type: BitextMining dataset: name: MTEB Tatoeba (jpn-eng) type: mteb/tatoeba-bitext-mining config: jpn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.30000000000001 - type: f1 value: 90.29666666666665 - type: precision value: 89.40333333333334 - type: recall value: 92.30000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (csb-eng) type: mteb/tatoeba-bitext-mining config: csb-eng 
split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 29.249011857707508 - type: f1 value: 24.561866096392947 - type: precision value: 23.356583740215456 - type: recall value: 29.249011857707508 - task: type: BitextMining dataset: name: MTEB Tatoeba (xho-eng) type: mteb/tatoeba-bitext-mining config: xho-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.46478873239437 - type: f1 value: 73.23943661971832 - type: precision value: 71.66666666666667 - type: recall value: 77.46478873239437 - task: type: BitextMining dataset: name: MTEB Tatoeba (orv-eng) type: mteb/tatoeba-bitext-mining config: orv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 20.35928143712575 - type: f1 value: 15.997867865075824 - type: precision value: 14.882104658301346 - type: recall value: 20.35928143712575 - task: type: BitextMining dataset: name: MTEB Tatoeba (ind-eng) type: mteb/tatoeba-bitext-mining config: ind-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.2 - type: f1 value: 90.25999999999999 - type: precision value: 89.45333333333335 - type: recall value: 92.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (tuk-eng) type: mteb/tatoeba-bitext-mining config: tuk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 23.15270935960591 - type: f1 value: 19.65673625772148 - type: precision value: 18.793705293464992 - type: recall value: 23.15270935960591 - task: type: BitextMining dataset: name: MTEB Tatoeba (max-eng) type: mteb/tatoeba-bitext-mining config: max-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 59.154929577464785 - type: f1 value: 52.3868463305083 - type: precision value: 50.14938113529662 - type: recall value: 59.154929577464785 - task: type: BitextMining dataset: name: MTEB 
Tatoeba (swh-eng) type: mteb/tatoeba-bitext-mining config: swh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 70.51282051282051 - type: f1 value: 66.8089133089133 - type: precision value: 65.37645687645687 - type: recall value: 70.51282051282051 - task: type: BitextMining dataset: name: MTEB Tatoeba (hin-eng) type: mteb/tatoeba-bitext-mining config: hin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.6 - type: f1 value: 93 - type: precision value: 92.23333333333333 - type: recall value: 94.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (dsb-eng) type: mteb/tatoeba-bitext-mining config: dsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 38.62212943632568 - type: f1 value: 34.3278276962583 - type: precision value: 33.07646935732408 - type: recall value: 38.62212943632568 - task: type: BitextMining dataset: name: MTEB Tatoeba (ber-eng) type: mteb/tatoeba-bitext-mining config: ber-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 28.1 - type: f1 value: 23.579609223054604 - type: precision value: 22.39622774921555 - type: recall value: 28.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (tam-eng) type: mteb/tatoeba-bitext-mining config: tam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.27361563517914 - type: f1 value: 85.12486427795874 - type: precision value: 83.71335504885994 - type: recall value: 88.27361563517914 - task: type: BitextMining dataset: name: MTEB Tatoeba (slk-eng) type: mteb/tatoeba-bitext-mining config: slk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.6 - type: f1 value: 86.39928571428571 - type: precision value: 85.4947557997558 - type: recall value: 88.6 - task: type: BitextMining dataset: name: MTEB Tatoeba 
(tgl-eng) type: mteb/tatoeba-bitext-mining config: tgl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.5 - type: f1 value: 83.77952380952381 - type: precision value: 82.67602564102565 - type: recall value: 86.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (ast-eng) type: mteb/tatoeba-bitext-mining config: ast-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 79.52755905511812 - type: f1 value: 75.3055868016498 - type: precision value: 73.81889763779527 - type: recall value: 79.52755905511812 - task: type: BitextMining dataset: name: MTEB Tatoeba (mkd-eng) type: mteb/tatoeba-bitext-mining config: mkd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.9 - type: f1 value: 73.76261904761905 - type: precision value: 72.11670995670995 - type: recall value: 77.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (khm-eng) type: mteb/tatoeba-bitext-mining config: khm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 53.8781163434903 - type: f1 value: 47.25804051288816 - type: precision value: 45.0603482390186 - type: recall value: 53.8781163434903 - task: type: BitextMining dataset: name: MTEB Tatoeba (ces-eng) type: mteb/tatoeba-bitext-mining config: ces-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.10000000000001 - type: f1 value: 88.88 - type: precision value: 87.96333333333334 - type: recall value: 91.10000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (tzl-eng) type: mteb/tatoeba-bitext-mining config: tzl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 38.46153846153847 - type: f1 value: 34.43978243978244 - type: precision value: 33.429487179487175 - type: recall value: 38.46153846153847 - task: type: BitextMining dataset: 
name: MTEB Tatoeba (urd-eng) type: mteb/tatoeba-bitext-mining config: urd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.9 - type: f1 value: 86.19888888888887 - type: precision value: 85.07440476190476 - type: recall value: 88.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (ara-eng) type: mteb/tatoeba-bitext-mining config: ara-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85.9 - type: f1 value: 82.58857142857143 - type: precision value: 81.15666666666667 - type: recall value: 85.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (kor-eng) type: mteb/tatoeba-bitext-mining config: kor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.8 - type: f1 value: 83.36999999999999 - type: precision value: 81.86833333333333 - type: recall value: 86.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (yid-eng) type: mteb/tatoeba-bitext-mining config: yid-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 68.51415094339622 - type: f1 value: 63.195000099481234 - type: precision value: 61.394033442972116 - type: recall value: 68.51415094339622 - task: type: BitextMining dataset: name: MTEB Tatoeba (fin-eng) type: mteb/tatoeba-bitext-mining config: fin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.5 - type: f1 value: 86.14603174603175 - type: precision value: 85.1162037037037 - type: recall value: 88.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (tha-eng) type: mteb/tatoeba-bitext-mining config: tha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.62043795620438 - type: f1 value: 94.40389294403892 - type: precision value: 93.7956204379562 - type: recall value: 95.62043795620438 - task: type: BitextMining dataset: name: MTEB Tatoeba 
(wuu-eng) type: mteb/tatoeba-bitext-mining config: wuu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 81.8 - type: f1 value: 78.6532178932179 - type: precision value: 77.46348795840176 - type: recall value: 81.8 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.603 - type: map_at_10 value: 8.5 - type: map_at_100 value: 12.985 - type: map_at_1000 value: 14.466999999999999 - type: map_at_3 value: 4.859999999999999 - type: map_at_5 value: 5.817 - type: mrr_at_1 value: 28.571 - type: mrr_at_10 value: 42.331 - type: mrr_at_100 value: 43.592999999999996 - type: mrr_at_1000 value: 43.592999999999996 - type: mrr_at_3 value: 38.435 - type: mrr_at_5 value: 39.966 - type: ndcg_at_1 value: 26.531 - type: ndcg_at_10 value: 21.353 - type: ndcg_at_100 value: 31.087999999999997 - type: ndcg_at_1000 value: 43.163000000000004 - type: ndcg_at_3 value: 22.999 - type: ndcg_at_5 value: 21.451 - type: precision_at_1 value: 28.571 - type: precision_at_10 value: 19.387999999999998 - type: precision_at_100 value: 6.265 - type: precision_at_1000 value: 1.4160000000000001 - type: precision_at_3 value: 24.490000000000002 - type: precision_at_5 value: 21.224 - type: recall_at_1 value: 2.603 - type: recall_at_10 value: 14.474 - type: recall_at_100 value: 40.287 - type: recall_at_1000 value: 76.606 - type: recall_at_3 value: 5.978 - type: recall_at_5 value: 7.819 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 69.7848 - type: ap value: 13.661023167088224 - type: f1 value: 53.61686134460943 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: 
d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 61.28183361629882 - type: f1 value: 61.55481034919965 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 35.972128420092396 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 85.59933241938367 - type: cos_sim_ap value: 72.20760361208136 - type: cos_sim_f1 value: 66.4447731755424 - type: cos_sim_precision value: 62.35539102267469 - type: cos_sim_recall value: 71.10817941952506 - type: dot_accuracy value: 78.98313166835548 - type: dot_ap value: 44.492521645493795 - type: dot_f1 value: 45.814889336016094 - type: dot_precision value: 37.02439024390244 - type: dot_recall value: 60.07915567282321 - type: euclidean_accuracy value: 85.3907134767837 - type: euclidean_ap value: 71.53847289080343 - type: euclidean_f1 value: 65.95952206778834 - type: euclidean_precision value: 61.31006346328196 - type: euclidean_recall value: 71.37203166226914 - type: manhattan_accuracy value: 85.40859510043511 - type: manhattan_ap value: 71.49664104395515 - type: manhattan_f1 value: 65.98569969356485 - type: manhattan_precision value: 63.928748144482924 - type: manhattan_recall value: 68.17941952506597 - type: max_accuracy value: 85.59933241938367 - type: max_ap value: 72.20760361208136 - type: max_f1 value: 66.4447731755424 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.83261536073273 - type: cos_sim_ap value: 85.48178133644264 - type: cos_sim_f1 value: 77.87816307403935 - 
type: cos_sim_precision value: 75.88953021114926 - type: cos_sim_recall value: 79.97382198952879 - type: dot_accuracy value: 79.76287499514883 - type: dot_ap value: 59.17438838475084 - type: dot_f1 value: 56.34566667855996 - type: dot_precision value: 52.50349092359864 - type: dot_recall value: 60.794579611949494 - type: euclidean_accuracy value: 88.76857996662397 - type: euclidean_ap value: 85.22764834359887 - type: euclidean_f1 value: 77.65379751543554 - type: euclidean_precision value: 75.11152683839401 - type: euclidean_recall value: 80.37419156144134 - type: manhattan_accuracy value: 88.6987231730508 - type: manhattan_ap value: 85.18907981724007 - type: manhattan_f1 value: 77.51967028849757 - type: manhattan_precision value: 75.49992701795358 - type: manhattan_recall value: 79.65044656606098 - type: max_accuracy value: 88.83261536073273 - type: max_ap value: 85.48178133644264 - type: max_f1 value: 77.87816307403935 --- ## Multilingual-E5-base [Multilingual E5 Text Embeddings: A Technical Report](https://arxiv.org/pdf/2402.05672). Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, Furu Wei, arXiv 2024 This model has 12 layers and the embedding size is 768. ## Usage Below is an example to encode queries and passages from the MS-MARCO passage ranking dataset. ```python import torch.nn.functional as F from torch import Tensor from transformers import AutoTokenizer, AutoModel def average_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor: last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0) return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] # Each input text should start with "query: " or "passage: ", even for non-English texts. # For tasks other than retrieval, you can simply use the "query: " prefix. 
input_texts = ['query: how much protein should a female eat', 'query: 南瓜的家常做法', "passage: As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.", "passage: 1.清炒南瓜丝 原料:嫩南瓜半个 调料:葱、盐、白糖、鸡精 做法: 1、南瓜用刀薄薄的削去表面一层皮,用勺子刮去瓤 2、擦成细丝(没有擦菜板就用刀慢慢切成细丝) 3、锅烧热放油,入葱花煸出香味 4、入南瓜丝快速翻炒一分钟左右,放盐、一点白糖和鸡精调味出锅 2.香葱炒南瓜 原料:南瓜1只 调料:香葱、蒜末、橄榄油、盐 做法: 1、将南瓜去皮,切成片 2、油锅8成热后,将蒜末放入爆香 3、爆香后,将南瓜片放入,翻炒 4、在翻炒的同时,可以不时地往锅里加水,但不要太多 5、放入盐,炒匀 6、南瓜差不多软和绵了之后,就可以关火 7、撒入香葱,即可出锅"] tokenizer = AutoTokenizer.from_pretrained('intfloat/multilingual-e5-base') model = AutoModel.from_pretrained('intfloat/multilingual-e5-base') # Tokenize the input texts batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt') outputs = model(**batch_dict) embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask']) # normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) scores = (embeddings[:2] @ embeddings[2:].T) * 100 print(scores.tolist()) ``` ## Supported Languages This model is initialized from [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) and continually trained on a mixture of multilingual datasets. It supports 100 languages from xlm-roberta, but low-resource languages may see performance degradation. 
## Training Details **Initialization**: [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) **First stage**: contrastive pre-training with weak supervision | Dataset | Weak supervision | # of text pairs | |--------------------------------------------------------------------------------------------------------|---------------------------------------|-----------------| | Filtered [mC4](https://huggingface.co/datasets/mc4) | (title, page content) | 1B | | [CC News](https://huggingface.co/datasets/intfloat/multilingual_cc_news) | (title, news content) | 400M | | [NLLB](https://huggingface.co/datasets/allenai/nllb) | translation pairs | 2.4B | | [Wikipedia](https://huggingface.co/datasets/intfloat/wikipedia) | (hierarchical section title, passage) | 150M | | Filtered [Reddit](https://www.reddit.com/) | (comment, response) | 800M | | [S2ORC](https://github.com/allenai/s2orc) | (title, abstract) and citation pairs | 100M | | [Stackexchange](https://stackexchange.com/) | (question, answer) | 50M | | [xP3](https://huggingface.co/datasets/bigscience/xP3) | (input prompt, response) | 80M | | [Miscellaneous unsupervised SBERT data](https://huggingface.co/sentence-transformers/all-MiniLM-L6-v2) | - | 10M | **Second stage**: supervised fine-tuning | Dataset | Language | # of text pairs | |----------------------------------------------------------------------------------------|--------------|-----------------| | [MS MARCO](https://microsoft.github.io/msmarco/) | English | 500k | | [NQ](https://github.com/facebookresearch/DPR) | English | 70k | | [Trivia QA](https://github.com/facebookresearch/DPR) | English | 60k | | [NLI from SimCSE](https://github.com/princeton-nlp/SimCSE) | English | <300k | | [ELI5](https://huggingface.co/datasets/eli5) | English | 500k | | [DuReader Retrieval](https://github.com/baidu/DuReader/tree/master/DuReader-Retrieval) | Chinese | 86k | | [KILT Fever](https://huggingface.co/datasets/kilt_tasks) | English | 70k | | [KILT 
HotpotQA](https://huggingface.co/datasets/kilt_tasks) | English | 70k | | [SQuAD](https://huggingface.co/datasets/squad) | English | 87k | | [Quora](https://huggingface.co/datasets/quora) | English | 150k | | [Mr. TyDi](https://huggingface.co/datasets/castorini/mr-tydi) | 11 languages | 50k | | [MIRACL](https://huggingface.co/datasets/miracl/miracl) | 16 languages | 40k | For all labeled datasets, we only use its training set for fine-tuning. For other training details, please refer to our paper at [https://arxiv.org/pdf/2402.05672](https://arxiv.org/pdf/2402.05672). ## Benchmark Results on [Mr. TyDi](https://arxiv.org/abs/2108.08787) | Model | Avg MRR@10 | | ar | bn | en | fi | id | ja | ko | ru | sw | te | th | |-----------------------|------------|-------|------| --- | --- | --- | --- | --- | --- | --- |------| --- | --- | | BM25 | 33.3 | | 36.7 | 41.3 | 15.1 | 28.8 | 38.2 | 21.7 | 28.1 | 32.9 | 39.6 | 42.4 | 41.7 | | mDPR | 16.7 | | 26.0 | 25.8 | 16.2 | 11.3 | 14.6 | 18.1 | 21.9 | 18.5 | 7.3 | 10.6 | 13.5 | | BM25 + mDPR | 41.7 | | 49.1 | 53.5 | 28.4 | 36.5 | 45.5 | 35.5 | 36.2 | 42.7 | 40.5 | 42.0 | 49.2 | | | | | multilingual-e5-small | 64.4 | | 71.5 | 66.3 | 54.5 | 57.7 | 63.2 | 55.4 | 54.3 | 60.8 | 65.4 | 89.1 | 70.1 | | multilingual-e5-base | 65.9 | | 72.3 | 65.0 | 58.5 | 60.8 | 64.9 | 56.6 | 55.8 | 62.7 | 69.0 | 86.6 | 72.7 | | multilingual-e5-large | **70.5** | | 77.5 | 73.2 | 60.8 | 66.8 | 68.5 | 62.5 | 61.6 | 65.8 | 72.7 | 90.2 | 76.2 | ## MTEB Benchmark Evaluation Check out [unilm/e5](https://github.com/microsoft/unilm/tree/master/e5) to reproduce evaluation results on the [BEIR](https://arxiv.org/abs/2104.08663) and [MTEB benchmark](https://arxiv.org/abs/2210.07316). ## Support for Sentence Transformers Below is an example for usage with sentence_transformers. 
```python from sentence_transformers import SentenceTransformer model = SentenceTransformer('intfloat/multilingual-e5-base') input_texts = [ 'query: how much protein should a female eat', 'query: 南瓜的家常做法', "passage: As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.", "passage: 1.清炒南瓜丝 原料:嫩南瓜半个 调料:葱、盐、白糖、鸡精 做法: 1、南瓜用刀薄薄的削去表面一层皮,用勺子刮去瓤 2、擦成细丝(没有擦菜板就用刀慢慢切成细丝) 3、锅烧热放油,入葱花煸出香味 4、入南瓜丝快速翻炒一分钟左右,放盐、一点白糖和鸡精调味出锅 2.香葱炒南瓜 原料:南瓜1只 调料:香葱、蒜末、橄榄油、盐 做法: 1、将南瓜去皮,切成片 2、油锅8成热后,将蒜末放入爆香 3、爆香后,将南瓜片放入,翻炒 4、在翻炒的同时,可以不时地往锅里加水,但不要太多 5、放入盐,炒匀 6、南瓜差不多软和绵了之后,就可以关火 7、撒入香葱,即可出锅" ] embeddings = model.encode(input_texts, normalize_embeddings=True) ``` Package requirements `pip install sentence_transformers~=2.2.2` Contributors: [michaelfeil](https://huggingface.co/michaelfeil) ## FAQ **1. Do I need to add the prefix "query: " and "passage: " to input texts?** Yes, this is how the model is trained, otherwise you will see a performance degradation. Here are some rules of thumb: - Use "query: " and "passage: " correspondingly for asymmetric tasks such as passage retrieval in open QA, ad-hoc information retrieval. - Use "query: " prefix for symmetric tasks such as semantic similarity, bitext mining, paraphrase retrieval. - Use "query: " prefix if you want to use embeddings as features, such as linear probing classification, clustering. **2. Why are my reproduced results slightly different from reported in the model card?** Different versions of `transformers` and `pytorch` could cause negligible but non-zero performance differences. **3. Why do the cosine similarity scores distribute around 0.7 to 1.0?** This is a known and expected behavior as we use a low temperature 0.01 for InfoNCE contrastive loss. 
For text embedding tasks like text retrieval or semantic similarity, what matters is the relative order of the scores instead of the absolute values, so this should not be an issue. ## Citation If you find our paper or models helpful, please consider citing as follows: ``` @article{wang2024multilingual, title={Multilingual E5 Text Embeddings: A Technical Report}, author={Wang, Liang and Yang, Nan and Huang, Xiaolong and Yang, Linjun and Majumder, Rangan and Wei, Furu}, journal={arXiv preprint arXiv:2402.05672}, year={2024} } ``` ## Limitations Long texts will be truncated to at most 512 tokens.
[ "BIOSSES", "SCIFACT" ]
RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf
RichardErkhov
null
[ "gguf", "region:us" ]
2024-08-06T19:43:54Z
2024-08-06T19:44:59+00:00
18
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Phi-3-mini-128k-instruct - GGUF - Model creator: https://huggingface.co/gretelai/ - Original model: https://huggingface.co/gretelai/Phi-3-mini-128k-instruct/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Phi-3-mini-128k-instruct.Q2_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q2_K.gguf) | Q2_K | 0.65GB | | [Phi-3-mini-128k-instruct.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ3_XS.gguf) | IQ3_XS | 0.07GB | | [Phi-3-mini-128k-instruct.IQ3_S.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ3_S.gguf) | IQ3_S | 0.0GB | | [Phi-3-mini-128k-instruct.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q3_K_S.gguf) | Q3_K_S | 0.0GB | | [Phi-3-mini-128k-instruct.IQ3_M.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ3_M.gguf) | IQ3_M | 0.0GB | | [Phi-3-mini-128k-instruct.Q3_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q3_K.gguf) | Q3_K | 0.0GB | | [Phi-3-mini-128k-instruct.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q3_K_M.gguf) | Q3_K_M | 0.0GB | | [Phi-3-mini-128k-instruct.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q3_K_L.gguf) | Q3_K_L | 0.0GB | | 
[Phi-3-mini-128k-instruct.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ4_XS.gguf) | IQ4_XS | 0.0GB | | [Phi-3-mini-128k-instruct.Q4_0.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_0.gguf) | Q4_0 | 0.0GB | | [Phi-3-mini-128k-instruct.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.IQ4_NL.gguf) | IQ4_NL | 0.0GB | | [Phi-3-mini-128k-instruct.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_K_S.gguf) | Q4_K_S | 0.0GB | | [Phi-3-mini-128k-instruct.Q4_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_K.gguf) | Q4_K | 0.0GB | | [Phi-3-mini-128k-instruct.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_K_M.gguf) | Q4_K_M | 0.0GB | | [Phi-3-mini-128k-instruct.Q4_1.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q4_1.gguf) | Q4_1 | 0.0GB | | [Phi-3-mini-128k-instruct.Q5_0.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_0.gguf) | Q5_0 | 0.0GB | | [Phi-3-mini-128k-instruct.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_K_S.gguf) | Q5_K_S | 0.0GB | | [Phi-3-mini-128k-instruct.Q5_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_K.gguf) | Q5_K | 0.0GB | | [Phi-3-mini-128k-instruct.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_K_M.gguf) | 
Q5_K_M | 0.0GB | | [Phi-3-mini-128k-instruct.Q5_1.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q5_1.gguf) | Q5_1 | 0.0GB | | [Phi-3-mini-128k-instruct.Q6_K.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q6_K.gguf) | Q6_K | 0.0GB | | [Phi-3-mini-128k-instruct.Q8_0.gguf](https://huggingface.co/RichardErkhov/gretelai_-_Phi-3-mini-128k-instruct-gguf/blob/main/Phi-3-mini-128k-instruct.Q8_0.gguf) | Q8_0 | 0.0GB | Original model description: --- license: mit license_link: https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/LICENSE language: - en pipeline_tag: text-generation tags: - nlp - code widget: - messages: - role: user content: Can you provide ways to eat combinations of bananas and dragonfruits? --- NOTE: this is mirrored from https://huggingface.co/microsoft/Phi-3-mini-128k-instruct ## Model Summary The Phi-3-Mini-128K-Instruct is a 3.8 billion-parameter, lightweight, state-of-the-art open model trained using the Phi-3 datasets. This dataset includes both synthetic data and filtered publicly available website data, with an emphasis on high-quality and reasoning-dense properties. The model belongs to the Phi-3 family with the Mini version in two variants [4K](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) which is the context length (in tokens) that it can support. After initial training, the model underwent a post-training process that involved supervised fine-tuning and direct preference optimization to enhance its ability to follow instructions and adhere to safety measures. 
When evaluated against benchmarks that test common sense, language understanding, mathematics, coding, long-term context, and logical reasoning, the Phi-3 Mini-128K-Instruct demonstrated robust and state-of-the-art performance among models with fewer than 13 billion parameters. Resources and Technical Documentation: 🏡 [Phi-3 Portal](https://azure.microsoft.com/en-us/products/phi-3) <br> 📰 [Phi-3 Microsoft Blog](https://aka.ms/Phi-3Build2024) <br> 📖 [Phi-3 Technical Report](https://aka.ms/phi3-tech-report) <br> 🛠️ [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai) <br> 👩‍🍳 [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook) <br> 🖥️ [Try It](https://aka.ms/try-phi3) | | Short Context | Long Context | | :- | :- | :- | | Mini | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx) ; [[GGUF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct-onnx)| | Small | 8K [[HF]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct-onnx-cuda)| | Medium | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct-onnx-cuda)| | Vision | | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct-onnx-cuda)| ## Intended Uses **Primary use cases** The model is intended for commercial and research 
use in English. The model provides uses for applications which require: 1) Memory/compute constrained environments 2) Latency bound scenarios 3) Strong reasoning (especially code, math and logic) Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. **Use case considerations** Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. ## Release Notes This is an update over the original instruction-tuned Phi-3-mini release based on valuable customer feedback. The model used additional post-training data leading to substantial gains on long-context understanding, instruction following, and structured output. We also improve multi-turn conversation quality, explicitly support the <|system|> tag, and significantly improve reasoning capability. We believe most use cases will benefit from this release, but we encourage users to test in their particular AI applications. We appreciate the enthusiastic adoption of the Phi-3 model family, and continue to welcome all feedback from the community. The tables below highlight improvements on instruction following, structured output, reasoning, and long-context understanding of the new release on our public and internal benchmark datasets. 
| Benchmarks | Original | June 2024 Update | | :- | :- | :- | | Instruction Extra Hard | 5.7 | 5.9 | | Instruction Hard | 5.0 | 5.2 | | JSON Structure Output | 1.9 | 60.1 | | XML Structure Output | 47.8 | 52.9 | | GPQA | 25.9 | 29.7 | | MMLU | 68.1 | 69.7 | | **Average** | **25.7** | **37.3** | RULER: a retrieval-based benchmark for long context understanding | Model | 4K | 8K | 16K | 32K | 64K | 128K | Average | | :-------------------| :------| :------| :------| :------| :------| :------| :---------| | Original | 86.7 | 78.1 | 75.6 | 70.3 | 58.9 | 43.3 | **68.8** | | June 2024 Update | 92.4 | 91.1 | 90.8 | 87.9 | 79.8 | 65.6 | **84.6** | RepoQA: a benchmark for long context code understanding | Model | Python | C++ | Rust | Java | TypeScript | Average | | :-------------------| :--------| :-----| :------| :------| :------------| :---------| | Original | 27 | 29 | 40 | 33 | 33 | **32.4** | | June 2024 Update | 85 | 63 | 72 | 93 | 72 | **77** | Notes: if users would like to check out the previous version, use the git commit id **bb5bf1e4001277a606e11debca0ef80323e5f824**. For the model conversion, e.g. GGUF and other formats, we invite the community to experiment with various approaches and share your valuable feedback. Let's innovate together! ## How to Use Phi-3 Mini-128K-Instruct has been integrated in the development version (4.41.3) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following: * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source. The current `transformers` version can be verified with: `pip list | grep transformers`. 
Examples of required packages: ``` flash_attn==2.5.8 torch==2.3.1 accelerate==0.31.0 transformers==4.41.2 ``` Phi-3 Mini-128K-Instruct is also available in [Azure AI Studio](https://aka.ms/try-phi3) ### Tokenizer Phi-3 Mini-128K-Instruct supports a vocabulary size of up to `32064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size. ### Chat Format Given the nature of the training data, the Phi-3 Mini-128K-Instruct model is best suited for prompts using the chat format as follows. You can provide the prompt as a question with a generic template as follow: ```markdown <|system|> You are a helpful assistant.<|end|> <|user|> Question?<|end|> <|assistant|> ``` For example: ```markdown <|system|> You are a helpful assistant.<|end|> <|user|> How to explain Internet for a medieval knight?<|end|> <|assistant|> ``` where the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following: ```markdown <|system|> You are a helpful travel assistant.<|end|> <|user|> I am going to Paris, what should I see?<|end|> <|assistant|> Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. 
Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."<|end|> <|user|> What is so great about #1?<|end|> <|assistant|> ``` ### Sample inference code This code snippets show how to get quickly started with running the model on a GPU: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline torch.random.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( "microsoft/Phi-3-mini-128k-instruct", device_map="cuda", torch_dtype="auto", trust_remote_code=True, ) tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct") messages = [ {"role": "system", "content": "You are a helpful AI assistant."}, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. 
Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."}, {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"}, ] pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, ) generation_args = { "max_new_tokens": 500, "return_full_text": False, "temperature": 0.0, "do_sample": False, } output = pipe(messages, **generation_args) print(output[0]['generated_text']) ``` Notes: If you want to use flash attention, call _AutoModelForCausalLM.from_pretrained()_ with _attn_implementation="flash_attention_2"_ ## Responsible AI Considerations Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: + Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. + Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. + Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. 
+ Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include: + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. + Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). + Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. + Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations. 
## Training ### Model * Architecture: Phi-3 Mini-128K-Instruct has 3.8B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidelines. * Inputs: Text. It is best suited for prompts using chat format. * Context length: 128K tokens * GPUs: 512 H100-80G * Training time: 10 days * Training data: 4.9T tokens * Outputs: Generated text in response to the input * Dates: Our models were trained between May and June 2024 * Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models. * Release dates: June, 2024. ### Datasets Our training data includes a wide variety of sources, totaling 4.9 trillion tokens, and is a combination of 1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruction-following, truthfulness, honesty and helpfulness. We are focusing on the quality of data that could potentially improve the reasoning ability for the model, and we filter the publicly available documents to contain the correct level of knowledge. As an example, the result of a game in the Premier League on a particular day might be good training data for frontier models, but we need to remove such information to leave more model capacity for reasoning for the small size models. More details about data can be found in the [Phi-3 Technical Report](https://aka.ms/phi3-tech-report). 
### Fine-tuning A basic example of multi-GPUs supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/sample_finetune.py). ## Benchmarks We report the results under completion format for Phi-3-Mini-128K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Mistral-7b-v0.1, Mixtral-8x7b, Gemma 7B, Llama-3-8B-Instruct, and GPT-3.5. All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation. As is now standard, we use few-shot prompts to evaluate the models, at temperature 0. The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3. More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model. The number of k–shot examples is listed per-benchmark. 
| Category | Benchmark | Phi-3-Mini-128K-Ins | Gemma-7B | Mistral-7B | Mixtral-8x7B | Llama-3-8B-Ins | GPT3.5-Turbo-1106 | | :----------| :-----------| :---------------------| :----------| :------------| :--------------| :----------------| :-------------------| | Popular aggregated benchmark | AGI Eval <br>5-shot| 39.5 | 42.1 | 35.1 | 45.2 | 42 | 48.4 | | | MMLU <br>5-shot | 69.7 | 63.6 | 61.7 | 70.5 | 66.5 | 71.4 | | | BigBench Hard <br>3-shot | 72.1 | 59.6 | 57.3 | 69.7 | 51.5 | 68.3 | | Language Understanding | ANLI <br>7-shot | 52.3 | 48.7 | 47.1 | 55.2 | 57.3 | 58.1 | | | HellaSwag <br>5-shot | 70.5 | 49.8 | 58.5 | 70.4 | 71.1 | 78.8 | | Reasoning | ARC Challenge <br>10-shot | 85.5 | 78.3 | 78.6 | 87.3 | 82.8 | 87.4 | | | BoolQ <br>0-shot | 77.1 | 66 | 72.2 | 76.6 | 80.9 | 79.1 | | | MedQA <br>2-shot | 56.4 | 49.6 | 50 | 62.2 | 60.5 | 63.4 | | | OpenBookQA <br>10-shot | 78.8 | 78.6 | 79.8 | 85.8 | 82.6 | 86 | | | PIQA <br>5-shot | 80.1 | 78.1 | 77.7 | 86 | 75.7 | 86.6 | | | GPQA <br>0-shot | 29.7 | 2.9 | 15 | 6.9 | 32.4 | 29.9 | | | Social IQA <br>5-shot | 74.7 | 65.5 | 74.6 | 75.9 | 73.9 | 68.3 | | | TruthfulQA (MC2) <br>10-shot | 64.8 | 52.1 | 53 | 60.1 | 63.2 | 67.7 | | | WinoGrande <br>5-shot | 71.0 | 55.6 | 54.2 | 62 | 65 | 68.8 | | Factual Knowledge | TriviaQA <br>5-shot | 57.8 | 72.3 | 75.2 | 82.2 | 67.7 | 85.8 | | Math | GSM8K CoTT <br>8-shot | 85.3 | 59.8 | 46.4 | 64.7 | 77.4 | 78.1 | | Code Generation | HumanEval <br>0-shot | 60.4 | 34.1 | 28.0 | 37.8 | 60.4 | 62.2 | | | MBPP <br>3-shot | 70.0 | 51.5 | 50.8 | 60.2 | 67.7 | 77.8 | | **Average** | | **66.4** | **56.0** | **56.4** | **64.4** | **65.5** | **70.3** | **Long Context**: Phi-3 Mini-128K-Instruct supports 128K context length, therefore the model is capable of several long context tasks including long document/meeting summarization, long document QA. 
| Benchmark | Phi-3 Mini-128K-Instruct | Mistral-7B | Mixtral 8x7B | LLaMA-3-8B-Instruct | | :---------------| :--------------------------|:------------|:--------------|:---------------------| | GovReport | 25.3 | 4.9 | 20.3 | 10.3 | | QMSum | 21.9 | 15.5 | 20.6 | 2.9 | | Qasper | 41.6 | 23.5 | 26.6 | 8.1 | | SQuALITY | 24.1 | 14.7 | 16.2 | 25 | | SummScreenFD | 16.8 | 9.3 | 11.3 | 5.1 | | **Average** | **25.9** | **13.6** | **19.0** | **10.3** | We take a closer look at different categories across 100 public benchmark datasets at the table below: | Category | Phi-3-Mini-128K-Instruct | Gemma-7B | Mistral-7B | Mixtral 8x7B | Llama-3-8B-Instruct | GPT-3.5-Turbo | |:----------|:--------------------------|:----------|:------------|:--------------|:---------------------|:---------------| | Popular aggregated benchmark | 60.6 | 59.4 | 56.5 | 66.2 | 59.9 | 67.0 | | Reasoning | 69.4 | 60.3 | 62.8 | 68.1 | 69.6 | 71.7 | | Language understanding | 57.5 | 57.6 | 52.5 | 66.1 | 63.2 | 67.7 | | Code generation | 61.0 | 45.6 | 42.9 | 52.7 | 56.4 | 70.4 | | Math | 51.6 | 35.8 | 25.4 | 40.3 | 41.1 | 52.8 | | Factual knowledge | 35.8 | 46.7 | 49.8 | 58.6 | 43.1 | 63.4 | | Multilingual | 56.4 | 66.5 | 57.4 | 66.7 | 66.6 | 71.0 | | Robustness | 61.1 | 38.4 | 40.6 | 51.0 | 64.5 | 69.3 | Overall, the model with only 3.8B-param achieves a similar level of language understanding and reasoning ability as much larger models. However, it is still fundamentally limited by its size for certain tasks. The model simply does not have the capacity to store too much world knowledge, which can be seen for example with low performance on TriviaQA. However, we believe such weakness can be resolved by augmenting Phi-3-Mini with a search engine. ## Cross Platform Support [ONNX runtime](https://onnxruntime.ai/blogs/accelerating-phi-3) now supports Phi-3 mini models across platforms and hardware. 
Optimized phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML GPU acceleration is supported for Windows desktops GPUs (AMD, Intel, and NVIDIA). Along with DML, ONNX Runtime provides cross platform support for Phi3 mini across a range of devices CPU, GPU, and mobile. Here are some of the optimized configurations we have added: 1. ONNX models for int4 DML: Quantized to int4 via AWQ 2. ONNX model for fp16 CUDA 3. ONNX model for int4 CUDA: Quantized to int4 via RTN 4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN ## Software * [PyTorch](https://github.com/pytorch/pytorch) * [Transformers](https://github.com/huggingface/transformers) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) ## Hardware Note that by default, the Phi-3 Mini-128K-Instruct model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types: * NVIDIA A100 * NVIDIA A6000 * NVIDIA H100 If you want to run the model on: * NVIDIA V100 or earlier generation GPUs: call AutoModelForCausalLM.from_pretrained() with attn_implementation="eager" * Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [128K](https://aka.ms/phi3-mini-128k-instruct-onnx) ## License The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-128k/resolve/main/LICENSE). ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. 
Any use of third-party trademarks or logos are subject to those third-party’s policies.
[ "MEDQA" ]
knowledgator/gliner-bi-llama-v1.0
knowledgator
token-classification
[ "gliner", "pytorch", "NER", "GLiNER", "information extraction", "encoder", "entity recognition", "token-classification", "multilingual", "dataset:urchade/pile-mistral-v0.1", "dataset:numind/NuNER", "dataset:knowledgator/GLINER-multi-task-synthetic-data", "license:apache-2.0", "region:us" ]
2024-08-29T18:32:18Z
2024-09-02T05:45:51+00:00
18
0
--- datasets: - urchade/pile-mistral-v0.1 - numind/NuNER - knowledgator/GLINER-multi-task-synthetic-data language: - multilingual library_name: gliner license: apache-2.0 pipeline_tag: token-classification tags: - NER - GLiNER - information extraction - encoder - entity recognition --- # About GLiNER is a Named Entity Recognition (NER) model capable of identifying any entity type using bidirectional transformer encoders (BERT-like). It provides a practical alternative to traditional NER models, which are limited to predefined entities, and Large Language Models (LLMs) that, despite their flexibility, are costly and large for resource-constrained scenarios. This particular version utilizes a bi-encoder architecture, where the textual encoder is [Sheared-Llama-1.3B](https://huggingface.co/princeton-nlp/Sheared-LLaMA-1.3B) and the entity label encoder is the sentence transformer [BGE-base-en](https://huggingface.co/BAAI/bge-small-en-v1.5). This model leverages the [LLM2Vec](https://github.com/McGill-NLP/llm2vec/tree/main/llm2vec) approach, transforming the initial decoder model into a bi-directional encoder. We further enhanced the model by pre-training it on the masked token prediction task using the Wikipedia corpus. This approach unlocks new capabilities for GLiNER, such as supporting flash attention, enabling a longer context window, and achieving faster inference times. Moreover, by utilizing modern decoders trained on extensive and up-to-date datasets, the model benefits from improved generalization and performance. 
This version highlights the key improvements and contextual benefits more clearly.Such architecture brings several advantages over uni-encoder GLiNER: * An unlimited amount of entities can be recognized at a single time; * Faster inference if entity embeddings are preprocessed; * Better generalization to unseen entities; However, it has some drawbacks such as a lack of inter-label interactions that make it hard for the model to disambiguate semantically similar but contextually different entities. ### Installation & Usage Install or update the gliner package: ```bash pip install gliner -U ``` Once you've downloaded the GLiNER library, you can import the GLiNER class. You can then load this model using `GLiNER.from_pretrained` and predict entities with `predict_entities`. ```python from gliner import GLiNER model = GLiNER.from_pretrained("knowledgator/gliner-bi-llama-v1.0") text = """ Cristiano Ronaldo dos Santos Aveiro (Portuguese pronunciation: [kɾiʃˈtjɐnu ʁɔˈnaldu]; born 5 February 1985) is a Portuguese professional footballer who plays as a forward for and captains both Saudi Pro League club Al Nassr and the Portugal national team. Widely regarded as one of the greatest players of all time, Ronaldo has won five Ballon d'Or awards,[note 3] a record three UEFA Men's Player of the Year Awards, and four European Golden Shoes, the most by a European player. He has won 33 trophies in his career, including seven league titles, five UEFA Champions Leagues, the UEFA European Championship and the UEFA Nations League. Ronaldo holds the records for most appearances (183), goals (140) and assists (42) in the Champions League, goals in the European Championship (14), international goals (128) and international appearances (205). He is one of the few players to have made over 1,200 professional career appearances, the most by an outfield player, and has scored over 850 official senior career goals for club and country, making him the top goalscorer of all time. 
""" labels = ["person", "award", "date", "competitions", "teams"] entities = model.predict_entities(text, labels, threshold=0.3) for entity in entities: print(entity["text"], "=>", entity["label"]) ``` ``` Cristiano Ronaldo dos Santos Aveiro => person 5 February 1985 => date Al Nassr => teams Portugal national team => teams Ballon d'Or => award UEFA Men's Player of the Year Awards => award European Golden Shoes => award UEFA Champions Leagues => competitions UEFA European Championship => competitions UEFA Nations League => competitions Champions League => competitions European Championship => competitions ``` If you want to use flash attention or increase sequence length, please, check the following code: ```python from gliner import GLiNER import torch model = GLiNER.from_pretrained("knowledgator/gliner-bi-llama-v1.0", _attn_implementation = 'flash_attention_2', max_length = 2048).to('cuda:0', dtype=torch.float16) ``` If you have a large amount of entities and want to pre-embed them, please, refer to the following code snippet: ```python labels = ["your entities"] texts = ["your texts"] entity_embeddings = model.encode_labels(labels, batch_size = 8) outputs = model.batch_predict_with_embeds(texts, entity_embeddings, labels) ``` ### Benchmarks Below you can see the table with benchmarking results on various named entity recognition datasets: | Dataset | Score | |-------------------------|--------| | ACE 2004 | 26.8% | | ACE 2005 | 29.2% | | AnatEM | 25.3% | | Broad Tweet Corpus | 66.8% | | CoNLL 2003 | 60.3% | | FabNER | 21.2% | | FindVehicle | 28.3% | | GENIA_NER | 58.3% | | HarveyNER | 18.3% | | MultiNERD | 64.7% | | Ontonotes | 28.4% | | PolyglotNER | 45.3% | | TweetNER7 | 35.9% | | WikiANN en | 53.6% | | WikiNeural | 73.4% | | bc2gm | 63.2% | | bc4chemd | 56.8% | | bc5cdr | 71.3% | | ncbi | 64.9% | | **Average** | **47.0%** | | | | | CrossNER_AI | 56.7% | | CrossNER_literature | 61.5% | | CrossNER_music | 70.2% | | CrossNER_politics | 75.6% | | CrossNER_science 
| 66.8% | | mit-movie | 39.9% | | mit-restaurant | 41.7% | | **Average (zero-shot benchmark)** | **58.9%** | ### Join Our Discord Connect with our community on Discord for news, support, and discussion about our models. Join [Discord](https://discord.gg/dkyeAgs9DG).
[ "ANATEM", "BC5CDR" ]
knowledgator/gliner-llama-1B-v1.0
knowledgator
token-classification
[ "gliner", "pytorch", "NER", "GLiNER", "information extraction", "encoder", "entity recognition", "token-classification", "multilingual", "dataset:urchade/pile-mistral-v0.1", "dataset:knowledgator/GLINER-multi-task-synthetic-data", "dataset:EmergentMethods/AskNews-NER-v0", "license:apache-2.0", "region:us" ]
2024-09-01T09:21:19Z
2024-09-06T06:41:35+00:00
18
6
--- datasets: - urchade/pile-mistral-v0.1 - knowledgator/GLINER-multi-task-synthetic-data - EmergentMethods/AskNews-NER-v0 language: - multilingual library_name: gliner license: apache-2.0 pipeline_tag: token-classification tags: - NER - GLiNER - information extraction - encoder - entity recognition --- # About GLiNER is a Named Entity Recognition (NER) model capable of identifying any entity type using a bidirectional transformer encoders (BERT-like). It provides a practical alternative to traditional NER models, which are limited to predefined entities, and Large Language Models (LLMs) that, despite their flexibility, are costly and large for resource-constrained scenarios. The initial versions of GLiNER relied on older encoder architectures like BERT and DeBERTA. These models, however, were trained on smaller datasets and lacked support for modern optimization techniques such as flash attention. Additionally, their context window was typically limited to 512 tokens, which is insufficient for many practical applications. Recognizing these limitations, we began exploring alternative backbones for GLiNER. This latest model leverages the LLM2Vec approach, transforming the initial decoder model into a bidirectional encoder. We further enhanced the model by pre-training it on the masked token prediction task using the Wikipedia corpus. This approach introduces several advancements for GLiNER, including support for flash attention, an extended context window, and faster inference times. Additionally, by utilizing modern decoders trained on large, up-to-date datasets, the model exhibits improved generalization and performance. 
Key Advantages Over Previous GLiNER Models: * Enhanced performance and generalization capabilities * Support for Flash Attention * Extended context window (up to 32k tokens) While these models are larger and require more computational resources compared to older encoders, they are still considered relatively small given current standards and provide significant benefits for a wide range of use cases. ### Installation & Usage Install or update the gliner package: ```bash pip install gliner -U ``` And LLM2Vec packages: ```bash pip install llm2vec ``` Once you've downloaded the GLiNER library, you can import the GLiNER class. You can then load this model using `GLiNER.from_pretrained` and predict entities with `predict_entities`. ```python from gliner import GLiNER model = GLiNER.from_pretrained("knowledgator/gliner-llama-1B-v1.0") text = """ Cristiano Ronaldo dos Santos Aveiro (Portuguese pronunciation: [kɾiʃˈtjɐnu ʁɔˈnaldu]; born 5 February 1985) is a Portuguese professional footballer who plays as a forward for and captains both Saudi Pro League club Al Nassr and the Portugal national team. Widely regarded as one of the greatest players of all time, Ronaldo has won five Ballon d'Or awards,[note 3] a record three UEFA Men's Player of the Year Awards, and four European Golden Shoes, the most by a European player. He has won 33 trophies in his career, including seven league titles, five UEFA Champions Leagues, the UEFA European Championship and the UEFA Nations League. Ronaldo holds the records for most appearances (183), goals (140) and assists (42) in the Champions League, goals in the European Championship (14), international goals (128) and international appearances (205). He is one of the few players to have made over 1,200 professional career appearances, the most by an outfield player, and has scored over 850 official senior career goals for club and country, making him the top goalscorer of all time. 
""" labels = ["person", "award", "date", "competitions", "teams"] entities = model.predict_entities(text, labels, threshold=0.5) for entity in entities: print(entity["text"], "=>", entity["label"]) ``` ``` Cristiano Ronaldo dos Santos Aveiro => person 5 February 1985 => date Al Nassr => teams Portugal national team => teams Ballon d'Or => award UEFA Men's Player of the Year Awards => award European Golden Shoes => award UEFA Champions Leagues => competitions UEFA European Championship => competitions UEFA Nations League => competitions Champions League => competitions European Championship => competitions ``` If you want to use flash attention or increase sequence length, please, check the following code: ```python from gliner import GLiNER import torch model = GLiNER.from_pretrained("knowledgator/gliner-llama-1B-v1.0", _attn_implementation = 'flash_attention_2', max_length = 2048).to('cuda:0', dtype=torch.float16) ``` ### Benchmarks Below you can see the table with benchmarking results on various named entity recognition datasets: | Dataset | Score | |-----------------------------|--------| | ACE 2004 | 29.0% | | ACE 2005 | 30.4% | | AnatEM | 40.2% | | Broad Tweet Corpus | 65.3% | | CoNLL 2003 | 62.4% | | FabNER | 25.3% | | FindVehicle | 39.7% | | GENIA_NER | 55.6% | | HarveyNER | 25.1% | | MultiNERD | 61.3% | | Ontonotes | 25.6% | | PolyglotNER | 42.7% | | TweetNER7 | 36.2% | | WikiANN en | 55.7% | | WikiNeural | 73.6% | | bc2gm | 55.5% | | bc4chemd | 65.1% | | bc5cdr | 74.0% | | ncbi | 64.8% | | **Average** | **48.8%** | | | | | CrossNER_AI | 57.5% | | CrossNER_literature | 68.1% | | CrossNER_music | 65.3% | | CrossNER_politics | 73.3% | | CrossNER_science | 67.8% | | mit-movie | 47.7% | | mit-restaurant | 40.0% | | **Average (zero-shot benchmark)** | **60.1%** | ### Join Our Discord Connect with our community on Discord for news, support, and discussion about our models. Join [Discord](https://discord.gg/dkyeAgs9DG).
[ "ANATEM", "BC5CDR" ]
alexbuz/GRIN-MoE-2
alexbuz
text-generation
[ "transformers", "safetensors", "grinmoe", "nlp", "code", "text-generation", "conversational", "custom_code", "en", "arxiv:2409.12136", "arxiv:2404.14219", "license:mit", "endpoints_compatible", "region:us" ]
2024-09-22T22:25:16Z
2024-09-22T22:41:51+00:00
18
0
--- language: - en library_name: transformers license: mit license_link: https://github.com/microsoft/GRIN-MoE/tree/main/LICENSE pipeline_tag: text-generation tags: - nlp - code widget: - messages: - role: user content: Sally (a girl) has 3 brothers. Each brother has 2 sisters. How many sisters does Sally have? --- <h1 align="center"> &#128513; MoE</h1> <h4 align="center">GRIN: <em>GR</em>adient-<em>IN</em>formed MoE</h4> <p align="center"> <a href="https://huggingface.co/microsoft/GRIN-MoE">Hugging Face</a>&nbsp | &nbsp <a href="https://arxiv.org/abs/2409.12136">Tech Report</a>&nbsp | &nbsp <a href="https://huggingface.co/microsoft/GRIN-MoE/blob/main/LICENSE">License</a>&nbsp | &nbsp <a href="https://github.com/microsoft/GRIN-MoE">Github</a> &nbsp | &nbsp <a href="https://huggingface.co/microsoft/GRIN-MoE#usage">Get Started</a>&nbsp <br> - With **only 6.6B** activate parameters, GRIN MoE achieves **exceptionally good** performance across a diverse set of tasks, particularly in coding and mathematics tasks. - GRIN uses [**SparseMixer-v2**](https://arxiv.org/html/2409.12136v1#Pt1) to estimate the gradient related to expert routing, while the conventional MoE training treats expert gating as a proxy for the gradient estimation. - GRIN scales MoE training with [**neither expert parallelism nor token dropping**](https://arxiv.org/pdf/2409.12136#page=5.42), while the conventional MoE training employs expert parallelism and deploys token dropping. ## Intended Uses ### Primary Use Cases The model is intended for commercial and research use in multiple languages. The model provides uses for general purpose AI systems and applications which require: 1) Memory/compute constrained environments 2) Latency bound scenarios 3) Strong reasoning (especially code, math and logic) Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. 
### Use Case Considerations Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. ***Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under.*** ## Usage ### Command-line Demo The simpliest way to inference with GRIN-MoE is to run the demo as below, which would setup environment, download model weight, and run inference for a math question. ```bash # This script is available at `https://github.com/microsoft/GRIN-MoE/blob/main/demo/demo.sh` and requires docker to run. curl https://raw.githubusercontent.com/microsoft/GRIN-MoE/main/demo/demo.sh | bash -s ``` ### Interactive Demo Run the following command to play with the model with more questions and customized inputs, which would launch a jupyter notebook at `localhost:8887`. ```bash # This script requires docker to run. docker run --gpus all -p 8887:8887 --rm nvcr.io/nvidia/pytorch:24.08-py3 /bin/bash -c 'git clone https://github.com/microsoft/GRIN-MoE.git && jupyter notebook --port 8887 --notebook-dir GRIN-MoE/demo' ``` ## Benchmarks To understand the capabilities, we compare GRIN MoE with a set of models over a variety of benchmarks using our internal benchmark platform. At the high-level overview of the model quality on representative benchmarks: ### Popular Benchmarks Note a different version of mid-training and post-training, emphasizing long context and multilingual ability, has been conducted and has been released at https://huggingface.co/microsoft/Phi-3.5-MoE-instruct. 
| | GRIN MoE (16x3.8B) | Phi-3.5-MoE (16x3.8B) | Mixtral (8x7B) | Mixtral (8x22B) | Llama3 (8B) | Llama3 (70B) | GPT3.5 | GPT4o | |---------------|-----------|---------|---------|---------|--------|--------|--------|-------| | MMLU | 79.4 | 78.9 | 70.5 | 76.2 | 66.5 | 80.2 | 71.4 | 86.9 | | HellaSwag | 83.7 | 83.8 | 70.4 | 79.0 | 71.1 | 82.6 | 78.8 | 91.7 | | ANLI | 60.6 | 59.8 | 55.2 | 65.2 | 57.3 | 68.3 | 58.1 | 75.7 | | GSM-8K | 90.4 | 88.7 | 64.7 | 83.8 | 77.4 | 93.5 | 78.1 | 93.8 | | MedQA | 70.4 | 70.5 | 62.2 | 67.9 | 60.5 | 78.5 | 63.4 | 88.9 | | AGIEval | 48.2 | 50.3 | 45.2 | 54.0 | 42.0 | 56.9 | 48.4 | 37.6 | | TriviaQA | 73.9 | 71.6 | 78.5 | 82.2 | 67.7 | 84.5 | 85.8 | 66.0 | | Arc-C | 92.0 | 91.0 | 87.3 | 91.3 | 82.8 | 93.0 | 87.4 | 97.0 | | Arc-E | 98.0 | 97.1 | 95.6 | 96.9 | 93.4 | 98.2 | 96.3 | 99.0 | | PIQA | 89.0 | 88.6 | 86.0 | 85.0 | 75.7 | 85.3 | 86.6 | 92.9 | | SociQA | 79.5 | 78.0 | 75.9 | 78.2 | 73.9 | 81.1 | 68.3 | 81.4 | | BigBench-Hard | 81.4 | 79.1 | 69.7 | 81.8 | 51.5 | 80.2 | 68.3 | 81.2 | | WinoGrande | 81.4 | 81.3 | 62.0 | 75.3 | 65.0 | 83.3 | 68.8 | 89.3 | | OpenBookQA | 89.8 | 89.6 | 85.8 | 88.6 | 82.6 | 91.8 | 86.0 | 95.2 | | BoolQ | 83.4 | 84.6 | 77.6 | 82.7 | 80.9 | 89.1 | 79.1 | 90.6 | | CommonSenseQA | 81.8 | 83.5 | 78.1 | 82.0 | 79.0 | 84.4 | 79.6 | 88.5 | | TruthfulQA | 74.5 | 77.5 | 60.1 | 67.4 | 63.2 | 81.9 | 85.8 | 85.6 | | HumanEval | 74.4 | 70.7 | 37.8 | 39.6 | 60.4 | 78.7 | 62.2 | 92.1 | | MBPP | 80.3 | 80.8 | 60.2 | 70.7 | 67.7 | 81.3 | 77.8 | 90.4 | | Average | 79.6 | 79.2 | 69.6 | 76.2 | 69.4 | 82.8 | 75.2 | 85.7 | ### Livebench Performance on LiveBench-2024-07-25. Models are ranked by their average score (AVG). *Baseline results are referenced from the official benchmark. 
| | Reasoning | Coding | Mathematics | Data Analysis | Language | IF | AVG | |------------------------------|-----------|----------|--------------|---------------|----------|----------|----------| | Claude-3-haiku* | 29.3 | 24.5 | 25.7 | 41.5 | 30.1 | 64.0 | 35.9 | | Mixtral-8x22B-instruct-v0.1* | 29.3 | 32.0 | 28.3 | 31.7 | 26.5 | 63.1 | 35.2 | | GPT-3.5-turbo-0125* | 26.7 | 27.7 | 26.9 | 41.2 | 24.2 | 60.5 | 34.5 | | **GRIN MoE** | **35.3** | **23.7** | **29.8** | **32.0** | **16.9** | **57.6** | **32.5** | | Mistral-small-2402* | 26.0 | 21.2 | 28.2 | 31.9 | 22.1 | 63.9 | 32.2 | | Command-r-plus* | 28.7 | 19.5 | 24.9 | 24.6 | 23.9 | 71.5 | 32.2 | | Gemma-2-9B-it* | 17.3 | 22.5 | 24.0 | 35.1 | 27.6 | 61.6 | 31.3 | ## Training ### Model | | | |---------------------|-----| | Developer | Microsoft | | Architecture | GRIN MoE has 16x3.8B parameters with **6.6B active parameters** when using 2 experts. The model is a mixture-of-expert decoder-only Transformer model using the tokenizer with vocabulary size of 32,064. | | Inputs | Text. It is best suited for prompts using chat format. | | Context length | 4K tokens | | GPUs | 512 H100-80G | | Training time | 18 days | | Training data | 4.0T tokens | | Outputs | Generated text in response to the input | | Dates | Trained between April and June 2024 | | Status | This is a static model trained on an offline dataset with cutoff date October 2023 for publicly available data. Future versions of the tuned models may be released as we improve models. 
| | Supported languages | English | | Release date | Sep 2024 | | License | MIT | ### Training Datasets Our training data includes a wide variety of sources, totaling 4 trillion tokens, and is a combination of 1) publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) high quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness. More details about data can be found in the [Phi-3 Technical Report](https://arxiv.org/pdf/2404.14219). ## Responsible AI Considerations Like other language models, Gradient Informed (GRIN) MoE model can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: * Quality of Service: GRIN MoE is trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. * Representation of Harms & Perpetuation of Stereotypes: This model can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. 
* Inappropriate or Offensive Content: This model may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. * Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. * Limited Scope for Code: Majority of the training data is based in Python and use common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. Developers should apply responsible AI best practices and are responsible for ensuring that a specific use-case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include: * Allocation: The model may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. * High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. * Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. 
At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). * Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. * Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations. * Copyrighted content: The model might generate content that infringes on copyright protections. Developers should implement measures to detect and filter copyrighted material, and end-users should be informed about the potential for unintended copyright violations and the importance of verifying original sources to avoid legal complications. * Election Misinformation: Developers should ensure robust verification mechanisms are in place to detect and correct false information regarding elections and should inform users of the need for critical evaluation of AI-generated election-related content to mitigate the spread of misinformation. ## License The model is licensed under the [MIT license](./LICENSE). ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies. Hyper Icon
[ "MEDQA" ]
SkyeTeam/stella_en_400m
SkyeTeam
sentence-similarity
[ "sentence-transformers", "pytorch", "safetensors", "new", "feature-extraction", "mteb", "transformers", "sentence-similarity", "custom_code", "arxiv:2205.13147", "license:mit", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-09-28T00:12:08Z
2024-09-28T00:13:33+00:00
18
0
--- license: mit tags: - mteb - sentence-transformers - transformers - sentence-similarity model-index: - name: stella_en_400M_v5 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 92.35820895522387 - type: ap value: 70.81322736988783 - type: ap_weighted value: 70.81322736988783 - type: f1 value: 88.9505466159595 - type: f1_weighted value: 92.68630932872613 - type: main_score value: 92.35820895522387 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 97.1945 - type: ap value: 96.08192192244094 - type: ap_weighted value: 96.08192192244094 - type: f1 value: 97.1936887167346 - type: f1_weighted value: 97.1936887167346 - type: main_score value: 97.1945 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 59.528000000000006 - type: f1 value: 59.21016819840188 - type: f1_weighted value: 59.21016819840188 - type: main_score value: 59.528000000000006 - task: type: Retrieval dataset: name: MTEB ArguAna type: mteb/arguana config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: main_score value: 64.24 - type: map_at_1 value: 40.398 - type: map_at_10 value: 56.215 - type: map_at_100 value: 56.833999999999996 - type: map_at_1000 value: 56.835 - type: map_at_20 value: 56.747 - type: map_at_3 value: 52.181 - type: map_at_5 value: 54.628 - type: mrr_at_1 value: 41.25177809388336 - type: mrr_at_10 value: 56.570762491815216 - type: mrr_at_100 value: 57.17548614361504 - type: mrr_at_1000 value: 57.176650626377466 - type: mrr_at_20 
value: 57.08916253512566 - type: mrr_at_3 value: 52.47747747747754 - type: mrr_at_5 value: 54.94547178757718 - type: nauc_map_at_1000_diff1 value: 22.408086887100158 - type: nauc_map_at_1000_max value: -8.730419096847543 - type: nauc_map_at_1000_std value: -17.789262741255737 - type: nauc_map_at_100_diff1 value: 22.407371684274025 - type: nauc_map_at_100_max value: -8.732263549026266 - type: nauc_map_at_100_std value: -17.79550515579994 - type: nauc_map_at_10_diff1 value: 21.925005073301246 - type: nauc_map_at_10_max value: -8.990323944492134 - type: nauc_map_at_10_std value: -18.199246301671458 - type: nauc_map_at_1_diff1 value: 26.23276644969203 - type: nauc_map_at_1_max value: -12.376511389571245 - type: nauc_map_at_1_std value: -18.11411715207284 - type: nauc_map_at_20_diff1 value: 22.32455790850922 - type: nauc_map_at_20_max value: -8.664671547236034 - type: nauc_map_at_20_std value: -17.8290016125137 - type: nauc_map_at_3_diff1 value: 22.395462147465064 - type: nauc_map_at_3_max value: -8.206580750918844 - type: nauc_map_at_3_std value: -17.604490446911484 - type: nauc_map_at_5_diff1 value: 21.95307379904799 - type: nauc_map_at_5_max value: -8.03958102978443 - type: nauc_map_at_5_std value: -17.36578866595004 - type: nauc_mrr_at_1000_diff1 value: 20.124236798365587 - type: nauc_mrr_at_1000_max value: -9.587376069575898 - type: nauc_mrr_at_1000_std value: -17.79191612151833 - type: nauc_mrr_at_100_diff1 value: 20.123612603474033 - type: nauc_mrr_at_100_max value: -9.589187218607831 - type: nauc_mrr_at_100_std value: -17.7981617777748 - type: nauc_mrr_at_10_diff1 value: 19.723683875738075 - type: nauc_mrr_at_10_max value: -9.774151729178815 - type: nauc_mrr_at_10_std value: -18.168668675495162 - type: nauc_mrr_at_1_diff1 value: 23.945332059908132 - type: nauc_mrr_at_1_max value: -12.260461466152819 - type: nauc_mrr_at_1_std value: -18.007194922921148 - type: nauc_mrr_at_20_diff1 value: 20.04819461810257 - type: nauc_mrr_at_20_max value: -9.518368283588936 - 
type: nauc_mrr_at_20_std value: -17.831608149836136 - type: nauc_mrr_at_3_diff1 value: 19.8571785245832 - type: nauc_mrr_at_3_max value: -9.464375021240478 - type: nauc_mrr_at_3_std value: -17.728533927330453 - type: nauc_mrr_at_5_diff1 value: 19.670313652167827 - type: nauc_mrr_at_5_max value: -8.966372585728434 - type: nauc_mrr_at_5_std value: -17.468955834324817 - type: nauc_ndcg_at_1000_diff1 value: 21.863049281767417 - type: nauc_ndcg_at_1000_max value: -8.18698520924057 - type: nauc_ndcg_at_1000_std value: -17.634483364794804 - type: nauc_ndcg_at_100_diff1 value: 21.849924385738586 - type: nauc_ndcg_at_100_max value: -8.226437560889345 - type: nauc_ndcg_at_100_std value: -17.774648478087002 - type: nauc_ndcg_at_10_diff1 value: 19.888395590413573 - type: nauc_ndcg_at_10_max value: -8.968706085632382 - type: nauc_ndcg_at_10_std value: -19.31386964628115 - type: nauc_ndcg_at_1_diff1 value: 26.23276644969203 - type: nauc_ndcg_at_1_max value: -12.376511389571245 - type: nauc_ndcg_at_1_std value: -18.11411715207284 - type: nauc_ndcg_at_20_diff1 value: 21.38413342416933 - type: nauc_ndcg_at_20_max value: -7.636238194084164 - type: nauc_ndcg_at_20_std value: -17.946390844693028 - type: nauc_ndcg_at_3_diff1 value: 21.29169165029195 - type: nauc_ndcg_at_3_max value: -6.793840499730093 - type: nauc_ndcg_at_3_std value: -17.52359001586737 - type: nauc_ndcg_at_5_diff1 value: 20.238297656671364 - type: nauc_ndcg_at_5_max value: -6.424992706950072 - type: nauc_ndcg_at_5_std value: -17.082391132291356 - type: nauc_precision_at_1000_diff1 value: -7.05195108528572 - type: nauc_precision_at_1000_max value: 34.439879624882145 - type: nauc_precision_at_1000_std value: 68.72436351659353 - type: nauc_precision_at_100_diff1 value: -2.769464113932605 - type: nauc_precision_at_100_max value: 9.89562961226698 - type: nauc_precision_at_100_std value: -0.5880967482224028 - type: nauc_precision_at_10_diff1 value: 2.1371544726832323 - type: nauc_precision_at_10_max value: 
-11.93051325147756 - type: nauc_precision_at_10_std value: -30.83144187392059 - type: nauc_precision_at_1_diff1 value: 26.23276644969203 - type: nauc_precision_at_1_max value: -12.376511389571245 - type: nauc_precision_at_1_std value: -18.11411715207284 - type: nauc_precision_at_20_diff1 value: 3.780146814257504 - type: nauc_precision_at_20_max value: 17.06527540214615 - type: nauc_precision_at_20_std value: -20.36832563035565 - type: nauc_precision_at_3_diff1 value: 17.63894384012077 - type: nauc_precision_at_3_max value: -2.0220490624638887 - type: nauc_precision_at_3_std value: -17.285601413493918 - type: nauc_precision_at_5_diff1 value: 12.557855071944601 - type: nauc_precision_at_5_max value: 0.5840236463956658 - type: nauc_precision_at_5_std value: -15.827224420217846 - type: nauc_recall_at_1000_diff1 value: -7.051951085286463 - type: nauc_recall_at_1000_max value: 34.43987962487738 - type: nauc_recall_at_1000_std value: 68.724363516591 - type: nauc_recall_at_100_diff1 value: -2.769464113930314 - type: nauc_recall_at_100_max value: 9.895629612270017 - type: nauc_recall_at_100_std value: -0.58809674821745 - type: nauc_recall_at_10_diff1 value: 2.1371544726834495 - type: nauc_recall_at_10_max value: -11.930513251477253 - type: nauc_recall_at_10_std value: -30.83144187392047 - type: nauc_recall_at_1_diff1 value: 26.23276644969203 - type: nauc_recall_at_1_max value: -12.376511389571245 - type: nauc_recall_at_1_std value: -18.11411715207284 - type: nauc_recall_at_20_diff1 value: 3.7801468142575922 - type: nauc_recall_at_20_max value: 17.0652754021456 - type: nauc_recall_at_20_std value: -20.36832563035559 - type: nauc_recall_at_3_diff1 value: 17.63894384012074 - type: nauc_recall_at_3_max value: -2.02204906246383 - type: nauc_recall_at_3_std value: -17.28560141349386 - type: nauc_recall_at_5_diff1 value: 12.55785507194463 - type: nauc_recall_at_5_max value: 0.5840236463957296 - type: nauc_recall_at_5_std value: -15.827224420217856 - type: ndcg_at_1 value: 40.398 - 
type: ndcg_at_10 value: 64.24 - type: ndcg_at_100 value: 66.631 - type: ndcg_at_1000 value: 66.65100000000001 - type: ndcg_at_20 value: 66.086 - type: ndcg_at_3 value: 55.938 - type: ndcg_at_5 value: 60.370000000000005 - type: precision_at_1 value: 40.398 - type: precision_at_10 value: 8.962 - type: precision_at_100 value: 0.9950000000000001 - type: precision_at_1000 value: 0.1 - type: precision_at_20 value: 4.836 - type: precision_at_3 value: 22.262 - type: precision_at_5 value: 15.519 - type: recall_at_1 value: 40.398 - type: recall_at_10 value: 89.616 - type: recall_at_100 value: 99.502 - type: recall_at_1000 value: 99.644 - type: recall_at_20 value: 96.72800000000001 - type: recall_at_3 value: 66.78500000000001 - type: recall_at_5 value: 77.596 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: main_score value: 55.1564333205451 - type: v_measure value: 55.1564333205451 - type: v_measure_std value: 14.696883012214512 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: main_score value: 49.823698316694795 - type: v_measure value: 49.823698316694795 - type: v_measure_std value: 14.951660654298186 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: main_score value: 66.15294503553424 - type: map value: 66.15294503553424 - type: mrr value: 78.53438420612935 - type: nAUC_map_diff1 value: 12.569697092717997 - type: nAUC_map_max value: 21.50670312412572 - type: nAUC_map_std value: 16.943786429229064 - type: nAUC_mrr_diff1 value: 15.590272897361238 - type: nAUC_mrr_max value: 34.96072022474653 - type: nAUC_mrr_std value: 21.649217605241045 - 
task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cosine_pearson value: 85.7824546319275 - type: cosine_spearman value: 83.29587385660628 - type: euclidean_pearson value: 84.58764190565167 - type: euclidean_spearman value: 83.30069324352772 - type: main_score value: 83.29587385660628 - type: manhattan_pearson value: 84.95996839947179 - type: manhattan_spearman value: 83.87480271054358 - type: pearson value: 85.7824546319275 - type: spearman value: 83.29587385660628 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 89.30194805194806 - type: f1 value: 89.26182507266391 - type: f1_weighted value: 89.26182507266391 - type: main_score value: 89.30194805194806 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: main_score value: 50.67972171889736 - type: v_measure value: 50.67972171889736 - type: v_measure_std value: 0.7687409980036303 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: main_score value: 45.80539715556144 - type: v_measure value: 45.80539715556144 - type: v_measure_std value: 0.9601346216579142 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval type: mteb/cqadupstack config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: main_score value: 44.361250000000005 - type: map_at_1 value: 28.304499999999997 - type: map_at_10 value: 38.54841666666666 - type: map_at_100 value: 39.83141666666667 - type: map_at_1000 value: 39.944750000000006 - type: map_at_20 value: 
39.25341666666667 - type: map_at_3 value: 35.406749999999995 - type: map_at_5 value: 37.15558333333333 - type: mrr_at_1 value: 34.09077232860122 - type: mrr_at_10 value: 43.15445393211421 - type: mrr_at_100 value: 43.98645286848257 - type: mrr_at_1000 value: 44.037631313469404 - type: mrr_at_20 value: 43.64045813249614 - type: mrr_at_3 value: 40.674138648480486 - type: mrr_at_5 value: 42.106251182620255 - type: nauc_map_at_1000_diff1 value: 46.250011739434996 - type: nauc_map_at_1000_max value: 30.13664446260598 - type: nauc_map_at_1000_std value: 5.422301791618935 - type: nauc_map_at_100_diff1 value: 46.253631351999395 - type: nauc_map_at_100_max value: 30.12612918885181 - type: nauc_map_at_100_std value: 5.367077019987172 - type: nauc_map_at_10_diff1 value: 46.328171341741346 - type: nauc_map_at_10_max value: 29.80274612581464 - type: nauc_map_at_10_std value: 4.62996685176396 - type: nauc_map_at_1_diff1 value: 51.56118117729493 - type: nauc_map_at_1_max value: 27.94885243863768 - type: nauc_map_at_1_std value: 1.700366508927356 - type: nauc_map_at_20_diff1 value: 46.286750260299094 - type: nauc_map_at_20_max value: 29.979205290353278 - type: nauc_map_at_20_std value: 5.010588412441873 - type: nauc_map_at_3_diff1 value: 47.10018183619064 - type: nauc_map_at_3_max value: 29.062318206078753 - type: nauc_map_at_3_std value: 3.2235696254694197 - type: nauc_map_at_5_diff1 value: 46.41971733050039 - type: nauc_map_at_5_max value: 29.456798617695657 - type: nauc_map_at_5_std value: 4.0921691023077145 - type: nauc_mrr_at_1000_diff1 value: 45.88888977975723 - type: nauc_mrr_at_1000_max value: 32.162138978089544 - type: nauc_mrr_at_1000_std value: 6.2811943424217915 - type: nauc_mrr_at_100_diff1 value: 45.87480433011124 - type: nauc_mrr_at_100_max value: 32.16011334212834 - type: nauc_mrr_at_100_std value: 6.2865717772421785 - type: nauc_mrr_at_10_diff1 value: 45.849652904658825 - type: nauc_mrr_at_10_max value: 32.13847916232293 - type: nauc_mrr_at_10_std value: 
6.105718728141999 - type: nauc_mrr_at_1_diff1 value: 51.013730325062156 - type: nauc_mrr_at_1_max value: 32.77457396492779 - type: nauc_mrr_at_1_std value: 4.415684893471724 - type: nauc_mrr_at_20_diff1 value: 45.86663046255274 - type: nauc_mrr_at_20_max value: 32.15219360697865 - type: nauc_mrr_at_20_std value: 6.19603046412763 - type: nauc_mrr_at_3_diff1 value: 46.522376582423185 - type: nauc_mrr_at_3_max value: 32.18259009733714 - type: nauc_mrr_at_3_std value: 5.288000648220897 - type: nauc_mrr_at_5_diff1 value: 45.86611481369745 - type: nauc_mrr_at_5_max value: 32.14261639054921 - type: nauc_mrr_at_5_std value: 5.8811238177073735 - type: nauc_ndcg_at_1000_diff1 value: 44.5055097547565 - type: nauc_ndcg_at_1000_max value: 31.149682057975458 - type: nauc_ndcg_at_1000_std value: 8.157937194901333 - type: nauc_ndcg_at_100_diff1 value: 44.12398363638596 - type: nauc_ndcg_at_100_max value: 30.878064321409994 - type: nauc_ndcg_at_100_std value: 8.40493441452808 - type: nauc_ndcg_at_10_diff1 value: 44.200093505221474 - type: nauc_ndcg_at_10_max value: 30.15267107733158 - type: nauc_ndcg_at_10_std value: 6.407495361566107 - type: nauc_ndcg_at_1_diff1 value: 51.013730325062156 - type: nauc_ndcg_at_1_max value: 32.77457396492779 - type: nauc_ndcg_at_1_std value: 4.415684893471724 - type: nauc_ndcg_at_20_diff1 value: 44.16988321564116 - type: nauc_ndcg_at_20_max value: 30.333532500651213 - type: nauc_ndcg_at_20_std value: 7.10024701386895 - type: nauc_ndcg_at_3_diff1 value: 45.35982873879988 - type: nauc_ndcg_at_3_max value: 30.288312457948702 - type: nauc_ndcg_at_3_std value: 4.653900898293395 - type: nauc_ndcg_at_5_diff1 value: 44.324558115380185 - type: nauc_ndcg_at_5_max value: 30.048149698941373 - type: nauc_ndcg_at_5_std value: 5.6684459618413205 - type: nauc_precision_at_1000_diff1 value: -7.282175798304458 - type: nauc_precision_at_1000_max value: 7.820142031765352 - type: nauc_precision_at_1000_std value: 11.736131836431172 - type: nauc_precision_at_100_diff1 
value: 1.0222940256506976 - type: nauc_precision_at_100_max value: 16.12346497070298 - type: nauc_precision_at_100_std value: 18.202607395247874 - type: nauc_precision_at_10_diff1 value: 18.289439185857837 - type: nauc_precision_at_10_max value: 26.116517399154375 - type: nauc_precision_at_10_std value: 13.921214069982302 - type: nauc_precision_at_1_diff1 value: 51.013730325062156 - type: nauc_precision_at_1_max value: 32.77457396492779 - type: nauc_precision_at_1_std value: 4.415684893471724 - type: nauc_precision_at_20_diff1 value: 12.365165405210886 - type: nauc_precision_at_20_max value: 22.946297258937367 - type: nauc_precision_at_20_std value: 16.13862870358933 - type: nauc_precision_at_3_diff1 value: 32.063423642849685 - type: nauc_precision_at_3_max value: 30.140965811989407 - type: nauc_precision_at_3_std value: 8.501746262550146 - type: nauc_precision_at_5_diff1 value: 24.777203357717948 - type: nauc_precision_at_5_max value: 28.401579566848472 - type: nauc_precision_at_5_std value: 11.643246774390914 - type: nauc_recall_at_1000_diff1 value: 30.04216463401409 - type: nauc_recall_at_1000_max value: 34.98067760563842 - type: nauc_recall_at_1000_std value: 48.01453905250591 - type: nauc_recall_at_100_diff1 value: 31.193415507513972 - type: nauc_recall_at_100_max value: 28.69740149270981 - type: nauc_recall_at_100_std value: 25.20960758920368 - type: nauc_recall_at_10_diff1 value: 36.18870823636506 - type: nauc_recall_at_10_max value: 26.005625231341238 - type: nauc_recall_at_10_std value: 8.891983977041376 - type: nauc_recall_at_1_diff1 value: 51.56118117729493 - type: nauc_recall_at_1_max value: 27.94885243863768 - type: nauc_recall_at_1_std value: 1.700366508927356 - type: nauc_recall_at_20_diff1 value: 34.93996118564803 - type: nauc_recall_at_20_max value: 26.149961715956138 - type: nauc_recall_at_20_std value: 12.0657502367633 - type: nauc_recall_at_3_diff1 value: 40.80743946709512 - type: nauc_recall_at_3_max value: 26.443127773025783 - type: 
nauc_recall_at_3_std value: 3.7011448604241477 - type: nauc_recall_at_5_diff1 value: 37.608535157055776 - type: nauc_recall_at_5_max value: 26.168016189725822 - type: nauc_recall_at_5_std value: 6.344191564595316 - type: ndcg_at_1 value: 34.09083333333333 - type: ndcg_at_10 value: 44.361250000000005 - type: ndcg_at_100 value: 49.586166666666664 - type: ndcg_at_1000 value: 51.623583333333336 - type: ndcg_at_20 value: 46.40158333333333 - type: ndcg_at_3 value: 39.27733333333333 - type: ndcg_at_5 value: 41.662333333333336 - type: precision_at_1 value: 34.09083333333333 - type: precision_at_10 value: 7.957000000000002 - type: precision_at_100 value: 1.2521666666666669 - type: precision_at_1000 value: 0.16125 - type: precision_at_20 value: 4.6755 - type: precision_at_3 value: 18.402083333333334 - type: precision_at_5 value: 13.104333333333335 - type: recall_at_1 value: 28.304499999999997 - type: recall_at_10 value: 56.80666666666667 - type: recall_at_100 value: 79.66208333333334 - type: recall_at_1000 value: 93.6455 - type: recall_at_20 value: 64.2495 - type: recall_at_3 value: 42.431333333333335 - type: recall_at_5 value: 48.665416666666665 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: mteb/climate-fever config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: main_score value: 43.525999999999996 - type: map_at_1 value: 19.291 - type: map_at_10 value: 33.471000000000004 - type: map_at_100 value: 35.388999999999996 - type: map_at_1000 value: 35.568 - type: map_at_20 value: 34.496 - type: map_at_3 value: 28.713 - type: map_at_5 value: 31.384 - type: mrr_at_1 value: 43.77850162866449 - type: mrr_at_10 value: 56.28576598934912 - type: mrr_at_100 value: 56.8588518168194 - type: mrr_at_1000 value: 56.878236725973544 - type: mrr_at_20 value: 56.6409328120183 - type: mrr_at_3 value: 53.56134636264935 - type: mrr_at_5 value: 55.27795874049956 - type: nauc_map_at_1000_diff1 value: 27.262513153363876 - type: 
nauc_map_at_1000_max value: 40.099398684385584 - type: nauc_map_at_1000_std value: 18.847812394005512 - type: nauc_map_at_100_diff1 value: 27.238993503030745 - type: nauc_map_at_100_max value: 40.07730434492169 - type: nauc_map_at_100_std value: 18.795349250833684 - type: nauc_map_at_10_diff1 value: 27.70929180366227 - type: nauc_map_at_10_max value: 39.55987024970173 - type: nauc_map_at_10_std value: 17.214881544648996 - type: nauc_map_at_1_diff1 value: 43.34155892182403 - type: nauc_map_at_1_max value: 38.23324890148018 - type: nauc_map_at_1_std value: 6.0781444393516075 - type: nauc_map_at_20_diff1 value: 27.311577477800103 - type: nauc_map_at_20_max value: 39.624414083413456 - type: nauc_map_at_20_std value: 18.149811054163287 - type: nauc_map_at_3_diff1 value: 30.475965062734367 - type: nauc_map_at_3_max value: 38.49324825043695 - type: nauc_map_at_3_std value: 13.357656038648487 - type: nauc_map_at_5_diff1 value: 28.425110095017747 - type: nauc_map_at_5_max value: 39.017894870747796 - type: nauc_map_at_5_std value: 15.543817194122564 - type: nauc_mrr_at_1000_diff1 value: 33.16689354701644 - type: nauc_mrr_at_1000_max value: 41.70755363247148 - type: nauc_mrr_at_1000_std value: 24.61667417463176 - type: nauc_mrr_at_100_diff1 value: 33.147229262917506 - type: nauc_mrr_at_100_max value: 41.712455697170725 - type: nauc_mrr_at_100_std value: 24.6418922043652 - type: nauc_mrr_at_10_diff1 value: 32.94185191112572 - type: nauc_mrr_at_10_max value: 41.64272730141954 - type: nauc_mrr_at_10_std value: 24.663391015702707 - type: nauc_mrr_at_1_diff1 value: 39.571969559016395 - type: nauc_mrr_at_1_max value: 39.396249211263495 - type: nauc_mrr_at_1_std value: 16.984149923258357 - type: nauc_mrr_at_20_diff1 value: 33.10040770334742 - type: nauc_mrr_at_20_max value: 41.807565560083034 - type: nauc_mrr_at_20_std value: 24.8064180365271 - type: nauc_mrr_at_3_diff1 value: 33.065406161485704 - type: nauc_mrr_at_3_max value: 41.049510969934694 - type: nauc_mrr_at_3_std value: 
23.18371458928609 - type: nauc_mrr_at_5_diff1 value: 33.2389593543916 - type: nauc_mrr_at_5_max value: 41.629486918949915 - type: nauc_mrr_at_5_std value: 24.5777253036149 - type: nauc_ndcg_at_1000_diff1 value: 25.868840609197637 - type: nauc_ndcg_at_1000_max value: 42.79564910784761 - type: nauc_ndcg_at_1000_std value: 27.035091271680113 - type: nauc_ndcg_at_100_diff1 value: 25.019789319579942 - type: nauc_ndcg_at_100_max value: 42.482345143533735 - type: nauc_ndcg_at_100_std value: 26.76872010731345 - type: nauc_ndcg_at_10_diff1 value: 25.949464660653238 - type: nauc_ndcg_at_10_max value: 40.79769544643906 - type: nauc_ndcg_at_10_std value: 22.486116508973204 - type: nauc_ndcg_at_1_diff1 value: 39.571969559016395 - type: nauc_ndcg_at_1_max value: 39.396249211263495 - type: nauc_ndcg_at_1_std value: 16.984149923258357 - type: nauc_ndcg_at_20_diff1 value: 25.173455685962214 - type: nauc_ndcg_at_20_max value: 40.88873540662413 - type: nauc_ndcg_at_20_std value: 24.4451041955519 - type: nauc_ndcg_at_3_diff1 value: 28.185416070726333 - type: nauc_ndcg_at_3_max value: 39.10600031163912 - type: nauc_ndcg_at_3_std value: 18.42694044215541 - type: nauc_ndcg_at_5_diff1 value: 27.112647584005583 - type: nauc_ndcg_at_5_max value: 40.154045682322526 - type: nauc_ndcg_at_5_std value: 20.26822517176828 - type: nauc_precision_at_1000_diff1 value: -16.42087927044017 - type: nauc_precision_at_1000_max value: 3.5326295053913 - type: nauc_precision_at_1000_std value: 24.406810708493197 - type: nauc_precision_at_100_diff1 value: -12.17648135724982 - type: nauc_precision_at_100_max value: 15.895489260126183 - type: nauc_precision_at_100_std value: 32.48346122610907 - type: nauc_precision_at_10_diff1 value: -1.2493131347748072 - type: nauc_precision_at_10_max value: 26.409459305604376 - type: nauc_precision_at_10_std value: 31.115432019300016 - type: nauc_precision_at_1_diff1 value: 39.571969559016395 - type: nauc_precision_at_1_max value: 39.396249211263495 - type: 
nauc_precision_at_1_std value: 16.984149923258357 - type: nauc_precision_at_20_diff1 value: -6.597509397240593 - type: nauc_precision_at_20_max value: 21.461984620659695 - type: nauc_precision_at_20_std value: 32.9450259748889 - type: nauc_precision_at_3_diff1 value: 9.46378764865453 - type: nauc_precision_at_3_max value: 32.03650819375425 - type: nauc_precision_at_3_std value: 26.489382638510765 - type: nauc_precision_at_5_diff1 value: 3.5987036728169537 - type: nauc_precision_at_5_max value: 30.633955978579703 - type: nauc_precision_at_5_std value: 30.532430088014443 - type: nauc_recall_at_1000_diff1 value: 10.714633106872254 - type: nauc_recall_at_1000_max value: 43.94958623961 - type: nauc_recall_at_1000_std value: 51.78914468954123 - type: nauc_recall_at_100_diff1 value: 9.63781472255557 - type: nauc_recall_at_100_max value: 38.50917465255336 - type: nauc_recall_at_100_std value: 37.78623984642377 - type: nauc_recall_at_10_diff1 value: 16.480342820841688 - type: nauc_recall_at_10_max value: 35.982566867357406 - type: nauc_recall_at_10_std value: 23.30688188788895 - type: nauc_recall_at_1_diff1 value: 43.34155892182403 - type: nauc_recall_at_1_max value: 38.23324890148018 - type: nauc_recall_at_1_std value: 6.0781444393516075 - type: nauc_recall_at_20_diff1 value: 13.521048985146367 - type: nauc_recall_at_20_max value: 34.62462209239834 - type: nauc_recall_at_20_std value: 27.85924191501618 - type: nauc_recall_at_3_diff1 value: 23.57032748533523 - type: nauc_recall_at_3_max value: 36.32703197635613 - type: nauc_recall_at_3_std value: 15.730238734014337 - type: nauc_recall_at_5_diff1 value: 19.61387036368584 - type: nauc_recall_at_5_max value: 36.22030835529556 - type: nauc_recall_at_5_std value: 19.76310648649897 - type: ndcg_at_1 value: 43.779 - type: ndcg_at_10 value: 43.525999999999996 - type: ndcg_at_100 value: 50.138000000000005 - type: ndcg_at_1000 value: 52.991 - type: ndcg_at_20 value: 46.083 - type: ndcg_at_3 value: 38.002 - type: ndcg_at_5 value: 
39.842 - type: precision_at_1 value: 43.779 - type: precision_at_10 value: 13.205 - type: precision_at_100 value: 2.051 - type: precision_at_1000 value: 0.259 - type: precision_at_20 value: 7.722999999999999 - type: precision_at_3 value: 28.903000000000002 - type: precision_at_5 value: 21.368000000000002 - type: recall_at_1 value: 19.291 - type: recall_at_10 value: 48.754 - type: recall_at_100 value: 70.97200000000001 - type: recall_at_1000 value: 86.611 - type: recall_at_20 value: 55.884 - type: recall_at_3 value: 34.101 - type: recall_at_5 value: 40.784 - task: type: Retrieval dataset: name: MTEB DBPedia type: mteb/dbpedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: main_score value: 49.884 - type: map_at_1 value: 9.913 - type: map_at_10 value: 23.186999999999998 - type: map_at_100 value: 34.207 - type: map_at_1000 value: 36.318 - type: map_at_20 value: 27.419 - type: map_at_3 value: 15.656 - type: map_at_5 value: 18.945999999999998 - type: mrr_at_1 value: 75.75 - type: mrr_at_10 value: 82.16279761904761 - type: mrr_at_100 value: 82.48445635330299 - type: mrr_at_1000 value: 82.4870246719901 - type: mrr_at_20 value: 82.36203632968338 - type: mrr_at_3 value: 81.29166666666666 - type: mrr_at_5 value: 82.02916666666667 - type: nauc_map_at_1000_diff1 value: 17.0739966990996 - type: nauc_map_at_1000_max value: 28.440065298437133 - type: nauc_map_at_1000_std value: 20.83498154003865 - type: nauc_map_at_100_diff1 value: 17.75982086107111 - type: nauc_map_at_100_max value: 26.87850835673573 - type: nauc_map_at_100_std value: 18.350282298599275 - type: nauc_map_at_10_diff1 value: 17.15984258564116 - type: nauc_map_at_10_max value: 10.846179132675553 - type: nauc_map_at_10_std value: -6.263534464094614 - type: nauc_map_at_1_diff1 value: 24.014897777973694 - type: nauc_map_at_1_max value: -4.556638938723358 - type: nauc_map_at_1_std value: -22.7844467526989 - type: nauc_map_at_20_diff1 value: 16.3179372493187 - type: 
nauc_map_at_20_max value: 17.176378915498915 - type: nauc_map_at_20_std value: 1.9378637630340372 - type: nauc_map_at_3_diff1 value: 19.12786794046792 - type: nauc_map_at_3_max value: 0.09063919305677291 - type: nauc_map_at_3_std value: -16.713143158330492 - type: nauc_map_at_5_diff1 value: 18.76504725420023 - type: nauc_map_at_5_max value: 5.040867712207419 - type: nauc_map_at_5_std value: -12.382578318931165 - type: nauc_mrr_at_1000_diff1 value: 54.61266255011247 - type: nauc_mrr_at_1000_max value: 60.83961280977112 - type: nauc_mrr_at_1000_std value: 32.70429260443016 - type: nauc_mrr_at_100_diff1 value: 54.61346236538542 - type: nauc_mrr_at_100_max value: 60.8407974416647 - type: nauc_mrr_at_100_std value: 32.69272843993462 - type: nauc_mrr_at_10_diff1 value: 54.74633685810871 - type: nauc_mrr_at_10_max value: 61.084525933097865 - type: nauc_mrr_at_10_std value: 33.001220210025565 - type: nauc_mrr_at_1_diff1 value: 56.12708423835806 - type: nauc_mrr_at_1_max value: 58.9314540998289 - type: nauc_mrr_at_1_std value: 27.39422607651012 - type: nauc_mrr_at_20_diff1 value: 54.58896150245695 - type: nauc_mrr_at_20_max value: 60.890929983464815 - type: nauc_mrr_at_20_std value: 32.65559641276393 - type: nauc_mrr_at_3_diff1 value: 54.38229071443791 - type: nauc_mrr_at_3_max value: 59.987849044098596 - type: nauc_mrr_at_3_std value: 33.439813880719974 - type: nauc_mrr_at_5_diff1 value: 54.961790262449824 - type: nauc_mrr_at_5_max value: 61.17705173908951 - type: nauc_mrr_at_5_std value: 33.30939850734856 - type: nauc_ndcg_at_1000_diff1 value: 29.27465932507067 - type: nauc_ndcg_at_1000_max value: 47.952543312315214 - type: nauc_ndcg_at_1000_std value: 36.17132236391485 - type: nauc_ndcg_at_100_diff1 value: 28.63072328980134 - type: nauc_ndcg_at_100_max value: 41.460833419186564 - type: nauc_ndcg_at_100_std value: 27.157100358988135 - type: nauc_ndcg_at_10_diff1 value: 23.41488013023301 - type: nauc_ndcg_at_10_max value: 39.27798133072349 - type: nauc_ndcg_at_10_std 
value: 21.979241438928312 - type: nauc_ndcg_at_1_diff1 value: 46.12120543657642 - type: nauc_ndcg_at_1_max value: 47.28452124039853 - type: nauc_ndcg_at_1_std value: 19.799884708952543 - type: nauc_ndcg_at_20_diff1 value: 23.627669045115574 - type: nauc_ndcg_at_20_max value: 35.88225062457673 - type: nauc_ndcg_at_20_std value: 18.218628030529498 - type: nauc_ndcg_at_3_diff1 value: 25.37309228946118 - type: nauc_ndcg_at_3_max value: 40.64426332992231 - type: nauc_ndcg_at_3_std value: 24.608330645901482 - type: nauc_ndcg_at_5_diff1 value: 24.055798594999654 - type: nauc_ndcg_at_5_max value: 41.16180524175431 - type: nauc_ndcg_at_5_std value: 24.048305528761315 - type: nauc_precision_at_1000_diff1 value: -18.234943251015576 - type: nauc_precision_at_1000_max value: 0.48708502364659184 - type: nauc_precision_at_1000_std value: 2.4473601543134027 - type: nauc_precision_at_100_diff1 value: -3.0077810947381227 - type: nauc_precision_at_100_max value: 25.27249321108913 - type: nauc_precision_at_100_std value: 37.36575792126928 - type: nauc_precision_at_10_diff1 value: -0.2393778190297635 - type: nauc_precision_at_10_max value: 36.40513293547299 - type: nauc_precision_at_10_std value: 37.4827885766009 - type: nauc_precision_at_1_diff1 value: 56.12708423835806 - type: nauc_precision_at_1_max value: 58.9314540998289 - type: nauc_precision_at_1_std value: 27.39422607651012 - type: nauc_precision_at_20_diff1 value: -1.2010133229402933 - type: nauc_precision_at_20_max value: 34.117541814385966 - type: nauc_precision_at_20_std value: 39.13273254177449 - type: nauc_precision_at_3_diff1 value: 11.757378092198486 - type: nauc_precision_at_3_max value: 42.637962482588875 - type: nauc_precision_at_3_std value: 37.42465077352342 - type: nauc_precision_at_5_diff1 value: 7.233177203405101 - type: nauc_precision_at_5_max value: 43.1663582897407 - type: nauc_precision_at_5_std value: 38.848449220750055 - type: nauc_recall_at_1000_diff1 value: 27.33938551969145 - type: 
nauc_recall_at_1000_max value: 45.5614254479334 - type: nauc_recall_at_1000_std value: 50.58528916250458 - type: nauc_recall_at_100_diff1 value: 23.610383761920097 - type: nauc_recall_at_100_max value: 31.422168485847184 - type: nauc_recall_at_100_std value: 25.58649926458304 - type: nauc_recall_at_10_diff1 value: 14.62495111808408 - type: nauc_recall_at_10_max value: 7.4295041277681095 - type: nauc_recall_at_10_std value: -9.32297089600654 - type: nauc_recall_at_1_diff1 value: 24.014897777973694 - type: nauc_recall_at_1_max value: -4.556638938723358 - type: nauc_recall_at_1_std value: -22.7844467526989 - type: nauc_recall_at_20_diff1 value: 14.027862330014662 - type: nauc_recall_at_20_max value: 12.437478731690844 - type: nauc_recall_at_20_std value: -3.0740743798103676 - type: nauc_recall_at_3_diff1 value: 16.354018356566712 - type: nauc_recall_at_3_max value: -2.9812231240997917 - type: nauc_recall_at_3_std value: -18.27746460743442 - type: nauc_recall_at_5_diff1 value: 16.81486583473587 - type: nauc_recall_at_5_max value: 2.420128513974744 - type: nauc_recall_at_5_std value: -14.441820321214108 - type: ndcg_at_1 value: 63.87500000000001 - type: ndcg_at_10 value: 49.884 - type: ndcg_at_100 value: 54.738 - type: ndcg_at_1000 value: 61.635 - type: ndcg_at_20 value: 48.894999999999996 - type: ndcg_at_3 value: 54.287 - type: ndcg_at_5 value: 52.40899999999999 - type: precision_at_1 value: 75.75 - type: precision_at_10 value: 40.9 - type: precision_at_100 value: 13.139999999999999 - type: precision_at_1000 value: 2.533 - type: precision_at_20 value: 30.8 - type: precision_at_3 value: 57.667 - type: precision_at_5 value: 51.05 - type: recall_at_1 value: 9.913 - type: recall_at_10 value: 28.591 - type: recall_at_100 value: 61.017999999999994 - type: recall_at_1000 value: 83.383 - type: recall_at_20 value: 37.834 - type: recall_at_3 value: 17.049 - type: recall_at_5 value: 21.685 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion 
config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 78.77499999999999 - type: f1 value: 73.74058240799386 - type: f1_weighted value: 79.78804377638227 - type: main_score value: 78.77499999999999 - task: type: Retrieval dataset: name: MTEB FEVER type: mteb/fever config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: main_score value: 90.986 - type: map_at_1 value: 81.601 - type: map_at_10 value: 88.242 - type: map_at_100 value: 88.46000000000001 - type: map_at_1000 value: 88.472 - type: map_at_20 value: 88.375 - type: map_at_3 value: 87.237 - type: map_at_5 value: 87.85300000000001 - type: mrr_at_1 value: 87.81878187818782 - type: mrr_at_10 value: 92.20301196786335 - type: mrr_at_100 value: 92.24884236673292 - type: mrr_at_1000 value: 92.2496338899362 - type: mrr_at_20 value: 92.23112073283473 - type: mrr_at_3 value: 91.77417741774165 - type: mrr_at_5 value: 92.03970397039689 - type: nauc_map_at_1000_diff1 value: 56.54670664910505 - type: nauc_map_at_1000_max value: 33.08375749975477 - type: nauc_map_at_1000_std value: 2.7491595418252865 - type: nauc_map_at_100_diff1 value: 56.50887688686924 - type: nauc_map_at_100_max value: 33.075487189958494 - type: nauc_map_at_100_std value: 2.7675869969253375 - type: nauc_map_at_10_diff1 value: 56.08080806610569 - type: nauc_map_at_10_max value: 32.776972098819066 - type: nauc_map_at_10_std value: 2.5904846711290097 - type: nauc_map_at_1_diff1 value: 60.645344065853145 - type: nauc_map_at_1_max value: 31.232776777514797 - type: nauc_map_at_1_std value: -1.1946138176109171 - type: nauc_map_at_20_diff1 value: 56.28378454162355 - type: nauc_map_at_20_max value: 32.98207150385811 - type: nauc_map_at_20_std value: 2.8469814040214025 - type: nauc_map_at_3_diff1 value: 55.81958007095375 - type: nauc_map_at_3_max value: 31.602707711038313 - type: nauc_map_at_3_std value: 0.8117019292273401 - type: nauc_map_at_5_diff1 value: 
55.706025752316535 - type: nauc_map_at_5_max value: 32.16032683604737 - type: nauc_map_at_5_std value: 1.8853201503498669 - type: nauc_mrr_at_1000_diff1 value: 75.4997173366251 - type: nauc_mrr_at_1000_max value: 41.49117135484116 - type: nauc_mrr_at_1000_std value: -2.0636172883680852 - type: nauc_mrr_at_100_diff1 value: 75.50118860648519 - type: nauc_mrr_at_100_max value: 41.49490161517194 - type: nauc_mrr_at_100_std value: -2.057024385178682 - type: nauc_mrr_at_10_diff1 value: 75.47295153099428 - type: nauc_mrr_at_10_max value: 41.55003304042536 - type: nauc_mrr_at_10_std value: -2.0353663198929253 - type: nauc_mrr_at_1_diff1 value: 76.632058433229 - type: nauc_mrr_at_1_max value: 39.754483718891656 - type: nauc_mrr_at_1_std value: -2.962241058101701 - type: nauc_mrr_at_20_diff1 value: 75.47221882396194 - type: nauc_mrr_at_20_max value: 41.50779280480839 - type: nauc_mrr_at_20_std value: -1.9620212266426307 - type: nauc_mrr_at_3_diff1 value: 75.5682297897137 - type: nauc_mrr_at_3_max value: 41.53543801506081 - type: nauc_mrr_at_3_std value: -3.391681195945978 - type: nauc_mrr_at_5_diff1 value: 75.37562775183947 - type: nauc_mrr_at_5_max value: 41.42028509006753 - type: nauc_mrr_at_5_std value: -2.418698675622726 - type: nauc_ndcg_at_1000_diff1 value: 59.364557011624 - type: nauc_ndcg_at_1000_max value: 35.4112238125149 - type: nauc_ndcg_at_1000_std value: 3.717516193303376 - type: nauc_ndcg_at_100_diff1 value: 58.55706703023122 - type: nauc_ndcg_at_100_max value: 35.352285999934594 - type: nauc_ndcg_at_100_std value: 4.273437944266781 - type: nauc_ndcg_at_10_diff1 value: 56.77422701267037 - type: nauc_ndcg_at_10_max value: 34.24909893882957 - type: nauc_ndcg_at_10_std value: 4.178151434006727 - type: nauc_ndcg_at_1_diff1 value: 76.632058433229 - type: nauc_ndcg_at_1_max value: 39.754483718891656 - type: nauc_ndcg_at_1_std value: -2.962241058101701 - type: nauc_ndcg_at_20_diff1 value: 57.27343398231262 - type: nauc_ndcg_at_20_max value: 34.7416626740278 - type: 
nauc_ndcg_at_20_std value: 4.955858766014002 - type: nauc_ndcg_at_3_diff1 value: 57.69267803121093 - type: nauc_ndcg_at_3_max value: 33.13744317023105 - type: nauc_ndcg_at_3_std value: 0.40380284030057023 - type: nauc_ndcg_at_5_diff1 value: 56.57461019113917 - type: nauc_ndcg_at_5_max value: 33.244657840804386 - type: nauc_ndcg_at_5_std value: 2.5121440827702046 - type: nauc_precision_at_1000_diff1 value: -14.54492513449718 - type: nauc_precision_at_1000_max value: -5.94552147573623 - type: nauc_precision_at_1000_std value: 1.2446209816057374 - type: nauc_precision_at_100_diff1 value: -15.452676132568344 - type: nauc_precision_at_100_max value: -3.760241749847617 - type: nauc_precision_at_100_std value: 4.623534605290865 - type: nauc_precision_at_10_diff1 value: -12.712908026086176 - type: nauc_precision_at_10_max value: 0.45241316994816805 - type: nauc_precision_at_10_std value: 7.849478570138391 - type: nauc_precision_at_1_diff1 value: 76.632058433229 - type: nauc_precision_at_1_max value: 39.754483718891656 - type: nauc_precision_at_1_std value: -2.962241058101701 - type: nauc_precision_at_20_diff1 value: -14.514618673172041 - type: nauc_precision_at_20_max value: -1.113635490621818 - type: nauc_precision_at_20_std value: 8.599811730457576 - type: nauc_precision_at_3_diff1 value: 6.1367799850003815 - type: nauc_precision_at_3_max value: 8.466271950897857 - type: nauc_precision_at_3_std value: 1.7458051543195068 - type: nauc_precision_at_5_diff1 value: -5.804548945783379 - type: nauc_precision_at_5_max value: 3.4060251839074818 - type: nauc_precision_at_5_std value: 5.583410511782371 - type: nauc_recall_at_1000_diff1 value: 19.329432953574095 - type: nauc_recall_at_1000_max value: 43.260442595158736 - type: nauc_recall_at_1000_std value: 53.89644660661804 - type: nauc_recall_at_100_diff1 value: 21.265326296051235 - type: nauc_recall_at_100_max value: 38.573000195373695 - type: nauc_recall_at_100_std value: 42.169391082152785 - type: nauc_recall_at_10_diff1 value: 
29.785129558987432 - type: nauc_recall_at_10_max value: 28.379657867558034 - type: nauc_recall_at_10_std value: 21.132574624091973 - type: nauc_recall_at_1_diff1 value: 60.645344065853145 - type: nauc_recall_at_1_max value: 31.232776777514797 - type: nauc_recall_at_1_std value: -1.1946138176109171 - type: nauc_recall_at_20_diff1 value: 25.88845612373954 - type: nauc_recall_at_20_max value: 30.24785945821152 - type: nauc_recall_at_20_std value: 31.73911437468067 - type: nauc_recall_at_3_diff1 value: 42.2968464797395 - type: nauc_recall_at_3_max value: 26.494318009870018 - type: nauc_recall_at_3_std value: 2.6045977160467544 - type: nauc_recall_at_5_diff1 value: 35.81340094401374 - type: nauc_recall_at_5_max value: 25.91082947510634 - type: nauc_recall_at_5_std value: 9.759404930864779 - type: ndcg_at_1 value: 87.819 - type: ndcg_at_10 value: 90.986 - type: ndcg_at_100 value: 91.69 - type: ndcg_at_1000 value: 91.863 - type: ndcg_at_20 value: 91.293 - type: ndcg_at_3 value: 89.621 - type: ndcg_at_5 value: 90.333 - type: precision_at_1 value: 87.819 - type: precision_at_10 value: 10.753 - type: precision_at_100 value: 1.138 - type: precision_at_1000 value: 0.117 - type: precision_at_20 value: 5.4879999999999995 - type: precision_at_3 value: 33.703 - type: precision_at_5 value: 20.831 - type: recall_at_1 value: 81.601 - type: recall_at_10 value: 95.44200000000001 - type: recall_at_100 value: 98.14399999999999 - type: recall_at_1000 value: 99.157 - type: recall_at_20 value: 96.43 - type: recall_at_3 value: 91.729 - type: recall_at_5 value: 93.552 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: mteb/fiqa config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: main_score value: 56.056 - type: map_at_1 value: 28.666000000000004 - type: map_at_10 value: 47.437000000000005 - type: map_at_100 value: 49.537 - type: map_at_1000 value: 49.665 - type: map_at_20 value: 48.618 - type: map_at_3 value: 41.355 - type: map_at_5 value: 
44.525 - type: mrr_at_1 value: 55.55555555555556 - type: mrr_at_10 value: 63.705173427395614 - type: mrr_at_100 value: 64.25449940779741 - type: mrr_at_1000 value: 64.27635581092147 - type: mrr_at_20 value: 64.03796029079103 - type: mrr_at_3 value: 61.49691358024688 - type: mrr_at_5 value: 62.73148148148143 - type: nauc_map_at_1000_diff1 value: 43.24282910397747 - type: nauc_map_at_1000_max value: 28.506093180265644 - type: nauc_map_at_1000_std value: -13.040508386155054 - type: nauc_map_at_100_diff1 value: 43.23650442904607 - type: nauc_map_at_100_max value: 28.470565635459156 - type: nauc_map_at_100_std value: -12.988098780714935 - type: nauc_map_at_10_diff1 value: 43.393840733087686 - type: nauc_map_at_10_max value: 26.637302062720153 - type: nauc_map_at_10_std value: -14.47500292113762 - type: nauc_map_at_1_diff1 value: 47.705150227211725 - type: nauc_map_at_1_max value: 15.354189686550129 - type: nauc_map_at_1_std value: -14.559819859039067 - type: nauc_map_at_20_diff1 value: 43.14121075706104 - type: nauc_map_at_20_max value: 27.811170590408395 - type: nauc_map_at_20_std value: -13.459413585283583 - type: nauc_map_at_3_diff1 value: 44.33938667720801 - type: nauc_map_at_3_max value: 21.785619884549398 - type: nauc_map_at_3_std value: -15.569980103071593 - type: nauc_map_at_5_diff1 value: 43.39280905665027 - type: nauc_map_at_5_max value: 25.021492190645017 - type: nauc_map_at_5_std value: -14.48856622187443 - type: nauc_mrr_at_1000_diff1 value: 52.971563939946286 - type: nauc_mrr_at_1000_max value: 38.88019486172324 - type: nauc_mrr_at_1000_std value: -12.412991642381616 - type: nauc_mrr_at_100_diff1 value: 52.978468139876945 - type: nauc_mrr_at_100_max value: 38.89751787948751 - type: nauc_mrr_at_100_std value: -12.3677876252269 - type: nauc_mrr_at_10_diff1 value: 52.78507148048174 - type: nauc_mrr_at_10_max value: 38.55079809310022 - type: nauc_mrr_at_10_std value: -12.944127025078755 - type: nauc_mrr_at_1_diff1 value: 55.52626805861546 - type: 
nauc_mrr_at_1_max value: 40.49306809164979 - type: nauc_mrr_at_1_std value: -12.886607701317681 - type: nauc_mrr_at_20_diff1 value: 52.9592152665678 - type: nauc_mrr_at_20_max value: 38.88514014589964 - type: nauc_mrr_at_20_std value: -12.434464359819444 - type: nauc_mrr_at_3_diff1 value: 52.73696844091174 - type: nauc_mrr_at_3_max value: 38.61018727252859 - type: nauc_mrr_at_3_std value: -13.123989867364166 - type: nauc_mrr_at_5_diff1 value: 53.037110010188 - type: nauc_mrr_at_5_max value: 38.44770729849151 - type: nauc_mrr_at_5_std value: -13.49318771828972 - type: nauc_ndcg_at_1000_diff1 value: 44.73813840091289 - type: nauc_ndcg_at_1000_max value: 33.70113904685389 - type: nauc_ndcg_at_1000_std value: -10.328687058192742 - type: nauc_ndcg_at_100_diff1 value: 44.595174119928835 - type: nauc_ndcg_at_100_max value: 33.4788285112467 - type: nauc_ndcg_at_100_std value: -8.695355259716946 - type: nauc_ndcg_at_10_diff1 value: 44.39837225263 - type: nauc_ndcg_at_10_max value: 29.188289725593393 - type: nauc_ndcg_at_10_std value: -13.67608323673103 - type: nauc_ndcg_at_1_diff1 value: 55.52626805861546 - type: nauc_ndcg_at_1_max value: 40.49306809164979 - type: nauc_ndcg_at_1_std value: -12.886607701317681 - type: nauc_ndcg_at_20_diff1 value: 44.24661739902305 - type: nauc_ndcg_at_20_max value: 31.667868318249965 - type: nauc_ndcg_at_20_std value: -10.65470780066342 - type: nauc_ndcg_at_3_diff1 value: 43.39857166975522 - type: nauc_ndcg_at_3_max value: 31.764668313577495 - type: nauc_ndcg_at_3_std value: -14.494866954678152 - type: nauc_ndcg_at_5_diff1 value: 43.16976647347281 - type: nauc_ndcg_at_5_max value: 29.878329062643143 - type: nauc_ndcg_at_5_std value: -13.987689089179739 - type: nauc_precision_at_1000_diff1 value: -9.807973252625484 - type: nauc_precision_at_1000_max value: 26.6279603849494 - type: nauc_precision_at_1000_std value: 7.113187103520632 - type: nauc_precision_at_100_diff1 value: -4.777149603323976 - type: nauc_precision_at_100_max value: 
31.03410463692187 - type: nauc_precision_at_100_std value: 10.463144150275435 - type: nauc_precision_at_10_diff1 value: 8.691528703215962 - type: nauc_precision_at_10_max value: 33.329579434123374 - type: nauc_precision_at_10_std value: -0.8002015226329403 - type: nauc_precision_at_1_diff1 value: 55.52626805861546 - type: nauc_precision_at_1_max value: 40.49306809164979 - type: nauc_precision_at_1_std value: -12.886607701317681 - type: nauc_precision_at_20_diff1 value: 3.4564653474184284 - type: nauc_precision_at_20_max value: 34.401070158471136 - type: nauc_precision_at_20_std value: 5.813431200164549 - type: nauc_precision_at_3_diff1 value: 22.463219705462187 - type: nauc_precision_at_3_max value: 34.77413976546924 - type: nauc_precision_at_3_std value: -7.083890789741479 - type: nauc_precision_at_5_diff1 value: 14.011006004883154 - type: nauc_precision_at_5_max value: 35.73655466853702 - type: nauc_precision_at_5_std value: -2.8395172077771598 - type: nauc_recall_at_1000_diff1 value: 16.478046357391555 - type: nauc_recall_at_1000_max value: 43.231704288282344 - type: nauc_recall_at_1000_std value: 38.430684937573645 - type: nauc_recall_at_100_diff1 value: 30.764718344602436 - type: nauc_recall_at_100_max value: 31.769050487166655 - type: nauc_recall_at_100_std value: 23.48468311677149 - type: nauc_recall_at_10_diff1 value: 34.47339565324045 - type: nauc_recall_at_10_max value: 19.054212335800454 - type: nauc_recall_at_10_std value: -11.039734015330437 - type: nauc_recall_at_1_diff1 value: 47.705150227211725 - type: nauc_recall_at_1_max value: 15.354189686550129 - type: nauc_recall_at_1_std value: -14.559819859039067 - type: nauc_recall_at_20_diff1 value: 32.1011474016873 - type: nauc_recall_at_20_max value: 25.546372988304423 - type: nauc_recall_at_20_std value: -0.007233471152482897 - type: nauc_recall_at_3_diff1 value: 37.5708138019065 - type: nauc_recall_at_3_max value: 16.66410785756736 - type: nauc_recall_at_3_std value: -15.404817020108966 - type: 
nauc_recall_at_5_diff1 value: 35.714519648479595 - type: nauc_recall_at_5_max value: 19.02075233009296 - type: nauc_recall_at_5_std value: -13.180963359760725 - type: ndcg_at_1 value: 55.556000000000004 - type: ndcg_at_10 value: 56.056 - type: ndcg_at_100 value: 62.44 - type: ndcg_at_1000 value: 64.263 - type: ndcg_at_20 value: 58.638999999999996 - type: ndcg_at_3 value: 51.722 - type: ndcg_at_5 value: 52.701 - type: precision_at_1 value: 55.556000000000004 - type: precision_at_10 value: 15.679000000000002 - type: precision_at_100 value: 2.252 - type: precision_at_1000 value: 0.257 - type: precision_at_20 value: 9.02 - type: precision_at_3 value: 34.619 - type: precision_at_5 value: 25.093 - type: recall_at_1 value: 28.666000000000004 - type: recall_at_10 value: 63.717999999999996 - type: recall_at_100 value: 86.938 - type: recall_at_1000 value: 97.603 - type: recall_at_20 value: 71.649 - type: recall_at_3 value: 46.663 - type: recall_at_5 value: 53.313 - task: type: Retrieval dataset: name: MTEB HotpotQA type: mteb/hotpotqa config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: main_score value: 71.74199999999999 - type: map_at_1 value: 41.729 - type: map_at_10 value: 63.168 - type: map_at_100 value: 64.132 - type: map_at_1000 value: 64.199 - type: map_at_20 value: 63.736000000000004 - type: map_at_3 value: 59.826 - type: map_at_5 value: 61.882000000000005 - type: mrr_at_1 value: 83.45712356515868 - type: mrr_at_10 value: 87.850342432719 - type: mrr_at_100 value: 88.0016320691113 - type: mrr_at_1000 value: 88.00576596968136 - type: mrr_at_20 value: 87.94463253190389 - type: mrr_at_3 value: 87.13706954760278 - type: mrr_at_5 value: 87.59419311276136 - type: nauc_map_at_1000_diff1 value: 13.635446621095054 - type: nauc_map_at_1000_max value: 18.670632529445633 - type: nauc_map_at_1000_std value: 10.444842636150575 - type: nauc_map_at_100_diff1 value: 13.599262398010783 - type: nauc_map_at_100_max value: 18.636389405484806 - 
type: nauc_map_at_100_std value: 10.460027483576043 - type: nauc_map_at_10_diff1 value: 13.235053919323942 - type: nauc_map_at_10_max value: 18.252140477080047 - type: nauc_map_at_10_std value: 9.9075337042203 - type: nauc_map_at_1_diff1 value: 76.51940497836482 - type: nauc_map_at_1_max value: 51.251419487235474 - type: nauc_map_at_1_std value: 0.16714896857146574 - type: nauc_map_at_20_diff1 value: 13.4178245722222 - type: nauc_map_at_20_max value: 18.40988771210718 - type: nauc_map_at_20_std value: 10.216685163366282 - type: nauc_map_at_3_diff1 value: 13.38370761663418 - type: nauc_map_at_3_max value: 17.760962555456537 - type: nauc_map_at_3_std value: 7.15741965624388 - type: nauc_map_at_5_diff1 value: 13.138133309724855 - type: nauc_map_at_5_max value: 17.871761295251044 - type: nauc_map_at_5_std value: 8.475147426940074 - type: nauc_mrr_at_1000_diff1 value: 75.82650818891959 - type: nauc_mrr_at_1000_max value: 53.6736100668434 - type: nauc_mrr_at_1000_std value: 1.8025016349213916 - type: nauc_mrr_at_100_diff1 value: 75.82530574210111 - type: nauc_mrr_at_100_max value: 53.68067545829002 - type: nauc_mrr_at_100_std value: 1.8147470536495791 - type: nauc_mrr_at_10_diff1 value: 75.8330135686799 - type: nauc_mrr_at_10_max value: 53.78626885349077 - type: nauc_mrr_at_10_std value: 1.7975782717226636 - type: nauc_mrr_at_1_diff1 value: 76.51940497836482 - type: nauc_mrr_at_1_max value: 51.251419487235474 - type: nauc_mrr_at_1_std value: 0.16714896857146574 - type: nauc_mrr_at_20_diff1 value: 75.82783382464166 - type: nauc_mrr_at_20_max value: 53.68364567043885 - type: nauc_mrr_at_20_std value: 1.742037904463963 - type: nauc_mrr_at_3_diff1 value: 75.6944609768663 - type: nauc_mrr_at_3_max value: 53.803941340341666 - type: nauc_mrr_at_3_std value: 1.1849945458077804 - type: nauc_mrr_at_5_diff1 value: 75.73006960604903 - type: nauc_mrr_at_5_max value: 53.62223096420106 - type: nauc_mrr_at_5_std value: 1.6144067563410909 - type: nauc_ndcg_at_1000_diff1 value: 
21.58025241642726 - type: nauc_ndcg_at_1000_max value: 24.675747527001153 - type: nauc_ndcg_at_1000_std value: 13.075943547492718 - type: nauc_ndcg_at_100_diff1 value: 20.30260137544846 - type: nauc_ndcg_at_100_max value: 23.757528813872018 - type: nauc_ndcg_at_100_std value: 13.648994687574062 - type: nauc_ndcg_at_10_diff1 value: 18.995052360997818 - type: nauc_ndcg_at_10_max value: 22.254260808196037 - type: nauc_ndcg_at_10_std value: 11.27212390633054 - type: nauc_ndcg_at_1_diff1 value: 76.51940497836482 - type: nauc_ndcg_at_1_max value: 51.251419487235474 - type: nauc_ndcg_at_1_std value: 0.16714896857146574 - type: nauc_ndcg_at_20_diff1 value: 19.333742380695757 - type: nauc_ndcg_at_20_max value: 22.527779834633364 - type: nauc_ndcg_at_20_std value: 12.161009000707917 - type: nauc_ndcg_at_3_diff1 value: 20.013329040965534 - type: nauc_ndcg_at_3_max value: 21.99692460311921 - type: nauc_ndcg_at_3_std value: 6.8076290638386165 - type: nauc_ndcg_at_5_diff1 value: 19.08226315942471 - type: nauc_ndcg_at_5_max value: 21.71185964294168 - type: nauc_ndcg_at_5_std value: 8.671911269518214 - type: nauc_precision_at_1000_diff1 value: 2.4462475489446764 - type: nauc_precision_at_1000_max value: 29.145662064268578 - type: nauc_precision_at_1000_std value: 49.20704909525856 - type: nauc_precision_at_100_diff1 value: 0.11271196725540299 - type: nauc_precision_at_100_max value: 17.37584606388067 - type: nauc_precision_at_100_std value: 34.66099346244071 - type: nauc_precision_at_10_diff1 value: 2.9923183951227825 - type: nauc_precision_at_10_max value: 14.261884731124264 - type: nauc_precision_at_10_std value: 18.084188795498378 - type: nauc_precision_at_1_diff1 value: 76.51940497836482 - type: nauc_precision_at_1_max value: 51.251419487235474 - type: nauc_precision_at_1_std value: 0.16714896857146574 - type: nauc_precision_at_20_diff1 value: 1.9180293008303761 - type: nauc_precision_at_20_max value: 13.832269193468512 - type: nauc_precision_at_20_std value: 21.65284406055607 
- type: nauc_precision_at_3_diff1 value: 7.226609484731811 - type: nauc_precision_at_3_max value: 15.162908526977272 - type: nauc_precision_at_3_std value: 8.451859972962776 - type: nauc_precision_at_5_diff1 value: 4.705236845538159 - type: nauc_precision_at_5_max value: 14.022910843582666 - type: nauc_precision_at_5_std value: 11.777269322821605 - type: nauc_recall_at_1000_diff1 value: 2.446247548945172 - type: nauc_recall_at_1000_max value: 29.14566206426889 - type: nauc_recall_at_1000_std value: 49.20704909525879 - type: nauc_recall_at_100_diff1 value: 0.1127119672553316 - type: nauc_recall_at_100_max value: 17.37584606388062 - type: nauc_recall_at_100_std value: 34.660993462440686 - type: nauc_recall_at_10_diff1 value: 2.9923183951227927 - type: nauc_recall_at_10_max value: 14.261884731124299 - type: nauc_recall_at_10_std value: 18.08418879549837 - type: nauc_recall_at_1_diff1 value: 76.51940497836482 - type: nauc_recall_at_1_max value: 51.251419487235474 - type: nauc_recall_at_1_std value: 0.16714896857146574 - type: nauc_recall_at_20_diff1 value: 1.918029300830432 - type: nauc_recall_at_20_max value: 13.832269193468566 - type: nauc_recall_at_20_std value: 21.65284406055605 - type: nauc_recall_at_3_diff1 value: 7.226609484731802 - type: nauc_recall_at_3_max value: 15.162908526977182 - type: nauc_recall_at_3_std value: 8.451859972962634 - type: nauc_recall_at_5_diff1 value: 4.705236845538197 - type: nauc_recall_at_5_max value: 14.02291084358265 - type: nauc_recall_at_5_std value: 11.777269322821638 - type: ndcg_at_1 value: 83.45700000000001 - type: ndcg_at_10 value: 71.74199999999999 - type: ndcg_at_100 value: 75.008 - type: ndcg_at_1000 value: 76.242 - type: ndcg_at_20 value: 73.114 - type: ndcg_at_3 value: 67.128 - type: ndcg_at_5 value: 69.645 - type: precision_at_1 value: 83.45700000000001 - type: precision_at_10 value: 14.747 - type: precision_at_100 value: 1.73 - type: precision_at_1000 value: 0.189 - type: precision_at_20 value: 7.8149999999999995 - 
type: precision_at_3 value: 42.323 - type: precision_at_5 value: 27.381 - type: recall_at_1 value: 41.729 - type: recall_at_10 value: 73.734 - type: recall_at_100 value: 86.502 - type: recall_at_1000 value: 94.60499999999999 - type: recall_at_20 value: 78.14999999999999 - type: recall_at_3 value: 63.483999999999995 - type: recall_at_5 value: 68.45400000000001 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 96.4904 - type: ap value: 94.85481918794709 - type: ap_weighted value: 94.85481918794709 - type: f1 value: 96.4898592305707 - type: f1_weighted value: 96.4898592305707 - type: main_score value: 96.4904 - task: type: Retrieval dataset: name: MTEB MSMARCO type: mteb/msmarco config: default split: dev revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: main_score value: 43.692 - type: map_at_1 value: 23.751 - type: map_at_10 value: 36.553999999999995 - type: map_at_100 value: 37.721 - type: map_at_1000 value: 37.763999999999996 - type: map_at_20 value: 37.289 - type: map_at_3 value: 32.643 - type: map_at_5 value: 34.851 - type: mrr_at_1 value: 24.455587392550143 - type: mrr_at_10 value: 37.18388706963206 - type: mrr_at_100 value: 38.28330737932916 - type: mrr_at_1000 value: 38.32054399710817 - type: mrr_at_20 value: 37.8818001216278 - type: mrr_at_3 value: 33.35721107927405 - type: mrr_at_5 value: 35.52483285577843 - type: nauc_map_at_1000_diff1 value: 36.3576177260684 - type: nauc_map_at_1000_max value: 7.854511605962703 - type: nauc_map_at_1000_std value: -17.701121059746878 - type: nauc_map_at_100_diff1 value: 36.356075649230505 - type: nauc_map_at_100_max value: 7.862168042999533 - type: nauc_map_at_100_std value: -17.670102459097233 - type: nauc_map_at_10_diff1 value: 36.22122978875574 - type: nauc_map_at_10_max value: 7.80848606967416 - type: nauc_map_at_10_std value: -18.3265151386167 - type: 
nauc_map_at_1_diff1 value: 39.28605466408357 - type: nauc_map_at_1_max value: 6.20202977590459 - type: nauc_map_at_1_std value: -15.734334090045026 - type: nauc_map_at_20_diff1 value: 36.33637880909657 - type: nauc_map_at_20_max value: 7.843437969476022 - type: nauc_map_at_20_std value: -17.917533363025996 - type: nauc_map_at_3_diff1 value: 36.24864976076741 - type: nauc_map_at_3_max value: 7.420345251835957 - type: nauc_map_at_3_std value: -18.71678497722944 - type: nauc_map_at_5_diff1 value: 36.0789619291824 - type: nauc_map_at_5_max value: 7.7314285669514495 - type: nauc_map_at_5_std value: -18.748688764538706 - type: nauc_mrr_at_1000_diff1 value: 36.23912675623378 - type: nauc_mrr_at_1000_max value: 7.690553436255147 - type: nauc_mrr_at_1000_std value: -17.609526070212304 - type: nauc_mrr_at_100_diff1 value: 36.23782651189002 - type: nauc_mrr_at_100_max value: 7.70075095171647 - type: nauc_mrr_at_100_std value: -17.575714144960184 - type: nauc_mrr_at_10_diff1 value: 36.125229472534215 - type: nauc_mrr_at_10_max value: 7.635472248755658 - type: nauc_mrr_at_10_std value: -18.208166616511086 - type: nauc_mrr_at_1_diff1 value: 39.20986875554532 - type: nauc_mrr_at_1_max value: 6.062668487561363 - type: nauc_mrr_at_1_std value: -16.04130340817602 - type: nauc_mrr_at_20_diff1 value: 36.21207088739667 - type: nauc_mrr_at_20_max value: 7.699610250145951 - type: nauc_mrr_at_20_std value: -17.778245221724028 - type: nauc_mrr_at_3_diff1 value: 36.03957583885305 - type: nauc_mrr_at_3_max value: 7.225515576504581 - type: nauc_mrr_at_3_std value: -18.74478742943741 - type: nauc_mrr_at_5_diff1 value: 35.969152496648974 - type: nauc_mrr_at_5_max value: 7.584059789018233 - type: nauc_mrr_at_5_std value: -18.569374723129332 - type: nauc_ndcg_at_1000_diff1 value: 35.894655529841806 - type: nauc_ndcg_at_1000_max value: 8.579327424366236 - type: nauc_ndcg_at_1000_std value: -16.359677367747896 - type: nauc_ndcg_at_100_diff1 value: 35.89861902483983 - type: nauc_ndcg_at_100_max 
value: 8.830873623962242 - type: nauc_ndcg_at_100_std value: -15.173125564722978 - type: nauc_ndcg_at_10_diff1 value: 35.36499811105169 - type: nauc_ndcg_at_10_max value: 8.449267180956992 - type: nauc_ndcg_at_10_std value: -18.41978802362402 - type: nauc_ndcg_at_1_diff1 value: 39.15422481210622 - type: nauc_ndcg_at_1_max value: 6.055515791928331 - type: nauc_ndcg_at_1_std value: -16.042779610876252 - type: nauc_ndcg_at_20_diff1 value: 35.73402868264468 - type: nauc_ndcg_at_20_max value: 8.695705518210847 - type: nauc_ndcg_at_20_std value: -16.7735829470466 - type: nauc_ndcg_at_3_diff1 value: 35.31358242856231 - type: nauc_ndcg_at_3_max value: 7.645692789058997 - type: nauc_ndcg_at_3_std value: -19.460003734786874 - type: nauc_ndcg_at_5_diff1 value: 35.05216588927143 - type: nauc_ndcg_at_5_max value: 8.216690520604715 - type: nauc_ndcg_at_5_std value: -19.3982054492159 - type: nauc_precision_at_1000_diff1 value: -4.440002625111349 - type: nauc_precision_at_1000_max value: 7.886988951901723 - type: nauc_precision_at_1000_std value: 9.88111187048247 - type: nauc_precision_at_100_diff1 value: 15.728286119463325 - type: nauc_precision_at_100_max value: 13.218650824470654 - type: nauc_precision_at_100_std value: 16.113245895522553 - type: nauc_precision_at_10_diff1 value: 29.51218489610567 - type: nauc_precision_at_10_max value: 10.197432401942912 - type: nauc_precision_at_10_std value: -16.950603431359493 - type: nauc_precision_at_1_diff1 value: 39.15422481210622 - type: nauc_precision_at_1_max value: 6.055515791928331 - type: nauc_precision_at_1_std value: -16.042779610876252 - type: nauc_precision_at_20_diff1 value: 27.825993070397338 - type: nauc_precision_at_20_max value: 11.437632287846007 - type: nauc_precision_at_20_std value: -7.450353566405601 - type: nauc_precision_at_3_diff1 value: 32.14135556796588 - type: nauc_precision_at_3_max value: 7.989252443574163 - type: nauc_precision_at_3_std value: -21.566254595671055 - type: nauc_precision_at_5_diff1 value: 
30.68778685307082 - type: nauc_precision_at_5_max value: 9.332160758499892 - type: nauc_precision_at_5_std value: -20.928554713448914 - type: nauc_recall_at_1000_diff1 value: 25.00810478716878 - type: nauc_recall_at_1000_max value: 46.518165765201644 - type: nauc_recall_at_1000_std value: 61.4734635576085 - type: nauc_recall_at_100_diff1 value: 33.895581318261726 - type: nauc_recall_at_100_max value: 20.10706035872801 - type: nauc_recall_at_100_std value: 24.204226584457047 - type: nauc_recall_at_10_diff1 value: 32.363127359576296 - type: nauc_recall_at_10_max value: 10.729923804989545 - type: nauc_recall_at_10_std value: -18.1335370184202 - type: nauc_recall_at_1_diff1 value: 39.28605466408357 - type: nauc_recall_at_1_max value: 6.20202977590459 - type: nauc_recall_at_1_std value: -15.734334090045026 - type: nauc_recall_at_20_diff1 value: 33.47804003169795 - type: nauc_recall_at_20_max value: 12.781494765263382 - type: nauc_recall_at_20_std value: -9.263970132202658 - type: nauc_recall_at_3_diff1 value: 32.71001429428999 - type: nauc_recall_at_3_max value: 8.353439197382693 - type: nauc_recall_at_3_std value: -21.235097744366954 - type: nauc_recall_at_5_diff1 value: 31.87451464963415 - type: nauc_recall_at_5_max value: 9.635051450907305 - type: nauc_recall_at_5_std value: -21.113235357132794 - type: ndcg_at_1 value: 24.47 - type: ndcg_at_10 value: 43.692 - type: ndcg_at_100 value: 49.211 - type: ndcg_at_1000 value: 50.244 - type: ndcg_at_20 value: 46.278000000000006 - type: ndcg_at_3 value: 35.719 - type: ndcg_at_5 value: 39.652 - type: precision_at_1 value: 24.47 - type: precision_at_10 value: 6.857 - type: precision_at_100 value: 0.9610000000000001 - type: precision_at_1000 value: 0.105 - type: precision_at_20 value: 3.968 - type: precision_at_3 value: 15.181000000000001 - type: precision_at_5 value: 11.117 - type: recall_at_1 value: 23.751 - type: recall_at_10 value: 65.64 - type: recall_at_100 value: 90.967 - type: recall_at_1000 value: 98.738 - type: 
recall_at_20 value: 75.639 - type: recall_at_3 value: 43.927 - type: recall_at_5 value: 53.366 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 98.82580939352485 - type: f1 value: 98.75201754333801 - type: f1_weighted value: 98.82795205108245 - type: main_score value: 98.82580939352485 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 92.29822161422709 - type: f1 value: 77.75210224871594 - type: f1_weighted value: 93.58661422540348 - type: main_score value: 92.29822161422709 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 4672e20407010da34463acc759c162ca9734bca6 metrics: - type: accuracy value: 85.17484868863484 - type: f1 value: 81.94484244487094 - type: f1_weighted value: 85.21022593423332 - type: main_score value: 85.17484868863484 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8 metrics: - type: accuracy value: 89.61667787491594 - type: f1 value: 89.02701927621264 - type: f1_weighted value: 89.56306982022801 - type: main_score value: 89.61667787491594 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: main_score value: 46.318282423948574 - type: v_measure value: 46.318282423948574 - type: v_measure_std value: 0.9729055662461538 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 
35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: main_score value: 44.29033625273981 - type: v_measure value: 44.29033625273981 - type: v_measure_std value: 1.0596383629128594 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 59042f120c80e8afa9cdbb224f67076cec0fc9a7 metrics: - type: main_score value: 33.0526129239962 - type: map value: 33.0526129239962 - type: mrr value: 34.29260046890935 - type: nAUC_map_diff1 value: 12.579738077238032 - type: nAUC_map_max value: -20.936629344962 - type: nAUC_map_std value: -1.6096805784945216 - type: nAUC_mrr_diff1 value: 11.597584463580807 - type: nAUC_mrr_max value: -15.723702838537504 - type: nAUC_mrr_std value: 0.2719172965777737 - task: type: Retrieval dataset: name: MTEB NFCorpus type: mteb/nfcorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: main_score value: 41.486000000000004 - type: map_at_1 value: 6.866 - type: map_at_10 value: 15.895999999999999 - type: map_at_100 value: 21.093 - type: map_at_1000 value: 23.067 - type: map_at_20 value: 18.125 - type: map_at_3 value: 11.421000000000001 - type: map_at_5 value: 13.415 - type: mrr_at_1 value: 52.63157894736842 - type: mrr_at_10 value: 61.486805248415166 - type: mrr_at_100 value: 62.08211009182091 - type: mrr_at_1000 value: 62.10828701365016 - type: mrr_at_20 value: 61.904411187915784 - type: mrr_at_3 value: 59.90712074303407 - type: mrr_at_5 value: 60.91331269349847 - type: nauc_map_at_1000_diff1 value: 25.484625278529403 - type: nauc_map_at_1000_max value: 31.206600396418853 - type: nauc_map_at_1000_std value: 15.569448072357156 - type: nauc_map_at_100_diff1 value: 27.636750226316764 - type: nauc_map_at_100_max value: 29.66992681250722 - type: nauc_map_at_100_std value: 10.570600484002671 - type: nauc_map_at_10_diff1 value: 32.76642525548697 - type: nauc_map_at_10_max value: 21.459225397237663 - type: nauc_map_at_10_std value: 
-3.546494734209264 - type: nauc_map_at_1_diff1 value: 48.8002894871328 - type: nauc_map_at_1_max value: 5.7236722609868815 - type: nauc_map_at_1_std value: -13.283554044471352 - type: nauc_map_at_20_diff1 value: 30.57169701502308 - type: nauc_map_at_20_max value: 25.79666139518404 - type: nauc_map_at_20_std value: 1.781732492989651 - type: nauc_map_at_3_diff1 value: 40.076315947201095 - type: nauc_map_at_3_max value: 12.862524429140054 - type: nauc_map_at_3_std value: -9.188349777126817 - type: nauc_map_at_5_diff1 value: 36.9918718052938 - type: nauc_map_at_5_max value: 16.74234374361876 - type: nauc_map_at_5_std value: -7.818523349307494 - type: nauc_mrr_at_1000_diff1 value: 26.88183002609805 - type: nauc_mrr_at_1000_max value: 47.10209348428658 - type: nauc_mrr_at_1000_std value: 32.067825924992924 - type: nauc_mrr_at_100_diff1 value: 26.871482491566745 - type: nauc_mrr_at_100_max value: 47.11303868498556 - type: nauc_mrr_at_100_std value: 32.08961428818868 - type: nauc_mrr_at_10_diff1 value: 26.6356914977722 - type: nauc_mrr_at_10_max value: 47.091624558810366 - type: nauc_mrr_at_10_std value: 31.942424120660164 - type: nauc_mrr_at_1_diff1 value: 28.19774198483673 - type: nauc_mrr_at_1_max value: 41.44380927834253 - type: nauc_mrr_at_1_std value: 25.18222691885917 - type: nauc_mrr_at_20_diff1 value: 26.86487347109452 - type: nauc_mrr_at_20_max value: 47.1987778214726 - type: nauc_mrr_at_20_std value: 32.143517921610034 - type: nauc_mrr_at_3_diff1 value: 27.34340373236422 - type: nauc_mrr_at_3_max value: 46.358726506276646 - type: nauc_mrr_at_3_std value: 31.74924155572593 - type: nauc_mrr_at_5_diff1 value: 27.209667205060672 - type: nauc_mrr_at_5_max value: 46.79883369072009 - type: nauc_mrr_at_5_std value: 31.655605306670758 - type: nauc_ndcg_at_1000_diff1 value: 18.940195769769687 - type: nauc_ndcg_at_1000_max value: 46.48551313937331 - type: nauc_ndcg_at_1000_std value: 33.64819502089232 - type: nauc_ndcg_at_100_diff1 value: 19.50885253809146 - type: 
nauc_ndcg_at_100_max value: 40.53174462354878 - type: nauc_ndcg_at_100_std value: 28.516152877751118 - type: nauc_ndcg_at_10_diff1 value: 16.01699218096564 - type: nauc_ndcg_at_10_max value: 41.17322878314514 - type: nauc_ndcg_at_10_std value: 29.002233224832196 - type: nauc_ndcg_at_1_diff1 value: 27.443547710102205 - type: nauc_ndcg_at_1_max value: 40.66529763309582 - type: nauc_ndcg_at_1_std value: 24.15016766225869 - type: nauc_ndcg_at_20_diff1 value: 17.541197675685062 - type: nauc_ndcg_at_20_max value: 40.53231266973844 - type: nauc_ndcg_at_20_std value: 29.54096347876548 - type: nauc_ndcg_at_3_diff1 value: 18.649628357473716 - type: nauc_ndcg_at_3_max value: 41.18603570171764 - type: nauc_ndcg_at_3_std value: 27.125524188420396 - type: nauc_ndcg_at_5_diff1 value: 17.519593751448483 - type: nauc_ndcg_at_5_max value: 42.715997890377345 - type: nauc_ndcg_at_5_std value: 27.902627839899868 - type: nauc_precision_at_1000_diff1 value: -15.528797630565155 - type: nauc_precision_at_1000_max value: 13.741640921778671 - type: nauc_precision_at_1000_std value: 44.50896053788372 - type: nauc_precision_at_100_diff1 value: -14.491464489721887 - type: nauc_precision_at_100_max value: 23.136434418999457 - type: nauc_precision_at_100_std value: 49.73145147863128 - type: nauc_precision_at_10_diff1 value: -4.829188942994277 - type: nauc_precision_at_10_max value: 40.327612559528866 - type: nauc_precision_at_10_std value: 39.34919529635044 - type: nauc_precision_at_1_diff1 value: 28.19774198483673 - type: nauc_precision_at_1_max value: 41.44380927834253 - type: nauc_precision_at_1_std value: 25.18222691885917 - type: nauc_precision_at_20_diff1 value: -7.210726293112847 - type: nauc_precision_at_20_max value: 37.195679576636984 - type: nauc_precision_at_20_std value: 45.4597096418357 - type: nauc_precision_at_3_diff1 value: 7.578219537774854 - type: nauc_precision_at_3_max value: 41.59775233475654 - type: nauc_precision_at_3_std value: 30.764584790895118 - type: 
nauc_precision_at_5_diff1 value: 1.655451789039598 - type: nauc_precision_at_5_max value: 43.435739407610455 - type: nauc_precision_at_5_std value: 33.42552263325999 - type: nauc_recall_at_1000_diff1 value: 5.030705700690516 - type: nauc_recall_at_1000_max value: 19.108072570815583 - type: nauc_recall_at_1000_std value: 14.697734974217308 - type: nauc_recall_at_100_diff1 value: 14.746540318132407 - type: nauc_recall_at_100_max value: 21.798705033854795 - type: nauc_recall_at_100_std value: 11.416195108842587 - type: nauc_recall_at_10_diff1 value: 25.548642427860486 - type: nauc_recall_at_10_max value: 18.711677681987474 - type: nauc_recall_at_10_std value: -5.988904818971677 - type: nauc_recall_at_1_diff1 value: 48.8002894871328 - type: nauc_recall_at_1_max value: 5.7236722609868815 - type: nauc_recall_at_1_std value: -13.283554044471352 - type: nauc_recall_at_20_diff1 value: 23.39140739154809 - type: nauc_recall_at_20_max value: 19.351150636155474 - type: nauc_recall_at_20_std value: -2.757280266915132 - type: nauc_recall_at_3_diff1 value: 38.17453576012812 - type: nauc_recall_at_3_max value: 13.47003839643972 - type: nauc_recall_at_3_std value: -8.75780163862688 - type: nauc_recall_at_5_diff1 value: 33.02812855226899 - type: nauc_recall_at_5_max value: 15.477626408978477 - type: nauc_recall_at_5_std value: -9.072206441070708 - type: ndcg_at_1 value: 50.773999999999994 - type: ndcg_at_10 value: 41.486000000000004 - type: ndcg_at_100 value: 39.051 - type: ndcg_at_1000 value: 48.106 - type: ndcg_at_20 value: 39.432 - type: ndcg_at_3 value: 47.428 - type: ndcg_at_5 value: 45.227000000000004 - type: precision_at_1 value: 52.632 - type: precision_at_10 value: 31.146 - type: precision_at_100 value: 10.328 - type: precision_at_1000 value: 2.432 - type: precision_at_20 value: 23.793 - type: precision_at_3 value: 45.201 - type: precision_at_5 value: 39.876 - type: recall_at_1 value: 6.866 - type: recall_at_10 value: 20.447000000000003 - type: recall_at_100 value: 40.607 - 
type: recall_at_1000 value: 73.411 - type: recall_at_20 value: 26.082 - type: recall_at_3 value: 12.484 - type: recall_at_5 value: 15.847 - task: type: Retrieval dataset: name: MTEB NQ type: mteb/nq config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: main_score value: 69.072 - type: map_at_1 value: 45.483000000000004 - type: map_at_10 value: 62.050000000000004 - type: map_at_100 value: 62.693 - type: map_at_1000 value: 62.702999999999996 - type: map_at_20 value: 62.498 - type: map_at_3 value: 58.285 - type: map_at_5 value: 60.711000000000006 - type: mrr_at_1 value: 50.840092699884124 - type: mrr_at_10 value: 64.54635224116673 - type: mrr_at_100 value: 64.9526548702289 - type: mrr_at_1000 value: 64.95908460752281 - type: mrr_at_20 value: 64.82949565799959 - type: mrr_at_3 value: 61.89165701042856 - type: mrr_at_5 value: 63.632676709154026 - type: nauc_map_at_1000_diff1 value: 43.187285304185224 - type: nauc_map_at_1000_max value: 32.39921659632756 - type: nauc_map_at_1000_std value: -5.780901333066553 - type: nauc_map_at_100_diff1 value: 43.184487221204456 - type: nauc_map_at_100_max value: 32.41176116347982 - type: nauc_map_at_100_std value: -5.76422606662383 - type: nauc_map_at_10_diff1 value: 42.967066814031746 - type: nauc_map_at_10_max value: 32.489617364418514 - type: nauc_map_at_10_std value: -6.029045531102664 - type: nauc_map_at_1_diff1 value: 46.16376563218624 - type: nauc_map_at_1_max value: 26.342624776802232 - type: nauc_map_at_1_std value: -7.142171388751972 - type: nauc_map_at_20_diff1 value: 43.15894358608328 - type: nauc_map_at_20_max value: 32.46492198956245 - type: nauc_map_at_20_std value: -5.788373305449195 - type: nauc_map_at_3_diff1 value: 43.231752344608545 - type: nauc_map_at_3_max value: 31.68003009949564 - type: nauc_map_at_3_std value: -8.015235132765458 - type: nauc_map_at_5_diff1 value: 42.86197608819917 - type: nauc_map_at_5_max value: 32.363857571094485 - type: nauc_map_at_5_std value: 
-6.780487416387977 - type: nauc_mrr_at_1000_diff1 value: 43.40542912045782 - type: nauc_mrr_at_1000_max value: 32.8461770324533 - type: nauc_mrr_at_1000_std value: -3.6505425530008204 - type: nauc_mrr_at_100_diff1 value: 43.40233508014468 - type: nauc_mrr_at_100_max value: 32.85598538385942 - type: nauc_mrr_at_100_std value: -3.637477352635459 - type: nauc_mrr_at_10_diff1 value: 43.260179162806054 - type: nauc_mrr_at_10_max value: 32.942643527040474 - type: nauc_mrr_at_10_std value: -3.712052825320437 - type: nauc_mrr_at_1_diff1 value: 46.354919460881206 - type: nauc_mrr_at_1_max value: 29.1760258591106 - type: nauc_mrr_at_1_std value: -4.107225031227406 - type: nauc_mrr_at_20_diff1 value: 43.37092385434311 - type: nauc_mrr_at_20_max value: 32.93390254712846 - type: nauc_mrr_at_20_std value: -3.5719056112132006 - type: nauc_mrr_at_3_diff1 value: 43.1744474040527 - type: nauc_mrr_at_3_max value: 32.741290559777994 - type: nauc_mrr_at_3_std value: -4.72677925120697 - type: nauc_mrr_at_5_diff1 value: 43.108396819975674 - type: nauc_mrr_at_5_max value: 32.970519514893084 - type: nauc_mrr_at_5_std value: -4.090906158975974 - type: nauc_ndcg_at_1000_diff1 value: 42.786664193638714 - type: nauc_ndcg_at_1000_max value: 33.65554095609296 - type: nauc_ndcg_at_1000_std value: -4.024030130584482 - type: nauc_ndcg_at_100_diff1 value: 42.691246775210814 - type: nauc_ndcg_at_100_max value: 34.063232335110875 - type: nauc_ndcg_at_100_std value: -3.477813807415248 - type: nauc_ndcg_at_10_diff1 value: 41.90988990571757 - type: nauc_ndcg_at_10_max value: 34.58934812881633 - type: nauc_ndcg_at_10_std value: -4.3295110195497655 - type: nauc_ndcg_at_1_diff1 value: 46.354919460881206 - type: nauc_ndcg_at_1_max value: 29.1760258591106 - type: nauc_ndcg_at_1_std value: -4.107225031227406 - type: nauc_ndcg_at_20_diff1 value: 42.493206675867114 - type: nauc_ndcg_at_20_max value: 34.562441307459544 - type: nauc_ndcg_at_20_std value: -3.4456116866749107 - type: nauc_ndcg_at_3_diff1 value: 
42.24180336502808 - type: nauc_ndcg_at_3_max value: 33.064267018100594 - type: nauc_ndcg_at_3_std value: -7.786248093572142 - type: nauc_ndcg_at_5_diff1 value: 41.692714787779565 - type: nauc_ndcg_at_5_max value: 34.20502498949156 - type: nauc_ndcg_at_5_std value: -5.979557859282785 - type: nauc_precision_at_1000_diff1 value: -13.779832506640702 - type: nauc_precision_at_1000_max value: 1.243001688631421 - type: nauc_precision_at_1000_std value: 17.351623398622323 - type: nauc_precision_at_100_diff1 value: -11.310526816290297 - type: nauc_precision_at_100_max value: 5.771669506192959 - type: nauc_precision_at_100_std value: 19.917795079540113 - type: nauc_precision_at_10_diff1 value: 2.163699384635286 - type: nauc_precision_at_10_max value: 19.66440698458386 - type: nauc_precision_at_10_std value: 13.689876348315726 - type: nauc_precision_at_1_diff1 value: 46.354919460881206 - type: nauc_precision_at_1_max value: 29.1760258591106 - type: nauc_precision_at_1_std value: -4.107225031227406 - type: nauc_precision_at_20_diff1 value: -3.038735879584471 - type: nauc_precision_at_20_max value: 14.132968299701695 - type: nauc_precision_at_20_std value: 17.78069734664346 - type: nauc_precision_at_3_diff1 value: 21.783760758070095 - type: nauc_precision_at_3_max value: 30.244127986404497 - type: nauc_precision_at_3_std value: -0.12411163467738723 - type: nauc_precision_at_5_diff1 value: 10.980635723302418 - type: nauc_precision_at_5_max value: 25.302293738975575 - type: nauc_precision_at_5_std value: 6.4740817488722024 - type: nauc_recall_at_1000_diff1 value: 34.10343772356593 - type: nauc_recall_at_1000_max value: 80.72497340357538 - type: nauc_recall_at_1000_std value: 69.54564103264093 - type: nauc_recall_at_100_diff1 value: 33.427719956774126 - type: nauc_recall_at_100_max value: 71.54086768335449 - type: nauc_recall_at_100_std value: 49.66157377654885 - type: nauc_recall_at_10_diff1 value: 33.70139560054039 - type: nauc_recall_at_10_max value: 45.47878072860151 - type: 
nauc_recall_at_10_std value: 1.4188516615716378 - type: nauc_recall_at_1_diff1 value: 46.16376563218624 - type: nauc_recall_at_1_max value: 26.342624776802232 - type: nauc_recall_at_1_std value: -7.142171388751972 - type: nauc_recall_at_20_diff1 value: 35.805379874970086 - type: nauc_recall_at_20_max value: 51.80479822253392 - type: nauc_recall_at_20_std value: 13.531467576460143 - type: nauc_recall_at_3_diff1 value: 37.288500141631616 - type: nauc_recall_at_3_max value: 35.07078243516728 - type: nauc_recall_at_3_std value: -10.452926441410405 - type: nauc_recall_at_5_diff1 value: 34.83186104526897 - type: nauc_recall_at_5_max value: 39.58488976496973 - type: nauc_recall_at_5_std value: -6.3049292065708835 - type: ndcg_at_1 value: 50.839999999999996 - type: ndcg_at_10 value: 69.072 - type: ndcg_at_100 value: 71.538 - type: ndcg_at_1000 value: 71.77799999999999 - type: ndcg_at_20 value: 70.41 - type: ndcg_at_3 value: 62.544999999999995 - type: ndcg_at_5 value: 66.33099999999999 - type: precision_at_1 value: 50.839999999999996 - type: precision_at_10 value: 10.495000000000001 - type: precision_at_100 value: 1.1900000000000002 - type: precision_at_1000 value: 0.121 - type: precision_at_20 value: 5.5809999999999995 - type: precision_at_3 value: 27.636 - type: precision_at_5 value: 18.864 - type: recall_at_1 value: 45.483000000000004 - type: recall_at_10 value: 87.483 - type: recall_at_100 value: 97.844 - type: recall_at_1000 value: 99.66199999999999 - type: recall_at_20 value: 92.294 - type: recall_at_3 value: 71.2 - type: recall_at_5 value: 79.753 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: mteb/quora config: default split: test revision: e4e08e0b7dbe3c8700f0daef558ff32256715259 metrics: - type: main_score value: 89.58 - type: map_at_1 value: 71.819 - type: map_at_10 value: 86.04899999999999 - type: map_at_100 value: 86.648 - type: map_at_1000 value: 86.66199999999999 - type: map_at_20 value: 86.441 - type: map_at_3 value: 83.114 - type: map_at_5 
value: 84.981 - type: mrr_at_1 value: 82.62 - type: mrr_at_10 value: 88.62899999999979 - type: mrr_at_100 value: 88.70918591324215 - type: mrr_at_1000 value: 88.70973091492397 - type: mrr_at_20 value: 88.68914765317221 - type: mrr_at_3 value: 87.74999999999979 - type: mrr_at_5 value: 88.36799999999974 - type: nauc_map_at_1000_diff1 value: 77.89207709760448 - type: nauc_map_at_1000_max value: 29.63371361495422 - type: nauc_map_at_1000_std value: -48.628180385874344 - type: nauc_map_at_100_diff1 value: 77.89592179104915 - type: nauc_map_at_100_max value: 29.617171506130756 - type: nauc_map_at_100_std value: -48.66057170774648 - type: nauc_map_at_10_diff1 value: 78.0618161228185 - type: nauc_map_at_10_max value: 29.178490609366737 - type: nauc_map_at_10_std value: -50.74755004592002 - type: nauc_map_at_1_diff1 value: 81.64335579973574 - type: nauc_map_at_1_max value: 21.813832226652174 - type: nauc_map_at_1_std value: -42.57570978190876 - type: nauc_map_at_20_diff1 value: 77.9299081005938 - type: nauc_map_at_20_max value: 29.458718470003888 - type: nauc_map_at_20_std value: -49.63337236763102 - type: nauc_map_at_3_diff1 value: 78.72941448509229 - type: nauc_map_at_3_max value: 26.600997896960056 - type: nauc_map_at_3_std value: -51.889002227479885 - type: nauc_map_at_5_diff1 value: 78.31466610917171 - type: nauc_map_at_5_max value: 28.09863984582896 - type: nauc_map_at_5_std value: -52.14058096096497 - type: nauc_mrr_at_1000_diff1 value: 78.42667263739992 - type: nauc_mrr_at_1000_max value: 31.98996235127974 - type: nauc_mrr_at_1000_std value: -44.380439148429296 - type: nauc_mrr_at_100_diff1 value: 78.42661032698115 - type: nauc_mrr_at_100_max value: 31.991652631740102 - type: nauc_mrr_at_100_std value: -44.37854108460535 - type: nauc_mrr_at_10_diff1 value: 78.39126022544136 - type: nauc_mrr_at_10_max value: 32.02023484451197 - type: nauc_mrr_at_10_std value: -44.561252349176954 - type: nauc_mrr_at_1_diff1 value: 79.21630894647448 - type: nauc_mrr_at_1_max value: 
31.526303156060177 - type: nauc_mrr_at_1_std value: -41.887504422443136 - type: nauc_mrr_at_20_diff1 value: 78.42548039170424 - type: nauc_mrr_at_20_max value: 31.99588275070137 - type: nauc_mrr_at_20_std value: -44.44957722627042 - type: nauc_mrr_at_3_diff1 value: 78.26165151833735 - type: nauc_mrr_at_3_max value: 32.18028826126801 - type: nauc_mrr_at_3_std value: -44.6998237213182 - type: nauc_mrr_at_5_diff1 value: 78.34786430903962 - type: nauc_mrr_at_5_max value: 32.168476272879566 - type: nauc_mrr_at_5_std value: -44.7915919956712 - type: nauc_ndcg_at_1000_diff1 value: 77.79198355957816 - type: nauc_ndcg_at_1000_max value: 31.14363511518406 - type: nauc_ndcg_at_1000_std value: -46.69335151274275 - type: nauc_ndcg_at_100_diff1 value: 77.79898090286419 - type: nauc_ndcg_at_100_max value: 31.115103811629215 - type: nauc_ndcg_at_100_std value: -46.73078913421965 - type: nauc_ndcg_at_10_diff1 value: 77.74856635461343 - type: nauc_ndcg_at_10_max value: 30.279584686212747 - type: nauc_ndcg_at_10_std value: -50.23514662356807 - type: nauc_ndcg_at_1_diff1 value: 79.17833000040999 - type: nauc_ndcg_at_1_max value: 31.703788144510746 - type: nauc_ndcg_at_1_std value: -41.854817402870715 - type: nauc_ndcg_at_20_diff1 value: 77.7380353804671 - type: nauc_ndcg_at_20_max value: 30.622294129001553 - type: nauc_ndcg_at_20_std value: -49.035794761065254 - type: nauc_ndcg_at_3_diff1 value: 77.41476880573593 - type: nauc_ndcg_at_3_max value: 29.015949978243032 - type: nauc_ndcg_at_3_std value: -49.78627087622648 - type: nauc_ndcg_at_5_diff1 value: 77.64439137502896 - type: nauc_ndcg_at_5_max value: 29.444684897492206 - type: nauc_ndcg_at_5_std value: -51.21908400252501 - type: nauc_precision_at_1000_diff1 value: -44.92396459446822 - type: nauc_precision_at_1000_max value: -3.674153720989045 - type: nauc_precision_at_1000_std value: 39.56552468277785 - type: nauc_precision_at_100_diff1 value: -44.75143023259094 - type: nauc_precision_at_100_max value: -3.705280025140011 - type: 
nauc_precision_at_100_std value: 39.433619999113326 - type: nauc_precision_at_10_diff1 value: -41.0651074726579 - type: nauc_precision_at_10_max value: -0.21097985601783667 - type: nauc_precision_at_10_std value: 26.24652824589493 - type: nauc_precision_at_1_diff1 value: 79.17833000040999 - type: nauc_precision_at_1_max value: 31.703788144510746 - type: nauc_precision_at_1_std value: -41.854817402870715 - type: nauc_precision_at_20_diff1 value: -43.368001340920294 - type: nauc_precision_at_20_max value: -2.036990010399129 - type: nauc_precision_at_20_std value: 32.37747041406297 - type: nauc_precision_at_3_diff1 value: -22.089307548346877 - type: nauc_precision_at_3_max value: 6.2280973175296 - type: nauc_precision_at_3_std value: 5.323992514036145 - type: nauc_precision_at_5_diff1 value: -34.07115055244003 - type: nauc_precision_at_5_max value: 2.5955315789198834 - type: nauc_precision_at_5_std value: 16.26096689407332 - type: nauc_recall_at_1000_diff1 value: 58.27703860947467 - type: nauc_recall_at_1000_max value: 68.59835835315768 - type: nauc_recall_at_1000_std value: 77.96687006056064 - type: nauc_recall_at_100_diff1 value: 73.24371223081737 - type: nauc_recall_at_100_max value: 39.55925344664591 - type: nauc_recall_at_100_std value: -32.25605030215798 - type: nauc_recall_at_10_diff1 value: 73.41261201339202 - type: nauc_recall_at_10_max value: 26.822979434062926 - type: nauc_recall_at_10_std value: -74.2909332592806 - type: nauc_recall_at_1_diff1 value: 81.64335579973574 - type: nauc_recall_at_1_max value: 21.813832226652174 - type: nauc_recall_at_1_std value: -42.57570978190876 - type: nauc_recall_at_20_diff1 value: 72.7621297920656 - type: nauc_recall_at_20_max value: 26.02492304096079 - type: nauc_recall_at_20_std value: -77.8724532438279 - type: nauc_recall_at_3_diff1 value: 75.25149312810714 - type: nauc_recall_at_3_max value: 23.20545662481487 - type: nauc_recall_at_3_std value: -59.69689982140521 - type: nauc_recall_at_5_diff1 value: 73.69807273001406 
- type: nauc_recall_at_5_max value: 24.073666798066057 - type: nauc_recall_at_5_std value: -67.91121268130719 - type: ndcg_at_1 value: 82.64 - type: ndcg_at_10 value: 89.58 - type: ndcg_at_100 value: 90.606 - type: ndcg_at_1000 value: 90.676 - type: ndcg_at_20 value: 90.132 - type: ndcg_at_3 value: 86.88 - type: ndcg_at_5 value: 88.40299999999999 - type: precision_at_1 value: 82.64 - type: precision_at_10 value: 13.604 - type: precision_at_100 value: 1.539 - type: precision_at_1000 value: 0.157 - type: precision_at_20 value: 7.188 - type: precision_at_3 value: 38.083 - type: precision_at_5 value: 25.018 - type: recall_at_1 value: 71.819 - type: recall_at_10 value: 96.34700000000001 - type: recall_at_100 value: 99.715 - type: recall_at_1000 value: 99.995 - type: recall_at_20 value: 98.073 - type: recall_at_3 value: 88.57300000000001 - type: recall_at_5 value: 92.908 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: main_score value: 71.18966762070158 - type: v_measure value: 71.18966762070158 - type: v_measure_std value: 2.7498969054457048 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 385e3cb46b4cfa89021f56c4380204149d0efe33 metrics: - type: main_score value: 74.42014716862516 - type: v_measure value: 74.42014716862516 - type: v_measure_std value: 9.909739891410648 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: mteb/scidocs config: default split: test revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88 metrics: - type: main_score value: 25.041999999999998 - type: map_at_1 value: 5.893000000000001 - type: map_at_10 value: 15.260000000000002 - type: map_at_100 value: 18.084 - type: map_at_1000 value: 18.467 - type: map_at_20 value: 16.675 - type: map_at_3 value: 10.526 - type: map_at_5 value: 12.775 - type: mrr_at_1 value: 
28.999999999999996 - type: mrr_at_10 value: 41.03575396825395 - type: mrr_at_100 value: 42.136771862785835 - type: mrr_at_1000 value: 42.16698555415099 - type: mrr_at_20 value: 41.707493696104315 - type: mrr_at_3 value: 37.34999999999998 - type: mrr_at_5 value: 39.59999999999995 - type: nauc_map_at_1000_diff1 value: 12.080002654911883 - type: nauc_map_at_1000_max value: 29.813563682286276 - type: nauc_map_at_1000_std value: 20.36659817908673 - type: nauc_map_at_100_diff1 value: 12.108735517749706 - type: nauc_map_at_100_max value: 29.76830671710955 - type: nauc_map_at_100_std value: 20.3433621032846 - type: nauc_map_at_10_diff1 value: 12.91575031185637 - type: nauc_map_at_10_max value: 29.427600958386318 - type: nauc_map_at_10_std value: 16.89867275177153 - type: nauc_map_at_1_diff1 value: 19.353069488987916 - type: nauc_map_at_1_max value: 17.093914951159693 - type: nauc_map_at_1_std value: 8.19886078055046 - type: nauc_map_at_20_diff1 value: 11.977233457943113 - type: nauc_map_at_20_max value: 29.171812822948805 - type: nauc_map_at_20_std value: 18.780517506173965 - type: nauc_map_at_3_diff1 value: 14.453129464176092 - type: nauc_map_at_3_max value: 25.801958649112077 - type: nauc_map_at_3_std value: 11.572823684429643 - type: nauc_map_at_5_diff1 value: 13.167155808104997 - type: nauc_map_at_5_max value: 27.355626948365792 - type: nauc_map_at_5_std value: 14.414151839192183 - type: nauc_mrr_at_1000_diff1 value: 17.262104643988636 - type: nauc_mrr_at_1000_max value: 23.991373837217058 - type: nauc_mrr_at_1000_std value: 12.44755488671623 - type: nauc_mrr_at_100_diff1 value: 17.267280132318703 - type: nauc_mrr_at_100_max value: 24.022189287889294 - type: nauc_mrr_at_100_std value: 12.480695500214788 - type: nauc_mrr_at_10_diff1 value: 17.012383998246268 - type: nauc_mrr_at_10_max value: 24.192637911171722 - type: nauc_mrr_at_10_std value: 12.524608847408917 - type: nauc_mrr_at_1_diff1 value: 19.43518811038007 - type: nauc_mrr_at_1_max value: 17.747482933395602 - 
type: nauc_mrr_at_1_std value: 8.410779775558684 - type: nauc_mrr_at_20_diff1 value: 17.202663281407446 - type: nauc_mrr_at_20_max value: 24.091991130543118 - type: nauc_mrr_at_20_std value: 12.503814263019908 - type: nauc_mrr_at_3_diff1 value: 17.52733013432995 - type: nauc_mrr_at_3_max value: 23.569459518780214 - type: nauc_mrr_at_3_std value: 11.770846827520726 - type: nauc_mrr_at_5_diff1 value: 17.10817561975543 - type: nauc_mrr_at_5_max value: 23.945141435234678 - type: nauc_mrr_at_5_std value: 12.034468615317719 - type: nauc_ndcg_at_1000_diff1 value: 12.317811393346936 - type: nauc_ndcg_at_1000_max value: 30.809991350156103 - type: nauc_ndcg_at_1000_std value: 24.517501065205067 - type: nauc_ndcg_at_100_diff1 value: 12.824804203182936 - type: nauc_ndcg_at_100_max value: 30.895499817010748 - type: nauc_ndcg_at_100_std value: 25.424376279745402 - type: nauc_ndcg_at_10_diff1 value: 13.32724552457439 - type: nauc_ndcg_at_10_max value: 30.409088666807456 - type: nauc_ndcg_at_10_std value: 18.216330475714113 - type: nauc_ndcg_at_1_diff1 value: 19.43518811038007 - type: nauc_ndcg_at_1_max value: 17.747482933395602 - type: nauc_ndcg_at_1_std value: 8.410779775558684 - type: nauc_ndcg_at_20_diff1 value: 12.224399111852902 - type: nauc_ndcg_at_20_max value: 29.86352330445272 - type: nauc_ndcg_at_20_std value: 21.196937851331807 - type: nauc_ndcg_at_3_diff1 value: 15.367489533734027 - type: nauc_ndcg_at_3_max value: 26.76486390741532 - type: nauc_ndcg_at_3_std value: 12.606077508789923 - type: nauc_ndcg_at_5_diff1 value: 13.831157482390935 - type: nauc_ndcg_at_5_max value: 28.070226983968904 - type: nauc_ndcg_at_5_std value: 15.236787943125435 - type: nauc_precision_at_1000_diff1 value: 0.016122957101357048 - type: nauc_precision_at_1000_max value: 24.380929903557334 - type: nauc_precision_at_1000_std value: 34.54045112720052 - type: nauc_precision_at_100_diff1 value: 7.255224788507301 - type: nauc_precision_at_100_max value: 27.98453788447542 - type: 
nauc_precision_at_100_std value: 35.38999555441665 - type: nauc_precision_at_10_diff1 value: 9.69185099834181 - type: nauc_precision_at_10_max value: 32.532315522580454 - type: nauc_precision_at_10_std value: 21.48948348473612 - type: nauc_precision_at_1_diff1 value: 19.43518811038007 - type: nauc_precision_at_1_max value: 17.747482933395602 - type: nauc_precision_at_1_std value: 8.410779775558684 - type: nauc_precision_at_20_diff1 value: 6.964076536695672 - type: nauc_precision_at_20_max value: 29.30087236410044 - type: nauc_precision_at_20_std value: 26.413625895571986 - type: nauc_precision_at_3_diff1 value: 14.145134359925155 - type: nauc_precision_at_3_max value: 29.915650960808303 - type: nauc_precision_at_3_std value: 14.095370019867797 - type: nauc_precision_at_5_diff1 value: 11.043933558522692 - type: nauc_precision_at_5_max value: 30.93016505807111 - type: nauc_precision_at_5_std value: 17.749256196062603 - type: nauc_recall_at_1000_diff1 value: -0.7776817772090345 - type: nauc_recall_at_1000_max value: 23.094717340324518 - type: nauc_recall_at_1000_std value: 37.189908681396425 - type: nauc_recall_at_100_diff1 value: 6.887748742013364 - type: nauc_recall_at_100_max value: 27.00798435230277 - type: nauc_recall_at_100_std value: 35.908147807345344 - type: nauc_recall_at_10_diff1 value: 9.605632017480751 - type: nauc_recall_at_10_max value: 31.845202901168655 - type: nauc_recall_at_10_std value: 21.497414586634683 - type: nauc_recall_at_1_diff1 value: 19.353069488987916 - type: nauc_recall_at_1_max value: 17.093914951159693 - type: nauc_recall_at_1_std value: 8.19886078055046 - type: nauc_recall_at_20_diff1 value: 6.927503731844782 - type: nauc_recall_at_20_max value: 28.611698183338202 - type: nauc_recall_at_20_std value: 26.69018660149911 - type: nauc_recall_at_3_diff1 value: 14.043724087062268 - type: nauc_recall_at_3_max value: 29.269835821380465 - type: nauc_recall_at_3_std value: 14.104419605998094 - type: nauc_recall_at_5_diff1 value: 
11.017319452873336 - type: nauc_recall_at_5_max value: 30.295720628306228 - type: nauc_recall_at_5_std value: 17.758048545573825 - type: ndcg_at_1 value: 28.999999999999996 - type: ndcg_at_10 value: 25.041999999999998 - type: ndcg_at_100 value: 35.045 - type: ndcg_at_1000 value: 40.803 - type: ndcg_at_20 value: 28.584 - type: ndcg_at_3 value: 23.249 - type: ndcg_at_5 value: 20.533 - type: precision_at_1 value: 28.999999999999996 - type: precision_at_10 value: 13.120000000000001 - type: precision_at_100 value: 2.7470000000000003 - type: precision_at_1000 value: 0.41200000000000003 - type: precision_at_20 value: 8.584999999999999 - type: precision_at_3 value: 21.633 - type: precision_at_5 value: 18.099999999999998 - type: recall_at_1 value: 5.893000000000001 - type: recall_at_10 value: 26.567 - type: recall_at_100 value: 55.800000000000004 - type: recall_at_1000 value: 83.608 - type: recall_at_20 value: 34.86 - type: recall_at_3 value: 13.153 - type: recall_at_5 value: 18.323 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: 20a6d6f312dd54037fe07a32d58e5e168867909d metrics: - type: cosine_pearson value: 86.57284584320382 - type: cosine_spearman value: 82.20531642680812 - type: euclidean_pearson value: 83.94261758556554 - type: euclidean_spearman value: 82.20721497738559 - type: main_score value: 82.20531642680812 - type: manhattan_pearson value: 84.15902154703083 - type: manhattan_spearman value: 82.19506027155957 - type: pearson value: 86.57284584320382 - type: spearman value: 82.20531642680812 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cosine_pearson value: 86.28047602146931 - type: cosine_spearman value: 79.51504881448884 - type: euclidean_pearson value: 83.10545189967856 - type: euclidean_spearman value: 79.50586960492797 - type: main_score value: 79.51504881448884 - type: manhattan_pearson 
value: 83.44244457500889 - type: manhattan_spearman value: 79.730303339846 - type: pearson value: 86.28047602146931 - type: spearman value: 79.51504881448884 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cosine_pearson value: 88.74723553048702 - type: cosine_spearman value: 89.18936052329725 - type: euclidean_pearson value: 88.90400878928668 - type: euclidean_spearman value: 89.19174821431281 - type: main_score value: 89.18936052329725 - type: manhattan_pearson value: 88.81504628424054 - type: manhattan_spearman value: 89.18063294142597 - type: pearson value: 88.74723553048702 - type: spearman value: 89.18936052329725 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cosine_pearson value: 86.45403437836023 - type: cosine_spearman value: 85.14654611519086 - type: euclidean_pearson value: 85.87509624462743 - type: euclidean_spearman value: 85.1391108856681 - type: main_score value: 85.14654611519086 - type: manhattan_pearson value: 85.96635794953866 - type: manhattan_spearman value: 85.3271371527667 - type: pearson value: 86.45403437836023 - type: spearman value: 85.14654611519086 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cosine_pearson value: 87.84742260009705 - type: cosine_spearman value: 89.10215217191254 - type: euclidean_pearson value: 88.97393286325477 - type: euclidean_spearman value: 89.1014105509662 - type: main_score value: 89.10215217191254 - type: manhattan_pearson value: 89.31698781090151 - type: manhattan_spearman value: 89.53000001764433 - type: pearson value: 87.84742260009705 - type: spearman value: 89.10215217191254 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test 
revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cosine_pearson value: 85.22397535461835 - type: cosine_spearman value: 87.14066355879785 - type: euclidean_pearson value: 86.31393364087295 - type: euclidean_spearman value: 87.14018892702765 - type: main_score value: 87.14066355879785 - type: manhattan_pearson value: 86.36366855248434 - type: manhattan_spearman value: 87.20858630423012 - type: pearson value: 85.22397535461835 - type: spearman value: 87.14066355879785 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: cosine_pearson value: 90.66131612061355 - type: cosine_spearman value: 90.97082650129164 - type: euclidean_pearson value: 90.98181906744969 - type: euclidean_spearman value: 90.99008476850047 - type: main_score value: 90.97082650129164 - type: manhattan_pearson value: 90.75245040709021 - type: manhattan_spearman value: 90.6199877691265 - type: pearson value: 90.66131612061355 - type: spearman value: 90.97082650129164 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 metrics: - type: cosine_pearson value: 67.270656447085 - type: cosine_spearman value: 67.82870469746828 - type: euclidean_pearson value: 69.03857775285664 - type: euclidean_spearman value: 67.74455108773341 - type: main_score value: 67.82870469746828 - type: manhattan_pearson value: 69.25304172245812 - type: manhattan_spearman value: 68.00987097916055 - type: pearson value: 67.270656447085 - type: spearman value: 67.82870469746828 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cosine_pearson value: 87.17245205384889 - type: cosine_spearman value: 87.7360146030987 - type: euclidean_pearson value: 87.48919412794656 - 
type: euclidean_spearman value: 87.7312047878383 - type: main_score value: 87.7360146030987 - type: manhattan_pearson value: 87.61476224354806 - type: manhattan_spearman value: 87.95220889254693 - type: pearson value: 87.17245205384889 - type: spearman value: 87.7360146030987 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: main_score value: 88.43547871921146 - type: map value: 88.43547871921146 - type: mrr value: 96.5564473652709 - type: nAUC_map_diff1 value: -13.66029392579231 - type: nAUC_map_max value: 50.325613574053506 - type: nAUC_map_std value: 60.02986231275796 - type: nAUC_mrr_diff1 value: 23.83821476411125 - type: nAUC_mrr_max value: 86.72643311769906 - type: nAUC_mrr_std value: 72.12741063469213 - task: type: Retrieval dataset: name: MTEB SciFact type: mteb/scifact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: main_score value: 78.233 - type: map_at_1 value: 61.49400000000001 - type: map_at_10 value: 73.30600000000001 - type: map_at_100 value: 73.719 - type: map_at_1000 value: 73.724 - type: map_at_20 value: 73.611 - type: map_at_3 value: 70.626 - type: map_at_5 value: 72.417 - type: mrr_at_1 value: 64.66666666666666 - type: mrr_at_10 value: 74.30357142857143 - type: mrr_at_100 value: 74.56950898079988 - type: mrr_at_1000 value: 74.57295833098681 - type: mrr_at_20 value: 74.46165223665226 - type: mrr_at_3 value: 72.3888888888889 - type: mrr_at_5 value: 73.60555555555557 - type: nauc_map_at_1000_diff1 value: 76.51524604780636 - type: nauc_map_at_1000_max value: 53.48521938401881 - type: nauc_map_at_1000_std value: -7.347799382158861 - type: nauc_map_at_100_diff1 value: 76.5122888096236 - type: nauc_map_at_100_max value: 53.49221847471618 - type: nauc_map_at_100_std value: -7.329683735681086 - type: nauc_map_at_10_diff1 value: 76.30928630674504 - type: nauc_map_at_10_max value: 
53.00102977185941 - type: nauc_map_at_10_std value: -7.7467740085108705 - type: nauc_map_at_1_diff1 value: 79.54189281784247 - type: nauc_map_at_1_max value: 46.630071622109526 - type: nauc_map_at_1_std value: -14.395943134644112 - type: nauc_map_at_20_diff1 value: 76.41604361947962 - type: nauc_map_at_20_max value: 53.578883876146875 - type: nauc_map_at_20_std value: -7.403103451288041 - type: nauc_map_at_3_diff1 value: 76.25911617571941 - type: nauc_map_at_3_max value: 49.140287380513605 - type: nauc_map_at_3_std value: -11.35992449218983 - type: nauc_map_at_5_diff1 value: 76.35122077770336 - type: nauc_map_at_5_max value: 52.1744367901208 - type: nauc_map_at_5_std value: -7.85753955055384 - type: nauc_mrr_at_1000_diff1 value: 76.97223309515867 - type: nauc_mrr_at_1000_max value: 57.263787498613326 - type: nauc_mrr_at_1000_std value: -4.884090708840035 - type: nauc_mrr_at_100_diff1 value: 76.97312970894603 - type: nauc_mrr_at_100_max value: 57.26850730446478 - type: nauc_mrr_at_100_std value: -4.875200894216617 - type: nauc_mrr_at_10_diff1 value: 76.65927674223613 - type: nauc_mrr_at_10_max value: 57.30979763941454 - type: nauc_mrr_at_10_std value: -4.863331094022142 - type: nauc_mrr_at_1_diff1 value: 80.0454932568644 - type: nauc_mrr_at_1_max value: 56.76038421319305 - type: nauc_mrr_at_1_std value: -4.101939392632653 - type: nauc_mrr_at_20_diff1 value: 76.87237970440503 - type: nauc_mrr_at_20_max value: 57.33843605225869 - type: nauc_mrr_at_20_std value: -4.96248984417978 - type: nauc_mrr_at_3_diff1 value: 76.74130186666727 - type: nauc_mrr_at_3_max value: 56.19313244846155 - type: nauc_mrr_at_3_std value: -5.684365934009136 - type: nauc_mrr_at_5_diff1 value: 76.66406918799962 - type: nauc_mrr_at_5_max value: 57.56110093228628 - type: nauc_mrr_at_5_std value: -3.7464413085588073 - type: nauc_ndcg_at_1000_diff1 value: 76.19194173971773 - type: nauc_ndcg_at_1000_max value: 55.57464600170693 - type: nauc_ndcg_at_1000_std value: -6.0761689532372625 - type: 
nauc_ndcg_at_100_diff1 value: 76.14631273843654 - type: nauc_ndcg_at_100_max value: 55.72246565373382 - type: nauc_ndcg_at_100_std value: -5.595160698860595 - type: nauc_ndcg_at_10_diff1 value: 75.0108223611192 - type: nauc_ndcg_at_10_max value: 55.27894212877493 - type: nauc_ndcg_at_10_std value: -6.968331740214591 - type: nauc_ndcg_at_1_diff1 value: 80.0454932568644 - type: nauc_ndcg_at_1_max value: 56.76038421319305 - type: nauc_ndcg_at_1_std value: -4.101939392632653 - type: nauc_ndcg_at_20_diff1 value: 75.54887755702472 - type: nauc_ndcg_at_20_max value: 56.406879417251496 - type: nauc_ndcg_at_20_std value: -6.495231061329629 - type: nauc_ndcg_at_3_diff1 value: 75.03620356688509 - type: nauc_ndcg_at_3_max value: 52.147381077773424 - type: nauc_ndcg_at_3_std value: -8.448005688956199 - type: nauc_ndcg_at_5_diff1 value: 75.1195898074229 - type: nauc_ndcg_at_5_max value: 54.2321033861173 - type: nauc_ndcg_at_5_std value: -5.882690780895338 - type: nauc_precision_at_1000_diff1 value: -28.081979732100532 - type: nauc_precision_at_1000_max value: 35.055348014832916 - type: nauc_precision_at_1000_std value: 59.61280468927384 - type: nauc_precision_at_100_diff1 value: -25.112740730587458 - type: nauc_precision_at_100_max value: 38.26331300116496 - type: nauc_precision_at_100_std value: 62.46316222328831 - type: nauc_precision_at_10_diff1 value: -2.6766206473658833 - type: nauc_precision_at_10_max value: 45.95321867204845 - type: nauc_precision_at_10_std value: 45.07212468670564 - type: nauc_precision_at_1_diff1 value: 80.0454932568644 - type: nauc_precision_at_1_max value: 56.76038421319305 - type: nauc_precision_at_1_std value: -4.101939392632653 - type: nauc_precision_at_20_diff1 value: -10.698911116738385 - type: nauc_precision_at_20_max value: 43.467275950182994 - type: nauc_precision_at_20_std value: 48.00467321991766 - type: nauc_precision_at_3_diff1 value: 33.6344708541193 - type: nauc_precision_at_3_max value: 49.309242331670504 - type: nauc_precision_at_3_std 
value: 21.02940391379915 - type: nauc_precision_at_5_diff1 value: 13.560415600596318 - type: nauc_precision_at_5_max value: 48.918726500100085 - type: nauc_precision_at_5_std value: 39.940930429172184 - type: nauc_recall_at_1000_diff1 value: .nan - type: nauc_recall_at_1000_max value: .nan - type: nauc_recall_at_1000_std value: .nan - type: nauc_recall_at_100_diff1 value: 70.82166199813196 - type: nauc_recall_at_100_max value: 76.6106442577042 - type: nauc_recall_at_100_std value: 66.47992530345513 - type: nauc_recall_at_10_diff1 value: 62.68908885556092 - type: nauc_recall_at_10_max value: 58.14262437741839 - type: nauc_recall_at_10_std value: -12.946717875063369 - type: nauc_recall_at_1_diff1 value: 79.54189281784247 - type: nauc_recall_at_1_max value: 46.630071622109526 - type: nauc_recall_at_1_std value: -14.395943134644112 - type: nauc_recall_at_20_diff1 value: 65.79470497876567 - type: nauc_recall_at_20_max value: 71.68308183488456 - type: nauc_recall_at_20_std value: -12.556850697268453 - type: nauc_recall_at_3_diff1 value: 68.3240211318129 - type: nauc_recall_at_3_max value: 45.05998217275036 - type: nauc_recall_at_3_std value: -14.23179772593869 - type: nauc_recall_at_5_diff1 value: 67.53366869904056 - type: nauc_recall_at_5_max value: 53.57935627081027 - type: nauc_recall_at_5_std value: -3.3271112904853393 - type: ndcg_at_1 value: 64.667 - type: ndcg_at_10 value: 78.233 - type: ndcg_at_100 value: 79.806 - type: ndcg_at_1000 value: 79.92099999999999 - type: ndcg_at_20 value: 79.006 - type: ndcg_at_3 value: 74.018 - type: ndcg_at_5 value: 76.334 - type: precision_at_1 value: 64.667 - type: precision_at_10 value: 10.4 - type: precision_at_100 value: 1.1199999999999999 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_20 value: 5.383 - type: precision_at_3 value: 29.444 - type: precision_at_5 value: 19.467000000000002 - type: recall_at_1 value: 61.49400000000001 - type: recall_at_10 value: 92.156 - type: recall_at_100 value: 99.167 - 
type: recall_at_1000 value: 100.0 - type: recall_at_20 value: 94.833 - type: recall_at_3 value: 80.833 - type: recall_at_5 value: 86.6 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cosine_accuracy value: 99.8039603960396 - type: cosine_accuracy_threshold value: 84.54211950302124 - type: cosine_ap value: 95.59056372734358 - type: cosine_f1 value: 90.1394422310757 - type: cosine_f1_threshold value: 84.54211950302124 - type: cosine_precision value: 89.78174603174604 - type: cosine_recall value: 90.5 - type: dot_accuracy value: 99.80594059405941 - type: dot_accuracy_threshold value: 85.57180166244507 - type: dot_ap value: 95.53453431914399 - type: dot_f1 value: 90.10442565887618 - type: dot_f1_threshold value: 84.59715843200684 - type: dot_precision value: 89.61424332344214 - type: dot_recall value: 90.60000000000001 - type: euclidean_accuracy value: 99.8039603960396 - type: euclidean_accuracy_threshold value: 53.253382444381714 - type: euclidean_ap value: 95.5850992402159 - type: euclidean_f1 value: 90.09457441513192 - type: euclidean_f1_threshold value: 55.725520849227905 - type: euclidean_precision value: 89.69276511397423 - type: euclidean_recall value: 90.5 - type: main_score value: 95.7485189884476 - type: manhattan_accuracy value: 99.81485148514851 - type: manhattan_accuracy_threshold value: 3491.29638671875 - type: manhattan_ap value: 95.7485189884476 - type: manhattan_f1 value: 90.464048954615 - type: manhattan_f1_threshold value: 3491.29638671875 - type: manhattan_precision value: 92.2996878251821 - type: manhattan_recall value: 88.7 - type: max_ap value: 95.7485189884476 - type: max_f1 value: 90.464048954615 - type: max_precision value: 92.2996878251821 - type: max_recall value: 90.60000000000001 - type: similarity_accuracy value: 99.8039603960396 - type: 
similarity_accuracy_threshold value: 84.54211950302124 - type: similarity_ap value: 95.59056372734358 - type: similarity_f1 value: 90.1394422310757 - type: similarity_f1_threshold value: 84.54211950302124 - type: similarity_precision value: 89.78174603174604 - type: similarity_recall value: 90.5 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: main_score value: 78.49205191950675 - type: v_measure value: 78.49205191950675 - type: v_measure_std value: 2.84869550699959 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: main_score value: 48.90421736513028 - type: v_measure value: 48.90421736513028 - type: v_measure_std value: 1.6875865714471023 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: main_score value: 52.9874730481696 - type: map value: 52.9874730481696 - type: mrr value: 53.85867604617604 - type: nAUC_map_diff1 value: 39.633429293407616 - type: nAUC_map_max value: 10.236807988858546 - type: nAUC_map_std value: 10.276522217929674 - type: nAUC_mrr_diff1 value: 40.0543079218377 - type: nAUC_mrr_max value: 10.96209807382042 - type: nAUC_mrr_std value: 10.524400196109918 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cosine_pearson value: 30.727801109114232 - type: cosine_spearman value: 31.66058223980157 - type: dot_pearson value: 30.78818248622866 - type: dot_spearman value: 31.525158776890265 - type: main_score value: 31.66058223980157 - type: pearson value: 
30.727801109114232 - type: spearman value: 31.66058223980157 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: mteb/trec-covid config: default split: test revision: bb9466bac8153a0349341eb1b22e06409e78ef4e metrics: - type: main_score value: 85.206 - type: map_at_1 value: 0.246 - type: map_at_10 value: 2.1950000000000003 - type: map_at_100 value: 14.179 - type: map_at_1000 value: 35.037 - type: map_at_20 value: 4.143 - type: map_at_3 value: 0.7100000000000001 - type: map_at_5 value: 1.135 - type: mrr_at_1 value: 94.0 - type: mrr_at_10 value: 96.66666666666666 - type: mrr_at_100 value: 96.66666666666666 - type: mrr_at_1000 value: 96.66666666666666 - type: mrr_at_20 value: 96.66666666666666 - type: mrr_at_3 value: 96.66666666666666 - type: mrr_at_5 value: 96.66666666666666 - type: nauc_map_at_1000_diff1 value: -4.6264497624527525 - type: nauc_map_at_1000_max value: 44.594457564749355 - type: nauc_map_at_1000_std value: 73.17642341400133 - type: nauc_map_at_100_diff1 value: 23.451335157405726 - type: nauc_map_at_100_max value: 25.426398857299525 - type: nauc_map_at_100_std value: 64.07416694472633 - type: nauc_map_at_10_diff1 value: 46.57568738568346 - type: nauc_map_at_10_max value: 9.693233249079238 - type: nauc_map_at_10_std value: 28.549530265164357 - type: nauc_map_at_1_diff1 value: 53.48238396620123 - type: nauc_map_at_1_max value: 0.33476619393733076 - type: nauc_map_at_1_std value: 8.906362219128463 - type: nauc_map_at_20_diff1 value: 39.40719602207749 - type: nauc_map_at_20_max value: 9.635915072074045 - type: nauc_map_at_20_std value: 35.15634791346394 - type: nauc_map_at_3_diff1 value: 53.11784737840137 - type: nauc_map_at_3_max value: 3.059682761072153 - type: nauc_map_at_3_std value: 21.310633086556617 - type: nauc_map_at_5_diff1 value: 49.91570701185436 - type: nauc_map_at_5_max value: 8.045082896244576 - type: nauc_map_at_5_std value: 20.597686235051647 - type: nauc_mrr_at_1000_diff1 value: 41.98412698412726 - type: nauc_mrr_at_1000_max value: 
78.24463118580779 - type: nauc_mrr_at_1000_std value: 0.30812324930028195 - type: nauc_mrr_at_100_diff1 value: 41.98412698412726 - type: nauc_mrr_at_100_max value: 78.24463118580779 - type: nauc_mrr_at_100_std value: 0.30812324930028195 - type: nauc_mrr_at_10_diff1 value: 41.98412698412726 - type: nauc_mrr_at_10_max value: 78.24463118580779 - type: nauc_mrr_at_10_std value: 0.30812324930028195 - type: nauc_mrr_at_1_diff1 value: 38.62433862433873 - type: nauc_mrr_at_1_max value: 80.78120136943666 - type: nauc_mrr_at_1_std value: -10.768751945222197 - type: nauc_mrr_at_20_diff1 value: 41.98412698412726 - type: nauc_mrr_at_20_max value: 78.24463118580779 - type: nauc_mrr_at_20_std value: 0.30812324930028195 - type: nauc_mrr_at_3_diff1 value: 41.98412698412726 - type: nauc_mrr_at_3_max value: 78.24463118580779 - type: nauc_mrr_at_3_std value: 0.30812324930028195 - type: nauc_mrr_at_5_diff1 value: 41.98412698412726 - type: nauc_mrr_at_5_max value: 78.24463118580779 - type: nauc_mrr_at_5_std value: 0.30812324930028195 - type: nauc_ndcg_at_1000_diff1 value: 0.5174948602880207 - type: nauc_ndcg_at_1000_max value: 48.60686602077053 - type: nauc_ndcg_at_1000_std value: 75.72456343175277 - type: nauc_ndcg_at_100_diff1 value: -20.747252137999254 - type: nauc_ndcg_at_100_max value: 49.985132618254994 - type: nauc_ndcg_at_100_std value: 61.096383293836574 - type: nauc_ndcg_at_10_diff1 value: 6.791377920463332 - type: nauc_ndcg_at_10_max value: 57.50019332833286 - type: nauc_ndcg_at_10_std value: 49.201028841219426 - type: nauc_ndcg_at_1_diff1 value: 54.92683440362145 - type: nauc_ndcg_at_1_max value: 83.8667228129276 - type: nauc_ndcg_at_1_std value: 1.6738604063586122 - type: nauc_ndcg_at_20_diff1 value: -5.1948699196314925 - type: nauc_ndcg_at_20_max value: 54.483087684806556 - type: nauc_ndcg_at_20_std value: 50.54823818118781 - type: nauc_ndcg_at_3_diff1 value: 26.267246500164372 - type: nauc_ndcg_at_3_max value: 63.0173212926611 - type: nauc_ndcg_at_3_std value: 
41.025597406368256 - type: nauc_ndcg_at_5_diff1 value: 16.910185454343036 - type: nauc_ndcg_at_5_max value: 60.9328683868778 - type: nauc_ndcg_at_5_std value: 36.70169905857712 - type: nauc_precision_at_1000_diff1 value: -46.374447765983525 - type: nauc_precision_at_1000_max value: 35.36052337813863 - type: nauc_precision_at_1000_std value: 14.219220668161018 - type: nauc_precision_at_100_diff1 value: -29.7838083657744 - type: nauc_precision_at_100_max value: 43.93589400385112 - type: nauc_precision_at_100_std value: 55.425045718579945 - type: nauc_precision_at_10_diff1 value: -12.016613405227687 - type: nauc_precision_at_10_max value: 57.79924427743131 - type: nauc_precision_at_10_std value: 49.022036703550675 - type: nauc_precision_at_1_diff1 value: 38.62433862433873 - type: nauc_precision_at_1_max value: 80.78120136943666 - type: nauc_precision_at_1_std value: -10.768751945222197 - type: nauc_precision_at_20_diff1 value: -23.95633847880195 - type: nauc_precision_at_20_max value: 48.34715917258276 - type: nauc_precision_at_20_std value: 48.82198285255887 - type: nauc_precision_at_3_diff1 value: 6.871296905858807 - type: nauc_precision_at_3_max value: 70.54805793285054 - type: nauc_precision_at_3_std value: 44.65108624094803 - type: nauc_precision_at_5_diff1 value: -9.074932448759695 - type: nauc_precision_at_5_max value: 67.41284242437573 - type: nauc_precision_at_5_std value: 23.876891983919577 - type: nauc_recall_at_1000_diff1 value: 8.142288830293255 - type: nauc_recall_at_1000_max value: 38.85182826835104 - type: nauc_recall_at_1000_std value: 68.60783819217335 - type: nauc_recall_at_100_diff1 value: 34.262914076287466 - type: nauc_recall_at_100_max value: 12.87009658528838 - type: nauc_recall_at_100_std value: 56.21330603762995 - type: nauc_recall_at_10_diff1 value: 49.33830945338758 - type: nauc_recall_at_10_max value: 0.3539875530671406 - type: nauc_recall_at_10_std value: 26.85864465557644 - type: nauc_recall_at_1_diff1 value: 53.48238396620123 - type: 
nauc_recall_at_1_max value: 0.33476619393733076 - type: nauc_recall_at_1_std value: 8.906362219128463 - type: nauc_recall_at_20_diff1 value: 44.21928181266254 - type: nauc_recall_at_20_max value: -0.9198356057088594 - type: nauc_recall_at_20_std value: 31.484376992896784 - type: nauc_recall_at_3_diff1 value: 53.038093080990876 - type: nauc_recall_at_3_max value: -1.4170895916973003 - type: nauc_recall_at_3_std value: 21.890202855574497 - type: nauc_recall_at_5_diff1 value: 49.39742214825278 - type: nauc_recall_at_5_max value: 2.8412267611894517 - type: nauc_recall_at_5_std value: 18.01598921859512 - type: ndcg_at_1 value: 91.0 - type: ndcg_at_10 value: 85.206 - type: ndcg_at_100 value: 67.29 - type: ndcg_at_1000 value: 60.584 - type: ndcg_at_20 value: 82.321 - type: ndcg_at_3 value: 88.642 - type: ndcg_at_5 value: 87.063 - type: precision_at_1 value: 94.0 - type: precision_at_10 value: 89.8 - type: precision_at_100 value: 69.78 - type: precision_at_1000 value: 26.738 - type: precision_at_20 value: 87.2 - type: precision_at_3 value: 92.0 - type: precision_at_5 value: 90.8 - type: recall_at_1 value: 0.246 - type: recall_at_10 value: 2.344 - type: recall_at_100 value: 16.962 - type: recall_at_1000 value: 57.325 - type: recall_at_20 value: 4.517 - type: recall_at_3 value: 0.731 - type: recall_at_5 value: 1.1780000000000002 - task: type: Retrieval dataset: name: MTEB Touche2020 type: mteb/touche2020 config: default split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: main_score value: 31.455 - type: map_at_1 value: 2.9739999999999998 - type: map_at_10 value: 12.183 - type: map_at_100 value: 18.772 - type: map_at_1000 value: 20.415 - type: map_at_20 value: 14.451 - type: map_at_3 value: 6.507000000000001 - type: map_at_5 value: 8.66 - type: mrr_at_1 value: 40.816326530612244 - type: mrr_at_10 value: 57.70975056689341 - type: mrr_at_100 value: 58.18379126542391 - type: mrr_at_1000 value: 58.18379126542391 - type: mrr_at_20 value: 
57.85552316164561 - type: mrr_at_3 value: 54.08163265306123 - type: mrr_at_5 value: 56.42857142857143 - type: nauc_map_at_1000_diff1 value: 3.1567471051481437 - type: nauc_map_at_1000_max value: -1.5882060729791523 - type: nauc_map_at_1000_std value: 18.69622198722074 - type: nauc_map_at_100_diff1 value: 3.3449677678147536 - type: nauc_map_at_100_max value: -2.8928606866168405 - type: nauc_map_at_100_std value: 15.789984947653412 - type: nauc_map_at_10_diff1 value: 2.9696743570444264 - type: nauc_map_at_10_max value: -9.096749212011876 - type: nauc_map_at_10_std value: -5.38545817258353 - type: nauc_map_at_1_diff1 value: 20.680780404542546 - type: nauc_map_at_1_max value: -7.04722927447817 - type: nauc_map_at_1_std value: -7.062494733973898 - type: nauc_map_at_20_diff1 value: 4.070437790119271 - type: nauc_map_at_20_max value: -4.84491434686032 - type: nauc_map_at_20_std value: 0.5846341109021014 - type: nauc_map_at_3_diff1 value: 11.9634978045925 - type: nauc_map_at_3_max value: -8.27834591046608 - type: nauc_map_at_3_std value: -8.687615453381065 - type: nauc_map_at_5_diff1 value: 0.9195191526009436 - type: nauc_map_at_5_max value: -1.673813362719489 - type: nauc_map_at_5_std value: -6.67549753473631 - type: nauc_mrr_at_1000_diff1 value: 19.877993208719573 - type: nauc_mrr_at_1000_max value: -10.37776706406218 - type: nauc_mrr_at_1000_std value: 7.132169578056367 - type: nauc_mrr_at_100_diff1 value: 19.877993208719573 - type: nauc_mrr_at_100_max value: -10.37776706406218 - type: nauc_mrr_at_100_std value: 7.132169578056367 - type: nauc_mrr_at_10_diff1 value: 20.414285568401457 - type: nauc_mrr_at_10_max value: -9.677800295687861 - type: nauc_mrr_at_10_std value: 8.001103690180859 - type: nauc_mrr_at_1_diff1 value: 22.393284073955723 - type: nauc_mrr_at_1_max value: -5.889370191243167 - type: nauc_mrr_at_1_std value: -1.5183536173658247 - type: nauc_mrr_at_20_diff1 value: 20.455564720604055 - type: nauc_mrr_at_20_max value: -10.230642830103074 - type: 
nauc_mrr_at_20_std value: 7.863582453266621 - type: nauc_mrr_at_3_diff1 value: 17.554895390732618 - type: nauc_mrr_at_3_max value: -15.618463505555052 - type: nauc_mrr_at_3_std value: 5.913231577966864 - type: nauc_mrr_at_5_diff1 value: 18.393678507779914 - type: nauc_mrr_at_5_max value: -11.903593353147762 - type: nauc_mrr_at_5_std value: 7.580745996262831 - type: nauc_ndcg_at_1000_diff1 value: 13.746937095530473 - type: nauc_ndcg_at_1000_max value: -0.9319249687895838 - type: nauc_ndcg_at_1000_std value: 38.56328031451904 - type: nauc_ndcg_at_100_diff1 value: 13.854865944415895 - type: nauc_ndcg_at_100_max value: -7.142142012591404 - type: nauc_ndcg_at_100_std value: 35.61341954818848 - type: nauc_ndcg_at_10_diff1 value: 9.010144273248759 - type: nauc_ndcg_at_10_max value: -15.320014897424574 - type: nauc_ndcg_at_10_std value: 2.84883880489144 - type: nauc_ndcg_at_1_diff1 value: 20.939533945592967 - type: nauc_ndcg_at_1_max value: -6.387319972188946 - type: nauc_ndcg_at_1_std value: -0.5258673122126726 - type: nauc_ndcg_at_20_diff1 value: 14.660827309009496 - type: nauc_ndcg_at_20_max value: -13.476196120145994 - type: nauc_ndcg_at_20_std value: 8.22391881710838 - type: nauc_ndcg_at_3_diff1 value: 13.429985227235935 - type: nauc_ndcg_at_3_max value: -14.904544592570247 - type: nauc_ndcg_at_3_std value: 1.599779998183342 - type: nauc_ndcg_at_5_diff1 value: 8.085466231900622 - type: nauc_ndcg_at_5_max value: -9.09591969526831 - type: nauc_ndcg_at_5_std value: 3.5794092637248505 - type: nauc_precision_at_1000_diff1 value: -9.31941215946743 - type: nauc_precision_at_1000_max value: 31.52913520470716 - type: nauc_precision_at_1000_std value: 22.720784312185856 - type: nauc_precision_at_100_diff1 value: 8.958548406995279 - type: nauc_precision_at_100_max value: 15.100597910674104 - type: nauc_precision_at_100_std value: 71.04548238175113 - type: nauc_precision_at_10_diff1 value: 12.4698194690008 - type: nauc_precision_at_10_max value: -15.84870544871496 - type: 
nauc_precision_at_10_std value: 7.575297622501928 - type: nauc_precision_at_1_diff1 value: 22.393284073955723 - type: nauc_precision_at_1_max value: -5.889370191243167 - type: nauc_precision_at_1_std value: -1.5183536173658247 - type: nauc_precision_at_20_diff1 value: 15.393505718138758 - type: nauc_precision_at_20_max value: -3.70684298539384 - type: nauc_precision_at_20_std value: 29.426137824970304 - type: nauc_precision_at_3_diff1 value: 9.997768085465394 - type: nauc_precision_at_3_max value: -17.12224314347674 - type: nauc_precision_at_3_std value: -1.343018166772313 - type: nauc_precision_at_5_diff1 value: 3.8936997437913554 - type: nauc_precision_at_5_max value: -5.689104289687632 - type: nauc_precision_at_5_std value: 3.181098051304285 - type: nauc_recall_at_1000_diff1 value: 9.908303508158387 - type: nauc_recall_at_1000_max value: 6.174506592699848 - type: nauc_recall_at_1000_std value: 77.41931114780012 - type: nauc_recall_at_100_diff1 value: 10.286839241876192 - type: nauc_recall_at_100_max value: -6.6138697026666815 - type: nauc_recall_at_100_std value: 49.608313692633224 - type: nauc_recall_at_10_diff1 value: 2.215545846659851 - type: nauc_recall_at_10_max value: -17.83025802478445 - type: nauc_recall_at_10_std value: -3.3784768673705465 - type: nauc_recall_at_1_diff1 value: 20.680780404542546 - type: nauc_recall_at_1_max value: -7.04722927447817 - type: nauc_recall_at_1_std value: -7.062494733973898 - type: nauc_recall_at_20_diff1 value: 6.974410239251615 - type: nauc_recall_at_20_max value: -14.161147924731646 - type: nauc_recall_at_20_std value: 9.328412057721454 - type: nauc_recall_at_3_diff1 value: 7.904589805754212 - type: nauc_recall_at_3_max value: -12.1912388648593 - type: nauc_recall_at_3_std value: -9.221542013385555 - type: nauc_recall_at_5_diff1 value: -3.2604132752706914 - type: nauc_recall_at_5_max value: -6.886351441658915 - type: nauc_recall_at_5_std value: -7.014252851712789 - type: ndcg_at_1 value: 39.796 - type: ndcg_at_10 value: 
31.455 - type: ndcg_at_100 value: 42.388999999999996 - type: ndcg_at_1000 value: 53.556000000000004 - type: ndcg_at_20 value: 30.808000000000003 - type: ndcg_at_3 value: 35.831 - type: ndcg_at_5 value: 32.845 - type: precision_at_1 value: 40.816 - type: precision_at_10 value: 27.143 - type: precision_at_100 value: 8.449 - type: precision_at_1000 value: 1.6179999999999999 - type: precision_at_20 value: 19.387999999999998 - type: precision_at_3 value: 35.374 - type: precision_at_5 value: 31.019999999999996 - type: recall_at_1 value: 2.9739999999999998 - type: recall_at_10 value: 19.39 - type: recall_at_100 value: 51.636 - type: recall_at_1000 value: 86.99900000000001 - type: recall_at_20 value: 26.478 - type: recall_at_3 value: 7.703 - type: recall_at_5 value: 11.42 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: edfaf9da55d3dd50d43143d90c1ac476895ae6de metrics: - type: accuracy value: 86.9384765625 - type: ap value: 31.737513704141552 - type: ap_weighted value: 31.737513704141552 - type: f1 value: 71.5490757306975 - type: f1_weighted value: 89.14632533489856 - type: main_score value: 86.9384765625 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 73.57668364459535 - type: f1 value: 73.90467103648074 - type: f1_weighted value: 73.42158415034704 - type: main_score value: 73.57668364459535 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: main_score value: 58.574148097494685 - type: v_measure value: 58.574148097494685 - type: v_measure_std value: 0.9443161637490822 - task: type: PairClassification dataset: name: MTEB 
TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cosine_accuracy value: 88.1385229778864 - type: cosine_accuracy_threshold value: 83.86307954788208 - type: cosine_ap value: 80.17965893449055 - type: cosine_f1 value: 73.0614300100705 - type: cosine_f1_threshold value: 80.7942807674408 - type: cosine_precision value: 69.8603755416466 - type: cosine_recall value: 76.56992084432717 - type: dot_accuracy value: 88.2100494724921 - type: dot_accuracy_threshold value: 83.84793996810913 - type: dot_ap value: 80.18603932881858 - type: dot_f1 value: 73.07643714466204 - type: dot_f1_threshold value: 80.87586164474487 - type: dot_precision value: 70.10909090909091 - type: dot_recall value: 76.3060686015831 - type: euclidean_accuracy value: 88.1385229778864 - type: euclidean_accuracy_threshold value: 56.77661895751953 - type: euclidean_ap value: 80.1784070881624 - type: euclidean_f1 value: 73.04830369529574 - type: euclidean_f1_threshold value: 61.91838979721069 - type: euclidean_precision value: 69.96859144720948 - type: euclidean_recall value: 76.41160949868075 - type: main_score value: 80.18603932881858 - type: manhattan_accuracy value: 88.0431543184121 - type: manhattan_accuracy_threshold value: 3755.6137084960938 - type: manhattan_ap value: 79.98270453664578 - type: manhattan_f1 value: 72.68242015061023 - type: manhattan_f1_threshold value: 3892.494583129883 - type: manhattan_precision value: 71.54907975460122 - type: manhattan_recall value: 73.85224274406332 - type: max_ap value: 80.18603932881858 - type: max_f1 value: 73.07643714466204 - type: max_precision value: 71.54907975460122 - type: max_recall value: 76.56992084432717 - type: similarity_accuracy value: 88.1385229778864 - type: similarity_accuracy_threshold value: 83.86307954788208 - type: similarity_ap value: 80.17965893449055 - type: similarity_f1 value: 73.0614300100705 - type: similarity_f1_threshold 
value: 80.7942807674408 - type: similarity_precision value: 69.8603755416466 - type: similarity_recall value: 76.56992084432717 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cosine_accuracy value: 89.7892653393876 - type: cosine_accuracy_threshold value: 79.69566583633423 - type: cosine_ap value: 87.4579867302024 - type: cosine_f1 value: 79.91620843152658 - type: cosine_f1_threshold value: 78.53609323501587 - type: cosine_precision value: 77.7155329210622 - type: cosine_recall value: 82.24514936864799 - type: dot_accuracy value: 89.78732487289945 - type: dot_accuracy_threshold value: 80.05315661430359 - type: dot_ap value: 87.44916182456272 - type: dot_f1 value: 79.90419878751591 - type: dot_f1_threshold value: 78.57890725135803 - type: dot_precision value: 77.73409057812728 - type: dot_recall value: 82.19895287958116 - type: euclidean_accuracy value: 89.78538440641131 - type: euclidean_accuracy_threshold value: 62.29925751686096 - type: euclidean_ap value: 87.45904868911386 - type: euclidean_f1 value: 79.93127404474657 - type: euclidean_f1_threshold value: 65.61101078987122 - type: euclidean_precision value: 77.62060210373595 - type: euclidean_recall value: 82.38373883584848 - type: main_score value: 87.46554314325058 - type: manhattan_accuracy value: 89.76597974152986 - type: manhattan_accuracy_threshold value: 3988.5299682617188 - type: manhattan_ap value: 87.46554314325058 - type: manhattan_f1 value: 79.97181740645973 - type: manhattan_f1_threshold value: 4235.905838012695 - type: manhattan_precision value: 77.13713427283783 - type: manhattan_recall value: 83.02279026793964 - type: max_ap value: 87.46554314325058 - type: max_f1 value: 79.97181740645973 - type: max_precision value: 77.73409057812728 - type: max_recall value: 83.02279026793964 - type: similarity_accuracy value: 89.7892653393876 - 
type: similarity_accuracy_threshold value: 79.69566583633423 - type: similarity_ap value: 87.4579867302024 - type: similarity_f1 value: 79.91620843152658 - type: similarity_f1_threshold value: 78.53609323501587 - type: similarity_precision value: 77.7155329210622 - type: similarity_recall value: 82.24514936864799 --- # Updates New open-source models and ToDoList will be listed on https://github.com/DunZhang/Stella/blob/main/news_and_todo.md. You can also find these models on my [homepage](https://huggingface.co/infgrad). # Introduction The models are trained based on `Alibaba-NLP/gte-large-en-v1.5` and `Alibaba-NLP/gte-Qwen2-1.5B-instruct`. Thanks for their contributions! **We simplify usage of prompts, providing two prompts for most general tasks, one is for s2p, another one is for s2s.** Prompt of s2p task(e.g. retrieve task): ```text Instruct: Given a web search query, retrieve relevant passages that answer the query.\nQuery: {query} ``` Prompt of s2s task(e.g. semantic textual similarity task): ```text Instruct: Retrieve semantically similar text.\nQuery: {query} ``` The models are finally trained by [MRL](https://arxiv.org/abs/2205.13147), so they have multiple dimensions: 512, 768, 1024, 2048, 4096, 6144 and 8192. The higher the dimension, the better the performance. **Generally speaking, 1024d is good enough.** The MTEB score of 1024d is only 0.001 lower than 8192d. # Model directory structure The model directory structure is very simple, it is a standard SentenceTransformer directory **with a series of `2_Dense_{dims}` folders**, where `dims` represents the final vector dimension. For example, the `2_Dense_256` folder stores Linear weights that convert vector dimensions to 256 dimensions. Please refer to the following chapters for specific instructions on how to use them. # Usage You can use `SentenceTransformers` or `transformers` library to encode text. 
## Sentence Transformers ```python from sentence_transformers import SentenceTransformer # This model supports two prompts: "s2p_query" and "s2s_query" for sentence-to-passage and sentence-to-sentence tasks, respectively. # They are defined in `config_sentence_transformers.json` query_prompt_name = "s2p_query" queries = [ "What are some ways to reduce stress?", "What are the benefits of drinking green tea?", ] # docs do not need any prompts docs = [ "There are many effective ways to reduce stress. Some common techniques include deep breathing, meditation, and physical activity. Engaging in hobbies, spending time in nature, and connecting with loved ones can also help alleviate stress. Additionally, setting boundaries, practicing self-care, and learning to say no can prevent stress from building up.", "Green tea has been consumed for centuries and is known for its potential health benefits. It contains antioxidants that may help protect the body against damage caused by free radicals. Regular consumption of green tea has been associated with improved heart health, enhanced cognitive function, and a reduced risk of certain types of cancer. The polyphenols in green tea may also have anti-inflammatory and weight loss properties.", ] # !The default dimension is 1024, if you need other dimensions, please clone the model and modify `modules.json` to replace `2_Dense_1024` with another dimension, e.g. `2_Dense_256` or `2_Dense_8192` ! # on gpu model = SentenceTransformer("dunzhang/stella_en_400M_v5", trust_remote_code=True).cuda() # you can also use this model without the features of `use_memory_efficient_attention` and `unpad_inputs`. It can be worked in CPU. 
# model = SentenceTransformer( # "dunzhang/stella_en_400M_v5", # trust_remote_code=True, # device="cpu", # config_kwargs={"use_memory_efficient_attention": False, "unpad_inputs": False} # ) query_embeddings = model.encode(queries, prompt_name=query_prompt_name) doc_embeddings = model.encode(docs) print(query_embeddings.shape, doc_embeddings.shape) # (2, 1024) (2, 1024) similarities = model.similarity(query_embeddings, doc_embeddings) print(similarities) # tensor([[0.8398, 0.2990], # [0.3282, 0.8095]]) ``` ## Transformers ```python import os import torch from transformers import AutoModel, AutoTokenizer from sklearn.preprocessing import normalize query_prompt = "Instruct: Given a web search query, retrieve relevant passages that answer the query.\nQuery: " queries = [ "What are some ways to reduce stress?", "What are the benefits of drinking green tea?", ] queries = [query_prompt + query for query in queries] # docs do not need any prompts docs = [ "There are many effective ways to reduce stress. Some common techniques include deep breathing, meditation, and physical activity. Engaging in hobbies, spending time in nature, and connecting with loved ones can also help alleviate stress. Additionally, setting boundaries, practicing self-care, and learning to say no can prevent stress from building up.", "Green tea has been consumed for centuries and is known for its potential health benefits. It contains antioxidants that may help protect the body against damage caused by free radicals. Regular consumption of green tea has been associated with improved heart health, enhanced cognitive function, and a reduced risk of certain types of cancer. 
The polyphenols in green tea may also have anti-inflammatory and weight loss properties.", ] # The path of your model after cloning it model_dir = "{Your MODEL_PATH}" vector_dim = 1024 vector_linear_directory = f"2_Dense_{vector_dim}" model = AutoModel.from_pretrained(model_dir, trust_remote_code=True).cuda().eval() # you can also use this model without the features of `use_memory_efficient_attention` and `unpad_inputs`. It can be worked in CPU. # model = AutoModel.from_pretrained(model_dir, trust_remote_code=True,use_memory_efficient_attention=False,unpad_inputs=False).cuda().eval() tokenizer = AutoTokenizer.from_pretrained(model_dir, trust_remote_code=True) vector_linear = torch.nn.Linear(in_features=model.config.hidden_size, out_features=vector_dim) vector_linear_dict = { k.replace("linear.", ""): v for k, v in torch.load(os.path.join(model_dir, f"{vector_linear_directory}/pytorch_model.bin")).items() } vector_linear.load_state_dict(vector_linear_dict) vector_linear.cuda() # Embed the queries with torch.no_grad(): input_data = tokenizer(queries, padding="longest", truncation=True, max_length=512, return_tensors="pt") input_data = {k: v.cuda() for k, v in input_data.items()} attention_mask = input_data["attention_mask"] last_hidden_state = model(**input_data)[0] last_hidden = last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0) query_vectors = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] query_vectors = normalize(vector_linear(query_vectors).cpu().numpy()) # Embed the documents with torch.no_grad(): input_data = tokenizer(docs, padding="longest", truncation=True, max_length=512, return_tensors="pt") input_data = {k: v.cuda() for k, v in input_data.items()} attention_mask = input_data["attention_mask"] last_hidden_state = model(**input_data)[0] last_hidden = last_hidden_state.masked_fill(~attention_mask[..., None].bool(), 0.0) docs_vectors = last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] docs_vectors = 
normalize(vector_linear(docs_vectors).cpu().numpy()) print(query_vectors.shape, docs_vectors.shape) # (2, 1024) (2, 1024) similarities = query_vectors @ docs_vectors.T print(similarities) # [[0.8397531 0.29900077] # [0.32818374 0.80954516]] ``` # FAQ Q: The details of training? A: The training method and datasets will be released in the future. (specific time unknown, may be provided in a paper) Q: How to choose a suitable prompt for my own task? A: In most cases, please use the s2p and s2s prompts. These two prompts account for the vast majority of the training data. Q: How to reproduce MTEB results? A: Please use evaluation scripts in `Alibaba-NLP/gte-Qwen2-1.5B-instruct` or `intfloat/e5-mistral-7b-instruct` Q: Why each dimension has a linear weight? A: MRL has multiple training methods, we choose this method which has the best performance. Q: What is the sequence length of models? A: 512 is recommended, in our experiments, almost all models perform poorly on specialized long text retrieval datasets. Besides, the model is trained on datasets of 512 length. This may be an optimization term. If you have any questions, please start a discussion on community.
[ "BIOSSES", "SCIFACT" ]
pingkeest/learning2_model
pingkeest
sentence-similarity
[ "transformers", "safetensors", "new", "feature-extraction", "sentence-transformers", "gte", "mteb", "transformers.js", "sentence-similarity", "custom_code", "en", "dataset:allenai/c4", "arxiv:2407.19669", "arxiv:2308.03281", "license:apache-2.0", "model-index", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-10-25T09:58:09Z
2024-10-25T10:00:18+00:00
18
2
--- datasets: - allenai/c4 language: - en library_name: transformers license: apache-2.0 tags: - sentence-transformers - gte - mteb - transformers.js - sentence-similarity model-index: - name: gte-large-en-v1.5 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 73.01492537313432 - type: ap value: 35.05341696659522 - type: f1 value: 66.71270310883853 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 93.97189999999999 - type: ap value: 90.5952493948908 - type: f1 value: 93.95848137716877 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 54.196 - type: f1 value: 53.80122334012787 - task: type: Retrieval dataset: name: MTEB ArguAna type: mteb/arguana config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: map_at_1 value: 47.297 - type: map_at_10 value: 64.303 - type: map_at_100 value: 64.541 - type: map_at_1000 value: 64.541 - type: map_at_3 value: 60.728 - type: map_at_5 value: 63.114000000000004 - type: mrr_at_1 value: 48.435 - type: mrr_at_10 value: 64.657 - type: mrr_at_100 value: 64.901 - type: mrr_at_1000 value: 64.901 - type: mrr_at_3 value: 61.06 - type: mrr_at_5 value: 63.514 - type: ndcg_at_1 value: 47.297 - type: ndcg_at_10 value: 72.107 - type: ndcg_at_100 value: 72.963 - type: ndcg_at_1000 value: 72.963 - type: ndcg_at_3 value: 65.063 - type: ndcg_at_5 value: 69.352 - type: precision_at_1 value: 47.297 - type: precision_at_10 value: 9.623 - type: precision_at_100 value: 0.996 - type: precision_at_1000 
value: 0.1 - type: precision_at_3 value: 25.865 - type: precision_at_5 value: 17.596 - type: recall_at_1 value: 47.297 - type: recall_at_10 value: 96.23 - type: recall_at_100 value: 99.644 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 77.596 - type: recall_at_5 value: 87.98 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 48.467787861077475 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 43.39198391914257 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 63.12794820591384 - type: mrr value: 75.9331442641692 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 87.85062993863319 - type: cos_sim_spearman value: 85.39049989733459 - type: euclidean_pearson value: 86.00222680278333 - type: euclidean_spearman value: 85.45556162077396 - type: manhattan_pearson value: 85.88769871785621 - type: manhattan_spearman value: 85.11760211290839 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 87.32792207792208 - type: f1 value: 87.29132945999555 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 40.5779328301945 - task: type: Clustering 
dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 37.94425623865118 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: mteb/cqadupstack-android config: default split: test revision: f46a197baaae43b4f621051089b82a364682dfeb metrics: - type: map_at_1 value: 32.978 - type: map_at_10 value: 44.45 - type: map_at_100 value: 46.19 - type: map_at_1000 value: 46.303 - type: map_at_3 value: 40.849000000000004 - type: map_at_5 value: 42.55 - type: mrr_at_1 value: 40.629 - type: mrr_at_10 value: 50.848000000000006 - type: mrr_at_100 value: 51.669 - type: mrr_at_1000 value: 51.705 - type: mrr_at_3 value: 47.997 - type: mrr_at_5 value: 49.506 - type: ndcg_at_1 value: 40.629 - type: ndcg_at_10 value: 51.102000000000004 - type: ndcg_at_100 value: 57.159000000000006 - type: ndcg_at_1000 value: 58.669000000000004 - type: ndcg_at_3 value: 45.738 - type: ndcg_at_5 value: 47.632999999999996 - type: precision_at_1 value: 40.629 - type: precision_at_10 value: 9.700000000000001 - type: precision_at_100 value: 1.5970000000000002 - type: precision_at_1000 value: 0.202 - type: precision_at_3 value: 21.698 - type: precision_at_5 value: 15.393 - type: recall_at_1 value: 32.978 - type: recall_at_10 value: 63.711 - type: recall_at_100 value: 88.39399999999999 - type: recall_at_1000 value: 97.513 - type: recall_at_3 value: 48.025 - type: recall_at_5 value: 53.52 - task: type: Retrieval dataset: name: MTEB CQADupstackEnglishRetrieval type: mteb/cqadupstack-english config: default split: test revision: ad9991cb51e31e31e430383c75ffb2885547b5f0 metrics: - type: map_at_1 value: 30.767 - type: map_at_10 value: 42.195 - type: map_at_100 value: 43.541999999999994 - type: map_at_1000 value: 43.673 - type: map_at_3 value: 38.561 - type: map_at_5 value: 40.532000000000004 - type: mrr_at_1 value: 38.79 - type: mrr_at_10 value: 48.021 - type: 
mrr_at_100 value: 48.735 - type: mrr_at_1000 value: 48.776 - type: mrr_at_3 value: 45.594 - type: mrr_at_5 value: 46.986 - type: ndcg_at_1 value: 38.79 - type: ndcg_at_10 value: 48.468 - type: ndcg_at_100 value: 53.037 - type: ndcg_at_1000 value: 55.001999999999995 - type: ndcg_at_3 value: 43.409 - type: ndcg_at_5 value: 45.654 - type: precision_at_1 value: 38.79 - type: precision_at_10 value: 9.452 - type: precision_at_100 value: 1.518 - type: precision_at_1000 value: 0.201 - type: precision_at_3 value: 21.21 - type: precision_at_5 value: 15.171999999999999 - type: recall_at_1 value: 30.767 - type: recall_at_10 value: 60.118 - type: recall_at_100 value: 79.271 - type: recall_at_1000 value: 91.43299999999999 - type: recall_at_3 value: 45.36 - type: recall_at_5 value: 51.705 - task: type: Retrieval dataset: name: MTEB CQADupstackGamingRetrieval type: mteb/cqadupstack-gaming config: default split: test revision: 4885aa143210c98657558c04aaf3dc47cfb54340 metrics: - type: map_at_1 value: 40.007 - type: map_at_10 value: 53.529 - type: map_at_100 value: 54.602 - type: map_at_1000 value: 54.647 - type: map_at_3 value: 49.951 - type: map_at_5 value: 52.066 - type: mrr_at_1 value: 45.705 - type: mrr_at_10 value: 56.745000000000005 - type: mrr_at_100 value: 57.43899999999999 - type: mrr_at_1000 value: 57.462999999999994 - type: mrr_at_3 value: 54.25299999999999 - type: mrr_at_5 value: 55.842000000000006 - type: ndcg_at_1 value: 45.705 - type: ndcg_at_10 value: 59.809 - type: ndcg_at_100 value: 63.837999999999994 - type: ndcg_at_1000 value: 64.729 - type: ndcg_at_3 value: 53.994 - type: ndcg_at_5 value: 57.028 - type: precision_at_1 value: 45.705 - type: precision_at_10 value: 9.762 - type: precision_at_100 value: 1.275 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 24.368000000000002 - type: precision_at_5 value: 16.84 - type: recall_at_1 value: 40.007 - type: recall_at_10 value: 75.017 - type: recall_at_100 value: 91.99000000000001 - type: 
recall_at_1000 value: 98.265 - type: recall_at_3 value: 59.704 - type: recall_at_5 value: 67.109 - task: type: Retrieval dataset: name: MTEB CQADupstackGisRetrieval type: mteb/cqadupstack-gis config: default split: test revision: 5003b3064772da1887988e05400cf3806fe491f2 metrics: - type: map_at_1 value: 26.639000000000003 - type: map_at_10 value: 35.926 - type: map_at_100 value: 37.126999999999995 - type: map_at_1000 value: 37.202 - type: map_at_3 value: 32.989000000000004 - type: map_at_5 value: 34.465 - type: mrr_at_1 value: 28.475 - type: mrr_at_10 value: 37.7 - type: mrr_at_100 value: 38.753 - type: mrr_at_1000 value: 38.807 - type: mrr_at_3 value: 35.066 - type: mrr_at_5 value: 36.512 - type: ndcg_at_1 value: 28.475 - type: ndcg_at_10 value: 41.245 - type: ndcg_at_100 value: 46.814 - type: ndcg_at_1000 value: 48.571 - type: ndcg_at_3 value: 35.528999999999996 - type: ndcg_at_5 value: 38.066 - type: precision_at_1 value: 28.475 - type: precision_at_10 value: 6.497 - type: precision_at_100 value: 0.9650000000000001 - type: precision_at_1000 value: 0.11499999999999999 - type: precision_at_3 value: 15.065999999999999 - type: precision_at_5 value: 10.599 - type: recall_at_1 value: 26.639000000000003 - type: recall_at_10 value: 55.759 - type: recall_at_100 value: 80.913 - type: recall_at_1000 value: 93.929 - type: recall_at_3 value: 40.454 - type: recall_at_5 value: 46.439 - task: type: Retrieval dataset: name: MTEB CQADupstackMathematicaRetrieval type: mteb/cqadupstack-mathematica config: default split: test revision: 90fceea13679c63fe563ded68f3b6f06e50061de metrics: - type: map_at_1 value: 15.767999999999999 - type: map_at_10 value: 24.811 - type: map_at_100 value: 26.064999999999998 - type: map_at_1000 value: 26.186999999999998 - type: map_at_3 value: 21.736 - type: map_at_5 value: 23.283 - type: mrr_at_1 value: 19.527 - type: mrr_at_10 value: 29.179 - type: mrr_at_100 value: 30.153999999999996 - type: mrr_at_1000 value: 30.215999999999998 - type: mrr_at_3 value: 
26.223000000000003 - type: mrr_at_5 value: 27.733999999999998 - type: ndcg_at_1 value: 19.527 - type: ndcg_at_10 value: 30.786 - type: ndcg_at_100 value: 36.644 - type: ndcg_at_1000 value: 39.440999999999995 - type: ndcg_at_3 value: 24.958 - type: ndcg_at_5 value: 27.392 - type: precision_at_1 value: 19.527 - type: precision_at_10 value: 5.995 - type: precision_at_100 value: 1.03 - type: precision_at_1000 value: 0.14100000000000001 - type: precision_at_3 value: 12.520999999999999 - type: precision_at_5 value: 9.129 - type: recall_at_1 value: 15.767999999999999 - type: recall_at_10 value: 44.824000000000005 - type: recall_at_100 value: 70.186 - type: recall_at_1000 value: 89.934 - type: recall_at_3 value: 28.607 - type: recall_at_5 value: 34.836 - task: type: Retrieval dataset: name: MTEB CQADupstackPhysicsRetrieval type: mteb/cqadupstack-physics config: default split: test revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4 metrics: - type: map_at_1 value: 31.952 - type: map_at_10 value: 44.438 - type: map_at_100 value: 45.778 - type: map_at_1000 value: 45.883 - type: map_at_3 value: 41.044000000000004 - type: map_at_5 value: 42.986000000000004 - type: mrr_at_1 value: 39.172000000000004 - type: mrr_at_10 value: 49.76 - type: mrr_at_100 value: 50.583999999999996 - type: mrr_at_1000 value: 50.621 - type: mrr_at_3 value: 47.353 - type: mrr_at_5 value: 48.739 - type: ndcg_at_1 value: 39.172000000000004 - type: ndcg_at_10 value: 50.760000000000005 - type: ndcg_at_100 value: 56.084 - type: ndcg_at_1000 value: 57.865 - type: ndcg_at_3 value: 45.663 - type: ndcg_at_5 value: 48.178 - type: precision_at_1 value: 39.172000000000004 - type: precision_at_10 value: 9.22 - type: precision_at_100 value: 1.387 - type: precision_at_1000 value: 0.17099999999999999 - type: precision_at_3 value: 21.976000000000003 - type: precision_at_5 value: 15.457 - type: recall_at_1 value: 31.952 - type: recall_at_10 value: 63.900999999999996 - type: recall_at_100 value: 85.676 - type: 
recall_at_1000 value: 97.03699999999999 - type: recall_at_3 value: 49.781 - type: recall_at_5 value: 56.330000000000005 - task: type: Retrieval dataset: name: MTEB CQADupstackProgrammersRetrieval type: mteb/cqadupstack-programmers config: default split: test revision: 6184bc1440d2dbc7612be22b50686b8826d22b32 metrics: - type: map_at_1 value: 25.332 - type: map_at_10 value: 36.874 - type: map_at_100 value: 38.340999999999994 - type: map_at_1000 value: 38.452 - type: map_at_3 value: 33.068 - type: map_at_5 value: 35.324 - type: mrr_at_1 value: 30.822 - type: mrr_at_10 value: 41.641 - type: mrr_at_100 value: 42.519 - type: mrr_at_1000 value: 42.573 - type: mrr_at_3 value: 38.413000000000004 - type: mrr_at_5 value: 40.542 - type: ndcg_at_1 value: 30.822 - type: ndcg_at_10 value: 43.414 - type: ndcg_at_100 value: 49.196 - type: ndcg_at_1000 value: 51.237 - type: ndcg_at_3 value: 37.230000000000004 - type: ndcg_at_5 value: 40.405 - type: precision_at_1 value: 30.822 - type: precision_at_10 value: 8.379 - type: precision_at_100 value: 1.315 - type: precision_at_1000 value: 0.168 - type: precision_at_3 value: 18.417 - type: precision_at_5 value: 13.744 - type: recall_at_1 value: 25.332 - type: recall_at_10 value: 57.774 - type: recall_at_100 value: 82.071 - type: recall_at_1000 value: 95.60600000000001 - type: recall_at_3 value: 40.722 - type: recall_at_5 value: 48.754999999999995 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval type: mteb/cqadupstack config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: map_at_1 value: 25.91033333333334 - type: map_at_10 value: 36.23225000000001 - type: map_at_100 value: 37.55766666666667 - type: map_at_1000 value: 37.672583333333336 - type: map_at_3 value: 32.95666666666667 - type: map_at_5 value: 34.73375 - type: mrr_at_1 value: 30.634 - type: mrr_at_10 value: 40.19449999999999 - type: mrr_at_100 value: 41.099250000000005 - type: mrr_at_1000 value: 41.15091666666667 - type: 
mrr_at_3 value: 37.4615 - type: mrr_at_5 value: 39.00216666666667 - type: ndcg_at_1 value: 30.634 - type: ndcg_at_10 value: 42.162166666666664 - type: ndcg_at_100 value: 47.60708333333333 - type: ndcg_at_1000 value: 49.68616666666666 - type: ndcg_at_3 value: 36.60316666666666 - type: ndcg_at_5 value: 39.15616666666668 - type: precision_at_1 value: 30.634 - type: precision_at_10 value: 7.6193333333333335 - type: precision_at_100 value: 1.2198333333333333 - type: precision_at_1000 value: 0.15975000000000003 - type: precision_at_3 value: 17.087 - type: precision_at_5 value: 12.298333333333334 - type: recall_at_1 value: 25.91033333333334 - type: recall_at_10 value: 55.67300000000001 - type: recall_at_100 value: 79.20608333333334 - type: recall_at_1000 value: 93.34866666666667 - type: recall_at_3 value: 40.34858333333333 - type: recall_at_5 value: 46.834083333333325 - task: type: Retrieval dataset: name: MTEB CQADupstackStatsRetrieval type: mteb/cqadupstack-stats config: default split: test revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a metrics: - type: map_at_1 value: 25.006 - type: map_at_10 value: 32.177 - type: map_at_100 value: 33.324999999999996 - type: map_at_1000 value: 33.419 - type: map_at_3 value: 29.952 - type: map_at_5 value: 31.095 - type: mrr_at_1 value: 28.066999999999997 - type: mrr_at_10 value: 34.995 - type: mrr_at_100 value: 35.978 - type: mrr_at_1000 value: 36.042 - type: mrr_at_3 value: 33.103 - type: mrr_at_5 value: 34.001 - type: ndcg_at_1 value: 28.066999999999997 - type: ndcg_at_10 value: 36.481 - type: ndcg_at_100 value: 42.022999999999996 - type: ndcg_at_1000 value: 44.377 - type: ndcg_at_3 value: 32.394 - type: ndcg_at_5 value: 34.108 - type: precision_at_1 value: 28.066999999999997 - type: precision_at_10 value: 5.736 - type: precision_at_100 value: 0.9259999999999999 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 13.804 - type: precision_at_5 value: 9.508999999999999 - type: recall_at_1 value: 25.006 - type: 
recall_at_10 value: 46.972 - type: recall_at_100 value: 72.138 - type: recall_at_1000 value: 89.479 - type: recall_at_3 value: 35.793 - type: recall_at_5 value: 39.947 - task: type: Retrieval dataset: name: MTEB CQADupstackTexRetrieval type: mteb/cqadupstack-tex config: default split: test revision: 46989137a86843e03a6195de44b09deda022eec7 metrics: - type: map_at_1 value: 16.07 - type: map_at_10 value: 24.447 - type: map_at_100 value: 25.685999999999996 - type: map_at_1000 value: 25.813999999999997 - type: map_at_3 value: 21.634 - type: map_at_5 value: 23.133 - type: mrr_at_1 value: 19.580000000000002 - type: mrr_at_10 value: 28.127999999999997 - type: mrr_at_100 value: 29.119 - type: mrr_at_1000 value: 29.192 - type: mrr_at_3 value: 25.509999999999998 - type: mrr_at_5 value: 26.878 - type: ndcg_at_1 value: 19.580000000000002 - type: ndcg_at_10 value: 29.804000000000002 - type: ndcg_at_100 value: 35.555 - type: ndcg_at_1000 value: 38.421 - type: ndcg_at_3 value: 24.654999999999998 - type: ndcg_at_5 value: 26.881 - type: precision_at_1 value: 19.580000000000002 - type: precision_at_10 value: 5.736 - type: precision_at_100 value: 1.005 - type: precision_at_1000 value: 0.145 - type: precision_at_3 value: 12.033000000000001 - type: precision_at_5 value: 8.871 - type: recall_at_1 value: 16.07 - type: recall_at_10 value: 42.364000000000004 - type: recall_at_100 value: 68.01899999999999 - type: recall_at_1000 value: 88.122 - type: recall_at_3 value: 27.846 - type: recall_at_5 value: 33.638 - task: type: Retrieval dataset: name: MTEB CQADupstackUnixRetrieval type: mteb/cqadupstack-unix config: default split: test revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53 metrics: - type: map_at_1 value: 26.365 - type: map_at_10 value: 36.591 - type: map_at_100 value: 37.730000000000004 - type: map_at_1000 value: 37.84 - type: map_at_3 value: 33.403 - type: map_at_5 value: 35.272999999999996 - type: mrr_at_1 value: 30.503999999999998 - type: mrr_at_10 value: 39.940999999999995 - 
type: mrr_at_100 value: 40.818 - type: mrr_at_1000 value: 40.876000000000005 - type: mrr_at_3 value: 37.065 - type: mrr_at_5 value: 38.814 - type: ndcg_at_1 value: 30.503999999999998 - type: ndcg_at_10 value: 42.185 - type: ndcg_at_100 value: 47.416000000000004 - type: ndcg_at_1000 value: 49.705 - type: ndcg_at_3 value: 36.568 - type: ndcg_at_5 value: 39.416000000000004 - type: precision_at_1 value: 30.503999999999998 - type: precision_at_10 value: 7.276000000000001 - type: precision_at_100 value: 1.118 - type: precision_at_1000 value: 0.14300000000000002 - type: precision_at_3 value: 16.729 - type: precision_at_5 value: 12.107999999999999 - type: recall_at_1 value: 26.365 - type: recall_at_10 value: 55.616 - type: recall_at_100 value: 78.129 - type: recall_at_1000 value: 93.95599999999999 - type: recall_at_3 value: 40.686 - type: recall_at_5 value: 47.668 - task: type: Retrieval dataset: name: MTEB CQADupstackWebmastersRetrieval type: mteb/cqadupstack-webmasters config: default split: test revision: 160c094312a0e1facb97e55eeddb698c0abe3571 metrics: - type: map_at_1 value: 22.750999999999998 - type: map_at_10 value: 33.446 - type: map_at_100 value: 35.235 - type: map_at_1000 value: 35.478 - type: map_at_3 value: 29.358 - type: map_at_5 value: 31.525 - type: mrr_at_1 value: 27.668 - type: mrr_at_10 value: 37.694 - type: mrr_at_100 value: 38.732 - type: mrr_at_1000 value: 38.779 - type: mrr_at_3 value: 34.223 - type: mrr_at_5 value: 36.08 - type: ndcg_at_1 value: 27.668 - type: ndcg_at_10 value: 40.557 - type: ndcg_at_100 value: 46.605999999999995 - type: ndcg_at_1000 value: 48.917 - type: ndcg_at_3 value: 33.677 - type: ndcg_at_5 value: 36.85 - type: precision_at_1 value: 27.668 - type: precision_at_10 value: 8.3 - type: precision_at_100 value: 1.6260000000000001 - type: precision_at_1000 value: 0.253 - type: precision_at_3 value: 16.008 - type: precision_at_5 value: 12.292 - type: recall_at_1 value: 22.750999999999998 - type: recall_at_10 value: 55.643 - type: 
recall_at_100 value: 82.151 - type: recall_at_1000 value: 95.963 - type: recall_at_3 value: 36.623 - type: recall_at_5 value: 44.708 - task: type: Retrieval dataset: name: MTEB CQADupstackWordpressRetrieval type: mteb/cqadupstack-wordpress config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: map_at_1 value: 17.288999999999998 - type: map_at_10 value: 25.903 - type: map_at_100 value: 27.071 - type: map_at_1000 value: 27.173000000000002 - type: map_at_3 value: 22.935 - type: map_at_5 value: 24.573 - type: mrr_at_1 value: 18.669 - type: mrr_at_10 value: 27.682000000000002 - type: mrr_at_100 value: 28.691 - type: mrr_at_1000 value: 28.761 - type: mrr_at_3 value: 24.738 - type: mrr_at_5 value: 26.392 - type: ndcg_at_1 value: 18.669 - type: ndcg_at_10 value: 31.335 - type: ndcg_at_100 value: 36.913000000000004 - type: ndcg_at_1000 value: 39.300000000000004 - type: ndcg_at_3 value: 25.423000000000002 - type: ndcg_at_5 value: 28.262999999999998 - type: precision_at_1 value: 18.669 - type: precision_at_10 value: 5.379 - type: precision_at_100 value: 0.876 - type: precision_at_1000 value: 0.11900000000000001 - type: precision_at_3 value: 11.214 - type: precision_at_5 value: 8.466 - type: recall_at_1 value: 17.288999999999998 - type: recall_at_10 value: 46.377 - type: recall_at_100 value: 71.53500000000001 - type: recall_at_1000 value: 88.947 - type: recall_at_3 value: 30.581999999999997 - type: recall_at_5 value: 37.354 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: mteb/climate-fever config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: map_at_1 value: 21.795 - type: map_at_10 value: 37.614999999999995 - type: map_at_100 value: 40.037 - type: map_at_1000 value: 40.184999999999995 - type: map_at_3 value: 32.221 - type: map_at_5 value: 35.154999999999994 - type: mrr_at_1 value: 50.358000000000004 - type: mrr_at_10 value: 62.129 - type: mrr_at_100 value: 62.613 - type: 
mrr_at_1000 value: 62.62 - type: mrr_at_3 value: 59.272999999999996 - type: mrr_at_5 value: 61.138999999999996 - type: ndcg_at_1 value: 50.358000000000004 - type: ndcg_at_10 value: 48.362 - type: ndcg_at_100 value: 55.932 - type: ndcg_at_1000 value: 58.062999999999995 - type: ndcg_at_3 value: 42.111 - type: ndcg_at_5 value: 44.063 - type: precision_at_1 value: 50.358000000000004 - type: precision_at_10 value: 14.677999999999999 - type: precision_at_100 value: 2.2950000000000004 - type: precision_at_1000 value: 0.271 - type: precision_at_3 value: 31.77 - type: precision_at_5 value: 23.375 - type: recall_at_1 value: 21.795 - type: recall_at_10 value: 53.846000000000004 - type: recall_at_100 value: 78.952 - type: recall_at_1000 value: 90.41900000000001 - type: recall_at_3 value: 37.257 - type: recall_at_5 value: 44.661 - task: type: Retrieval dataset: name: MTEB DBPedia type: mteb/dbpedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: map_at_1 value: 9.728 - type: map_at_10 value: 22.691 - type: map_at_100 value: 31.734 - type: map_at_1000 value: 33.464 - type: map_at_3 value: 16.273 - type: map_at_5 value: 19.016 - type: mrr_at_1 value: 73.25 - type: mrr_at_10 value: 80.782 - type: mrr_at_100 value: 81.01899999999999 - type: mrr_at_1000 value: 81.021 - type: mrr_at_3 value: 79.583 - type: mrr_at_5 value: 80.146 - type: ndcg_at_1 value: 59.62499999999999 - type: ndcg_at_10 value: 46.304 - type: ndcg_at_100 value: 51.23 - type: ndcg_at_1000 value: 58.048 - type: ndcg_at_3 value: 51.541000000000004 - type: ndcg_at_5 value: 48.635 - type: precision_at_1 value: 73.25 - type: precision_at_10 value: 36.375 - type: precision_at_100 value: 11.53 - type: precision_at_1000 value: 2.23 - type: precision_at_3 value: 55.583000000000006 - type: precision_at_5 value: 47.15 - type: recall_at_1 value: 9.728 - type: recall_at_10 value: 28.793999999999997 - type: recall_at_100 value: 57.885 - type: recall_at_1000 value: 78.759 - type: 
recall_at_3 value: 17.79 - type: recall_at_5 value: 21.733 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 46.775 - type: f1 value: 41.89794273264891 - task: type: Retrieval dataset: name: MTEB FEVER type: mteb/fever config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: map_at_1 value: 85.378 - type: map_at_10 value: 91.51 - type: map_at_100 value: 91.666 - type: map_at_1000 value: 91.676 - type: map_at_3 value: 90.757 - type: map_at_5 value: 91.277 - type: mrr_at_1 value: 91.839 - type: mrr_at_10 value: 95.49 - type: mrr_at_100 value: 95.493 - type: mrr_at_1000 value: 95.493 - type: mrr_at_3 value: 95.345 - type: mrr_at_5 value: 95.47200000000001 - type: ndcg_at_1 value: 91.839 - type: ndcg_at_10 value: 93.806 - type: ndcg_at_100 value: 94.255 - type: ndcg_at_1000 value: 94.399 - type: ndcg_at_3 value: 93.027 - type: ndcg_at_5 value: 93.51 - type: precision_at_1 value: 91.839 - type: precision_at_10 value: 10.93 - type: precision_at_100 value: 1.1400000000000001 - type: precision_at_1000 value: 0.117 - type: precision_at_3 value: 34.873 - type: precision_at_5 value: 21.44 - type: recall_at_1 value: 85.378 - type: recall_at_10 value: 96.814 - type: recall_at_100 value: 98.386 - type: recall_at_1000 value: 99.21600000000001 - type: recall_at_3 value: 94.643 - type: recall_at_5 value: 95.976 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: mteb/fiqa config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: map_at_1 value: 32.190000000000005 - type: map_at_10 value: 53.605000000000004 - type: map_at_100 value: 55.550999999999995 - type: map_at_1000 value: 55.665 - type: map_at_3 value: 46.62 - type: map_at_5 value: 50.517999999999994 - type: mrr_at_1 value: 60.34 - type: mrr_at_10 value: 70.775 - type: mrr_at_100 value: 71.238 - 
type: mrr_at_1000 value: 71.244 - type: mrr_at_3 value: 68.72399999999999 - type: mrr_at_5 value: 69.959 - type: ndcg_at_1 value: 60.34 - type: ndcg_at_10 value: 63.226000000000006 - type: ndcg_at_100 value: 68.60300000000001 - type: ndcg_at_1000 value: 69.901 - type: ndcg_at_3 value: 58.048 - type: ndcg_at_5 value: 59.789 - type: precision_at_1 value: 60.34 - type: precision_at_10 value: 17.130000000000003 - type: precision_at_100 value: 2.29 - type: precision_at_1000 value: 0.256 - type: precision_at_3 value: 38.323 - type: precision_at_5 value: 27.87 - type: recall_at_1 value: 32.190000000000005 - type: recall_at_10 value: 73.041 - type: recall_at_100 value: 91.31 - type: recall_at_1000 value: 98.104 - type: recall_at_3 value: 53.70399999999999 - type: recall_at_5 value: 62.358999999999995 - task: type: Retrieval dataset: name: MTEB HotpotQA type: mteb/hotpotqa config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: map_at_1 value: 43.511 - type: map_at_10 value: 58.15 - type: map_at_100 value: 58.95399999999999 - type: map_at_1000 value: 59.018 - type: map_at_3 value: 55.31700000000001 - type: map_at_5 value: 57.04900000000001 - type: mrr_at_1 value: 87.022 - type: mrr_at_10 value: 91.32000000000001 - type: mrr_at_100 value: 91.401 - type: mrr_at_1000 value: 91.403 - type: mrr_at_3 value: 90.77 - type: mrr_at_5 value: 91.156 - type: ndcg_at_1 value: 87.022 - type: ndcg_at_10 value: 68.183 - type: ndcg_at_100 value: 70.781 - type: ndcg_at_1000 value: 72.009 - type: ndcg_at_3 value: 64.334 - type: ndcg_at_5 value: 66.449 - type: precision_at_1 value: 87.022 - type: precision_at_10 value: 13.406 - type: precision_at_100 value: 1.542 - type: precision_at_1000 value: 0.17099999999999999 - type: precision_at_3 value: 39.023 - type: precision_at_5 value: 25.080000000000002 - type: recall_at_1 value: 43.511 - type: recall_at_10 value: 67.02900000000001 - type: recall_at_100 value: 77.11 - type: recall_at_1000 value: 85.294 - 
type: recall_at_3 value: 58.535000000000004 - type: recall_at_5 value: 62.70099999999999 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 92.0996 - type: ap value: 87.86206089096373 - type: f1 value: 92.07554547510763 - task: type: Retrieval dataset: name: MTEB MSMARCO type: mteb/msmarco config: default split: dev revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: map_at_1 value: 23.179 - type: map_at_10 value: 35.86 - type: map_at_100 value: 37.025999999999996 - type: map_at_1000 value: 37.068 - type: map_at_3 value: 31.921 - type: map_at_5 value: 34.172000000000004 - type: mrr_at_1 value: 23.926 - type: mrr_at_10 value: 36.525999999999996 - type: mrr_at_100 value: 37.627 - type: mrr_at_1000 value: 37.665 - type: mrr_at_3 value: 32.653 - type: mrr_at_5 value: 34.897 - type: ndcg_at_1 value: 23.910999999999998 - type: ndcg_at_10 value: 42.927 - type: ndcg_at_100 value: 48.464 - type: ndcg_at_1000 value: 49.533 - type: ndcg_at_3 value: 34.910000000000004 - type: ndcg_at_5 value: 38.937 - type: precision_at_1 value: 23.910999999999998 - type: precision_at_10 value: 6.758 - type: precision_at_100 value: 0.9520000000000001 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.838000000000001 - type: precision_at_5 value: 10.934000000000001 - type: recall_at_1 value: 23.179 - type: recall_at_10 value: 64.622 - type: recall_at_100 value: 90.135 - type: recall_at_1000 value: 98.301 - type: recall_at_3 value: 42.836999999999996 - type: recall_at_5 value: 52.512 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 96.59598723210215 - type: f1 value: 96.41913500001952 - task: type: Classification dataset: name: MTEB MTOPIntentClassification 
(en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 82.89557683538533 - type: f1 value: 63.379319722356264 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 78.93745796906524 - type: f1 value: 75.71616541785902 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 81.41223940820443 - type: f1 value: 81.2877893719078 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 35.03682528325662 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 32.942529406124 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 31.459949660460317 - type: mrr value: 32.70509582031616 - task: type: Retrieval dataset: name: MTEB NFCorpus type: mteb/nfcorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: map_at_1 value: 6.497 - type: map_at_10 value: 13.843 - type: map_at_100 value: 17.713 - type: map_at_1000 value: 19.241 - type: map_at_3 value: 10.096 - type: map_at_5 value: 11.85 - type: mrr_at_1 value: 48.916 - type: mrr_at_10 value: 57.764 - type: mrr_at_100 value: 58.251 - type: mrr_at_1000 value: 58.282999999999994 - type: mrr_at_3 value: 55.623999999999995 - type: 
mrr_at_5 value: 57.018 - type: ndcg_at_1 value: 46.594 - type: ndcg_at_10 value: 36.945 - type: ndcg_at_100 value: 34.06 - type: ndcg_at_1000 value: 43.05 - type: ndcg_at_3 value: 41.738 - type: ndcg_at_5 value: 39.330999999999996 - type: precision_at_1 value: 48.916 - type: precision_at_10 value: 27.43 - type: precision_at_100 value: 8.616 - type: precision_at_1000 value: 2.155 - type: precision_at_3 value: 39.112 - type: precision_at_5 value: 33.808 - type: recall_at_1 value: 6.497 - type: recall_at_10 value: 18.163 - type: recall_at_100 value: 34.566 - type: recall_at_1000 value: 67.15 - type: recall_at_3 value: 11.100999999999999 - type: recall_at_5 value: 14.205000000000002 - task: type: Retrieval dataset: name: MTEB NQ type: mteb/nq config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: map_at_1 value: 31.916 - type: map_at_10 value: 48.123 - type: map_at_100 value: 49.103 - type: map_at_1000 value: 49.131 - type: map_at_3 value: 43.711 - type: map_at_5 value: 46.323 - type: mrr_at_1 value: 36.181999999999995 - type: mrr_at_10 value: 50.617999999999995 - type: mrr_at_100 value: 51.329 - type: mrr_at_1000 value: 51.348000000000006 - type: mrr_at_3 value: 47.010999999999996 - type: mrr_at_5 value: 49.175000000000004 - type: ndcg_at_1 value: 36.181999999999995 - type: ndcg_at_10 value: 56.077999999999996 - type: ndcg_at_100 value: 60.037 - type: ndcg_at_1000 value: 60.63499999999999 - type: ndcg_at_3 value: 47.859 - type: ndcg_at_5 value: 52.178999999999995 - type: precision_at_1 value: 36.181999999999995 - type: precision_at_10 value: 9.284 - type: precision_at_100 value: 1.149 - type: precision_at_1000 value: 0.121 - type: precision_at_3 value: 22.006999999999998 - type: precision_at_5 value: 15.695 - type: recall_at_1 value: 31.916 - type: recall_at_10 value: 77.771 - type: recall_at_100 value: 94.602 - type: recall_at_1000 value: 98.967 - type: recall_at_3 value: 56.528 - type: recall_at_5 value: 66.527 - task: type: 
Retrieval dataset: name: MTEB QuoraRetrieval type: mteb/quora config: default split: test revision: None metrics: - type: map_at_1 value: 71.486 - type: map_at_10 value: 85.978 - type: map_at_100 value: 86.587 - type: map_at_1000 value: 86.598 - type: map_at_3 value: 83.04899999999999 - type: map_at_5 value: 84.857 - type: mrr_at_1 value: 82.32000000000001 - type: mrr_at_10 value: 88.64 - type: mrr_at_100 value: 88.702 - type: mrr_at_1000 value: 88.702 - type: mrr_at_3 value: 87.735 - type: mrr_at_5 value: 88.36 - type: ndcg_at_1 value: 82.34 - type: ndcg_at_10 value: 89.67 - type: ndcg_at_100 value: 90.642 - type: ndcg_at_1000 value: 90.688 - type: ndcg_at_3 value: 86.932 - type: ndcg_at_5 value: 88.408 - type: precision_at_1 value: 82.34 - type: precision_at_10 value: 13.675999999999998 - type: precision_at_100 value: 1.544 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 38.24 - type: precision_at_5 value: 25.068 - type: recall_at_1 value: 71.486 - type: recall_at_10 value: 96.844 - type: recall_at_100 value: 99.843 - type: recall_at_1000 value: 99.996 - type: recall_at_3 value: 88.92099999999999 - type: recall_at_5 value: 93.215 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 59.75758437908334 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 68.03497914092789 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: mteb/scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 5.808 - type: map_at_10 value: 16.059 - type: map_at_100 value: 19.048000000000002 - type: map_at_1000 value: 19.43 - type: map_at_3 value: 10.953 - type: map_at_5 value: 13.363 - type: mrr_at_1 value: 28.7 - type: mrr_at_10 
value: 42.436 - type: mrr_at_100 value: 43.599 - type: mrr_at_1000 value: 43.62 - type: mrr_at_3 value: 38.45 - type: mrr_at_5 value: 40.89 - type: ndcg_at_1 value: 28.7 - type: ndcg_at_10 value: 26.346000000000004 - type: ndcg_at_100 value: 36.758 - type: ndcg_at_1000 value: 42.113 - type: ndcg_at_3 value: 24.254 - type: ndcg_at_5 value: 21.506 - type: precision_at_1 value: 28.7 - type: precision_at_10 value: 13.969999999999999 - type: precision_at_100 value: 2.881 - type: precision_at_1000 value: 0.414 - type: precision_at_3 value: 22.933 - type: precision_at_5 value: 19.220000000000002 - type: recall_at_1 value: 5.808 - type: recall_at_10 value: 28.310000000000002 - type: recall_at_100 value: 58.475 - type: recall_at_1000 value: 84.072 - type: recall_at_3 value: 13.957 - type: recall_at_5 value: 19.515 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 82.39274129958557 - type: cos_sim_spearman value: 79.78021235170053 - type: euclidean_pearson value: 79.35335401300166 - type: euclidean_spearman value: 79.7271870968275 - type: manhattan_pearson value: 79.35256263340601 - type: manhattan_spearman value: 79.76036386976321 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 83.99130429246708 - type: cos_sim_spearman value: 73.88322811171203 - type: euclidean_pearson value: 80.7569419170376 - type: euclidean_spearman value: 73.82542155409597 - type: manhattan_pearson value: 80.79468183847625 - type: manhattan_spearman value: 73.87027144047784 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 84.88548789489907 - type: cos_sim_spearman value: 85.07535893847255 - type: 
euclidean_pearson value: 84.6637222061494 - type: euclidean_spearman value: 85.14200626702456 - type: manhattan_pearson value: 84.75327892344734 - type: manhattan_spearman value: 85.24406181838596 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 82.88140039325008 - type: cos_sim_spearman value: 79.61211268112362 - type: euclidean_pearson value: 81.29639728816458 - type: euclidean_spearman value: 79.51284578041442 - type: manhattan_pearson value: 81.3381797137111 - type: manhattan_spearman value: 79.55683684039808 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 85.16716737270485 - type: cos_sim_spearman value: 86.14823841857738 - type: euclidean_pearson value: 85.36325733440725 - type: euclidean_spearman value: 86.04919691402029 - type: manhattan_pearson value: 85.3147511385052 - type: manhattan_spearman value: 86.00676205857764 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 80.34266645861588 - type: cos_sim_spearman value: 81.59914035005882 - type: euclidean_pearson value: 81.15053076245988 - type: euclidean_spearman value: 81.52776915798489 - type: manhattan_pearson value: 81.1819647418673 - type: manhattan_spearman value: 81.57479527353556 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 89.38263326821439 - type: cos_sim_spearman value: 89.10946308202642 - type: euclidean_pearson value: 88.87831312540068 - type: euclidean_spearman value: 89.03615865973664 - type: manhattan_pearson value: 88.79835539970384 - 
type: manhattan_spearman value: 88.9766156339753 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 70.1574915581685 - type: cos_sim_spearman value: 70.59144980004054 - type: euclidean_pearson value: 71.43246306918755 - type: euclidean_spearman value: 70.5544189562984 - type: manhattan_pearson value: 71.4071414609503 - type: manhattan_spearman value: 70.31799126163712 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 83.36215796635351 - type: cos_sim_spearman value: 83.07276756467208 - type: euclidean_pearson value: 83.06690453635584 - type: euclidean_spearman value: 82.9635366303289 - type: manhattan_pearson value: 83.04994049700815 - type: manhattan_spearman value: 82.98120125356036 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 86.92530011616722 - type: mrr value: 96.21826793395421 - task: type: Retrieval dataset: name: MTEB SciFact type: mteb/scifact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: map_at_1 value: 65.75 - type: map_at_10 value: 77.701 - type: map_at_100 value: 78.005 - type: map_at_1000 value: 78.006 - type: map_at_3 value: 75.48 - type: map_at_5 value: 76.927 - type: mrr_at_1 value: 68.333 - type: mrr_at_10 value: 78.511 - type: mrr_at_100 value: 78.704 - type: mrr_at_1000 value: 78.704 - type: mrr_at_3 value: 77 - type: mrr_at_5 value: 78.083 - type: ndcg_at_1 value: 68.333 - type: ndcg_at_10 value: 82.42699999999999 - type: ndcg_at_100 value: 83.486 - type: ndcg_at_1000 value: 83.511 - type: ndcg_at_3 value: 78.96300000000001 - type: ndcg_at_5 value: 81.028 - type: 
precision_at_1 value: 68.333 - type: precision_at_10 value: 10.667 - type: precision_at_100 value: 1.127 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 31.333 - type: precision_at_5 value: 20.133000000000003 - type: recall_at_1 value: 65.75 - type: recall_at_10 value: 95.578 - type: recall_at_100 value: 99.833 - type: recall_at_1000 value: 100 - type: recall_at_3 value: 86.506 - type: recall_at_5 value: 91.75 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.75247524752476 - type: cos_sim_ap value: 94.16065078045173 - type: cos_sim_f1 value: 87.22986247544205 - type: cos_sim_precision value: 85.71428571428571 - type: cos_sim_recall value: 88.8 - type: dot_accuracy value: 99.74554455445545 - type: dot_ap value: 93.90633887037264 - type: dot_f1 value: 86.9873417721519 - type: dot_precision value: 88.1025641025641 - type: dot_recall value: 85.9 - type: euclidean_accuracy value: 99.75247524752476 - type: euclidean_ap value: 94.17466319018055 - type: euclidean_f1 value: 87.3405299313052 - type: euclidean_precision value: 85.74181117533719 - type: euclidean_recall value: 89 - type: manhattan_accuracy value: 99.75445544554455 - type: manhattan_ap value: 94.27688371923577 - type: manhattan_f1 value: 87.74002954209749 - type: manhattan_precision value: 86.42095053346266 - type: manhattan_recall value: 89.1 - type: max_accuracy value: 99.75445544554455 - type: max_ap value: 94.27688371923577 - type: max_f1 value: 87.74002954209749 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 71.26500637517056 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P 
type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 39.17507906280528 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 52.4848744828509 - type: mrr value: 53.33678168236992 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.599864323827887 - type: cos_sim_spearman value: 30.91116204665598 - type: dot_pearson value: 30.82637894269936 - type: dot_spearman value: 30.957573868416066 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: mteb/trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.23600000000000002 - type: map_at_10 value: 1.892 - type: map_at_100 value: 11.586 - type: map_at_1000 value: 27.761999999999997 - type: map_at_3 value: 0.653 - type: map_at_5 value: 1.028 - type: mrr_at_1 value: 88 - type: mrr_at_10 value: 94 - type: mrr_at_100 value: 94 - type: mrr_at_1000 value: 94 - type: mrr_at_3 value: 94 - type: mrr_at_5 value: 94 - type: ndcg_at_1 value: 82 - type: ndcg_at_10 value: 77.48899999999999 - type: ndcg_at_100 value: 60.141 - type: ndcg_at_1000 value: 54.228 - type: ndcg_at_3 value: 82.358 - type: ndcg_at_5 value: 80.449 - type: precision_at_1 value: 88 - type: precision_at_10 value: 82.19999999999999 - type: precision_at_100 value: 61.760000000000005 - type: precision_at_1000 value: 23.684 - type: precision_at_3 value: 88 - type: precision_at_5 value: 85.6 - type: recall_at_1 value: 0.23600000000000002 - type: recall_at_10 value: 2.117 - type: recall_at_100 value: 14.985000000000001 - type: recall_at_1000 value: 51.107 - type: recall_at_3 value: 0.688 - type: recall_at_5 value: 
1.1039999999999999 - task: type: Retrieval dataset: name: MTEB Touche2020 type: mteb/touche2020 config: default split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: map_at_1 value: 2.3040000000000003 - type: map_at_10 value: 9.025 - type: map_at_100 value: 15.312999999999999 - type: map_at_1000 value: 16.954 - type: map_at_3 value: 4.981 - type: map_at_5 value: 6.32 - type: mrr_at_1 value: 24.490000000000002 - type: mrr_at_10 value: 39.835 - type: mrr_at_100 value: 40.8 - type: mrr_at_1000 value: 40.8 - type: mrr_at_3 value: 35.034 - type: mrr_at_5 value: 37.687 - type: ndcg_at_1 value: 22.448999999999998 - type: ndcg_at_10 value: 22.545 - type: ndcg_at_100 value: 35.931999999999995 - type: ndcg_at_1000 value: 47.665 - type: ndcg_at_3 value: 23.311 - type: ndcg_at_5 value: 22.421 - type: precision_at_1 value: 24.490000000000002 - type: precision_at_10 value: 20.408 - type: precision_at_100 value: 7.815999999999999 - type: precision_at_1000 value: 1.553 - type: precision_at_3 value: 25.169999999999998 - type: precision_at_5 value: 23.265 - type: recall_at_1 value: 2.3040000000000003 - type: recall_at_10 value: 15.693999999999999 - type: recall_at_100 value: 48.917 - type: recall_at_1000 value: 84.964 - type: recall_at_3 value: 6.026 - type: recall_at_5 value: 9.066 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 82.6074 - type: ap value: 23.187467098602013 - type: f1 value: 65.36829506379657 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 63.16355404640635 - type: f1 value: 63.534725639863346 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: 
mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 50.91004094411276 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 86.55301901412649 - type: cos_sim_ap value: 75.25312618556728 - type: cos_sim_f1 value: 68.76561719140429 - type: cos_sim_precision value: 65.3061224489796 - type: cos_sim_recall value: 72.61213720316623 - type: dot_accuracy value: 86.29671574178936 - type: dot_ap value: 75.11910195501207 - type: dot_f1 value: 68.44048376830045 - type: dot_precision value: 66.12546125461255 - type: dot_recall value: 70.92348284960423 - type: euclidean_accuracy value: 86.5828217202122 - type: euclidean_ap value: 75.22986344900924 - type: euclidean_f1 value: 68.81267797449549 - type: euclidean_precision value: 64.8238861674831 - type: euclidean_recall value: 73.3245382585752 - type: manhattan_accuracy value: 86.61262442629791 - type: manhattan_ap value: 75.24401608557328 - type: manhattan_f1 value: 68.80473982483257 - type: manhattan_precision value: 67.21187720181177 - type: manhattan_recall value: 70.47493403693932 - type: max_accuracy value: 86.61262442629791 - type: max_ap value: 75.25312618556728 - type: max_f1 value: 68.81267797449549 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.10688089416696 - type: cos_sim_ap value: 84.17862178779863 - type: cos_sim_f1 value: 76.17305208781748 - type: cos_sim_precision value: 71.31246641590543 - type: cos_sim_recall value: 81.74468740375731 - type: dot_accuracy value: 88.1844995536927 - type: dot_ap value: 84.33816725235876 - type: dot_f1 value: 
76.43554032918746 - type: dot_precision value: 74.01557767200346 - type: dot_recall value: 79.0190945488143 - type: euclidean_accuracy value: 88.07001203089223 - type: euclidean_ap value: 84.12267000814985 - type: euclidean_f1 value: 76.12232600180778 - type: euclidean_precision value: 74.50604541433205 - type: euclidean_recall value: 77.81028641823221 - type: manhattan_accuracy value: 88.06419063142779 - type: manhattan_ap value: 84.11648917164187 - type: manhattan_f1 value: 76.20579953925474 - type: manhattan_precision value: 72.56772755762935 - type: manhattan_recall value: 80.22790267939637 - type: max_accuracy value: 88.1844995536927 - type: max_ap value: 84.33816725235876 - type: max_f1 value: 76.43554032918746 --- <!-- **English** | [中文](./README_zh.md) --> # gte-large-en-v1.5 We introduce the `gte-v1.5` series, upgraded `gte` embeddings that support the context length of up to **8192**, while further enhancing model performance. The models are built upon the `transformer++` encoder [backbone](https://huggingface.co/Alibaba-NLP/new-impl) (BERT + RoPE + GLU). The `gte-v1.5` series achieve state-of-the-art scores on the MTEB benchmark within the same model size category and provide competitive performance on the LoCo long-context retrieval tests (refer to [Evaluation](#evaluation)). We also present the [`gte-Qwen1.5-7B-instruct`](https://huggingface.co/Alibaba-NLP/gte-Qwen1.5-7B-instruct), a SOTA instruction-tuned multi-lingual embedding model that ranked 2nd in MTEB and 1st in C-MTEB. <!-- Provide a longer summary of what this model is. --> - **Developed by:** Institute for Intelligent Computing, Alibaba Group - **Model type:** Text Embeddings - **Paper:** [mGTE: Generalized Long-Context Text Representation and Reranking Models for Multilingual Text Retrieval](https://arxiv.org/pdf/2407.19669) <!-- - **Demo [optional]:** [More Information Needed] --> ### Model list | Models | Language | Model Size | Max Seq.
Length | Dimension | MTEB-en | LoCo | |:-----: | :-----: |:-----: |:-----: |:-----: | :-----: | :-----: | |[`gte-Qwen1.5-7B-instruct`](https://huggingface.co/Alibaba-NLP/gte-Qwen1.5-7B-instruct)| Multiple | 7720 | 32768 | 4096 | 67.34 | 87.57 | |[`gte-large-en-v1.5`](https://huggingface.co/Alibaba-NLP/gte-large-en-v1.5) | English | 434 | 8192 | 1024 | 65.39 | 86.71 | |[`gte-base-en-v1.5`](https://huggingface.co/Alibaba-NLP/gte-base-en-v1.5) | English | 137 | 8192 | 768 | 64.11 | 87.44 | ## How to Get Started with the Model Use the code below to get started with the model. ```python # Requires transformers>=4.36.0 import torch.nn.functional as F from transformers import AutoModel, AutoTokenizer input_texts = [ "what is the capital of China?", "how to implement quick sort in python?", "Beijing", "sorting algorithms" ] model_path = 'Alibaba-NLP/gte-large-en-v1.5' tokenizer = AutoTokenizer.from_pretrained(model_path) model = AutoModel.from_pretrained(model_path, trust_remote_code=True) # Tokenize the input texts batch_dict = tokenizer(input_texts, max_length=8192, padding=True, truncation=True, return_tensors='pt') outputs = model(**batch_dict) embeddings = outputs.last_hidden_state[:, 0] # (Optionally) normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) scores = (embeddings[:1] @ embeddings[1:].T) * 100 print(scores.tolist()) ``` **It is recommended to install xformers and enable unpadding for acceleration, refer to [enable-unpadding-and-xformers](https://huggingface.co/Alibaba-NLP/new-impl#recommendation-enable-unpadding-and-acceleration-with-xformers).** Use with sentence-transformers: ```python # Requires sentence_transformers>=2.7.0 from sentence_transformers import SentenceTransformer from sentence_transformers.util import cos_sim sentences = ['That is a happy person', 'That is a very happy person'] model = SentenceTransformer('Alibaba-NLP/gte-large-en-v1.5', trust_remote_code=True) embeddings = model.encode(sentences) 
print(cos_sim(embeddings[0], embeddings[1])) ``` Use with `transformers.js`: ```js // npm i @xenova/transformers import { pipeline, dot } from '@xenova/transformers'; // Create feature extraction pipeline const extractor = await pipeline('feature-extraction', 'Alibaba-NLP/gte-large-en-v1.5', { quantized: false, // Comment out this line to use the quantized version }); // Generate sentence embeddings const sentences = [ "what is the capital of China?", "how to implement quick sort in python?", "Beijing", "sorting algorithms" ] const output = await extractor(sentences, { normalize: true, pooling: 'cls' }); // Compute similarity scores const [source_embeddings, ...document_embeddings ] = output.tolist(); const similarities = document_embeddings.map(x => 100 * dot(source_embeddings, x)); console.log(similarities); // [41.86354093370361, 77.07076371259589, 37.02981979677899] ``` ## Training Details ### Training Data - Masked language modeling (MLM): `c4-en` - Weak-supervised contrastive pre-training (CPT): [GTE](https://arxiv.org/pdf/2308.03281.pdf) pre-training data - Supervised contrastive fine-tuning: [GTE](https://arxiv.org/pdf/2308.03281.pdf) fine-tuning data ### Training Procedure To enable the backbone model to support a context length of 8192, we adopted a multi-stage training strategy. The model first undergoes preliminary MLM pre-training on shorter lengths. And then, we resample the data, reducing the proportion of short texts, and continue the MLM pre-training. 
The entire training process is as follows: - MLM-512: lr 2e-4, mlm_probability 0.3, batch_size 4096, num_steps 300000, rope_base 10000 - MLM-2048: lr 5e-5, mlm_probability 0.3, batch_size 4096, num_steps 30000, rope_base 10000 - [MLM-8192](https://huggingface.co/Alibaba-NLP/gte-en-mlm-large): lr 5e-5, mlm_probability 0.3, batch_size 1024, num_steps 30000, rope_base 160000 - CPT: max_len 512, lr 5e-5, batch_size 28672, num_steps 100000 - Fine-tuning: TODO ## Evaluation ### MTEB The results of other models are retrieved from [MTEB leaderboard](https://huggingface.co/spaces/mteb/leaderboard). The gte evaluation setting: `mteb==1.2.0, fp16 auto mix precision, max_length=8192`, and set ntk scaling factor to 2 (equivalent to rope_base * 2). | Model Name | Param Size (M) | Dimension | Sequence Length | Average (56) | Class. (12) | Clust. (11) | Pair Class. (3) | Reran. (4) | Retr. (15) | STS (10) | Summ. (1) | |:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | [**gte-large-en-v1.5**](https://huggingface.co/Alibaba-NLP/gte-large-en-v1.5) | 409 | 1024 | 8192 | **65.39** | 77.75 | 47.95 | 84.63 | 58.50 | 57.91 | 81.43 | 30.91 | | [mxbai-embed-large-v1](https://huggingface.co/mixedbread-ai/mxbai-embed-large-v1) | 335 | 1024 | 512 | 64.68 | 75.64 | 46.71 | 87.2 | 60.11 | 54.39 | 85 | 32.71 | | [multilingual-e5-large-instruct](https://huggingface.co/intfloat/multilingual-e5-large-instruct) | 560 | 1024 | 514 | 64.41 | 77.56 | 47.1 | 86.19 | 58.58 | 52.47 | 84.78 | 30.39 | | [bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5)| 335 | 1024 | 512 | 64.23 | 75.97 | 46.08 | 87.12 | 60.03 | 54.29 | 83.11 | 31.61 | | [**gte-base-en-v1.5**](https://huggingface.co/Alibaba-NLP/gte-base-en-v1.5) | 137 | 768 | 8192 | **64.11** | 77.17 | 46.82 | 85.33 | 57.66 | 54.09 | 81.97 | 31.17 | | [bge-base-en-v1.5](https://huggingface.co/BAAI/bge-base-en-v1.5)| 109 | 768 | 512 | 63.55 | 75.53 | 45.77 | 86.55 | 58.86 | 53.25 | 82.4 | 31.07 | ### LoCo | Model 
Name | Dimension | Sequence Length | Average (5) | QsmsumRetrieval | SummScreenRetrieval | QasperAbastractRetrieval | QasperTitleRetrieval | GovReportRetrieval | |:----:|:---:|:---:|:---:|:---:|:---:|:---:|:---:|:---:| | [gte-qwen1.5-7b](https://huggingface.co/Alibaba-NLP/gte-qwen1.5-7b) | 4096 | 32768 | 87.57 | 49.37 | 93.10 | 99.67 | 97.54 | 98.21 | | [gte-large-v1.5](https://huggingface.co/Alibaba-NLP/gte-large-v1.5) |1024 | 8192 | 86.71 | 44.55 | 92.61 | 99.82 | 97.81 | 98.74 | | [gte-base-v1.5](https://huggingface.co/Alibaba-NLP/gte-base-v1.5) | 768 | 8192 | 87.44 | 49.91 | 91.78 | 99.82 | 97.13 | 98.58 | ## Citation If you find our paper or models helpful, please consider citing them as follows: ``` @article{zhang2024mgte, title={mGTE: Generalized Long-Context Text Representation and Reranking Models for Multilingual Text Retrieval}, author={Zhang, Xin and Zhang, Yanzhao and Long, Dingkun and Xie, Wen and Dai, Ziqi and Tang, Jialong and Lin, Huan and Yang, Baosong and Xie, Pengjun and Huang, Fei and others}, journal={arXiv preprint arXiv:2407.19669}, year={2024} } @article{li2023towards, title={Towards general text embeddings with multi-stage contrastive learning}, author={Li, Zehan and Zhang, Xin and Zhang, Yanzhao and Long, Dingkun and Xie, Pengjun and Zhang, Meishan}, journal={arXiv preprint arXiv:2308.03281}, year={2023} } ```
[ "BIOSSES", "SCIFACT" ]