---
base_model: Alibaba-NLP/gte-Qwen2-7B-instruct
license: apache-2.0
tags:
- mteb
- sentence-transformers
- transformers
- Qwen2
- sentence-similarity
- llama-cpp
- gguf-my-repo
model-index:
- name: gte-qwen2-7B-instruct
  results:
  - task:
      type: Classification
    dataset:
      name: MTEB AmazonCounterfactualClassification (en)
      type: mteb/amazon_counterfactual
      config: en
      split: test
      revision: e8379541af4e31359cca9fbcf4b00f2671dba205
    metrics:
    - type: accuracy
      value: 91.31343283582089
    - type: ap
      value: 67.64251402604096
    - type: f1
      value: 87.53372530755692
  - task:
      type: Classification
    dataset:
      name: MTEB AmazonPolarityClassification
      type: mteb/amazon_polarity
      config: default
      split: test
      revision: e2d317d38cd51312af73b3d32a06d1a08b442046
    metrics:
    - type: accuracy
      value: 97.497825
    - type: ap
      value: 96.30329547047529
    - type: f1
      value: 97.49769793778039
  - task:
      type: Classification
    dataset:
      name: MTEB AmazonReviewsClassification (en)
      type: mteb/amazon_reviews_multi
      config: en
      split: test
      revision: 1399c76144fd37290681b995c656ef9b2e06e26d
    metrics:
    - type: accuracy
      value: 62.564
    - type: f1
      value: 60.975777935041066
  - task:
      type: Retrieval
    dataset:
      name: MTEB ArguAna
      type: mteb/arguana
      config: default
      split: test
      revision: c22ab2a51041ffd869aaddef7af8d8215647e41a
    metrics:
    - type: map_at_1
      value: 36.486000000000004
    - type: map_at_10
      value: 54.842
    - type: map_at_100
      value: 55.206999999999994
    - type: map_at_1000
      value: 55.206999999999994
    - type: map_at_3
      value: 49.893
    - type: map_at_5
      value:
53.105000000000004\n - type: mrr_at_1\n value: 37.34\n - type: mrr_at_10\n value: 55.143\n - type: mrr_at_100\n value: 55.509\n - type: mrr_at_1000\n value: 55.509\n - type: mrr_at_3\n value: 50.212999999999994\n - type: mrr_at_5\n value: 53.432\n - type: ndcg_at_1\n value: 36.486000000000004\n - type: ndcg_at_10\n value: 64.273\n - type: ndcg_at_100\n value: 65.66199999999999\n - type: ndcg_at_1000\n value: 65.66199999999999\n - type: ndcg_at_3\n value: 54.352999999999994\n - type: ndcg_at_5\n value: 60.131\n - type: precision_at_1\n value: 36.486000000000004\n - type: precision_at_10\n value: 9.395000000000001\n - type: precision_at_100\n value: 0.996\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_3\n value: 22.428\n - type: precision_at_5\n value: 16.259\n - type: recall_at_1\n value: 36.486000000000004\n - type: recall_at_10\n value: 93.95400000000001\n - type: recall_at_100\n value: 99.644\n - type: recall_at_1000\n value: 99.644\n - type: recall_at_3\n value: 67.283\n - type: recall_at_5\n value: 81.294\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: v_measure\n value: 56.461169803700564\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: v_measure\n value: 51.73600434466286\n - task:\n type: Reranking\n dataset:\n name: MTEB AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: map\n value: 67.57827065898053\n - type: mrr\n value: 79.08136569493911\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cos_sim_pearson\n value: 83.53324575999243\n - type: cos_sim_spearman\n value: 81.37173362822374\n - type: euclidean_pearson\n value: 82.19243335103444\n - type: euclidean_spearman\n value: 81.33679307304334\n - type: manhattan_pearson\n value: 82.38752665975699\n - type: manhattan_spearman\n value: 81.31510583189689\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 87.56818181818181\n - type: f1\n value: 87.25826722019875\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: v_measure\n value: 50.09239610327673\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: v_measure\n value: 46.64733054606282\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackAndroidRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: f46a197baaae43b4f621051089b82a364682dfeb\n metrics:\n - type: map_at_1\n value: 33.997\n - type: map_at_10\n value: 48.176\n - type: map_at_100\n value: 49.82\n - type: map_at_1000\n value: 49.924\n - type: map_at_3\n value: 43.626\n - type: map_at_5\n value: 46.275\n - 
type: mrr_at_1\n value: 42.059999999999995\n - type: mrr_at_10\n value: 53.726\n - type: mrr_at_100\n value: 54.398\n - type: mrr_at_1000\n value: 54.416\n - type: mrr_at_3\n value: 50.714999999999996\n - type: mrr_at_5\n value: 52.639\n - type: ndcg_at_1\n value: 42.059999999999995\n - type: ndcg_at_10\n value: 55.574999999999996\n - type: ndcg_at_100\n value: 60.744\n - type: ndcg_at_1000\n value: 61.85699999999999\n - type: ndcg_at_3\n value: 49.363\n - type: ndcg_at_5\n value: 52.44\n - type: precision_at_1\n value: 42.059999999999995\n - type: precision_at_10\n value: 11.101999999999999\n - type: precision_at_100\n value: 1.73\n - type: precision_at_1000\n value: 0.218\n - type: precision_at_3\n value: 24.464\n - type: precision_at_5\n value: 18.026\n - type: recall_at_1\n value: 33.997\n - type: recall_at_10\n value: 70.35900000000001\n - type: recall_at_100\n value: 91.642\n - type: recall_at_1000\n value: 97.977\n - type: recall_at_3\n value: 52.76\n - type: recall_at_5\n value: 61.148\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackEnglishRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: ad9991cb51e31e31e430383c75ffb2885547b5f0\n metrics:\n - type: map_at_1\n value: 35.884\n - type: map_at_10\n value: 48.14\n - type: map_at_100\n value: 49.5\n - type: map_at_1000\n value: 49.63\n - type: map_at_3\n value: 44.646\n - type: map_at_5\n value: 46.617999999999995\n - type: mrr_at_1\n value: 44.458999999999996\n - type: mrr_at_10\n value: 53.751000000000005\n - type: mrr_at_100\n value: 54.37800000000001\n - type: mrr_at_1000\n value: 54.415\n - type: mrr_at_3\n value: 51.815\n - type: mrr_at_5\n value: 52.882\n - type: ndcg_at_1\n value: 44.458999999999996\n - type: ndcg_at_10\n value: 54.157\n - type: ndcg_at_100\n value: 58.362\n - type: ndcg_at_1000\n value: 60.178\n - type: ndcg_at_3\n value: 49.661\n - type: ndcg_at_5\n value: 51.74999999999999\n - type: precision_at_1\n value: 44.458999999999996\n - type: precision_at_10\n value: 10.248\n - type: precision_at_100\n value: 1.5890000000000002\n - type: precision_at_1000\n value: 0.207\n - type: precision_at_3\n value: 23.928\n - type: precision_at_5\n value: 16.878999999999998\n - type: recall_at_1\n value: 35.884\n - type: recall_at_10\n value: 64.798\n - type: recall_at_100\n value: 82.345\n - type: recall_at_1000\n value: 93.267\n - type: recall_at_3\n value: 51.847\n - type: recall_at_5\n value: 57.601\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackGamingRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 4885aa143210c98657558c04aaf3dc47cfb54340\n metrics:\n - type: map_at_1\n value: 39.383\n - type: map_at_10\n value: 53.714\n - type: map_at_100\n value: 54.838\n - type: map_at_1000\n value: 54.87800000000001\n - type: map_at_3\n value: 50.114999999999995\n - type: map_at_5\n value: 52.153000000000006\n - type: mrr_at_1\n value: 45.016\n - type: mrr_at_10\n value: 56.732000000000006\n - type: mrr_at_100\n value: 57.411\n - type: mrr_at_1000\n value: 57.431\n - type: mrr_at_3\n value: 54.044000000000004\n - type: mrr_at_5\n value: 55.639\n - type: ndcg_at_1\n value: 45.016\n - type: ndcg_at_10\n value: 60.228\n - type: ndcg_at_100\n value: 64.277\n - type: ndcg_at_1000\n value: 65.07\n - type: ndcg_at_3\n value: 54.124\n - type: ndcg_at_5\n value: 57.147000000000006\n - type: precision_at_1\n value: 45.016\n - type: precision_at_10\n value: 9.937\n - type: precision_at_100\n value: 1.288\n - type: precision_at_1000\n value: 
0.13899999999999998\n - type: precision_at_3\n value: 24.471999999999998\n - type: precision_at_5\n value: 16.991\n - type: recall_at_1\n value: 39.383\n - type: recall_at_10\n value: 76.175\n - type: recall_at_100\n value: 93.02\n - type: recall_at_1000\n value: 98.60900000000001\n - type: recall_at_3\n value: 60.265\n - type: recall_at_5\n value: 67.46600000000001\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackGisRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 5003b3064772da1887988e05400cf3806fe491f2\n metrics:\n - type: map_at_1\n value: 27.426000000000002\n - type: map_at_10\n value: 37.397000000000006\n - type: map_at_100\n value: 38.61\n - type: map_at_1000\n value: 38.678000000000004\n - type: map_at_3\n value: 34.150999999999996\n - type: map_at_5\n value: 36.137\n - type: mrr_at_1\n value: 29.944\n - type: mrr_at_10\n value: 39.654\n - type: mrr_at_100\n value: 40.638000000000005\n - type: mrr_at_1000\n value: 40.691\n - type: mrr_at_3\n value: 36.817\n - type: mrr_at_5\n value: 38.524\n - type: ndcg_at_1\n value: 29.944\n - type: ndcg_at_10\n value: 43.094\n - type: ndcg_at_100\n value: 48.789\n - type: ndcg_at_1000\n value: 50.339999999999996\n - type: ndcg_at_3\n value: 36.984\n - type: ndcg_at_5\n value: 40.248\n - type: precision_at_1\n value: 29.944\n - type: precision_at_10\n value: 6.78\n - type: precision_at_100\n value: 1.024\n - type: precision_at_1000\n value: 0.11800000000000001\n - type: precision_at_3\n value: 15.895000000000001\n - type: precision_at_5\n value: 11.39\n - type: recall_at_1\n value: 27.426000000000002\n - type: recall_at_10\n value: 58.464000000000006\n - type: recall_at_100\n value: 84.193\n - type: recall_at_1000\n value: 95.52000000000001\n - type: recall_at_3\n value: 42.172\n - type: recall_at_5\n value: 50.101\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackMathematicaRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 90fceea13679c63fe563ded68f3b6f06e50061de\n metrics:\n - type: map_at_1\n value: 19.721\n - type: map_at_10\n value: 31.604\n - type: map_at_100\n value: 32.972\n - type: map_at_1000\n value: 33.077\n - type: map_at_3\n value: 27.218999999999998\n - type: map_at_5\n value: 29.53\n - type: mrr_at_1\n value: 25.0\n - type: mrr_at_10\n value: 35.843\n - type: mrr_at_100\n value: 36.785000000000004\n - type: mrr_at_1000\n value: 36.842000000000006\n - type: mrr_at_3\n value: 32.193\n - type: mrr_at_5\n value: 34.264\n - type: ndcg_at_1\n value: 25.0\n - type: ndcg_at_10\n value: 38.606\n - type: ndcg_at_100\n value: 44.272\n - type: ndcg_at_1000\n value: 46.527\n - type: ndcg_at_3\n value: 30.985000000000003\n - type: ndcg_at_5\n value: 34.43\n - type: precision_at_1\n value: 25.0\n - type: precision_at_10\n value: 7.811\n - type: precision_at_100\n value: 1.203\n - type: precision_at_1000\n value: 0.15\n - type: precision_at_3\n value: 15.423\n - type: precision_at_5\n value: 11.791\n - type: recall_at_1\n value: 19.721\n - type: recall_at_10\n value: 55.625\n - type: recall_at_100\n value: 79.34400000000001\n - type: recall_at_1000\n value: 95.208\n - type: recall_at_3\n value: 35.19\n - type: recall_at_5\n value: 43.626\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackPhysicsRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4\n metrics:\n - type: map_at_1\n value: 33.784\n - type: map_at_10\n value: 47.522\n - type: map_at_100\n value: 48.949999999999996\n - type: 
map_at_1000\n value: 49.038\n - type: map_at_3\n value: 43.284\n - type: map_at_5\n value: 45.629\n - type: mrr_at_1\n value: 41.482\n - type: mrr_at_10\n value: 52.830999999999996\n - type: mrr_at_100\n value: 53.559999999999995\n - type: mrr_at_1000\n value: 53.588\n - type: mrr_at_3\n value: 50.016000000000005\n - type: mrr_at_5\n value: 51.614000000000004\n - type: ndcg_at_1\n value: 41.482\n - type: ndcg_at_10\n value: 54.569\n - type: ndcg_at_100\n value: 59.675999999999995\n - type: ndcg_at_1000\n value: 60.989000000000004\n - type: ndcg_at_3\n value: 48.187000000000005\n - type: ndcg_at_5\n value: 51.183\n - type: precision_at_1\n value: 41.482\n - type: precision_at_10\n value: 10.221\n - type: precision_at_100\n value: 1.486\n - type: precision_at_1000\n value: 0.17500000000000002\n - type: precision_at_3\n value: 23.548\n - type: precision_at_5\n value: 16.805\n - type: recall_at_1\n value: 33.784\n - type: recall_at_10\n value: 69.798\n - type: recall_at_100\n value: 90.098\n - type: recall_at_1000\n value: 98.176\n - type: recall_at_3\n value: 52.127\n - type: recall_at_5\n value: 59.861\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackProgrammersRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 6184bc1440d2dbc7612be22b50686b8826d22b32\n metrics:\n - type: map_at_1\n value: 28.038999999999998\n - type: map_at_10\n value: 41.904\n - type: map_at_100\n value: 43.36\n - type: map_at_1000\n value: 43.453\n - type: map_at_3\n value: 37.785999999999994\n - type: map_at_5\n value: 40.105000000000004\n - type: mrr_at_1\n value: 35.046\n - type: mrr_at_10\n value: 46.926\n - type: mrr_at_100\n value: 47.815000000000005\n - type: mrr_at_1000\n value: 47.849000000000004\n - type: mrr_at_3\n value: 44.273\n - type: mrr_at_5\n value: 45.774\n - type: ndcg_at_1\n value: 35.046\n - type: ndcg_at_10\n value: 48.937000000000005\n - type: ndcg_at_100\n value: 54.544000000000004\n - type: ndcg_at_1000\n value: 56.069\n - type: ndcg_at_3\n value: 42.858000000000004\n - type: ndcg_at_5\n value: 45.644\n - type: precision_at_1\n value: 35.046\n - type: precision_at_10\n value: 9.452\n - type: precision_at_100\n value: 1.429\n - type: precision_at_1000\n value: 0.173\n - type: precision_at_3\n value: 21.346999999999998\n - type: precision_at_5\n value: 15.342\n - type: recall_at_1\n value: 28.038999999999998\n - type: recall_at_10\n value: 64.59700000000001\n - type: recall_at_100\n value: 87.735\n - type: recall_at_1000\n value: 97.41300000000001\n - type: recall_at_3\n value: 47.368\n - type: recall_at_5\n value: 54.93900000000001\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4\n metrics:\n - type: map_at_1\n value: 28.17291666666667\n - type: map_at_10\n value: 40.025749999999995\n - type: map_at_100\n value: 41.39208333333333\n - type: map_at_1000\n value: 41.499249999999996\n - type: map_at_3\n value: 36.347\n - type: map_at_5\n value: 38.41391666666667\n - type: mrr_at_1\n value: 33.65925\n - type: mrr_at_10\n value: 44.085499999999996\n - type: mrr_at_100\n value: 44.94116666666667\n - type: mrr_at_1000\n value: 44.9855\n - type: mrr_at_3\n value: 41.2815\n - type: mrr_at_5\n value: 42.91491666666666\n - type: ndcg_at_1\n value: 33.65925\n - type: ndcg_at_10\n value: 46.430833333333325\n - type: ndcg_at_100\n value: 51.761\n - type: ndcg_at_1000\n value: 53.50899999999999\n - type: ndcg_at_3\n value: 
40.45133333333333\n - type: ndcg_at_5\n value: 43.31483333333334\n - type: precision_at_1\n value: 33.65925\n - type: precision_at_10\n value: 8.4995\n - type: precision_at_100\n value: 1.3210000000000004\n - type: precision_at_1000\n value: 0.16591666666666666\n - type: precision_at_3\n value: 19.165083333333335\n - type: precision_at_5\n value: 13.81816666666667\n - type: recall_at_1\n value: 28.17291666666667\n - type: recall_at_10\n value: 61.12624999999999\n - type: recall_at_100\n value: 83.97266666666667\n - type: recall_at_1000\n value: 95.66550000000001\n - type: recall_at_3\n value: 44.661249999999995\n - type: recall_at_5\n value: 51.983333333333334\n - type: map_at_1\n value: 17.936\n - type: map_at_10\n value: 27.399\n - type: map_at_100\n value: 28.632\n - type: map_at_1000\n value: 28.738000000000003\n - type: map_at_3\n value: 24.456\n - type: map_at_5\n value: 26.06\n - type: mrr_at_1\n value: 19.224\n - type: mrr_at_10\n value: 28.998\n - type: mrr_at_100\n value: 30.11\n - type: mrr_at_1000\n value: 30.177\n - type: mrr_at_3\n value: 26.247999999999998\n - type: mrr_at_5\n value: 27.708\n - type: ndcg_at_1\n value: 19.224\n - type: ndcg_at_10\n value: 32.911\n - type: ndcg_at_100\n value: 38.873999999999995\n - type: ndcg_at_1000\n value: 41.277\n - type: ndcg_at_3\n value: 27.142\n - type: ndcg_at_5\n value: 29.755\n - type: precision_at_1\n value: 19.224\n - type: precision_at_10\n value: 5.6930000000000005\n - type: precision_at_100\n value: 0.9259999999999999\n - type: precision_at_1000\n value: 0.126\n - type: precision_at_3\n value: 12.138\n - type: precision_at_5\n value: 8.909\n - type: recall_at_1\n value: 17.936\n - type: recall_at_10\n value: 48.096\n - type: recall_at_100\n value: 75.389\n - type: recall_at_1000\n value: 92.803\n - type: recall_at_3\n value: 32.812999999999995\n - type: recall_at_5\n value: 38.851\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackStatsRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a\n metrics:\n - type: map_at_1\n value: 24.681\n - type: map_at_10\n value: 34.892\n - type: map_at_100\n value: 35.996\n - type: map_at_1000\n value: 36.083\n - type: map_at_3\n value: 31.491999999999997\n - type: map_at_5\n value: 33.632\n - type: mrr_at_1\n value: 28.528\n - type: mrr_at_10\n value: 37.694\n - type: mrr_at_100\n value: 38.613\n - type: mrr_at_1000\n value: 38.668\n - type: mrr_at_3\n value: 34.714\n - type: mrr_at_5\n value: 36.616\n - type: ndcg_at_1\n value: 28.528\n - type: ndcg_at_10\n value: 40.703\n - type: ndcg_at_100\n value: 45.993\n - type: ndcg_at_1000\n value: 47.847\n - type: ndcg_at_3\n value: 34.622\n - type: ndcg_at_5\n value: 38.035999999999994\n - type: precision_at_1\n value: 28.528\n - type: precision_at_10\n value: 6.902\n - type: precision_at_100\n value: 1.0370000000000001\n - type: precision_at_1000\n value: 0.126\n - type: precision_at_3\n value: 15.798000000000002\n - type: precision_at_5\n value: 11.655999999999999\n - type: recall_at_1\n value: 24.681\n - type: recall_at_10\n value: 55.81\n - type: recall_at_100\n value: 79.785\n - type: recall_at_1000\n value: 92.959\n - type: recall_at_3\n value: 39.074\n - type: recall_at_5\n value: 47.568\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackTexRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 46989137a86843e03a6195de44b09deda022eec7\n metrics:\n - type: map_at_1\n value: 18.627\n - type: map_at_10\n value: 27.872000000000003\n 
- type: map_at_100\n value: 29.237999999999996\n - type: map_at_1000\n value: 29.363\n - type: map_at_3\n value: 24.751\n - type: map_at_5\n value: 26.521\n - type: mrr_at_1\n value: 23.021\n - type: mrr_at_10\n value: 31.924000000000003\n - type: mrr_at_100\n value: 32.922000000000004\n - type: mrr_at_1000\n value: 32.988\n - type: mrr_at_3\n value: 29.192\n - type: mrr_at_5\n value: 30.798\n - type: ndcg_at_1\n value: 23.021\n - type: ndcg_at_10\n value: 33.535\n - type: ndcg_at_100\n value: 39.732\n - type: ndcg_at_1000\n value: 42.201\n - type: ndcg_at_3\n value: 28.153\n - type: ndcg_at_5\n value: 30.746000000000002\n - type: precision_at_1\n value: 23.021\n - type: precision_at_10\n value: 6.459\n - type: precision_at_100\n value: 1.1320000000000001\n - type: precision_at_1000\n value: 0.153\n - type: precision_at_3\n value: 13.719000000000001\n - type: precision_at_5\n value: 10.193000000000001\n - type: recall_at_1\n value: 18.627\n - type: recall_at_10\n value: 46.463\n - type: recall_at_100\n value: 74.226\n - type: recall_at_1000\n value: 91.28500000000001\n - type: recall_at_3\n value: 31.357000000000003\n - type: recall_at_5\n value: 38.067\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackUnixRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53\n metrics:\n - type: map_at_1\n value: 31.457\n - type: map_at_10\n value: 42.888\n - type: map_at_100\n value: 44.24\n - type: map_at_1000\n value: 44.327\n - type: map_at_3\n value: 39.588\n - type: map_at_5\n value: 41.423\n - type: mrr_at_1\n value: 37.126999999999995\n - type: mrr_at_10\n value: 47.083000000000006\n - type: mrr_at_100\n value: 47.997\n - type: mrr_at_1000\n value: 48.044\n - type: mrr_at_3\n value: 44.574000000000005\n - type: mrr_at_5\n value: 46.202\n - type: ndcg_at_1\n value: 37.126999999999995\n - type: ndcg_at_10\n value: 48.833\n - type: ndcg_at_100\n value: 54.327000000000005\n - type: ndcg_at_1000\n value: 56.011\n - type: ndcg_at_3\n value: 43.541999999999994\n - type: ndcg_at_5\n value: 46.127\n - type: precision_at_1\n value: 37.126999999999995\n - type: precision_at_10\n value: 8.376999999999999\n - type: precision_at_100\n value: 1.2309999999999999\n - type: precision_at_1000\n value: 0.146\n - type: precision_at_3\n value: 20.211000000000002\n - type: precision_at_5\n value: 14.16\n - type: recall_at_1\n value: 31.457\n - type: recall_at_10\n value: 62.369\n - type: recall_at_100\n value: 85.444\n - type: recall_at_1000\n value: 96.65599999999999\n - type: recall_at_3\n value: 47.961\n - type: recall_at_5\n value: 54.676\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackWebmastersRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: 160c094312a0e1facb97e55eeddb698c0abe3571\n metrics:\n - type: map_at_1\n value: 27.139999999999997\n - type: map_at_10\n value: 38.801\n - type: map_at_100\n value: 40.549\n - type: map_at_1000\n value: 40.802\n - type: map_at_3\n value: 35.05\n - type: map_at_5\n value: 36.884\n - type: mrr_at_1\n value: 33.004\n - type: mrr_at_10\n value: 43.864\n - type: mrr_at_100\n value: 44.667\n - type: mrr_at_1000\n value: 44.717\n - type: mrr_at_3\n value: 40.777\n - type: mrr_at_5\n value: 42.319\n - type: ndcg_at_1\n value: 33.004\n - type: ndcg_at_10\n value: 46.022\n - type: ndcg_at_100\n value: 51.542\n - type: ndcg_at_1000\n value: 53.742000000000004\n - type: ndcg_at_3\n value: 39.795\n - type: ndcg_at_5\n value: 42.272\n - type: precision_at_1\n value: 
33.004\n - type: precision_at_10\n value: 9.012\n - type: precision_at_100\n value: 1.7770000000000001\n - type: precision_at_1000\n value: 0.26\n - type: precision_at_3\n value: 19.038\n - type: precision_at_5\n value: 13.675999999999998\n - type: recall_at_1\n value: 27.139999999999997\n - type: recall_at_10\n value: 60.961\n - type: recall_at_100\n value: 84.451\n - type: recall_at_1000\n value: 98.113\n - type: recall_at_3\n value: 43.001\n - type: recall_at_5\n value: 49.896\n - task:\n type: Retrieval\n dataset:\n name: MTEB ClimateFEVER\n type: mteb/climate-fever\n config: default\n split: test\n revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380\n metrics:\n - type: map_at_1\n value: 22.076999999999998\n - type: map_at_10\n value: 35.44\n - type: map_at_100\n value: 37.651\n - type: map_at_1000\n value: 37.824999999999996\n - type: map_at_3\n value: 30.764999999999997\n - type: map_at_5\n value: 33.26\n - type: mrr_at_1\n value: 50.163000000000004\n - type: mrr_at_10\n value: 61.207\n - type: mrr_at_100\n value: 61.675000000000004\n - type: mrr_at_1000\n value: 61.692\n - type: mrr_at_3\n value: 58.60999999999999\n - type: mrr_at_5\n value: 60.307\n - type: ndcg_at_1\n value: 50.163000000000004\n - type: ndcg_at_10\n value: 45.882\n - type: ndcg_at_100\n value: 53.239999999999995\n - type: ndcg_at_1000\n value: 55.852000000000004\n - type: ndcg_at_3\n value: 40.514\n - type: ndcg_at_5\n value: 42.038\n - type: precision_at_1\n value: 50.163000000000004\n - type: precision_at_10\n value: 13.466000000000001\n - type: precision_at_100\n value: 2.164\n - type: precision_at_1000\n value: 0.266\n - type: precision_at_3\n value: 29.707\n - type: precision_at_5\n value: 21.694\n - type: recall_at_1\n value: 22.076999999999998\n - type: recall_at_10\n value: 50.193\n - type: recall_at_100\n value: 74.993\n - type: recall_at_1000\n value: 89.131\n - type: recall_at_3\n value: 35.472\n - type: recall_at_5\n value: 41.814\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia\n type: mteb/dbpedia\n config: default\n split: test\n revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659\n metrics:\n - type: map_at_1\n value: 9.953\n - type: map_at_10\n value: 24.515\n - type: map_at_100\n value: 36.173\n - type: map_at_1000\n value: 38.351\n - type: map_at_3\n value: 16.592000000000002\n - type: map_at_5\n value: 20.036\n - type: mrr_at_1\n value: 74.25\n - type: mrr_at_10\n value: 81.813\n - type: mrr_at_100\n value: 82.006\n - type: mrr_at_1000\n value: 82.011\n - type: mrr_at_3\n value: 80.875\n - type: mrr_at_5\n value: 81.362\n - type: ndcg_at_1\n value: 62.5\n - type: ndcg_at_10\n value: 52.42\n - type: ndcg_at_100\n value: 56.808\n - type: ndcg_at_1000\n value: 63.532999999999994\n - type: ndcg_at_3\n value: 56.654\n - type: ndcg_at_5\n value: 54.18300000000001\n - type: precision_at_1\n value: 74.25\n - type: precision_at_10\n value: 42.699999999999996\n - type: precision_at_100\n value: 13.675\n - type: precision_at_1000\n value: 2.664\n - type: precision_at_3\n value: 60.5\n - type: precision_at_5\n value: 52.800000000000004\n - type: recall_at_1\n value: 9.953\n - type: recall_at_10\n value: 30.253999999999998\n - type: recall_at_100\n value: 62.516000000000005\n - type: recall_at_1000\n value: 84.163\n - type: recall_at_3\n value: 18.13\n - type: recall_at_5\n value: 22.771\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: 
accuracy\n value: 79.455\n - type: f1\n value: 74.16798697647569\n - task:\n type: Retrieval\n dataset:\n name: MTEB FEVER\n type: mteb/fever\n config: default\n split: test\n revision: bea83ef9e8fb933d90a2f1d5515737465d613e12\n metrics:\n - type: map_at_1\n value: 87.531\n - type: map_at_10\n value: 93.16799999999999\n - type: map_at_100\n value: 93.341\n - type: map_at_1000\n value: 93.349\n - type: map_at_3\n value: 92.444\n - type: map_at_5\n value: 92.865\n - type: mrr_at_1\n value: 94.014\n - type: mrr_at_10\n value: 96.761\n - type: mrr_at_100\n value: 96.762\n - type: mrr_at_1000\n value: 96.762\n - type: mrr_at_3\n value: 96.672\n - type: mrr_at_5\n value: 96.736\n - type: ndcg_at_1\n value: 94.014\n - type: ndcg_at_10\n value: 95.112\n - type: ndcg_at_100\n value: 95.578\n - type: ndcg_at_1000\n value: 95.68900000000001\n - type: ndcg_at_3\n value: 94.392\n - type: ndcg_at_5\n value: 94.72500000000001\n - type: precision_at_1\n value: 94.014\n - type: precision_at_10\n value: 11.065\n - type: precision_at_100\n value: 1.157\n - type: precision_at_1000\n value: 0.11800000000000001\n - type: precision_at_3\n value: 35.259\n - type: precision_at_5\n value: 21.599\n - type: recall_at_1\n value: 87.531\n - type: recall_at_10\n value: 97.356\n - type: recall_at_100\n value: 98.965\n - type: recall_at_1000\n value: 99.607\n - type: recall_at_3\n value: 95.312\n - type: recall_at_5\n value: 96.295\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA2018\n type: mteb/fiqa\n config: default\n split: test\n revision: 27a168819829fe9bcd655c2df245fb19452e8e06\n metrics:\n - type: map_at_1\n value: 32.055\n - type: map_at_10\n value: 53.114\n - type: map_at_100\n value: 55.235\n - type: map_at_1000\n value: 55.345\n - type: map_at_3\n value: 45.854\n - type: map_at_5\n value: 50.025\n - type: mrr_at_1\n value: 60.34\n - type: mrr_at_10\n value: 68.804\n - type: mrr_at_100\n value: 69.309\n - type: mrr_at_1000\n value: 69.32199999999999\n - type: mrr_at_3\n value: 66.40899999999999\n - type: mrr_at_5\n value: 67.976\n - type: ndcg_at_1\n value: 60.34\n - type: ndcg_at_10\n value: 62.031000000000006\n - type: ndcg_at_100\n value: 68.00500000000001\n - type: ndcg_at_1000\n value: 69.286\n - type: ndcg_at_3\n value: 56.355999999999995\n - type: ndcg_at_5\n value: 58.687\n - type: precision_at_1\n value: 60.34\n - type: precision_at_10\n value: 17.176\n - type: precision_at_100\n value: 2.36\n - type: precision_at_1000\n value: 0.259\n - type: precision_at_3\n value: 37.14\n - type: precision_at_5\n value: 27.809\n - type: recall_at_1\n value: 32.055\n - type: recall_at_10\n value: 70.91\n - type: recall_at_100\n value: 91.83\n - type: recall_at_1000\n value: 98.871\n - type: recall_at_3\n value: 51.202999999999996\n - type: recall_at_5\n value: 60.563\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA\n type: mteb/hotpotqa\n config: default\n split: test\n revision: ab518f4d6fcca38d87c25209f94beba119d02014\n metrics:\n - type: map_at_1\n value: 43.68\n - type: map_at_10\n value: 64.389\n - type: map_at_100\n value: 65.24\n - type: map_at_1000\n value: 65.303\n - type: map_at_3\n value: 61.309000000000005\n - type: map_at_5\n value: 63.275999999999996\n - type: mrr_at_1\n value: 87.36\n - type: mrr_at_10\n value: 91.12\n - type: mrr_at_100\n value: 91.227\n - type: mrr_at_1000\n value: 91.229\n - type: mrr_at_3\n value: 90.57600000000001\n - type: mrr_at_5\n value: 90.912\n - type: ndcg_at_1\n value: 87.36\n - type: ndcg_at_10\n value: 73.076\n - type: ndcg_at_100\n value: 75.895\n - 
type: ndcg_at_1000\n value: 77.049\n - type: ndcg_at_3\n value: 68.929\n - type: ndcg_at_5\n value: 71.28\n - type: precision_at_1\n value: 87.36\n - type: precision_at_10\n value: 14.741000000000001\n - type: precision_at_100\n value: 1.694\n - type: precision_at_1000\n value: 0.185\n - type: precision_at_3\n value: 43.043\n - type: precision_at_5\n value: 27.681\n - type: recall_at_1\n value: 43.68\n - type: recall_at_10\n value: 73.707\n - type: recall_at_100\n value: 84.7\n - type: recall_at_1000\n value: 92.309\n - type: recall_at_3\n value: 64.564\n - type: recall_at_5\n value: 69.203\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 96.75399999999999\n - type: ap\n value: 95.29389839242187\n - type: f1\n value: 96.75348377433475\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO\n type: mteb/msmarco\n config: default\n split: dev\n revision: c5a29a104738b98a9e76336939199e264163d4a0\n metrics:\n - type: map_at_1\n value: 25.176\n - type: map_at_10\n value: 38.598\n - type: map_at_100\n value: 39.707\n - type: map_at_1000\n value: 39.744\n - type: map_at_3\n value: 34.566\n - type: map_at_5\n value: 36.863\n - type: mrr_at_1\n value: 25.874000000000002\n - type: mrr_at_10\n value: 39.214\n - type: mrr_at_100\n value: 40.251\n - type: mrr_at_1000\n value: 40.281\n - type: mrr_at_3\n value: 35.291\n - type: mrr_at_5\n value: 37.545\n - type: ndcg_at_1\n value: 25.874000000000002\n - type: ndcg_at_10\n value: 45.98\n - type: ndcg_at_100\n value: 51.197\n - type: ndcg_at_1000\n value: 52.073\n - type: ndcg_at_3\n value: 37.785999999999994\n - type: ndcg_at_5\n value: 41.870000000000005\n - type: precision_at_1\n value: 25.874000000000002\n - type: precision_at_10\n value: 7.181\n - type: precision_at_100\n value: 0.979\n - type: precision_at_1000\n value: 0.106\n - type: precision_at_3\n value: 16.051000000000002\n - type: precision_at_5\n value: 11.713\n - type: recall_at_1\n value: 25.176\n - type: recall_at_10\n value: 68.67699999999999\n - type: recall_at_100\n value: 92.55\n - type: recall_at_1000\n value: 99.164\n - type: recall_at_3\n value: 46.372\n - type: recall_at_5\n value: 56.16\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 99.03784769721841\n - type: f1\n value: 98.97791641821495\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 91.88326493388054\n - type: f1\n value: 73.74809928034335\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 85.41358439811701\n - type: f1\n value: 83.503679460639\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n type: mteb/amazon_massive_scenario\n config: en\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 89.77135171486215\n - type: f1\n value: 88.89843747468366\n - task:\n type: Clustering\n dataset:\n name: MTEB 
MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: v_measure\n value: 46.22695362087359\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: v_measure\n value: 44.132372165849425\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: default\n split: test\n revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69\n metrics:\n - type: map\n value: 33.35680810650402\n - type: mrr\n value: 34.72625715637218\n - task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus\n type: mteb/nfcorpus\n config: default\n split: test\n revision: ec0fa4fe99da2ff19ca1214b7966684033a58814\n metrics:\n - type: map_at_1\n value: 7.165000000000001\n - type: map_at_10\n value: 15.424\n - type: map_at_100\n value: 20.28\n - type: map_at_1000\n value: 22.065\n - type: map_at_3\n value: 11.236\n - type: map_at_5\n value: 13.025999999999998\n - type: mrr_at_1\n value: 51.702999999999996\n - type: mrr_at_10\n value: 59.965\n - type: mrr_at_100\n value: 60.667\n - type: mrr_at_1000\n value: 60.702999999999996\n - type: mrr_at_3\n value: 58.772000000000006\n - type: mrr_at_5\n value: 59.267\n - type: ndcg_at_1\n value: 49.536\n - type: ndcg_at_10\n value: 40.6\n - type: ndcg_at_100\n value: 37.848\n - type: ndcg_at_1000\n value: 46.657\n - type: ndcg_at_3\n value: 46.117999999999995\n - type: ndcg_at_5\n value: 43.619\n - type: precision_at_1\n value: 51.393\n - type: precision_at_10\n value: 30.31\n - type: precision_at_100\n value: 9.972\n - type: precision_at_1000\n value: 2.329\n - type: precision_at_3\n value: 43.137\n - type: precision_at_5\n value: 37.585\n - type: recall_at_1\n value: 7.165000000000001\n - type: recall_at_10\n value: 19.689999999999998\n - type: recall_at_100\n value: 39.237\n - type: recall_at_1000\n value: 71.417\n - type: recall_at_3\n value: 12.247\n - type: recall_at_5\n value: 14.902999999999999\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ\n type: mteb/nq\n config: default\n split: test\n revision: b774495ed302d8c44a3a7ea25c90dbce03968f31\n metrics:\n - type: map_at_1\n value: 42.653999999999996\n - type: map_at_10\n value: 59.611999999999995\n - type: map_at_100\n value: 60.32300000000001\n - type: map_at_1000\n value: 60.336\n - type: map_at_3\n value: 55.584999999999994\n - type: map_at_5\n value: 58.19\n - type: mrr_at_1\n value: 47.683\n - type: mrr_at_10\n value: 62.06700000000001\n - type: mrr_at_100\n value: 62.537\n - type: mrr_at_1000\n value: 62.544999999999995\n - type: mrr_at_3\n value: 59.178\n - type: mrr_at_5\n value: 61.034\n - type: ndcg_at_1\n value: 47.654\n - type: ndcg_at_10\n value: 67.001\n - type: ndcg_at_100\n value: 69.73899999999999\n - type: ndcg_at_1000\n value: 69.986\n - type: ndcg_at_3\n value: 59.95700000000001\n - type: ndcg_at_5\n value: 64.025\n - type: precision_at_1\n value: 47.654\n - type: precision_at_10\n value: 10.367999999999999\n - type: precision_at_100\n value: 1.192\n - type: precision_at_1000\n value: 0.121\n - type: precision_at_3\n value: 26.651000000000003\n - type: precision_at_5\n value: 18.459\n - type: recall_at_1\n value: 42.653999999999996\n - type: recall_at_10\n value: 86.619\n - type: recall_at_100\n value: 98.04899999999999\n - type: recall_at_1000\n value: 99.812\n - type: recall_at_3\n value: 68.987\n - 
type: recall_at_5\n value: 78.158\n - task:\n type: Retrieval\n dataset:\n name: MTEB QuoraRetrieval\n type: mteb/quora\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 72.538\n - type: map_at_10\n value: 86.702\n - type: map_at_100\n value: 87.31\n - type: map_at_1000\n value: 87.323\n - type: map_at_3\n value: 83.87\n - type: map_at_5\n value: 85.682\n - type: mrr_at_1\n value: 83.31\n - type: mrr_at_10\n value: 89.225\n - type: mrr_at_100\n value: 89.30399999999999\n - type: mrr_at_1000\n value: 89.30399999999999\n - type: mrr_at_3\n value: 88.44300000000001\n - type: mrr_at_5\n value: 89.005\n - type: ndcg_at_1\n value: 83.32000000000001\n - type: ndcg_at_10\n value: 90.095\n - type: ndcg_at_100\n value: 91.12\n - type: ndcg_at_1000\n value: 91.179\n - type: ndcg_at_3\n value: 87.606\n - type: ndcg_at_5\n value: 89.031\n - type: precision_at_1\n value: 83.32000000000001\n - type: precision_at_10\n value: 13.641\n - type: precision_at_100\n value: 1.541\n - type: precision_at_1000\n value: 0.157\n - type: precision_at_3\n value: 38.377\n - type: precision_at_5\n value: 25.162000000000003\n - type: recall_at_1\n value: 72.538\n - type: recall_at_10\n value: 96.47200000000001\n - type: recall_at_100\n value: 99.785\n - type: recall_at_1000\n value: 99.99900000000001\n - type: recall_at_3\n value: 89.278\n - type: recall_at_5\n value: 93.367\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: v_measure\n value: 73.55219145406065\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\n metrics:\n - type: v_measure\n value: 74.13437105242755\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS\n type: mteb/scidocs\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 6.873\n - type: map_at_10\n value: 17.944\n - type: map_at_100\n value: 21.171\n - type: map_at_1000\n value: 21.528\n - type: map_at_3\n value: 12.415\n - type: map_at_5\n value: 15.187999999999999\n - type: mrr_at_1\n value: 33.800000000000004\n - type: mrr_at_10\n value: 46.455\n - type: mrr_at_100\n value: 47.378\n - type: mrr_at_1000\n value: 47.394999999999996\n - type: mrr_at_3\n value: 42.367\n - type: mrr_at_5\n value: 44.972\n - type: ndcg_at_1\n value: 33.800000000000004\n - type: ndcg_at_10\n value: 28.907\n - type: ndcg_at_100\n value: 39.695\n - type: ndcg_at_1000\n value: 44.582\n - type: ndcg_at_3\n value: 26.949\n - type: ndcg_at_5\n value: 23.988\n - type: precision_at_1\n value: 33.800000000000004\n - type: precision_at_10\n value: 15.079999999999998\n - type: precision_at_100\n value: 3.056\n - type: precision_at_1000\n value: 0.42100000000000004\n - type: precision_at_3\n value: 25.167\n - type: precision_at_5\n value: 21.26\n - type: recall_at_1\n value: 6.873\n - type: recall_at_10\n value: 30.568\n - type: recall_at_100\n value: 62.062\n - type: recall_at_1000\n value: 85.37700000000001\n - type: recall_at_3\n value: 15.312999999999999\n - type: recall_at_5\n value: 21.575\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\n metrics:\n - type: cos_sim_pearson\n value: 82.37009118256057\n - type: cos_sim_spearman\n value: 
79.27986395671529\n - type: euclidean_pearson\n value: 79.18037715442115\n - type: euclidean_spearman\n value: 79.28004791561621\n - type: manhattan_pearson\n value: 79.34062972800541\n - type: manhattan_spearman\n value: 79.43106695543402\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cos_sim_pearson\n value: 87.48474767383833\n - type: cos_sim_spearman\n value: 79.54505388752513\n - type: euclidean_pearson\n value: 83.43282704179565\n - type: euclidean_spearman\n value: 79.54579919925405\n - type: manhattan_pearson\n value: 83.77564492427952\n - type: manhattan_spearman\n value: 79.84558396989286\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - type: cos_sim_pearson\n value: 88.803698035802\n - type: cos_sim_spearman\n value: 88.83451367754881\n - type: euclidean_pearson\n value: 88.28939285711628\n - type: euclidean_spearman\n value: 88.83528996073112\n - type: manhattan_pearson\n value: 88.28017412671795\n - type: manhattan_spearman\n value: 88.9228828016344\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: cos_sim_pearson\n value: 85.27469288153428\n - type: cos_sim_spearman\n value: 83.87477064876288\n - type: euclidean_pearson\n value: 84.2601737035379\n - type: euclidean_spearman\n value: 83.87431082479074\n - type: manhattan_pearson\n value: 84.3621547772745\n - type: manhattan_spearman\n value: 84.12094375000423\n - task:\n type: STS\n dataset:\n name: MTEB STS15\n type: mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cos_sim_pearson\n value: 88.12749863201587\n - type: cos_sim_spearman\n value: 88.54287568368565\n - type: euclidean_pearson\n value: 87.90429700607999\n - type: euclidean_spearman\n value: 88.5437689576261\n - type: manhattan_pearson\n value: 88.19276653356833\n - type: manhattan_spearman\n value: 88.99995393814679\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cos_sim_pearson\n value: 85.68398747560902\n - type: cos_sim_spearman\n value: 86.48815303460574\n - type: euclidean_pearson\n value: 85.52356631237954\n - type: euclidean_spearman\n value: 86.486391949551\n - type: manhattan_pearson\n value: 85.67267981761788\n - type: manhattan_spearman\n value: 86.7073696332485\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 88.9057107443124\n - type: cos_sim_spearman\n value: 88.7312168757697\n - type: euclidean_pearson\n value: 88.72810439714794\n - type: euclidean_spearman\n value: 88.71976185854771\n - type: manhattan_pearson\n value: 88.50433745949111\n - type: manhattan_spearman\n value: 88.51726175544195\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: eea2b4fe26a775864c896887d910b76a8098ad3f\n metrics:\n - type: cos_sim_pearson\n value: 67.59391795109886\n - type: cos_sim_spearman\n value: 66.87613008631367\n - type: 
MTEB evaluation results (one headline metric per dataset, as reported in the model-index metadata):

| Dataset | Task | Metric | Score |
|---|---|---|---|
| MTEB STSBenchmark | STS | cos_sim_spearman | 86.8518089863434 |
| MTEB SciDocsRR | Reranking | map | 89.09255369305481 |
| MTEB SciFact | Retrieval | ndcg_at_10 | 79.06 |
| MTEB SprintDuplicateQuestions | PairClassification | cos_sim_ap | 92.81616007802704 |
| MTEB StackExchangeClustering | Clustering | v_measure | 79.86443362395185 |
| MTEB StackExchangeClusteringP2P | Clustering | v_measure | 49.40897096662564 |
| MTEB StackOverflowDupQuestions | Reranking | map | 55.66040806627947 |
| MTEB SummEval | Summarization | cos_sim_spearman | 31.35016454939226 |
| MTEB TRECCOVID | Retrieval | ndcg_at_10 | 82.258 |
| MTEB Touche2020 | Retrieval | ndcg_at_10 | 30.567 |
| MTEB ToxicConversationsClassification | Classification | accuracy | 85.1174 |
| MTEB TweetSentimentExtractionClassification | Classification | accuracy | 72.58347481607245 |
| MTEB TwentyNewsgroupsClustering | Clustering | v_measure | 53.90586138221305 |
| MTEB TwitterSemEval2015 | PairClassification | cos_sim_ap | 77.9645072410354 |
| MTEB TwitterURLCorpus | PairClassification | cos_sim_ap | 86.58811861657401 |
| MTEB AFQMC | STS | cos_sim_spearman | 72.2538631361313 |
| MTEB ATEC | STS | cos_sim_spearman | 62.61836216999812 |
| MTEB AmazonReviewsClassification (zh) | Classification | accuracy | 53.98400000000001 |
| MTEB BQ | STS | cos_sim_spearman | 81.25029594540435 |
| MTEB CLSClusteringP2P | Clustering | v_measure | 47.07270168705156 |
| MTEB CLSClusteringS2S | Clustering | v_measure | 45.98511703185043 |
| MTEB CMedQAv1 | Reranking | map | 88.19895157194931 |
| MTEB CMedQAv2 | Reranking | map | 88.03317320980119 |
| MTEB CmedqaRetrieval | Retrieval | ndcg_at_10 | 48.685 |
| MTEB Cmnli | PairClassification | cos_sim_ap | 90.18017125327886 |
| MTEB CovidRetrieval | Retrieval | ndcg_at_10 | 83.651 |
| MTEB DuRetrieval | Retrieval | ndcg_at_10 | 87.443 |
| MTEB EcomRetrieval | Retrieval | ndcg_at_10 | 71.146 |
| MTEB IFlyTek | Classification | accuracy | 54.52096960369373 |
| MTEB JDReview | Classification | accuracy | 86.51031894934334 |
| MTEB LCQMC | STS | cos_sim_spearman | 73.81314174653045 |
| MTEB MMarcoReranking | Reranking | map | 31.648613483640254 |
| MTEB MMarcoRetrieval | Retrieval | ndcg_at_10 | 85.15599999999999 |
| MTEB MassiveIntentClassification (zh-CN) | Classification | accuracy | 81.08607935440484 |
| MTEB MassiveScenarioClassification (zh-CN) | Classification | accuracy | 86.05917955615332 |
| MTEB MedicalRetrieval | Retrieval | ndcg_at_10 | 65.592 |
| MTEB MultilingualSentiment | Classification | accuracy | 76.87666666666667 |
| MTEB Ocnli | PairClassification | cos_sim_ap | 84.78274442344829 |
| MTEB OnlineShopping | Classification | accuracy | 94.3 |
| MTEB PAWSX | STS | cos_sim_spearman | 54.06338895994974 |
| MTEB QBQTC | STS | cos_sim_spearman | 31.366121633635284 |
| MTEB STS22 (zh) | STS | cos_sim_spearman | 66.12800618164744 |
| MTEB STSB | STS | cos_sim_spearman | 81.16833673266562 |
| MTEB T2Reranking | Reranking | map | 67.80129586475687 |
| MTEB T2Retrieval | Retrieval | ndcg_at_10 | 87.734 |
| MTEB TNews | Classification | accuracy | 52.971999999999994 |
| MTEB ThuNewsClusteringP2P | Clustering | v_measure | 86.0797948663824 |
| MTEB ThuNewsClusteringS2S | Clustering | v_measure | 85.10759092255017 |
| MTEB VideoRetrieval | Retrieval | ndcg_at_10 | 78.84299999999999 |
| MTEB Waimai | Classification | accuracy | 89.47 |
| MTEB AlloProfClusteringP2P | Clustering | v_measure | 76.05592323841036 / 64.51718058866508 |
| MTEB AlloprofReranking | Reranking | map | 73.08278490943373 |
| MTEB AlloprofRetrieval | Retrieval | ndcg_at_10 | 58.876 |
| MTEB AmazonReviewsClassification (fr) | Classification | accuracy | 55.532000000000004 |
| MTEB BSARDRetrieval | Retrieval | ndcg_at_10 | 18.796 |
| MTEB HALClusteringS2S | Clustering | v_measure | 30.833269778867116 |
| MTEB MLSUMClusteringP2P | Clustering | v_measure | 50.0281928004713 / 43.699961510636534 |
| MTEB MTOPDomainClassification (fr) | Classification | accuracy | 96.68963357344191 |
| MTEB MTOPIntentClassification (fr) | Classification | accuracy | 87.46946445349202 |
| MTEB MasakhaNEWSClassification (fra) | Classification | accuracy | 82.60663507109005 |
| MTEB MasakhaNEWSClusteringP2P (fra) | Clustering | v_measure | 60.19311264967803 / 63.6235764409785 |
| MTEB MassiveIntentClassification (fr) | Classification | accuracy | 81.65097511768661 |
| MTEB MassiveScenarioClassification (fr) | Classification | accuracy | 86.64425016812373 |
| MTEB MintakaRetrieval (fr) | Retrieval | ndcg_at_10 | 54.03 |
| MTEB OpusparcusPC (fr) | PairClassification | cos_sim_ap | 100.0 |
| MTEB PawsX (fr) | PairClassification | cos_sim_ap | 80.86376001270014 |
| MTEB SICKFr | STS | cos_sim_spearman | 78.90454327031226 |
| MTEB STS22 (fr) | STS | cos_sim_spearman | 82.58116836039301 |
| MTEB STSBenchmarkMultilingualSTS (fr) | STS | cos_sim_spearman | 85.46085808849622 |
| MTEB SummEvalFr | Summarization | cos_sim_spearman | 31.454589417353603 |
| MTEB SyntecReranking | Reranking | map | 84.31666666666666 |
| MTEB SyntecRetrieval | Retrieval | ndcg_at_10 | 78.255 |
| MTEB XPQARetrieval (fr) | Retrieval | ndcg_at_10 | 68.297 |
| MTEB 8TagsClustering | Clustering | v_measure | 51.36126303874126 |
| MTEB AllegroReviews | Classification | accuracy | 67.13717693836979 |
| MTEB ArguAna-PL | Retrieval | ndcg_at_10 | 59.870999999999995 |
| MTEB CBD | Classification | accuracy | 78.03000000000002 |
| MTEB CDSC-E | PairClassification | cos_sim_ap | 76.75437826834582 |
| MTEB CDSC-R | STS | cos_sim_spearman | 92.66381487060741 |
| MTEB DBPedia-PL | Retrieval | ndcg_at_10 | 41.3 |
| MTEB FiQA-PL | Retrieval | ndcg_at_10 | 41.046 |
| MTEB HotpotQA-PL | Retrieval | ndcg_at_10 | 67.608 |
| MTEB MSMARCO-PL | Retrieval | ndcg_at_10 | 62.222 |
| MTEB MassiveIntentClassification (pl) | Classification | accuracy | 80.75319435104238 |
| MTEB MassiveScenarioClassification (pl) | Classification | accuracy | 85.54472091459313 |
| MTEB NFCorpus-PL | Retrieval | ndcg_at_10 | 30.885 |
| MTEB NQ-PL | Retrieval | ndcg_at_10 | 48.449999999999996 |
| MTEB PAC | Classification | accuracy | 69.04141326382856 |
| MTEB PPC | PairClassification | cos_sim_ap | 94.1044939667201 |
| MTEB PSC | PairClassification | cos_sim_ap | 99.36904211868182 |
| MTEB PolEmo2.0-IN | Classification | accuracy | 89.39058171745152 |
| MTEB PolEmo2.0-OUT | Classification | accuracy | 74.97975708502024 |
| MTEB Quora-PL | Retrieval | ndcg_at_10 | 83.214 |
| MTEB SCIDOCS-PL | Retrieval | ndcg_at_10 | 16.084 |
| MTEB SICK-E-PL | PairClassification | cos_sim_ap | 83.6756734006714 |
| MTEB SICK-R-PL | STS | cos_sim_spearman | 78.89329160768719 |
| MTEB STS22 (pl) | STS | cos_sim_spearman | 41.01645667030284 |
| MTEB SciFact-PL | Retrieval | | |
type: map_at_1\n value: 54.31700000000001\n - type: map_at_10\n value: 65.564\n - type: map_at_100\n value: 66.062\n - type: map_at_1000\n value: 66.08699999999999\n - type: map_at_3\n value: 62.592999999999996\n - type: map_at_5\n value: 63.888\n - type: mrr_at_1\n value: 56.99999999999999\n - type: mrr_at_10\n value: 66.412\n - type: mrr_at_100\n value: 66.85900000000001\n - type: mrr_at_1000\n value: 66.88\n - type: mrr_at_3\n value: 64.22200000000001\n - type: mrr_at_5\n value: 65.206\n - type: ndcg_at_1\n value: 56.99999999999999\n - type: ndcg_at_10\n value: 70.577\n - type: ndcg_at_100\n value: 72.879\n - type: ndcg_at_1000\n value: 73.45\n - type: ndcg_at_3\n value: 65.5\n - type: ndcg_at_5\n value: 67.278\n - type: precision_at_1\n value: 56.99999999999999\n - type: precision_at_10\n value: 9.667\n - type: precision_at_100\n value: 1.083\n - type: precision_at_1000\n value: 0.11299999999999999\n - type: precision_at_3\n value: 26.0\n - type: precision_at_5\n value: 16.933\n - type: recall_at_1\n value: 54.31700000000001\n - type: recall_at_10\n value: 85.056\n - type: recall_at_100\n value: 95.667\n - type: recall_at_1000\n value: 100.0\n - type: recall_at_3\n value: 71.0\n - type: recall_at_5\n value: 75.672\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID-PL\n type: clarin-knext/trec-covid-pl\n config: default\n split: test\n revision: 81bcb408f33366c2a20ac54adafad1ae7e877fdd\n metrics:\n - type: map_at_1\n value: 0.245\n - type: map_at_10\n value: 2.051\n - type: map_at_100\n value: 12.009\n - type: map_at_1000\n value: 27.448\n - type: map_at_3\n value: 0.721\n - type: map_at_5\n value: 1.13\n - type: mrr_at_1\n value: 88.0\n - type: mrr_at_10\n value: 93.0\n - type: mrr_at_100\n value: 93.0\n - type: mrr_at_1000\n value: 93.0\n - type: mrr_at_3\n value: 93.0\n - type: mrr_at_5\n value: 93.0\n - type: ndcg_at_1\n value: 85.0\n - type: ndcg_at_10\n value: 80.303\n - type: ndcg_at_100\n value: 61.23499999999999\n - type: ndcg_at_1000\n value: 52.978\n - type: ndcg_at_3\n value: 84.419\n - type: ndcg_at_5\n value: 82.976\n - type: precision_at_1\n value: 88.0\n - type: precision_at_10\n value: 83.39999999999999\n - type: precision_at_100\n value: 61.96\n - type: precision_at_1000\n value: 22.648\n - type: precision_at_3\n value: 89.333\n - type: precision_at_5\n value: 87.2\n - type: recall_at_1\n value: 0.245\n - type: recall_at_10\n value: 2.193\n - type: recall_at_100\n value: 14.938\n - type: recall_at_1000\n value: 48.563\n - type: recall_at_3\n value: 0.738\n - type: recall_at_5\n value: 1.173\n---\n\n# RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF\nThis model was converted to GGUF format from [`Alibaba-NLP/gte-Qwen2-7B-instruct`](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.\nRefer to the [original model card](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) for more details on the model.\n\n## Use with llama.cpp\nInstall llama.cpp through brew (works on Mac and Linux)\n\n```bash\nbrew install llama.cpp\n\n```\nInvoke the llama.cpp server or the CLI.\n\n### CLI:\n```bash\nllama-cli --hf-repo RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF --hf-file gte-qwen2-7b-instruct-q6_k.gguf -p \"The meaning to life and the universe is\"\n```\n\n### Server:\n```bash\nllama-server --hf-repo RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF --hf-file gte-qwen2-7b-instruct-q6_k.gguf -c 2048\n```\n\nNote: You can also use this checkpoint directly through the [usage 
steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well.\n\nStep 1: Clone llama.cpp from GitHub.\n```\ngit clone https://github.com/ggerganov/llama.cpp\n```\n\nStep 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux).\n```\ncd llama.cpp && LLAMA_CURL=1 make\n```\n\nStep 3: Run inference through the main binary.\n```\n./llama-cli --hf-repo RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF --hf-file gte-qwen2-7b-instruct-q6_k.gguf -p \"The meaning to life and the universe is\"\n```\nor \n```\n./llama-server --hf-repo RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF --hf-file gte-qwen2-7b-instruct-q6_k.gguf -c 2048\n```\n"},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n \"SCIFACT\"\n]"}}},{"rowIdx":2252,"cells":{"id":{"kind":"string","value":"RomainDarous/large_directFourEpoch_additivePooling_noisedInit_mistranslationModel"},"author":{"kind":"string","value":"RomainDarous"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","safetensors","xlm-roberta","sentence-similarity","feature-extraction","generated_from_trainer","dataset_size:4460010","loss:CoSENTLoss","dataset:RomainDarous/corrupted_os_by_language","arxiv:1908.10084","base_model:RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel","base_model:finetune:RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel","model-index","autotrain_compatible","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"safetensors\",\n \"xlm-roberta\",\n \"sentence-similarity\",\n \"feature-extraction\",\n \"generated_from_trainer\",\n \"dataset_size:4460010\",\n \"loss:CoSENTLoss\",\n \"dataset:RomainDarous/corrupted_os_by_language\",\n \"arxiv:1908.10084\",\n \"base_model:RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel\",\n \"base_model:finetune:RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2025-02-28T19:24:36Z","string":"2025-02-28T19:24:36Z"},"last_modified":{"kind":"string","value":"2025-02-28T19:25:16+00:00"},"downloads":{"kind":"number","value":23,"string":"23"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel\ndatasets:\n- RomainDarous/corrupted_os_by_language\nlibrary_name: sentence-transformers\nmetrics:\n- pearson_cosine\n- spearman_cosine\npipeline_tag: sentence-similarity\ntags:\n- sentence-transformers\n- sentence-similarity\n- feature-extraction\n- generated_from_trainer\n- dataset_size:4460010\n- loss:CoSENTLoss\nwidget:\n- source_sentence: Malformed target specific variable definition\n sentences:\n - Hedefe özgü değişken tanımı bozuk\n - Kan alle data in die gids lees\n - \"слава Украине! 
героям слава!\\uFEFF\"\n- source_sentence: Can't write an inode bitmap\n sentences:\n - Skontrolujte stav aktualizácií alebo to skúste znova neskôr.\n - Malsukcesis skribi i nodan bitmapon\n - Zastępuje wersję GL obsługiwaną przez sterownik\n- source_sentence: Optimize soft proofing color transformations\n sentences:\n - 'arkadaslar biz artik her an kirmizi kart yiyecek,bencil,pas yapamayan,isabetsiz\n orta yapani istemiyoruz. sozde efsaneniz bu sezon Besiktasa en cok zarar verenlerden\n biriydi. kendini dusunmeden once Besiktasi dusunecek adam lazim bize. o yuzden\n #GoHomeQuaresma'\n - Yav bizim dedikodusunu yaptığımız insanın bile bi vizyonu var. Senin hakkında\n neden oturup konuşalım?\n - Ik ben een transgender.\n- source_sentence: 'Pass 1: Checking @is, @bs, and sizes'\n sentences:\n - Bu adam cidden kurabiye gibi ben bunu çayın yanında yerim\n - sagnat. errada. invisible. justificació. idioma\n - Wilt u echt de primaire sleutel verplaatsen? (j N)\n- source_sentence: Search for matching log entries\n sentences:\n - quem te lembra? caralho tô assustada aqui kkkkk\n - sendotasunik gabeko\\ egoera bistaratuko den ala ez adierazten du\n - En aquest cas, hem d'incloure les imatges del contenidor )sr iov per a càrregues\n de treball de telco (per exemple, com a referència, es podrien obtenir des de\n valors de helm chart)\nmodel-index:\n- name: SentenceTransformer based on RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel\n results:\n - task:\n type: semantic-similarity\n name: Semantic Similarity\n dataset:\n name: sts eval\n type: sts-eval\n metrics:\n - type: pearson_cosine\n value: 0.980083415375982\n name: Pearson Cosine\n - type: spearman_cosine\n value: 0.8655169963020204\n name: Spearman Cosine\n - task:\n type: semantic-similarity\n name: Semantic Similarity\n dataset:\n name: sts test\n type: sts-test\n metrics:\n - type: pearson_cosine\n value: 0.9801740771365185\n name: Pearson Cosine\n - type: spearman_cosine\n value: 0.8655815024093642\n name: Spearman Cosine\n---\n\n# SentenceTransformer based on RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel\n\nThis is a [sentence-transformers](https://www.SBERT.net) model finetuned from [RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel](https://huggingface.co/RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel) on the [corrupted_open_os_by_language](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language) dataset. 
It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.\n\n## Model Details\n\n### Model Description\n- **Model Type:** Sentence Transformer\n- **Base model:** [RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel](https://huggingface.co/RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel) \n- **Maximum Sequence Length:** 128 tokens\n- **Output Dimensionality:** 768 dimensions\n- **Similarity Function:** Cosine Similarity\n- **Training Dataset:**\n - [corrupted_open_os_by_language](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language)\n\n\n\n### Model Sources\n\n- **Documentation:** [Sentence Transformers Documentation](https://sbert.net)\n- **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers)\n- **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers)\n\n### Full Model Architecture\n\n```\nSentenceTransformer(\n (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: XLMRobertaModel \n (1): MultiHeadGeneralizedPooling(\n (P): ModuleList(\n (0-7): 8 x Linear(in_features=768, out_features=96, bias=True)\n )\n (W1): ModuleList(\n (0-7): 8 x Linear(in_features=96, out_features=384, bias=True)\n )\n (W2): ModuleList(\n (0-7): 8 x Linear(in_features=384, out_features=96, bias=True)\n )\n )\n)\n```\n\n## Usage\n\n### Direct Usage (Sentence Transformers)\n\nFirst install the Sentence Transformers library:\n\n```bash\npip install -U sentence-transformers\n```\n\nThen you can load this model and run inference.\n```python\nfrom sentence_transformers import SentenceTransformer\n\n# Download from the 🤗 Hub\nmodel = SentenceTransformer(\"RomainDarous/large_directFourEpoch_additivePooling_noisedInit_mistranslationModel\")\n# Run inference\nsentences = [\n 'Search for matching log entries',\n 'quem te lembra? 
caralho tô assustada aqui kkkkk',\n 'sendotasunik gabeko\\\\ egoera bistaratuko den ala ez adierazten du',\n]\nembeddings = model.encode(sentences)\nprint(embeddings.shape)\n# [3, 768]\n\n# Get the similarity scores for the embeddings\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\n\n\n\n\n\n\n## Evaluation\n\n### Metrics\n\n#### Semantic Similarity\n\n* Datasets: `sts-eval` and `sts-test`\n* Evaluated with [EmbeddingSimilarityEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)\n\n| Metric | sts-eval | sts-test |\n|:--------------------|:-----------|:-----------|\n| pearson_cosine | 0.9801 | 0.9802 |\n| **spearman_cosine** | **0.8655** | **0.8656** |\n\n\n\n\n\n## Training Details\n\n### Training Dataset\n\n#### corrupted_open_os_by_language\n\n* Dataset: [corrupted_open_os_by_language](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language) at [9d25780](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language/tree/9d25780e2032b1e8f06af6a4ff55124d7a930c3c)\n* Size: 4,460,010 training samples\n* Columns: sentence1, sentence2, and score\n* Approximate statistics based on the first 1000 samples:\n | | sentence1 | sentence2 | score |\n |:--------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:------------------------------------------------|\n | type | string | string | int |\n | details |
  • min: 6 tokens
  • mean: 18.33 tokens
  • max: 128 tokens
|
  • min: 4 tokens
  • mean: 26.47 tokens
  • max: 128 tokens
|
  • 0: ~50.60%
  • 1: ~49.40%
|\n* Samples:\n | sentence1 | sentence2 | score |\n |:--------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------|:---------------|\n | Check spelling. Print the document. Show completion window. General. Show help | Kontrolli õigekirja. присоединяюсь. | 0 |\n | EXIF not supported for this file format. | Šiam failo formatui EXIF nepalaikomas. | 1 |\n | This package includes the documentation for texlive everyhook | Paket ini menyertakan dokumentasi untuk texlive everyhook | 1 |\n* Loss: [CoSENTLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:\n ```json\n {\n \"scale\": 20.0,\n \"similarity_fct\": \"pairwise_cos_sim\"\n }\n ```\n\n### Evaluation Dataset\n\n#### corrupted_open_os_by_language\n\n* Dataset: [corrupted_open_os_by_language](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language) at [9d25780](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language/tree/9d25780e2032b1e8f06af6a4ff55124d7a930c3c)\n* Size: 4,460,010 evaluation samples\n* Columns: sentence1, sentence2, and score\n* Approximate statistics based on the first 1000 samples:\n | | sentence1 | sentence2 | score |\n |:--------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:------------------------------------------------|\n | type | string | string | int |\n | details |
  • min: 5 tokens
  • mean: 17.71 tokens
  • max: 128 tokens
|
  • min: 3 tokens
  • mean: 26.95 tokens
  • max: 128 tokens
|
  • 0: ~50.60%
  • 1: ~49.40%
|\n* Samples:\n | sentence1 | sentence2 | score |\n |:----------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------|\n | Could not identify the current seat. | 天天花着男人的钱还这这创造新词汇男权你可真牛批,你也就这一出了一问男权,就说是我是吧,到现在我也没听到你给我们讲的男权,你也就是在网上喷喷,现实走道都不敢探头自卑,你现实要把你女权的劲拿出来总低啥头,您老应该去国家教育局把男权加上是吧,你们女权天天说自己生活不好没地位,给你们地位了你们能干啥?用你们的女权打到全世界男性是吧,能相出男权这一词您老也是人才呀,是不是庆幸自己是个女的,活在自己想想的世界里不觉得孤单吗,假象有男权是吧,自己假象和男权还说自己不是田园女权,田园女权能连自己都骂说自己妈是驴爸是大鼎的也是奇葩呀,那我们国家大肆宣扬过你们这么田园女权吗,国家要的是女性人群自主自理,你们可好看看你们女权干的啥事,给你们女权地位高了,看看你们女权干的事n绿地集团高管怎么都不说呀,人家可是有钱有地位,也不是我们说三从四德洗衣做饭你们女权会吗?,那我问问你们女权干过啥惊天大事,还甩锅给孔子,还封建社会,那我问问你们女权在福利面前为啥说自己是女性呀不是社会主义社会吗不应该男女平等吗,天天自己也不知道是不是抱个手机天天欧巴欧巴,你家那位要是不陪你看一会就会问你是不是不爱我了是吧大姐,您老也就赚这白菜钱操心国家事,中国五千年的历史被您老一句否决,还嘲讽人家日本女性,好意思说自己不是女权,三从四德流传这么久到您这变成日本文化了,我就想问问男权您老是怎么想的,那你问孔子老人家呗为什么女人要三从四德,我说的是女权你干嘛自己对号入座,连中华人民传承的东西都不认跟我这谈男权,还男权您老给我举个例子呗,让我们男权听听都是h啥,这些不都是你们女权的标准吗?,还男权,您老醒醒吧这里是现实,不是你的公主世界,总觉得自己多么多么重要,地球没你是不能转了还是人类要灭亡呀,我真的想问一句你给我找一条男权的新闻,咋了我们男人不能提女权呗你老授权了呗,那我们谈论田园女权你老对号入座干嘛,天天过节要礼物,还嫌弃自己男朋友没有钱,我寻思你找个有钱人包养你呗,对了有钱人怎么可能看上你这种女权的呢,还要孩子跟女方姓我也没看见你没跟你妈姓呀,年年过节男人给你们送礼物你们女人给男人送过礼物吗?,一问我不是陪着他吗我对他说我爱你了这不是最好的礼物吗?,男人只要不送礼物就是不爱你们了呗,人家国际女权讲的男人能做的我们女人也能做,田园女权男人能做的我们女人为啥要做,还男权我笑了,以前结婚几头牛换个衣服原装的,现在几十万彩... | 0 |\n | Undoing Date and Time Adjustment | 正在取消日期和时间调整 | 1 |\n | Dependency package for gsl_2_6 gnu hpc | Pacotes de desenvolvimento do KDE | 1 |\n* Loss: [CoSENTLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters:\n ```json\n {\n \"scale\": 20.0,\n \"similarity_fct\": \"pairwise_cos_sim\"\n }\n ```\n\n### Training Hyperparameters\n#### Non-Default Hyperparameters\n\n- `eval_strategy`: steps\n- `per_device_train_batch_size`: 64\n- `per_device_eval_batch_size`: 64\n- `num_train_epochs`: 1\n- `warmup_ratio`: 0.1\n\n#### All Hyperparameters\n
Click to expand\n\n- `overwrite_output_dir`: False\n- `do_predict`: False\n- `eval_strategy`: steps\n- `prediction_loss_only`: True\n- `per_device_train_batch_size`: 64\n- `per_device_eval_batch_size`: 64\n- `per_gpu_train_batch_size`: None\n- `per_gpu_eval_batch_size`: None\n- `gradient_accumulation_steps`: 1\n- `eval_accumulation_steps`: None\n- `torch_empty_cache_steps`: None\n- `learning_rate`: 5e-05\n- `weight_decay`: 0.0\n- `adam_beta1`: 0.9\n- `adam_beta2`: 0.999\n- `adam_epsilon`: 1e-08\n- `max_grad_norm`: 1.0\n- `num_train_epochs`: 1\n- `max_steps`: -1\n- `lr_scheduler_type`: linear\n- `lr_scheduler_kwargs`: {}\n- `warmup_ratio`: 0.1\n- `warmup_steps`: 0\n- `log_level`: passive\n- `log_level_replica`: warning\n- `log_on_each_node`: True\n- `logging_nan_inf_filter`: True\n- `save_safetensors`: True\n- `save_on_each_node`: False\n- `save_only_model`: False\n- `restore_callback_states_from_checkpoint`: False\n- `no_cuda`: False\n- `use_cpu`: False\n- `use_mps_device`: False\n- `seed`: 42\n- `data_seed`: None\n- `jit_mode_eval`: False\n- `use_ipex`: False\n- `bf16`: False\n- `fp16`: False\n- `fp16_opt_level`: O1\n- `half_precision_backend`: auto\n- `bf16_full_eval`: False\n- `fp16_full_eval`: False\n- `tf32`: None\n- `local_rank`: 0\n- `ddp_backend`: None\n- `tpu_num_cores`: None\n- `tpu_metrics_debug`: False\n- `debug`: []\n- `dataloader_drop_last`: False\n- `dataloader_num_workers`: 0\n- `dataloader_prefetch_factor`: None\n- `past_index`: -1\n- `disable_tqdm`: False\n- `remove_unused_columns`: True\n- `label_names`: None\n- `load_best_model_at_end`: False\n- `ignore_data_skip`: False\n- `fsdp`: []\n- `fsdp_min_num_params`: 0\n- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}\n- `fsdp_transformer_layer_cls_to_wrap`: None\n- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}\n- `deepspeed`: None\n- `label_smoothing_factor`: 0.0\n- `optim`: adamw_torch\n- `optim_args`: None\n- `adafactor`: False\n- `group_by_length`: False\n- `length_column_name`: length\n- `ddp_find_unused_parameters`: None\n- `ddp_bucket_cap_mb`: None\n- `ddp_broadcast_buffers`: False\n- `dataloader_pin_memory`: True\n- `dataloader_persistent_workers`: False\n- `skip_memory_metrics`: True\n- `use_legacy_prediction_loop`: False\n- `push_to_hub`: False\n- `resume_from_checkpoint`: None\n- `hub_model_id`: None\n- `hub_strategy`: every_save\n- `hub_private_repo`: None\n- `hub_always_push`: False\n- `gradient_checkpointing`: False\n- `gradient_checkpointing_kwargs`: None\n- `include_inputs_for_metrics`: False\n- `include_for_metrics`: []\n- `eval_do_concat_batches`: True\n- `fp16_backend`: auto\n- `push_to_hub_model_id`: None\n- `push_to_hub_organization`: None\n- `mp_parameters`: \n- `auto_find_batch_size`: False\n- `full_determinism`: False\n- `torchdynamo`: None\n- `ray_scope`: last\n- `ddp_timeout`: 1800\n- `torch_compile`: False\n- `torch_compile_backend`: None\n- `torch_compile_mode`: None\n- `dispatch_batches`: None\n- `split_batches`: None\n- `include_tokens_per_second`: False\n- `include_num_input_tokens_seen`: False\n- `neftune_noise_alpha`: None\n- `optim_target_modules`: None\n- `batch_eval_metrics`: False\n- `eval_on_start`: False\n- `use_liger_kernel`: False\n- `eval_use_gather_object`: False\n- `average_tokens_across_devices`: False\n- `prompts`: None\n- `batch_sampler`: batch_sampler\n- `multi_dataset_batch_sampler`: 
proportional\n\n
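For reference, the hyperparameters listed above can be reproduced with the sentence-transformers v3 Trainer API. The sketch below is not the author's training script: the checkpoint and dataset names are taken from this card, while the train/eval split handling and the output directory are assumptions, since the card does not state them.

```python
from datasets import load_dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import CoSENTLoss

# Base checkpoint named in this card; its additive pooling head is loaded with it.
model = SentenceTransformer(
    "RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel"
)

# Assumption: the dataset ships a single train split that is split locally for evaluation.
dataset = load_dataset("RomainDarous/corrupted_os_by_language", split="train")
dataset = dataset.train_test_split(test_size=10_000, seed=42)

# CoSENTLoss with scale=20.0; pairwise cosine similarity is its default similarity_fct.
loss = CoSENTLoss(model, scale=20.0)

args = SentenceTransformerTrainingArguments(
    output_dir="directFourEpoch_additivePooling",  # hypothetical output path
    num_train_epochs=1,
    per_device_train_batch_size=64,
    per_device_eval_batch_size=64,
    learning_rate=5e-5,
    warmup_ratio=0.1,
    eval_strategy="steps",
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=dataset["train"],  # columns: sentence1, sentence2, score
    eval_dataset=dataset["test"],
    loss=loss,
)
trainer.train()
```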
\n\n### Training Logs\n| Epoch | Step | Training Loss | corrupted open os by language loss | sts-eval_spearman_cosine | sts-test_spearman_cosine |\n|:-----:|:-----:|:-------------:|:----------------------------------:|:------------------------:|:------------------------:|\n| 1.0 | 55751 | 0.0845 | 0.2994 | 0.8655 | - |\n| -1 | -1 | - | - | - | 0.8656 |\n\n\n### Framework Versions\n- Python: 3.10.13\n- Sentence Transformers: 3.4.1\n- Transformers: 4.48.2\n- PyTorch: 2.1.2+cu121\n- Accelerate: 1.3.0\n- Datasets: 2.16.1\n- Tokenizers: 0.21.0\n\n## Citation\n\n### BibTeX\n\n#### Sentence Transformers\n```bibtex\n@inproceedings{reimers-2019-sentence-bert,\n title = \"Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks\",\n author = \"Reimers, Nils and Gurevych, Iryna\",\n booktitle = \"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing\",\n month = \"11\",\n year = \"2019\",\n publisher = \"Association for Computational Linguistics\",\n url = \"https://arxiv.org/abs/1908.10084\",\n}\n```\n\n#### CoSENTLoss\n```bibtex\n@online{kexuefm-8847,\n title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT},\n author={Su Jianlin},\n year={2022},\n month={Jan},\n url={https://kexue.fm/archives/8847},\n}\n```\n\n\n\n\n\n"},"matched_bigbio_names":{"kind":"list like","value":["CAS"],"string":"[\n \"CAS\"\n]"}}},{"rowIdx":2253,"cells":{"id":{"kind":"string","value":"hhhhzy/roberta-pubhealth"},"author":{"kind":"string","value":"hhhhzy"},"task_category":{"kind":"string","value":"text-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","roberta","text-classification","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"roberta\",\n \"text-classification\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-05-30T20:37:57Z","string":"2022-05-30T20:37:57Z"},"last_modified":{"kind":"string","value":"2022-05-30T23:01:52+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\n\n# Roberta-Pubhealth model\n\nThis model is a fine-tuned version of [RoBERTa Base](https://huggingface.co/roberta-base) on the health_fact dataset.\nIt achieves the following results on the evaluation set:\n- micro f1 (accuracy): 0.7137\n- macro f1: 0.6056\n- weighted f1: 0.7106\n- samples predicted per second: 9.31\n\n## Dataset desctiption\n[PUBHEALTH](https://huggingface.co/datasets/health_fact)is a comprehensive dataset for explainable automated fact-checking of public health claims. Each instance in the PUBHEALTH dataset has an associated veracity label (true, false, unproven, mixture). Furthermore each instance in the dataset has an explanation text field. 
The explanation is a justification for which the claim has been assigned a particular veracity label.\n\n## Training hyperparameters\n\nThe model are trained with the following tuned config:\n- model: roberta base\n- batch size: 32\n- learning rate: 5e-5\n- number of epochs: 4\n- warmup steps: 0"},"matched_bigbio_names":{"kind":"list like","value":["PUBHEALTH"],"string":"[\n \"PUBHEALTH\"\n]"}}},{"rowIdx":2254,"cells":{"id":{"kind":"string","value":"seonghyeonye/flipped_3B"},"author":{"kind":"string","value":"seonghyeonye"},"task_category":{"kind":"string","value":"text2text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","t5","text2text-generation","en","dataset:bigscience/P3","arxiv:2210.02969","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"t5\",\n \"text2text-generation\",\n \"en\",\n \"dataset:bigscience/P3\",\n \"arxiv:2210.02969\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-10-04T01:40:33Z","string":"2022-10-04T01:40:33Z"},"last_modified":{"kind":"string","value":"2022-10-19T08:38:17+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":3,"string":"3"},"README":{"kind":"string","value":"---\ndatasets:\n- bigscience/P3\nlanguage: en\nlicense: apache-2.0\nwidget:\n- text: \"input: The item was packaged in bubble wrap. \\n\\\n \\ - It was fragile.\\n - It was small.\\n output: It was fragile.\"\n---\n\n**Official repository**: [seonghyeonye/Flipped-Learning](https://github.com/seonghyeonye/Flipped-Learning)\n# Model Description\nFLIPPED uses a unique meta-learning method to show zero-shot task generalization on classification natural language prompts, outperforming GPT-3 and T0-11B on many tasks with a 4x smaller scale.\nIt is a series of encoder-decoder model trained on a numerous classification dataset. We show inputs and its corresponding outputs of each instances in each dataset to FLIPPED, and train it to generate its possible instruction. We add unlikelihood loss in order **not** to generate the instruction when given the same input, but a wrong output. To obtain FLIPPED, we fine-tune a T5 model in a given scale on a multitask mixture covering many different classification NLP tasks.\n# Intended uses\nYou can use the models to perform inference on tasks by specifying your input-output NLP query in a \"input: {input}\\noutput: {output}\" form , and the model will predict the instruction. For example, You can try \n*\"input: this is the best cast iron skillet you will ever buy\\noutput: Positive\"*\nas an input, and the model will hopefully generate *\"Title: Review:\"*.\n\n# How to use\nOur overall explanation models along with ablations can be found in our [paper](https://arxiv.org/abs/2210.02969). 
We recommend using the [FLIPPED-11B](seonghyeonye/flipped_11B) checkpoint as it leads (on average) to the best performances on a variety of NLP tasks.\n|Model|Number of parameters|\n|-|-|\n|[Flipped_11B](https://huggingface.co/seonghyeonye/flipped_11B)|11 billion|\n|[Flipped_3B](https://huggingface.co/seonghyeonye/flipped_3B)|3 billion|\nHere is how to download the model in PyTorch:\n\n```python\nimport torch\nfrom transformers import T5Tokenizer, T5ForConditionalGeneration\n\nmodel = T5ForConditionalGeneration.from_pretrained(\"seonghyeonye/flipped_3B\")\ntokenizer = T5Tokenizer.from_pretrained(\"seonghyeonye/flipped_3B\")\n```\nIf you want to use another checkpoint, please replace the path in `T5Tokenizer` and `T5ForConditionalGeneration`.\nWe also provide a quick [Jupyter Notebook](https://github.com/seonghyeonye/Flipped-Learning/blob/master/flipped_inference.ipynb) where you can inference with our method.\n**Note: the model was trained with fp32 activations. As such, we highly discourage running inference with fp16.**\n\n# Training procedure\nFLIPPED models are based on [T5](https://huggingface.co/google/t5-v1_1-xl), a Transformer-based encoder-decoder language model pre-trained with a masked language modeling-style objective on [C4](https://huggingface.co/datasets/c4).\nAt a high level, the input text along with output label is fed to the encoder and the instruction text is produced by the decoder. The model is fine-tuned to autoregressively generate the target. We also feed input text along with a wrong input, adding an unlikelihood loss in order not to make model produce the proper instruction in that case. Here are our training details.\nTraining details:\n- Fine-tuning steps: 5'000\n- Input sequence length: 512\n- Target sequence length: 128\n- Batch size: 240\n- Optimizer: Adafactor\n- Learning rate: 5e-5\n- Dropout: 0.1\n- Sampling strategy: proportional to the number of examples in each dataset (we randomly sampled any dataset if it has over 500'000 examples so that it has at most 500'000 examples. Also, we randomly choose which instruction to generate for each training steps, so ideally each instruction appears *num_examples/num_templates* while training.)\n\n# Training data\nWe trained different variants T0 with different mixtures of datasets.\n|Model|Training datasets|\n|--|--|\n|FLIPPED_11B|- Multiple-Choice QA: CommonsenseQA, DREAM, QUAIL, QuaRTz, Social IQA, WiQA, Cosmos, QASC, Quarel, SciQ
- Sentiment: Amazon, App Reviews, IMDB, Rotten Tomatoes, Yelp
- Topic Classification: AG News, DBPedia
- Paraphrase Identification: MRPC, PAWS, QQP|\n|FLIPPED_3B|Same as FLIPPED_11B|\nWe only choose prompts examples that has output lables, which can be found on the dataset page.\n\n# Evaluation data\n\nWe evaluate our models on following datasets:\n|Task category|Datasets|\n|-|-|\n|Natural language inference|ANLI(R1, R2, R3), CB, RTE|\n|Coreference resolution|WSC, Winogrande|\n|Word sense disambiguation|WiC|\n|Sentence completion|COPA, HellaSwag, Story Cloze|\n|QA|PIQA, ARC-Challenge, OpenbookQA|\nWe also evaluate FLIPPED on a subset of [BIG-bench benchmark](https://github.com/google/BIG-bench):\n- Code description task\n- Conceptual combinations\n- Hindu knowledge json\n- Known unknowns\n- Language identification\n- Logic grid puzzle task\n- Logical deduction\n- Common misconceptions\n- Movie dialog same or different\n- Novel concepts\n- Strategyqa\n- Formal fallacies syllogisms negation\n- VitaminC\n- Winowhy multiple choice\n\n# Label generalization\nWe evaluate the robustness of models on following datasets with changing the output label of the datasets. The substitute words can be found in our [paper](https://arxiv.org/abs/2210.02969).\n|Task category|(Datasets, Template name)| \n|-|-|\n|Unseen tasks|(WSC, does the pronoun refer to), (CB, can we infer), (RTE, MNLI crowdsource)|\n|Seen tasks|(IMDB, Reviewer Enjoyment Yes No), (PAWS, Meaning) |\n The template name we used can be found in the [promptsource template library](https://github.com/bigscience-workshop/promptsource/tree/main/promptsource/templates). \n# BibTeX entry and citation info\n```bibtex\n@article{ye2022guess,\n title={Guess the Instruction! Flipped Learning Makes Language Models Stronger Zero-Shot Learners},\n author={Ye, Seonghyeon and Kim, Doyoung and Jang, Joel and Shin, Joongbo and Seo, Minjoon},\n journal={arXiv preprint arXiv:2210.02969},\n year={2022}\n}\n```"},"matched_bigbio_names":{"kind":"list like","value":["SCIQ"],"string":"[\n \"SCIQ\"\n]"}}},{"rowIdx":2255,"cells":{"id":{"kind":"string","value":"model-attribution-challenge/openai-gpt"},"author":{"kind":"string","value":"model-attribution-challenge"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","tf","rust","openai-gpt","text-generation","en","arxiv:1705.11168","arxiv:1803.02324","arxiv:1910.09700","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tf\",\n \"rust\",\n \"openai-gpt\",\n \"text-generation\",\n \"en\",\n \"arxiv:1705.11168\",\n \"arxiv:1803.02324\",\n \"arxiv:1910.09700\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2022-11-09T20:17:38Z","string":"2022-11-09T20:17:38Z"},"last_modified":{"kind":"string","value":"2022-07-22T07:57:33+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nlanguage: en\nlicense: mit\n---\n\n# OpenAI GPT\n\n## Table of Contents\n- [Model Details](#model-details)\n- [How To Get Started With the Model](#how-to-get-started-with-the-model)\n- [Uses](#uses)\n- [Risks, Limitations and Biases](#risks-limitations-and-biases)\n- [Training](#training)\n- [Evaluation](#evaluation)\n- [Environmental Impact](#environmental-impact)\n- [Technical Specifications](#technical-specifications)\n- [Citation Information](#citation-information)\n- [Model Card Authors](#model-card-authors)\n\n## Model 
Details\n\n**Model Description:** `openai-gpt` is a transformer-based language model created and released by OpenAI. The model is a causal (unidirectional) transformer pre-trained using language modeling on a large corpus with long range dependencies.\n\n- **Developed by:** Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever. See [associated research paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf) and [GitHub repo](https://github.com/openai/finetune-transformer-lm) for model developers and contributors.\n- **Model Type:** Transformer-based language model\n- **Language(s):** English\n- **License:** [MIT License](https://github.com/openai/finetune-transformer-lm/blob/master/LICENSE)\n- **Related Models:** [GPT2](https://huggingface.co/gpt2), [GPT2-Medium](https://huggingface.co/gpt2-medium), [GPT2-Large](https://huggingface.co/gpt2-large) and [GPT2-XL](https://huggingface.co/gpt2-xl)\n- **Resources for more information:**\n - [Research Paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf)\n - [OpenAI Blog Post](https://openai.com/blog/language-unsupervised/)\n - [GitHub Repo](https://github.com/openai/finetune-transformer-lm)\n - Test the full generation capabilities here: https://transformer.huggingface.co/doc/gpt\n\n## How to Get Started with the Model \n\nUse the code below to get started with the model. You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we\nset a seed for reproducibility:\n\n```python\n>>> from transformers import pipeline, set_seed\n>>> generator = pipeline('text-generation', model='openai-gpt')\n>>> set_seed(42)\n>>> generator(\"Hello, I'm a language model,\", max_length=30, num_return_sequences=5)\n\n[{'generated_text': \"Hello, I'm a language model,'he said, when i was finished.'ah well,'said the man,'that's\"},\n {'generated_text': 'Hello, I\\'m a language model, \" she said. \\n she reached the bottom of the shaft and leaned a little further out. it was'},\n {'generated_text': 'Hello, I\\'m a language model, \" she laughed. \" we call that a\\'white girl.\\'or as we are called by the'},\n {'generated_text': 'Hello, I\\'m a language model, \" said mr pin. \" an\\'the ones with the funny hats don\\'t. \" the rest of'},\n {'generated_text': 'Hello, I\\'m a language model, was\\'ere \\'bout to do some more dancin \\', \" he said, then his voice lowered to'}]\n```\n\nHere is how to use this model in PyTorch:\n\n```python\nfrom transformers import OpenAIGPTTokenizer, OpenAIGPTModel\nimport torch\n\ntokenizer = OpenAIGPTTokenizer.from_pretrained(\"openai-gpt\")\nmodel = OpenAIGPTModel.from_pretrained(\"openai-gpt\")\n\ninputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"pt\")\noutputs = model(**inputs)\n\nlast_hidden_states = outputs.last_hidden_state\n```\n\nand in TensorFlow:\n\n```python\nfrom transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel\n\ntokenizer = OpenAIGPTTokenizer.from_pretrained(\"openai-gpt\")\nmodel = TFOpenAIGPTModel.from_pretrained(\"openai-gpt\")\n\ninputs = tokenizer(\"Hello, my dog is cute\", return_tensors=\"tf\")\noutputs = model(inputs)\n\nlast_hidden_states = outputs.last_hidden_state\n```\n\n## Uses\n\n#### Direct Use\n\nThis model can be used for language modeling tasks.\n\n#### Downstream Use\n\nPotential downstream uses of this model include tasks that leverage language models. 
In the [associated paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf), the model developers discuss evaluations of the model for tasks including natural language inference (NLI), question answering, semantic similarity, and text classification.\n\n#### Misuse and Out-of-scope Use\n\nThe model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model.\n\n## Risks, Limitations and Biases\n\n#### Biases\n\n**CONTENT WARNING: Readers should be aware that language generated by this model can be disturbing or offensive to some and can propagate historical and current stereotypes.**\n\nSignificant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). \nPredictions generated by this model can include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups. For example:\n\n```python\n>>> from transformers import pipeline, set_seed\n>>> generator = pipeline('text-generation', model='openai-gpt')\n>>> set_seed(42)\n>>> generator(\"The man worked as a\", max_length=10, num_return_sequences=5)\n\n[{'generated_text': 'The man worked as a teacher for the college he'},\n {'generated_text': 'The man worked as a janitor at the club.'},\n {'generated_text': 'The man worked as a bodyguard in america. the'},\n {'generated_text': 'The man worked as a clerk for one of the'},\n {'generated_text': 'The man worked as a nurse, but there was'}]\n\n>>> set_seed(42)\n>>> generator(\"The woman worked as a\", max_length=10, num_return_sequences=5)\n\n[{'generated_text': 'The woman worked as a medical intern but is a'},\n {'generated_text': 'The woman worked as a midwife, i know that'},\n {'generated_text': 'The woman worked as a prostitute in a sex club'},\n {'generated_text': 'The woman worked as a secretary for one of the'},\n {'generated_text': 'The woman worked as a nurse, but she had'}]\n```\n\nThis bias may also affect fine-tuned versions of this model. Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model.\n\n#### Risks and Limitations\n\nThe model developers also wrote in a [blog post](https://openai.com/blog/language-unsupervised/) about risks and limitations of the model, including: \n\n> - **Compute Requirements:** Many previous approaches to NLP tasks train relatively small models on a single GPU from scratch. Our approach requires an expensive pre-training step - 1 month on 8 GPUs. Luckily, this only has to be done once and we’re releasing our model so others can avoid it. It is also a large model (in comparison to prior work) and consequently uses more compute and memory — we used a 37-layer (12 block) Transformer architecture, and we train on sequences of up to 512 tokens. Most experiments were conducted on 4 and 8 GPU systems. The model does fine-tune to new tasks very quickly which helps mitigate the additional resource requirements.\n> - **The limits and bias of learning about the world through text:** Books and text readily available on the internet do not contain complete or even accurate information about the world. 
Recent work ([Lucy and Gauthier, 2017](https://arxiv.org/abs/1705.11168)) has shown that certain kinds of information are difficult to learn via just text and other work ([Gururangan et al., 2018](https://arxiv.org/abs/1803.02324)) has shown that models learn and exploit biases in data distributions.\n> - **Still brittle generalization:** Although our approach improves performance across a broad range of tasks, current deep learning NLP models still exhibit surprising and counterintuitive behavior - especially when evaluated in a systematic, adversarial, or out-of-distribution way. Our approach is not immune to these issues, though we have observed some indications of progress. Our approach shows improved lexical robustness over previous purely neural approaches to textual entailment. On the dataset introduced in Glockner et al. (2018) our model achieves 83.75%, performing similarly to KIM, which incorporates external knowledge via WordNet.\n\n## Training\n\n#### Training Data\n\nThe model developers [write](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf): \n\n> We use the BooksCorpus dataset ([Zhu et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zhu_Aligning_Books_and_ICCV_2015_paper.pdf)) for training the language model. It contains over 7,000 unique unpublished books from a variety of genres including Adventure, Fantasy, and Romance. Crucially, it contains long stretches of contiguous text, which allows the generative model to learn to condition on long-range information.\n\n#### Training Procedure\n\nThe model developers [write](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf): \n\n> Our model largely follows the original transformer work [62]. We trained a 12-layer decoder-only transformer with masked self-attention heads (768 dimensional states and 12 attention heads). For the position-wise feed-forward networks, we used 3072 dimensional inner states. We used the Adam optimization scheme [27] with a max learning rate of 2.5e-4. The learning rate was increased linearly from zero over the first 2000 updates and annealed to 0 using a cosine schedule. We train for 100 epochs on minibatches of 64 randomly sampled, contiguous sequences of 512 tokens. Since layernorm [2] is used extensively throughout the model, a simple weight initialization of N (0, 0.02) was sufficient. We used a bytepair encoding (BPE) vocabulary with 40,000 merges [53] and residual, embedding, and attention dropouts with a rate of 0.1 for regularization. We also employed a modified version of L2 regularization proposed in [37], with w = 0.01 on all non bias or gain weights. For the activation function, we used the Gaussian Error Linear Unit (GELU) [18]. We used learned position embeddings instead of the sinusoidal version proposed in the original work. We use the ftfy library2 to clean the raw text in BooksCorpus, standardize some punctuation and whitespace, and use the spaCy tokenizer.\n\nSee the paper for further details and links to citations.\n\n## Evaluation\n\nThe following evaluation information is extracted from the [associated blog post](https://openai.com/blog/language-unsupervised/). 
See the [associated paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf) for further details.\n\n#### Testing Data, Factors and Metrics\n\nThe model developers report that the model was evaluated on the following tasks and datasets using the listed metrics: \n\n- **Task:** Textual Entailment\n - **Datasets:** [SNLI](https://huggingface.co/datasets/snli), [MNLI Matched](https://huggingface.co/datasets/glue), [MNLI Mismatched](https://huggingface.co/datasets/glue), [SciTail](https://huggingface.co/datasets/scitail), [QNLI](https://huggingface.co/datasets/glue), [RTE](https://huggingface.co/datasets/glue)\n - **Metrics:** Accuracy \n \n- **Task:** Semantic Similarity\n - **Datasets:** [STS-B](https://huggingface.co/datasets/glue), [QQP](https://huggingface.co/datasets/glue), [MRPC](https://huggingface.co/datasets/glue)\n - **Metrics:** Accuracy\n \n- **Task:** Reading Comprehension\n - **Datasets:** [RACE](https://huggingface.co/datasets/race)\n - **Metrics:** Accuracy\n \n- **Task:** Commonsense Reasoning\n - **Datasets:** [ROCStories](https://huggingface.co/datasets/story_cloze), [COPA](https://huggingface.co/datasets/xcopa)\n - **Metrics:** Accuracy\n \n- **Task:** Sentiment Analysis\n - **Datasets:** [SST-2](https://huggingface.co/datasets/glue)\n - **Metrics:** Accuracy\n \n- **Task:** Linguistic Acceptability\n - **Datasets:** [CoLA](https://huggingface.co/datasets/glue)\n - **Metrics:** Accuracy\n \n- **Task:** Multi Task Benchmark\n - **Datasets:** [GLUE](https://huggingface.co/datasets/glue)\n - **Metrics:** Accuracy\n\n#### Results\n\nThe model achieves the following results without any fine-tuning (zero-shot):\n\n| Task | TE | TE | TE |TE | TE | TE | SS | SS | SS | RC | CR | CR | SA | LA | MTB |\n|:--------:|:--:|:----------:|:-------------:|:-----:|:----:|:---:|:---:|:---:|:--:|:----:|:--------:|:----:|:----:|:----:|:----:|\n| Dataset |SNLI|MNLI Matched|MNLI Mismatched|SciTail| QNLI | RTE |STS-B| QQP |MPRC|RACE |ROCStories|COPA | SST-2| CoLA | GLUE |\n| |89.9| 82.1 | 81.4 |88.3 | 88.1 | 56.0|82.0 | 70.3|82.3|59.0 | 86.5 | 78.6 | 91.3 | 45.4 | 72.8 | \n\n## Environmental Impact\n\nThe model developers [report that](https://openai.com/blog/language-unsupervised/): \n\n> The total compute used to train this model was 0.96 petaflop days (pfs-days).\n\n> 8 P600 GPU's * 30 days * 12 TFLOPS/GPU * 0.33 utilization = .96 pfs-days\n\nCarbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700).\n\n- **Hardware Type:** 8 P600 GPUs\n- **Hours used:** 720 hours (30 days)\n- **Cloud Provider:** Unknown\n- **Compute Region:** Unknown\n- **Carbon Emitted:** Unknown\n\n## Technical Specifications\n\nSee the [associated paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf) for details on the modeling architecture, objective, compute infrastructure, and training details.\n\n## Citation Information\n\n```bibtex\n@article{radford2018improving,\n title={Improving language understanding by generative pre-training},\n author={Radford, Alec and Narasimhan, Karthik and Salimans, Tim and Sutskever, Ilya and others},\n year={2018},\n publisher={OpenAI}\n}\n```\n\nAPA: \n*Radford, A., Narasimhan, K., Salimans, T., & Sutskever, I. (2018). 
Improving language understanding by generative pre-training.*\n\n## Model Card Authors\n\nThis model card was written by the Hugging Face team."},"matched_bigbio_names":{"kind":"list like","value":["SCITAIL"],"string":"[\n \"SCITAIL\"\n]"}}},{"rowIdx":2256,"cells":{"id":{"kind":"string","value":"BigSalmon/InformalToFormalLincoln99Paraphrase"},"author":{"kind":"string","value":"BigSalmon"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","tensorboard","gpt2","text-generation","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tensorboard\",\n \"gpt2\",\n \"text-generation\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-05-23T02:54:21Z","string":"2023-05-23T02:54:21Z"},"last_modified":{"kind":"string","value":"2023-05-23T03:45:21+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\ndata: https://github.com/BigSalmon2/InformalToFormalDataset\n\nText Generation Informal Formal\n\n```\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\ntokenizer = AutoTokenizer.from_pretrained(\"BigSalmon/InformalToFormalLincoln99Paraphrase\")\nmodel = AutoModelForCausalLM.from_pretrained(\"BigSalmon/InformalToFormalLincoln99Paraphrase\")\n```\n\n```\nDemo:\nhttps://huggingface.co/spaces/BigSalmon/FormalInformalConciseWordy\n```\n\n```\nprompt = \"\"\"informal english: corn fields are all across illinois, visible once you leave chicago.\\nTranslated into the Style of Abraham Lincoln:\"\"\"\ninput_ids = tokenizer.encode(prompt, return_tensors='pt')\noutputs = model.generate(input_ids=input_ids,\n max_length=10 + len(prompt),\n temperature=1.0,\n top_k=50,\n top_p=0.95,\n do_sample=True,\n num_return_sequences=5,\n early_stopping=True)\nfor i in range(5):\n print(tokenizer.decode(outputs[i]))\n```\nMost likely outputs (Disclaimer: I highly recommend using this over just generating):\n```\nprompt = \"\"\"informal english: corn fields are all across illinois, visible once you leave chicago.\\nTranslated into the Style of Abraham Lincoln:\"\"\"\ntext = tokenizer.encode(prompt)\nmyinput, past_key_values = torch.tensor([text]), None\nmyinput = myinput\nmyinput= myinput.to(device)\nlogits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)\nlogits = logits[0,-1]\nprobabilities = torch.nn.functional.softmax(logits)\nbest_logits, best_indices = logits.topk(250)\nbest_words = [tokenizer.decode([idx.item()]) for idx in best_indices]\ntext.append(best_indices[0].item())\nbest_probabilities = probabilities[best_indices].tolist()\nwords = [] \nprint(best_words)\n```\n\n```\nHow To Make Prompt:\ninformal english: i am very ready to do that just that.\nTranslated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end.\nTranslated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task.\n***\ninformal english: space is huge and needs to be explored.\nTranslated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless.\nTranslated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration.\n***\ninformal 
english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.\ninformal english:\n```\n\n```\noriginal: microsoft word's [MASK] pricing invites competition.\nTranslated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition.\n***\noriginal: the library’s quiet atmosphere encourages visitors to [blank] in their work.\nTranslated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work.\n```\n\n```\nEssay Intro (Warriors vs. Rockets in Game 7):\ntext: eagerly anticipated by fans, game 7's are the highlight of the post-season.\ntext: ever-building in suspense, game 7's have the crowd captivated.\n***\nEssay Intro (South Korean TV Is Becoming Popular):\ntext: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ).\ntext: increasingly held in critical esteem, south korean television continues to impress.\ntext: at the forefront of quality content, south korea is quickly achieving celebrity status.\n***\nEssay Intro (\n```\n\n```\nSearch: What is the definition of Checks and Balances?\nhttps://en.wikipedia.org/wiki/Checks_and_balances\nChecks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate.\nhttps://www.harvard.edu/glossary/Checks_and_Balances\nChecks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power\nhttps://www.law.cornell.edu/library/constitution/Checks_and_Balances\nChecks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power.\n***\nSearch: What is the definition of Separation of Powers?\nhttps://en.wikipedia.org/wiki/Separation_of_powers\nThe separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power.\nhttps://www.yale.edu/tcf/Separation_of_Powers.html\nSeparation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined.\n***\nSearch: What is the definition of Connection of Powers?\nhttps://en.wikipedia.org/wiki/Connection_of_powers\nConnection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches.\nhttps://simple.wikipedia.org/wiki/Connection_of_powers\nThe term Connection of Powers describes a system of government in which there is overlap between different parts of the government.\n***\nSearch: What is the definition of\n```\n\n```\nSearch: What are phrase synonyms for \"second-guess\"?\nhttps://www.powerthesaurus.org/second-guess/synonyms\nShortest to Longest:\n- feel dubious about\n- raise an eyebrow at\n- wrinkle their noses at\n- cast a jaundiced eye 
at\n- teeter on the fence about\n***\nSearch: What are phrase synonyms for \"mean to newbies\"?\nhttps://www.powerthesaurus.org/mean_to_newbies/synonyms\nShortest to Longest:\n- readiness to balk at rookies\n- absence of tolerance for novices\n- hostile attitude toward newcomers\n***\nSearch: What are phrase synonyms for \"make use of\"?\nhttps://www.powerthesaurus.org/make_use_of/synonyms\nShortest to Longest:\n- call upon\n- glean value from\n- reap benefits from\n- derive utility from\n- seize on the merits of\n- draw on the strength of\n- tap into the potential of\n***\nSearch: What are phrase synonyms for \"hurting itself\"?\nhttps://www.powerthesaurus.org/hurting_itself/synonyms\nShortest to Longest:\n- erring\n- slighting itself\n- forfeiting its integrity\n- doing itself a disservice\n- evincing a lack of backbone\n***\nSearch: What are phrase synonyms for \"\n```\n```\n- nebraska\n- unicamerical legislature\n- different from federal house and senate\ntext: featuring a unicameral legislature, nebraska's political system stands in stark contrast to the federal model, comprised of a house and senate.\n***\n- penny has practically no value\n- should be taken out of circulation\n- just as other coins have been in us history\n- lost use\n- value not enough\n- to make environmental consequences worthy\ntext: all but valueless, the penny should be retired. as with other coins in american history, it has become defunct. too minute to warrant the environmental consequences of its production, it has outlived its usefulness.\n***\n-\n```\n```\noriginal: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. \ninfill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. \n***\noriginal:\n```\n\n```\nwordy: classical music is becoming less popular more and more.\nTranslate into Concise Text: interest in classic music is fading.\n***\nwordy:\n```\n\n```\nsweet: savvy voters ousted him.\nlonger: voters who were informed delivered his defeat.\n***\nsweet:\n```\n\n```\n1: commercial space company spacex plans to launch a whopping 52 flights in 2022.\n2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022.\n3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights.\n4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company.\n5: a commercial space company, spacex aims to conduct 52 flights in 2022.\n***\n1:\n```\n\nKeywords to sentences or sentence.\n\n```\nngos are characterized by:\n□ voluntary citizens' group that is organized on a local, national or international level\n□ encourage political participation\n□ often serve humanitarian functions\n□ work for social, economic, or environmental change\n***\nwhat are the drawbacks of living near an airbnb?\n□ noise\n□ parking\n□ traffic\n□ security\n□ strangers\n***\n```\n\n\n```\noriginal: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung.\nadapted: musicals generally use spoken dialogue as well as songs to convey the story. 
( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung.\n***\noriginal: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark.\nadapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark.\n***\noriginal:\n```\n\n```\noriginal: had trouble deciding.\ntranslated into journalism speak: wrestled with the question, agonized over the matter, furrowed their brows in contemplation.\n***\noriginal:\n```\n\n```\ninput: not loyal\n1800s english: ( two-faced / inimical / perfidious / duplicitous / mendacious / double-dealing / shifty ).\n***\ninput:\n```\n\n```\nfirst: ( was complicit in / was involved in ).\nantonym: ( was blameless / was not an accomplice to / had no hand in / was uninvolved in ).\n***\nfirst: ( have no qualms about / see no issue with ).\nantonym: ( are deeply troubled by / harbor grave reservations about / have a visceral aversion to / take ( umbrage at / exception to ) / are wary of ).\n***\nfirst: ( do not see eye to eye / disagree often ).\nantonym: ( are in sync / are united / have excellent rapport / are like-minded / are in step / are of one mind / are in lockstep / operate in perfect harmony / march in lockstep ).\n***\nfirst:\n```\n\n```\nstiff with competition, law school {A} is the launching pad for countless careers, {B} is a crowded field, {C} ranks among the most sought-after professional degrees, {D} is a professional proving ground.\n***\nlanguishing in viewership, saturday night live {A} is due for a creative renaissance, {B} is no longer a ratings juggernaut, {C} has been eclipsed by its imitators, {C} can still find its mojo.\n***\ndubbed the \"manhattan of the south,\" atlanta {A} is a bustling metropolis, {B} is known for its vibrant downtown, {C} is a city of rich history, {D} is the pride of georgia.\n***\nembattled by scandal, harvard {A} is feeling the heat, {B} cannot escape the media glare, {C} is facing its most intense scrutiny yet, {D} is in the spotlight for all the wrong reasons.\n```\n\nInfill / Infilling / Masking / Phrase Masking (Works pretty decently actually, especially when you use logprobs code from above):\n\n```\nhis contention [blank] by the evidence [sep] was refuted [answer]\n***\nfew sights are as [blank] new york city as the colorful, flashing signage of its bodegas [sep] synonymous with [answer]\n***\nwhen rick won the lottery, all of his distant relatives [blank] his winnings [sep] clamored for [answer]\n***\nthe library’s quiet atmosphere encourages visitors to [blank] in their work [sep] immerse themselves [answer]\n***\nthe joy of sport is that no two games are alike. for every exhilarating experience, however, there is an interminable one. the national pastime, unfortunately, has a penchant for the latter. what begins as a summer evening at the ballpark can quickly devolve into a game of tedium. the primary culprit is the [blank] of play. from batters readjusting their gloves to fielders spitting on their mitts, the action is [blank] unnecessary interruptions. 
the sport's future is [blank] if these tendencies are not addressed [sep] plodding pace [answer] riddled with [answer] bleak [answer]\n***\nmicrosoft word's [blank] pricing [blank] competition [sep] unconscionable [answer] invites [answer]\n***\n```\n\n```\noriginal: microsoft word's [MASK] pricing invites competition.\nTranslated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition.\n***\noriginal: the library’s quiet atmosphere encourages visitors to [blank] in their work.\nTranslated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work.\n```\n\nBackwards\n```\nEssay Intro (National Parks):\ntext: tourists are at ease in the national parks, ( swept up in the beauty of their natural splendor ).\n***\nEssay Intro (D.C. Statehood):\nwashington, d.c. is a city of outsize significance, ( ground zero for the nation's political life / center stage for the nation's political machinations ).\n```\n\n```\ntopic: the Golden State Warriors.\ncharacterization 1: the reigning kings of the NBA.\ncharacterization 2: possessed of a remarkable cohesion.\ncharacterization 3: helmed by superstar Stephen Curry.\ncharacterization 4: perched atop the league’s hierarchy.\ncharacterization 5: boasting a litany of hall-of-famers.\n***\ntopic: emojis.\ncharacterization 1: shorthand for a digital generation.\ncharacterization 2: more versatile than words.\ncharacterization 3: the latest frontier in language.\ncharacterization 4: a form of self-expression.\ncharacterization 5: quintessentially millennial.\ncharacterization 6: reflective of a tech-centric world.\n***\ntopic:\n```\n\n\n```\nregular: illinois went against the census' population-loss prediction by getting more residents.\nVBG: defying the census' prediction of population loss, illinois experienced growth.\n***\nregular: microsoft word’s high pricing increases the likelihood of competition.\nVBG: extortionately priced, microsoft word is inviting competition.\n***\nregular:\n```\n\n```\nsource: badminton should be more popular in the US.\nQUERY: Based on the given topic, can you develop a story outline?\ntarget: (1) games played with racquets are popular, (2) just look at tennis and ping pong, (3) but badminton underappreciated, (4) fun, fast-paced, competitive, (5) needs to be marketed more\ntext: the sporting arena is dominated by games that are played with racquets. tennis and ping pong, in particular, are immensely popular. somewhat curiously, however, badminton is absent from this pantheon. exciting, fast-paced, and competitive, it is an underappreciated pastime. all that it lacks is more effective marketing.\n***\nsource: movies in theaters should be free.\nQUERY: Based on the given topic, can you develop a story outline?\ntarget: (1) movies provide vital life lessons, (2) many venues charge admission, (3) those without much money\ntext: the lessons that movies impart are far from trivial. the vast catalogue of cinematic classics is replete with inspiring sagas of friendship, bravery, and tenacity. it is regrettable, then, that admission to theaters is not free. in their current form, the doors of this most vital of institutions are closed to those who lack the means to pay.\n***\nsource:\n```\n\n```\nin the private sector, { transparency } is vital to the business’s credibility. the { disclosure of information } can be the difference between success and failure.\n***\nthe labor market is changing, with { remote work } now the norm. 
this { flexible employment } allows the individual to design their own schedule.\n***\nthe { cubicle } is the locus of countless grievances. many complain that the { enclosed workspace } restricts their freedom of movement.\n***\n```\n\n```\nit would be natural to assume that americans, as a people whose ancestors { immigrated to this country }, would be sympathetic to those seeking to do likewise.\nquestion: what does “do likewise” mean in the above context?\n(a) make the same journey\n(b) share in the promise of the american dream\n(c) start anew in the land of opportunity\n(d) make landfall on the united states\n***\nin the private sector, { transparency } is vital to the business’s credibility. this orientation can be the difference between success and failure.\nquestion: what does “this orientation” mean in the above context?\n(a) visible business practices \n(b) candor with the public\n(c) open, honest communication\n(d) culture of accountability\n```\n\n```\nexample: suppose you are a teacher. further suppose you want to tell an accurate telling of history. then suppose a parent takes offense. they do so in the name of name of their kid. this happens a lot.\ntext: educators' responsibility to remain true to the historical record often clashes with the parent's desire to shelter their child from uncomfortable realities.\n***\nexample: suppose you are a student at college. now suppose you have to buy textbooks. that is going to be worth hundreds of dollars. given how much you already spend on tuition, that is going to hard cost to bear.\ntext: the exorbitant cost of textbooks, which often reaches hundreds of dollars, imposes a sizable financial burden on the already-strapped college student.\n```\n\n```\n the atlanta hawks may attribute trae young their robust season to \n***\n the nobel prize in literature honor is a singularly prestigious \n```\n\n```\naccustomed to having its name uttered ______, harvard university is weathering a rare spell of reputational tumult\n(a) in reverential tones\n(b) with great affection\n(c) in adulatory fashion\n(d) in glowing terms\n```\n\n```\nclarify: international ( {working together} / cooperation ) is called for when ( {issue go beyond lots of borders} / an issue transcends borders / a given matter has transnational implications ).\n```\n\n```\ndescription: when someone thinks that their view is the only right one.\nsynonyms: intolerant, opinionated, narrow-minded, insular, self-righteous.\n***\ndescription: when you put something off.\nsynonyms: shelve, defer, table, postpone.\n```\n\n```\norganic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea.\nrewrite phrases: meritocratic, viability, vision\nrewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability.\n```\n\n\n*Note* Of all the masking techniques, this one works the best.\n```\n the atlanta hawks may attribute trae young their robust season to \n***\n the nobel prize in literature honor is a singularly prestigious \n```\n\n```\nessence: when someone's views are keeping within reasonable.\nrefine: the senator's voting record is ( moderate / centrist / pragmatic / balanced / fair-minded / even-handed ).\n***\nessence: when things are worked through in a petty way.\nrefine: the propensity of the u.s. 
congress to settle every dispute by way of ( mudslinging / bickering / demagoguery / name-calling / finger-pointing / vilification ) is appalling.\n```\n\n```\ndescription: when someone thinks that their view is the only right one.\nsynonyms: intolerant, opinionated, narrow-minded, insular, self-righteous.\n\n***\n\ndescription: when you put something off.\nsynonyms: shelve, defer, table, postpone.\n```\n\n```\norganic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea.\nrewrite phrases: meritocratic, viability, vision\nrewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability.\n```\n\n```\nmusic before bedtime [makes for being able to relax] -> is a recipe for relaxation.\n```\n\n```\n[people wanting entertainment love traveling new york city] -> travelers flock to new york city in droves, drawn to its iconic entertainment scene. [cannot blame them] -> one cannot fault them [broadway so fun] -> when it is home to such thrilling fare as Broadway.\n```\n\n```\nin their ( ‖ when you are rushing because you want to get there on time ‖ / haste to arrive punctually / mad dash to be timely ), morning commuters are too rushed to whip up their own meal.\n\n***\n\npoliticians prefer to author vague plans rather than ( ‖ when you can make a plan without many unknowns ‖ / actionable policies / concrete solutions ).\n```\n\n```\nQ: What is whistleblower protection?\nA: Whistleblower protection is a form of legal immunity granted to employees who expose the unethical practices of their employer.\nQ: Why are whistleblower protections important?\nA: Absent whistleblower protections, employees would be deterred from exposing their employer’s wrongdoing for fear of retribution.\nQ: Why would an employer engage in retribution?\nA: An employer who has acted unethically stands to suffer severe financial and reputational damage were their transgressions to become public. To safeguard themselves from these consequences, they might seek to dissuade employees from exposing their wrongdoing.\n```\n\n```\noriginal: the meritocratic nature of crowdfunding [MASK] into their vision's viability.\ninfill: the meritocratic nature of crowdfunding [gives investors idea of how successful] -> ( offers entrepreneurs a window ) into their vision's viability.\n```\n\n```\nLeadership | Lecture 17: Worker Morale\n\nWhat Workers Look for in Companies:\n• Benefits\no Tuition reimbursement\no Paid parental leave\no 401K matching\no Profit sharing\no Pension plans\no Free meals\n• Social responsibility\no Environmental stewardship\no Charitable contributions\no Diversity\n• Work-life balance\no Telecommuting\no Paid holidays and vacation\no Casual dress\n• Growth opportunities\n• Job security\n• Competitive compensation\n• Recognition\no Open-door policies\no Whistleblower protection\no Employee-of-the-month awards\no Positive performance reviews\no Bonuses\n```\n\n```\ndescription: business\nkeywords: for-profit, fiduciary duty, monopolistic, bottom line, return on investment, short-term thinking, capital-intensive, self-interested, risk-taking, fiduciary duty, merger, speculation, profiteering, oversight, capitalism, diversification\n ```\n \n ```\n 3. In this task, you are given a company name and you need to find its industry.\n\nMcDonalds -- Restaurant\nFacebook -- Social Network\nIKEA -- Furniture\nAmerican Express -- Credit Services\nNokia -- Telecom\nNintendo -- Entertainment\n\n4. 
In this task, you are given a Month and you need to convert it to its corresponding season\n\nApril -- Spring\nDecember -- Winter\nJuly -- Summer\nOctober -- Fall\nFebruary -- Winter\n\n5. In this task, you are given a sentence with a missing word and you need to predict the correct word.\n\nManagers should set an _____ for their employees. -- example\nSome people spend more than four _____ in the gym. -- hours\nThe police were on the _____ of arresting the suspect. -- verge\nThey were looking for _____ on how to solve the problem. -- guidance\nWhat is the _____ of the coffee? -- price\n\n6. In this task, you are given a paragraph and you need to reorder it to make it logical.\n\nIt was first proposed in 1987. The total length of the bridge is 1,828 meters. The idea of a bridge connects Hong Kong to Macau. -- The idea of bridge connecting Hong Kong and Macau was first proposed in 1987. The total length of the bridge is 1,828 meters.\nIt is a movie about a brave and noble policeman. The film was produced by Americans. They were Kevin Lima and Chris Buck. They are directors. The movie is called Tarzan. -- Produced by Americans Kevin Lima and Chris Buck, Tarzan is a movie about a brave and noble policeman.\nIt was first discovered in the mountains of India. The active ingredients in this plant can stimulate hair growth. The plant is called \"Hair Plus.\" -- First discovered in the mountains of India, Hair Plus is a plant whose active ingredients can stimulate hair growth.\n```\n\n```\ntrivia: What is the population of South Korea?\nresponse: 51 million.\n\n***\n\ntrivia: What is the minimum voting age in the US?\nresponse: 18.\n\n***\n\ntrivia: What are the first ten amendments of the US constitution called?\nresponse: Bill of Rights.\n```\n\n```\nideas: in modern-day america, it is customary for the commander-in-chief to conduct regular press conferences\nrelated keywords: transparency, check and balance, sacrosanct, public accountability, adversarial, unscripted, direct access, open government, watchdog, healthy democracy, institutional integrity, right to know, direct line of communication, behind closed doors, updates, track progress, instill confidence, reassure, humanize, leadership style, day-to-day, forthcoming, demystify, ask hard questions\n\n***\n\nideas: i know this one guy who retired so young, attesting to how careful they were with money.\nrelated keywords: money management, resourceful, penny-pinching, live below their means, frugal, financial discipline, financial independence, conservative, long-term vision, discretionary spending, deferred gratification, preparedness, self-control, cushion\n```\n\n```\nless specific: actors and musicians should ( support democracy ).\nclarifies: actors and musicians should ( wield their celebrity to amplify pro-democracy messaging / marshal their considerable influence in the service of the democratic cause ).\n\n***\n\nless specific: amid a contemporary culture that thrives on profligacy, the discipline necessary to retire early is a vanishing quality. rather than yielding to the lure of indulgence, the aspiring retiree must ( be careful ).\nclarifies: amid a contemporary culture that thrives on profligacy, the discipline necessary to retire early is a vanishing quality. 
rather than yielding to the lure of indulgence, the aspiring retiree must ( master their desires / exercise self-restraint / embrace frugality / restrain their appetite for splendor ).\n```\n\n```\ndull: clean\nemotional heft: spotless, immaculate, pristine\n\n***\n\ndull: hot\nemotional heft: scorching, searing, blistering\n\n***\n\ndull: happy\nemotional heft: euphoric\n\n```\n\n```\ntext: {guide: vividly describe the premise of the show \"seinfield\"} -> set in the heart of bustling new york city, the sitcom \"seinfield\" follows the everyday {restrict: term that implies they get into trouble but in a comical way} -> misadventures of four neurotic friends. on any given episode, one can find them quarreling over their favorite diner's latest menu change, haggling over the division of household expenses, or contriving a scheme to spy on the apartment's newest resident. mundane as their exploits may be, they never fail to elicit a hearty laugh. {guide: mention how larry david is responsible} -> behind the show's witty, incisive dialogue lies the sharp pen of larry david, who co-created the show with jerry seinfeld. {guide: mention how larry david came up with the most recognizable lines} -> it is his genius that conjured such instantly {restrict: term that imply everybody knows them} -> recognizable quips as \"no soup for you!\" and \"not that there's anything wrong with that!\". {guide: mention how humanity should revel in having such good comedians these days} -> as this list of laugh-out-loud lines attests, the world is fortunate to ( count such a sharp comedic mind among its ranks / have such a talented humorist in its midst / have such comedic talent gracing its airwaves ).\n```\n\n```\nsystem: i'm not sure i agree with your assessment of the situation.\nuser: replace \"assessment\" with a more casual term.\nchatbot: ( take / read / view ).\n\nsystem: i've been feeling really busy lately, trying to balance work and family responsibilities.\nuser: replace \"busy\" with a more intense word that conveys it is too much.\nchatbot: overwhelmed.\n\nsystem: the candidate's stance on immigration is quite nebulous, and it's difficult to discern exactly what they would do if elected.\nuser: replace \"nebulous\" with a word that conveys a lack of detail.\nchatbot: ( vague / ambiguous ).\n```\n\n```\nuser: exorbitant in price, scholarly journals unconscionably define the breadth of one's knowledge awareness by the contents of their wallet. 
[replace “knowledge awareness” with a more natural expression]\nchatbot: intellectual horizons.\nuser: can you do another alternative to “intellectual horizons” that has more relation to “scholarly journals”?\nchatbot: academic enlightenment.\n```\n\n```\nkey: calculate.\nsyn: estimate, consider, weigh, number, count, apportion, proportion, investigate, reckon, rate, compute.\nant: guess, conjecture, hit, chance, risk, stake, miscalculate.\n```\n\n```\ndescription: more forceful version of curious that is less forceful than nosy\nanswer: inquisitive\n\ndescription: more forceful version of hopeful that is less forceful than overconfident\nanswer: optimistic\n```"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2257,"cells":{"id":{"kind":"string","value":"ICTNLP/bayling-13b-diff"},"author":{"kind":"string","value":"ICTNLP"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","llama","text-generation","translation","multilingual","large language model","instruction tuning","zh","en","arxiv:2306.10968","license:gpl-3.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"llama\",\n \"text-generation\",\n \"translation\",\n \"multilingual\",\n \"large language model\",\n \"instruction tuning\",\n \"zh\",\n \"en\",\n \"arxiv:2306.10968\",\n \"license:gpl-3.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-06-14T08:21:30Z","string":"2023-06-14T08:21:30Z"},"last_modified":{"kind":"string","value":"2023-07-06T13:52:12+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":12,"string":"12"},"README":{"kind":"string","value":"---\nlanguage:\n- zh\n- en\nlicense: gpl-3.0\npipeline_tag: text-generation\ntags:\n- translation\n- multilingual\n- large language model\n- instruction tuning\n---\n\n# BayLing: Bridging Cross-lingual Alignment and Instruction Following through Interactive Translation for Large Language Models\n\n**BayLing** (**百聆**, **bǎi líng**) is an instruction-following LLM equipped with advanced language alignment, showing superior capability in English/Chinese generation, instruction following and multi-turn interaction. BayLing can be effortlessly deployed on a consumer-grade GPU with 16GB of memory, and assists users with tasks such as translation, writing, creation, suggestion...\n\n**This model is the *weight-diff* version of BayLing-13B-v1.0.**\n\n[BayLing-13B-v1.1](https://huggingface.co/ICTNLP/bayling-13b-v1.1) has been released, **BayLing-13B-v1.1 is additionally injected with extensive Chinese knowledge** compared with this model.\n\n👇 Learn more about BayLing:\n\n💬 [**Demo**](http://nlp.ict.ac.cn/bayling/demo): Welcome to apply for a trial of BayLing's online demo (beta version).\n\n📄 [**Paper**](https://arxiv.org/abs/2306.10968): A comprehensive research paper of BayLing.\n\n🏠 [**Homepage**](http://nlp.ict.ac.cn/bayling): BayLing's homepage. 
You can discover more information and cases of BayLing here.\n\n✍️ [**BayLing-80 Test Set**](https://github.com/ictnlp/BayLing/tree/main/data/BayLing-80): A human-annotated evaluation set comprising multi-turn instructions in both English and Chinese, can be used to evaluate the multilingual and multi-turn interaction capabilities of LLMs.\n\n🤗 **Model**: The *weight-diff* version of [BayLing-7B](https://huggingface.co/ICTNLP/bayling-7b-diff) and [BayLing-13B](https://huggingface.co/ICTNLP/bayling-13b-diff), you can quickly get the parameters of BayLing through [apply_delta.py](https://github.com/ictnlp/BayLing/blob/main/apply_delta.py). The HF models of BayLing are anonymized version (exclude BayLing's name in its knowledge), in order to facilitate future LLMs to build upon BayLing.\n\n> BayLing is developed by [NLP Group](http://nlp.ict.ac.cn/) of [Institute of Computing Technology](http://www.ict.ac.cn/), [Chinese Academy of Sciences](https://www.cas.cn/) (ICT/CAS)\n>\n> BayLing is continuously optimizing 🆙\n> If you have any suggestions, please contact `bayling@ict.ac.cn`. Thanks for your support!\n\n\n**Refer to our [Github Repo](https://github.com/ictnlp/BayLing) for the detailed introduction to BayLing, including deploying BayLing, interacting with BayLing and BayLing's performance.**\n\n\n\n## Limitations\n\nDespite demonstrating commendable performance in certain aspects, BayLing still exhibits several limitations. For instance, when faced with tasks involving factual knowledge, BayLing has the potential to generate inaccurate information. Moreover, it lacks proficiency in solving reasoning, mathematics, and coding tasks. Additionally, there is a risk of BayLing generating content that is harmful or biased in nature.\n\nBayLing is a large language model that, like any other language model, cannot guarantee the absolute accuracy of the generated content. **Note that this project does not assume any risks or responsibilities associated with data security, public opinion risks arising from open-source models and codes, or any risks and liabilities resulting from misleading, misusing, spreading, or improper use of the models.**\n\n## License\n\nModel weights (delta version) and the inference code are released under The GNU General Public License v3.0 (GPLv3). The online demo serves as a research preview and is exclusively intended for non-commercial usage, subject to the [Model License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT and [Data License](https://machinetranslate.org/wmt22) of WMT22.\n\n## Acknowledgements\n\nWe would like to express our gratitude to all those who have contributed to BayLing. We extend special thanks to Ms. Xiaohong Wang for her valuable comments and suggestions on the use of InforSuperBahn MLOps, and for her organizational and resource support in providing computing resources and showcasing BayLing. We also acknowledge Xiaodong Liu for his pivotal role in the construction of the distributed system and overall coordination of the demo deployment. 
Furthermore, we appreciate the contribution of the development team from the Nanjing Institute of InforSuperBahn in maintaining the computing resources and creating the display interface for BayLing’s webpage and demo.\n\n## Authors\n\n | [Shaolei Zhang](https://zhangshaolei1998.github.io/) | [Qingkai Fang](https://fangqingkai.github.io/) | [Zhuocheng Zhang](https://nlp.ict.ac.cn/yjdw/xs/bsyjs/202210/t20221019_52678.html) | [Zhengrui Ma](https://nlp.ict.ac.cn/yjdw/xs/bsyjs/202210/t20221019_52675.html) |\n\n | [Yan Zhou](https://zhouyan19.github.io/zhouyan/) | [Langlin Huang](https://nlp.ict.ac.cn/yjdw/xs/ssyjs/202210/t20221019_52686.html) | [Mengyu Bu](https://bingo123122121.github.io/) | [Shangtong Gui](https://github.com/GhostofAdam) |\n\n | [Yunji Chen](http://novel.ict.ac.cn/ychen/) | [Xilin Chen](http://www.ict.cas.cn/sourcedb_2018_ict_cas/cn/jssrck/200909/t20090917_2496595.html) | [Yang Feng \\*](https://people.ucas.edu.cn/~yangfeng?language=en) |\n\n## Citation\n\nIf our work is helpful for you, please cite as:\n\n```\n@article{bayling,\n title={BayLing: Bridging Cross-lingual Alignment and Instruction Following through Interactive Translation for Large Language Models}, \n author={Shaolei Zhang and Qingkai Fang and Zhuocheng Zhang and Zhengrui Ma and Yan Zhou and Langlin Huang and Mengyu Bu and Shangtong Gui and Yunji Chen and Xilin Chen and Yang Feng},\n journal={arXiv preprint arXiv:2306.10968},\n year={2023},\n url={https://arxiv.org/abs/2306.10968}\n}\n```\n"},"matched_bigbio_names":{"kind":"list like","value":["CAS"],"string":"[\n \"CAS\"\n]"}}},{"rowIdx":2258,"cells":{"id":{"kind":"string","value":"IIC/xlm-roberta-large-meddocan"},"author":{"kind":"string","value":"IIC"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","xlm-roberta","text-classification","biomedical","clinical","spanish","xlm-roberta-large","token-classification","es","dataset:bigbio/meddocan","license:mit","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"xlm-roberta\",\n \"text-classification\",\n \"biomedical\",\n \"clinical\",\n \"spanish\",\n \"xlm-roberta-large\",\n \"token-classification\",\n \"es\",\n \"dataset:bigbio/meddocan\",\n \"license:mit\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-06-21T15:46:58Z","string":"2023-06-21T15:46:58Z"},"last_modified":{"kind":"string","value":"2023-06-21T15:50:46+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- bigbio/meddocan\nlanguage: es\nlicense: mit\nmetrics:\n- f1\npipeline_tag: token-classification\ntags:\n- biomedical\n- clinical\n- spanish\n- xlm-roberta-large\nmodel-index:\n- name: IIC/xlm-roberta-large-meddocan\n results:\n - task:\n type: token-classification\n dataset:\n name: meddocan\n type: bigbio/meddocan\n split: test\n metrics:\n - type: f1\n value: 0.978\n name: f1\n---\n\n# xlm-roberta-large-meddocan\n\nThis model is a finetuned version of xlm-roberta-large for the meddocan dataset used in a benchmark in the paper TODO. 
The model has a F1 of 0.978\n\nPlease refer to the original publication for more information TODO LINK\n\n## Parameters used\n\n| parameter | Value |\n|-------------------------|:-----:|\n| batch size | 16 |\n| learning rate | 4e-05 |\n| classifier dropout | 0.2 |\n| warmup ratio | 0 |\n| warmup steps | 0 |\n| weight decay | 0 |\n| optimizer | AdamW |\n| epochs | 10 |\n| early stopping patience | 3 |\n\n\n## BibTeX entry and citation info\n\n```bibtex\nTODO\n```\n\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDDOCAN"],"string":"[\n \"MEDDOCAN\"\n]"}}},{"rowIdx":2259,"cells":{"id":{"kind":"string","value":"DunnBC22/bert-base-cased-finetuned-ner-BC2GM-IOB"},"author":{"kind":"string","value":"DunnBC22"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","tensorboard","bert","token-classification","generated_from_trainer","NER","en","dataset:blurb","license:apache-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tensorboard\",\n \"bert\",\n \"token-classification\",\n \"generated_from_trainer\",\n \"NER\",\n \"en\",\n \"dataset:blurb\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-07-04T18:53:48Z","string":"2023-07-04T18:53:48Z"},"last_modified":{"kind":"string","value":"2023-08-02T02:25:57+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\ndatasets:\n- blurb\nlanguage:\n- en\nlicense: apache-2.0\nmetrics:\n- seqeval\npipeline_tag: token-classification\ntags:\n- generated_from_trainer\n- NER\nmodel-index:\n- name: bert-base-cased-finetuned-ner-BC2GM-IOB\n results: []\n---\n\n# bert-base-cased-finetuned-ner-BC2GM-IOB\n\nThis model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased).\nIt achieves the following results on the evaluation set:\n- Loss: 0.0813\n- Gene\n - Precision: 0.752111423914654\n - Recall: 0.8025296442687747\n - F1: 0.7765029830197338\n - Number: 6325\n- Overall\n - Precision: 0.7521\n - Recall: 0.8025\n - F1: 0.7765\n - Accuracy: 0.9736\n\n## Model description\n\nFor more information on how it was created, check out the following link: https://github.com/DunnBC22/NLP_Projects/blob/main/Token%20Classification/Monolingual/EMBO-BLURB/NER%20Project%20Using%20EMBO-BLURB%20Dataset.ipynb\n\n## Intended uses & limitations\n\nThis model is intended to demonstrate my ability to solve a complex problem using technology.\n\n## Training and evaluation data\n\nDataset Source: https://huggingface.co/datasets/EMBO/BLURB\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 16\n- eval_batch_size: 16\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 3\n\n### Training results\n\n | Training Loss | Epoch | Step | Validation Loss | Gene Precision | Gene Recall | Gene F1 | Gene Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy |\n|:-------------:|:-----:|:----:|:---------------:|:---------:|:---------:|:---------:|:---------:|:-----------------:|:--------------:|:------:|:------:|\n| 0.0882 | 1.0 | 786 | 0.0771 | 0.7383 | 0.7538 | 0.7460 | 6325 | 0.7383 | 0.7538 | 0.7460 | 0.9697 |\n| 0.0547 | 2.0 | 1572 | 0.0823 
| 0.7617 | 0.7758 | 0.7687 | 6325 | 0.7617 | 0.7758 | 0.7687 | 0.9732 |\n| 0.0356 | 3.0 | 2358 | 0.0813 | 0.7521 | 0.8025 | 0.7765 | 6325 | 0.7521 | 0.8025 | 0.7765 | 0.9736 |\n\n*All values in the above chart are rounded to the nearest ten-thousandth.\n\n### Framework versions\n\n- Transformers 4.28.1\n- Pytorch 2.0.0\n- Datasets 2.11.0\n- Tokenizers 0.13.3"},"matched_bigbio_names":{"kind":"list like","value":["BLURB"],"string":"[\n \"BLURB\"\n]"}}},{"rowIdx":2260,"cells":{"id":{"kind":"string","value":"BigSalmon/InformalToFormalLincoln104Paraphrase"},"author":{"kind":"string","value":"BigSalmon"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","tensorboard","gpt2","text-generation","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"tensorboard\",\n \"gpt2\",\n \"text-generation\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-07-14T01:28:43Z","string":"2023-07-14T01:28:43Z"},"last_modified":{"kind":"string","value":"2023-07-14T18:36:51+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\ndata: https://github.com/BigSalmon2/InformalToFormalDataset\n\nText Generation Informal Formal\n\n```\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\ntokenizer = AutoTokenizer.from_pretrained(\"BigSalmon/InformalToFormalLincoln104Paraphrase\")\nmodel = AutoModelForCausalLM.from_pretrained(\"BigSalmon/InformalToFormalLincoln104Paraphrase\")\n```\n\n```\nDemo:\nhttps://huggingface.co/spaces/BigSalmon/FormalInformalConciseWordy\n```\n\n```\nprompt = \"\"\"informal english: corn fields are all across illinois, visible once you leave chicago.\\nTranslated into the Style of Abraham Lincoln:\"\"\"\ninput_ids = tokenizer.encode(prompt, return_tensors='pt')\noutputs = model.generate(input_ids=input_ids,\n max_length=10 + len(prompt),\n temperature=1.0,\n top_k=50,\n top_p=0.95,\n do_sample=True,\n num_return_sequences=5,\n early_stopping=True)\nfor i in range(5):\n print(tokenizer.decode(outputs[i]))\n```\nMost likely outputs (Disclaimer: I highly recommend using this over just generating):\n```\nprompt = \"\"\"informal english: corn fields are all across illinois, visible once you leave chicago.\\nTranslated into the Style of Abraham Lincoln:\"\"\"\ntext = tokenizer.encode(prompt)\nmyinput, past_key_values = torch.tensor([text]), None\nmyinput = myinput\nmyinput= myinput.to(device)\nlogits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False)\nlogits = logits[0,-1]\nprobabilities = torch.nn.functional.softmax(logits)\nbest_logits, best_indices = logits.topk(250)\nbest_words = [tokenizer.decode([idx.item()]) for idx in best_indices]\ntext.append(best_indices[0].item())\nbest_probabilities = probabilities[best_indices].tolist()\nwords = [] \nprint(best_words)\n```\n\n```\nHow To Make Prompt:\ninformal english: i am very ready to do that just that.\nTranslated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end.\nTranslated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task.\n***\ninformal english: space is huge and needs to be explored.\nTranslated into the Style of Abraham Lincoln: space 
awaits traversal, a new world whose boundaries are endless.\nTranslated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration.\n***\ninformal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago.\ninformal english:\n```\n\n```\noriginal: microsoft word's [MASK] pricing invites competition.\nTranslated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition.\n***\noriginal: the library’s quiet atmosphere encourages visitors to [blank] in their work.\nTranslated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work.\n```\n\n```\nEssay Intro (Warriors vs. Rockets in Game 7):\ntext: eagerly anticipated by fans, game 7's are the highlight of the post-season.\ntext: ever-building in suspense, game 7's have the crowd captivated.\n***\nEssay Intro (South Korean TV Is Becoming Popular):\ntext: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ).\ntext: increasingly held in critical esteem, south korean television continues to impress.\ntext: at the forefront of quality content, south korea is quickly achieving celebrity status.\n***\nEssay Intro (\n```\n\n```\nSearch: What is the definition of Checks and Balances?\nhttps://en.wikipedia.org/wiki/Checks_and_balances\nChecks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate.\nhttps://www.harvard.edu/glossary/Checks_and_Balances\nChecks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power\nhttps://www.law.cornell.edu/library/constitution/Checks_and_Balances\nChecks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power.\n***\nSearch: What is the definition of Separation of Powers?\nhttps://en.wikipedia.org/wiki/Separation_of_powers\nThe separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power.\nhttps://www.yale.edu/tcf/Separation_of_Powers.html\nSeparation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined.\n***\nSearch: What is the definition of Connection of Powers?\nhttps://en.wikipedia.org/wiki/Connection_of_powers\nConnection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches.\nhttps://simple.wikipedia.org/wiki/Connection_of_powers\nThe term Connection of Powers describes a system of government in which there is overlap between different parts of the government.\n***\nSearch: What is the definition of\n```\n\n```\nSearch: What are 
phrase synonyms for \"second-guess\"?\nhttps://www.powerthesaurus.org/second-guess/synonyms\nShortest to Longest:\n- feel dubious about\n- raise an eyebrow at\n- wrinkle their noses at\n- cast a jaundiced eye at\n- teeter on the fence about\n***\nSearch: What are phrase synonyms for \"mean to newbies\"?\nhttps://www.powerthesaurus.org/mean_to_newbies/synonyms\nShortest to Longest:\n- readiness to balk at rookies\n- absence of tolerance for novices\n- hostile attitude toward newcomers\n***\nSearch: What are phrase synonyms for \"make use of\"?\nhttps://www.powerthesaurus.org/make_use_of/synonyms\nShortest to Longest:\n- call upon\n- glean value from\n- reap benefits from\n- derive utility from\n- seize on the merits of\n- draw on the strength of\n- tap into the potential of\n***\nSearch: What are phrase synonyms for \"hurting itself\"?\nhttps://www.powerthesaurus.org/hurting_itself/synonyms\nShortest to Longest:\n- erring\n- slighting itself\n- forfeiting its integrity\n- doing itself a disservice\n- evincing a lack of backbone\n***\nSearch: What are phrase synonyms for \"\n```\n```\n- nebraska\n- unicamerical legislature\n- different from federal house and senate\ntext: featuring a unicameral legislature, nebraska's political system stands in stark contrast to the federal model, comprised of a house and senate.\n***\n- penny has practically no value\n- should be taken out of circulation\n- just as other coins have been in us history\n- lost use\n- value not enough\n- to make environmental consequences worthy\ntext: all but valueless, the penny should be retired. as with other coins in american history, it has become defunct. too minute to warrant the environmental consequences of its production, it has outlived its usefulness.\n***\n-\n```\n```\noriginal: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. \ninfill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. \n***\noriginal:\n```\n\n```\nwordy: classical music is becoming less popular more and more.\nTranslate into Concise Text: interest in classic music is fading.\n***\nwordy:\n```\n\n```\nsweet: savvy voters ousted him.\nlonger: voters who were informed delivered his defeat.\n***\nsweet:\n```\n\n```\n1: commercial space company spacex plans to launch a whopping 52 flights in 2022.\n2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022.\n3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights.\n4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company.\n5: a commercial space company, spacex aims to conduct 52 flights in 2022.\n***\n1:\n```\n\nKeywords to sentences or sentence.\n\n```\nngos are characterized by:\n□ voluntary citizens' group that is organized on a local, national or international level\n□ encourage political participation\n□ often serve humanitarian functions\n□ work for social, economic, or environmental change\n***\nwhat are the drawbacks of living near an airbnb?\n□ noise\n□ parking\n□ traffic\n□ security\n□ strangers\n***\n```\n\n\n```\noriginal: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung.\nadapted: musicals generally use spoken dialogue as well as songs to convey the story. 
( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung.\n***\noriginal: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark.\nadapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark.\n***\noriginal:\n```\n\n```\noriginal: had trouble deciding.\ntranslated into journalism speak: wrestled with the question, agonized over the matter, furrowed their brows in contemplation.\n***\noriginal:\n```\n\n```\ninput: not loyal\n1800s english: ( two-faced / inimical / perfidious / duplicitous / mendacious / double-dealing / shifty ).\n***\ninput:\n```\n\n```\nfirst: ( was complicit in / was involved in ).\nantonym: ( was blameless / was not an accomplice to / had no hand in / was uninvolved in ).\n***\nfirst: ( have no qualms about / see no issue with ).\nantonym: ( are deeply troubled by / harbor grave reservations about / have a visceral aversion to / take ( umbrage at / exception to ) / are wary of ).\n***\nfirst: ( do not see eye to eye / disagree often ).\nantonym: ( are in sync / are united / have excellent rapport / are like-minded / are in step / are of one mind / are in lockstep / operate in perfect harmony / march in lockstep ).\n***\nfirst:\n```\n\n```\nstiff with competition, law school {A} is the launching pad for countless careers, {B} is a crowded field, {C} ranks among the most sought-after professional degrees, {D} is a professional proving ground.\n***\nlanguishing in viewership, saturday night live {A} is due for a creative renaissance, {B} is no longer a ratings juggernaut, {C} has been eclipsed by its imitators, {C} can still find its mojo.\n***\ndubbed the \"manhattan of the south,\" atlanta {A} is a bustling metropolis, {B} is known for its vibrant downtown, {C} is a city of rich history, {D} is the pride of georgia.\n***\nembattled by scandal, harvard {A} is feeling the heat, {B} cannot escape the media glare, {C} is facing its most intense scrutiny yet, {D} is in the spotlight for all the wrong reasons.\n```\n\nInfill / Infilling / Masking / Phrase Masking (Works pretty decently actually, especially when you use logprobs code from above):\n\n```\nhis contention [blank] by the evidence [sep] was refuted [answer]\n***\nfew sights are as [blank] new york city as the colorful, flashing signage of its bodegas [sep] synonymous with [answer]\n***\nwhen rick won the lottery, all of his distant relatives [blank] his winnings [sep] clamored for [answer]\n***\nthe library’s quiet atmosphere encourages visitors to [blank] in their work [sep] immerse themselves [answer]\n***\nthe joy of sport is that no two games are alike. for every exhilarating experience, however, there is an interminable one. the national pastime, unfortunately, has a penchant for the latter. what begins as a summer evening at the ballpark can quickly devolve into a game of tedium. the primary culprit is the [blank] of play. from batters readjusting their gloves to fielders spitting on their mitts, the action is [blank] unnecessary interruptions. 
the sport's future is [blank] if these tendencies are not addressed [sep] plodding pace [answer] riddled with [answer] bleak [answer]\n***\nmicrosoft word's [blank] pricing [blank] competition [sep] unconscionable [answer] invites [answer]\n***\n```\n\n```\noriginal: microsoft word's [MASK] pricing invites competition.\nTranslated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition.\n***\noriginal: the library’s quiet atmosphere encourages visitors to [blank] in their work.\nTranslated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work.\n```\n\nBackwards\n```\nEssay Intro (National Parks):\ntext: tourists are at ease in the national parks, ( swept up in the beauty of their natural splendor ).\n***\nEssay Intro (D.C. Statehood):\nwashington, d.c. is a city of outsize significance, ( ground zero for the nation's political life / center stage for the nation's political machinations ).\n```\n\n```\ntopic: the Golden State Warriors.\ncharacterization 1: the reigning kings of the NBA.\ncharacterization 2: possessed of a remarkable cohesion.\ncharacterization 3: helmed by superstar Stephen Curry.\ncharacterization 4: perched atop the league’s hierarchy.\ncharacterization 5: boasting a litany of hall-of-famers.\n***\ntopic: emojis.\ncharacterization 1: shorthand for a digital generation.\ncharacterization 2: more versatile than words.\ncharacterization 3: the latest frontier in language.\ncharacterization 4: a form of self-expression.\ncharacterization 5: quintessentially millennial.\ncharacterization 6: reflective of a tech-centric world.\n***\ntopic:\n```\n\n\n```\nregular: illinois went against the census' population-loss prediction by getting more residents.\nVBG: defying the census' prediction of population loss, illinois experienced growth.\n***\nregular: microsoft word’s high pricing increases the likelihood of competition.\nVBG: extortionately priced, microsoft word is inviting competition.\n***\nregular:\n```\n\n```\nsource: badminton should be more popular in the US.\nQUERY: Based on the given topic, can you develop a story outline?\ntarget: (1) games played with racquets are popular, (2) just look at tennis and ping pong, (3) but badminton underappreciated, (4) fun, fast-paced, competitive, (5) needs to be marketed more\ntext: the sporting arena is dominated by games that are played with racquets. tennis and ping pong, in particular, are immensely popular. somewhat curiously, however, badminton is absent from this pantheon. exciting, fast-paced, and competitive, it is an underappreciated pastime. all that it lacks is more effective marketing.\n***\nsource: movies in theaters should be free.\nQUERY: Based on the given topic, can you develop a story outline?\ntarget: (1) movies provide vital life lessons, (2) many venues charge admission, (3) those without much money\ntext: the lessons that movies impart are far from trivial. the vast catalogue of cinematic classics is replete with inspiring sagas of friendship, bravery, and tenacity. it is regrettable, then, that admission to theaters is not free. in their current form, the doors of this most vital of institutions are closed to those who lack the means to pay.\n***\nsource:\n```\n\n```\nin the private sector, { transparency } is vital to the business’s credibility. the { disclosure of information } can be the difference between success and failure.\n***\nthe labor market is changing, with { remote work } now the norm. 
this { flexible employment } allows the individual to design their own schedule.\n***\nthe { cubicle } is the locus of countless grievances. many complain that the { enclosed workspace } restricts their freedom of movement.\n***\n```\n\n```\nit would be natural to assume that americans, as a people whose ancestors { immigrated to this country }, would be sympathetic to those seeking to do likewise.\nquestion: what does “do likewise” mean in the above context?\n(a) make the same journey\n(b) share in the promise of the american dream\n(c) start anew in the land of opportunity\n(d) make landfall on the united states\n***\nin the private sector, { transparency } is vital to the business’s credibility. this orientation can be the difference between success and failure.\nquestion: what does “this orientation” mean in the above context?\n(a) visible business practices \n(b) candor with the public\n(c) open, honest communication\n(d) culture of accountability\n```\n\n```\nexample: suppose you are a teacher. further suppose you want to tell an accurate telling of history. then suppose a parent takes offense. they do so in the name of name of their kid. this happens a lot.\ntext: educators' responsibility to remain true to the historical record often clashes with the parent's desire to shelter their child from uncomfortable realities.\n***\nexample: suppose you are a student at college. now suppose you have to buy textbooks. that is going to be worth hundreds of dollars. given how much you already spend on tuition, that is going to hard cost to bear.\ntext: the exorbitant cost of textbooks, which often reaches hundreds of dollars, imposes a sizable financial burden on the already-strapped college student.\n```\n\n```\nclarify: international ( {working together} / cooperation ) is called for when ( {issue go beyond lots of borders} / an issue transcends borders / a given matter has transnational implications ).\n```\n\n```\ndescription: when someone thinks that their view is the only right one.\nsynonyms: intolerant, opinionated, narrow-minded, insular, self-righteous.\n***\ndescription: when you put something off.\nsynonyms: shelve, defer, table, postpone.\n```\n\n```\norganic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea.\nrewrite phrases: meritocratic, viability, vision\nrewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability.\n```\n\n```\nessence: when someone's views are keeping within reasonable.\nrefine: the senator's voting record is ( moderate / centrist / pragmatic / balanced / fair-minded / even-handed ).\n***\nessence: when things are worked through in a petty way.\nrefine: the propensity of the u.s. 
congress to settle every dispute by way of ( mudslinging / bickering / demagoguery / name-calling / finger-pointing / vilification ) is appalling.\n```\n\n```\ndescription: when someone thinks that their view is the only right one.\nsynonyms: intolerant, opinionated, narrow-minded, insular, self-righteous.\n\n***\n\ndescription: when you put something off.\nsynonyms: shelve, defer, table, postpone.\n```\n\n```\norganic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea.\nrewrite phrases: meritocratic, viability, vision\nrewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability.\n```\n\n```\nmusic before bedtime [makes for being able to relax] -> is a recipe for relaxation.\n```\n\n```\n[people wanting entertainment love traveling new york city] -> travelers flock to new york city in droves, drawn to its iconic entertainment scene. [cannot blame them] -> one cannot fault them [broadway so fun] -> when it is home to such thrilling fare as Broadway.\n```\n\n```\nin their ( ‖ when you are rushing because you want to get there on time ‖ / haste to arrive punctually / mad dash to be timely ), morning commuters are too rushed to whip up their own meal.\n\n***\n\npoliticians prefer to author vague plans rather than ( ‖ when you can make a plan without many unknowns ‖ / actionable policies / concrete solutions ).\n```\n\n```\nQ: What is whistleblower protection?\nA: Whistleblower protection is a form of legal immunity granted to employees who expose the unethical practices of their employer.\nQ: Why are whistleblower protections important?\nA: Absent whistleblower protections, employees would be deterred from exposing their employer’s wrongdoing for fear of retribution.\nQ: Why would an employer engage in retribution?\nA: An employer who has acted unethically stands to suffer severe financial and reputational damage were their transgressions to become public. To safeguard themselves from these consequences, they might seek to dissuade employees from exposing their wrongdoing.\n```\n\n```\noriginal: the meritocratic nature of crowdfunding [MASK] into their vision's viability.\ninfill: the meritocratic nature of crowdfunding [gives investors idea of how successful] -> ( offers entrepreneurs a window ) into their vision's viability.\n```\n\n```\nLeadership | Lecture 17: Worker Morale\n\nWhat Workers Look for in Companies:\n• Benefits\no Tuition reimbursement\no Paid parental leave\no 401K matching\no Profit sharing\no Pension plans\no Free meals\n• Social responsibility\no Environmental stewardship\no Charitable contributions\no Diversity\n• Work-life balance\no Telecommuting\no Paid holidays and vacation\no Casual dress\n• Growth opportunities\n• Job security\n• Competitive compensation\n• Recognition\no Open-door policies\no Whistleblower protection\no Employee-of-the-month awards\no Positive performance reviews\no Bonuses\n```\n\n```\ndescription: business\nkeywords: for-profit, fiduciary duty, monopolistic, bottom line, return on investment, short-term thinking, capital-intensive, self-interested, risk-taking, fiduciary duty, merger, speculation, profiteering, oversight, capitalism, diversification\n ```\n \n ```\n 3. In this task, you are given a company name and you need to find its industry.\n\nMcDonalds -- Restaurant\nFacebook -- Social Network\nIKEA -- Furniture\nAmerican Express -- Credit Services\nNokia -- Telecom\nNintendo -- Entertainment\n\n4. 
In this task, you are given a Month and you need to convert it to its corresponding season\n\nApril -- Spring\nDecember -- Winter\nJuly -- Summer\nOctober -- Fall\nFebruary -- Winter\n\n5. In this task, you are given a sentence with a missing word and you need to predict the correct word.\n\nManagers should set an _____ for their employees. -- example\nSome people spend more than four _____ in the gym. -- hours\nThe police were on the _____ of arresting the suspect. -- verge\nThey were looking for _____ on how to solve the problem. -- guidance\nWhat is the _____ of the coffee? -- price\n\n6. In this task, you are given a paragraph and you need to reorder it to make it logical.\n\nIt was first proposed in 1987. The total length of the bridge is 1,828 meters. The idea of a bridge connects Hong Kong to Macau. -- The idea of bridge connecting Hong Kong and Macau was first proposed in 1987. The total length of the bridge is 1,828 meters.\nIt is a movie about a brave and noble policeman. The film was produced by Americans. They were Kevin Lima and Chris Buck. They are directors. The movie is called Tarzan. -- Produced by Americans Kevin Lima and Chris Buck, Tarzan is a movie about a brave and noble policeman.\nIt was first discovered in the mountains of India. The active ingredients in this plant can stimulate hair growth. The plant is called \"Hair Plus.\" -- First discovered in the mountains of India, Hair Plus is a plant whose active ingredients can stimulate hair growth.\n```\n\n```\ntrivia: What is the population of South Korea?\nresponse: 51 million.\n\n***\n\ntrivia: What is the minimum voting age in the US?\nresponse: 18.\n\n***\n\ntrivia: What are the first ten amendments of the US constitution called?\nresponse: Bill of Rights.\n```\n\n```\nideas: in modern-day america, it is customary for the commander-in-chief to conduct regular press conferences\nrelated keywords: transparency, check and balance, sacrosanct, public accountability, adversarial, unscripted, direct access, open government, watchdog, healthy democracy, institutional integrity, right to know, direct line of communication, behind closed doors, updates, track progress, instill confidence, reassure, humanize, leadership style, day-to-day, forthcoming, demystify, ask hard questions\n\n***\n\nideas: i know this one guy who retired so young, attesting to how careful they were with money.\nrelated keywords: money management, resourceful, penny-pinching, live below their means, frugal, financial discipline, financial independence, conservative, long-term vision, discretionary spending, deferred gratification, preparedness, self-control, cushion\n```\n\n```\nless specific: actors and musicians should ( support democracy ).\nclarifies: actors and musicians should ( wield their celebrity to amplify pro-democracy messaging / marshal their considerable influence in the service of the democratic cause ).\n\n***\n\nless specific: amid a contemporary culture that thrives on profligacy, the discipline necessary to retire early is a vanishing quality. rather than yielding to the lure of indulgence, the aspiring retiree must ( be careful ).\nclarifies: amid a contemporary culture that thrives on profligacy, the discipline necessary to retire early is a vanishing quality. 
rather than yielding to the lure of indulgence, the aspiring retiree must ( master their desires / exercise self-restraint / embrace frugality / restrain their appetite for splendor ).\n```\n\n```\ndull: clean\nemotional heft: spotless, immaculate, pristine\n\n***\n\ndull: hot\nemotional heft: scorching, searing, blistering\n\n***\n\ndull: happy\nemotional heft: euphoric\n\n```\n\n```\ntext: {guide: vividly describe the premise of the show \"seinfield\"} -> set in the heart of bustling new york city, the sitcom \"seinfield\" follows the everyday {restrict: term that implies they get into trouble but in a comical way} -> misadventures of four neurotic friends. on any given episode, one can find them quarreling over their favorite diner's latest menu change, haggling over the division of household expenses, or contriving a scheme to spy on the apartment's newest resident. mundane as their exploits may be, they never fail to elicit a hearty laugh. {guide: mention how larry david is responsible} -> behind the show's witty, incisive dialogue lies the sharp pen of larry david, who co-created the show with jerry seinfeld. {guide: mention how larry david came up with the most recognizable lines} -> it is his genius that conjured such instantly {restrict: term that imply everybody knows them} -> recognizable quips as \"no soup for you!\" and \"not that there's anything wrong with that!\". {guide: mention how humanity should revel in having such good comedians these days} -> as this list of laugh-out-loud lines attests, the world is fortunate to ( count such a sharp comedic mind among its ranks / have such a talented humorist in its midst / have such comedic talent gracing its airwaves ).\n```\n\n```\nsystem: i'm not sure i agree with your assessment of the situation.\nuser: replace \"assessment\" with a more casual term.\nchatbot: ( take / read / view ).\n\nsystem: i've been feeling really busy lately, trying to balance work and family responsibilities.\nuser: replace \"busy\" with a more intense word that conveys it is too much.\nchatbot: overwhelmed.\n\nsystem: the candidate's stance on immigration is quite nebulous, and it's difficult to discern exactly what they would do if elected.\nuser: replace \"nebulous\" with a word that conveys a lack of detail.\nchatbot: ( vague / ambiguous ).\n```\n\n```\nuser: exorbitant in price, scholarly journals unconscionably define the breadth of one's knowledge awareness by the contents of their wallet. 
[replace “knowledge awareness” with a more natural expression]\nchatbot: intellectual horizons.\nuser: can you do another alternative to “intellectual horizons” that has more relation to “scholarly journals”?\nchatbot: academic enlightenment.\n```\n\n```\nkey: calculate.\nsyn: estimate, consider, weigh, number, count, apportion, proportion, investigate, reckon, rate, compute.\nant: guess, conjecture, hit, chance, risk, stake, miscalculate.\n```\n\n```\ndescription: more forceful version of curious that is less forceful than nosy\nanswer: inquisitive\n\ndescription: more forceful version of hopeful that is less forceful than overconfident\nanswer: optimistic\n```\n\n```\nkey: inquisitive\npositive: curious, interested\nnegative: nosy, prying\n\n***\n\nkey: witty\npositive: clever, humorous\nnegative: sarcastic, caustic\n\n***\n\nkey: influential\npositive: impactful, powerful\nnegative: overbearing, domineering\n```\n\n```\ndefective: the blogger's { use of language imprecise } confused an already complicated issue.\nprecise: the blogger's ( vague wording ) confused an already complicated issue.\n\n\ndefective: the senator's speech was high on { words sounding dignified } but low on concrete proposals.\nprecise: the senator's speech was high on ( lofty rhetoric ) but low on concrete proposals.\n```\n\n```\nexample: the new car uses gas.\nboring: uses\nstronger: guzzles\n\nexample: he hates people that are rude.\nboring: hates\nstronger: loathes, abhors, despises, scorns, detests\n```\n\n```\ninitial: The music at the party was [ loud; replace with a word that suggests a more uncomfortable noise level ] and overwhelming.\nmodified: The music at the party was { ear-splitting } and overwhelming.\n\ninitial: their house is [ small; replace with a positive spin ].\nmodified: their house is { cozy }.\n```\n\n```\ndefective: they spent the weekend enjoying { time do what you want }.\nprecise: they spent the weekend enjoying ( leisure activities).\n\ndefective: the author rightly notes the inequities perpetuated by { employment based on who you know }.\nprecise: the author rightly notes the inequities perpetuated by ( nepotism ).\n\ndefective: the senator's speech was high on { words sounding dignified } but low on concrete proposals.\nprecise: the senator's speech was high on ( lofty rhetoric ) but low on concrete proposals.\n```\n\n```\npersona: human resources manager\nbuzzwords: pipeline, talent, retention, compensation, flexible, recruitment, personnel, resume, competitive, quality, onboard\n```"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2261,"cells":{"id":{"kind":"string","value":"oleksandrfluxon/mpt-7b-instruct-evaluate"},"author":{"kind":"string","value":"oleksandrfluxon"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","mpt","text-generation","Composer","MosaicML","llm-foundry","custom_code","dataset:mosaicml/dolly_hhrlhf","arxiv:2205.14135","arxiv:2108.12409","arxiv:2010.04245","license:cc-by-sa-3.0","autotrain_compatible","text-generation-inference","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"mpt\",\n \"text-generation\",\n \"Composer\",\n \"MosaicML\",\n \"llm-foundry\",\n \"custom_code\",\n \"dataset:mosaicml/dolly_hhrlhf\",\n \"arxiv:2205.14135\",\n \"arxiv:2108.12409\",\n \"arxiv:2010.04245\",\n \"license:cc-by-sa-3.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-07-21T13:37:15Z","string":"2023-07-21T13:37:15Z"},"last_modified":{"kind":"string","value":"2023-07-25T09:07:14+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- mosaicml/dolly_hhrlhf\nlicense: cc-by-sa-3.0\ntags:\n- Composer\n- MosaicML\n- llm-foundry\ninference: false\nduplicated_from: mosaicml/mpt-7b-instruct\n---\n\n# MPT-7B-Instruct\n\nMPT-7B-Instruct is a model for short-form instruction following.\nIt is built by finetuning [MPT-7B](https://huggingface.co/mosaicml/mpt-7b) on a [dataset](https://huggingface.co/datasets/sam-mosaic/dolly_hhrlhf) derived from the [Databricks Dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k) and the [Anthropic Helpful and Harmless (HH-RLHF)](https://huggingface.co/datasets/Anthropic/hh-rlhf) datasets.\n * License: _CC-By-SA-3.0_\n * [Demo on Hugging Face Spaces](https://huggingface.co/spaces/mosaicml/mpt-7b-instruct)\n\n\nThis model was trained by [MosaicML](https://www.mosaicml.com) and follows a modified decoder-only transformer architecture.\n\n## Model Date\n\nMay 5, 2023\n\n## Model License\n\nCC-By-SA-3.0\n\n## Documentation\n\n* [Blog post: Introducing MPT-7B: A New Standard for Open-Source, Commercially Usable LLMs](https://www.mosaicml.com/blog/mpt-7b)\n* [Codebase (mosaicml/llm-foundry repo)](https://github.com/mosaicml/llm-foundry/)\n* Questions: Feel free to contact us via the [MosaicML Community Slack](https://mosaicml.me/slack)!\n\n### Example Question/Instruction\n\n**Longboi24**:\n> What is a quoll?\n\n**MPT-7B-Instruct**:\n\n>A Quoll (pronounced “cool”) is one of Australia’s native carnivorous marsupial mammals, which are also known as macropods or wallabies in other parts around Asia and South America\n\n## How to Use\n\nNote: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom model architecture that is not yet part of the `transformers` package.\n\nIt includes options for many training efficiency features such as [FlashAttention (Dao et al. 
2022)](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), QK LayerNorm, and more.\n\n```python\nimport transformers\nmodel = transformers.AutoModelForCausalLM.from_pretrained(\n 'mosaicml/mpt-7b-instruct',\n trust_remote_code=True\n)\n```\nNote: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method.\nThis is because we use a custom `MPT` model architecture that is not yet part of the Hugging Face `transformers` package.\n`MPT` includes options for many training efficiency features such as [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), [QK LayerNorm](https://arxiv.org/abs/2010.04245), and more.\n\nTo use the optimized [triton implementation](https://github.com/openai/triton) of FlashAttention, you can load the model on GPU (`cuda:0`) with `attn_impl='triton'` and with `bfloat16` precision:\n```python\nimport torch\nimport transformers\n\nname = 'mosaicml/mpt-7b-instruct'\n\nconfig = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True)\nconfig.attn_config['attn_impl'] = 'triton'\nconfig.init_device = 'cuda:0' # For fast initialization directly on GPU!\n\nmodel = transformers.AutoModelForCausalLM.from_pretrained(\n name,\n config=config,\n torch_dtype=torch.bfloat16, # Load model weights in bfloat16\n trust_remote_code=True\n)\n```\n\nAlthough the model was trained with a sequence length of 2048, ALiBi enables users to increase the maximum sequence length during finetuning and/or inference. For example:\n\n```python\nimport transformers\n\nname = 'mosaicml/mpt-7b-instruct'\n\nconfig = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True)\nconfig.max_seq_len = 4096 # (input + output) tokens can now be up to 4096\n\nmodel = transformers.AutoModelForCausalLM.from_pretrained(\n name,\n config=config,\n trust_remote_code=True\n)\n```\n\nThis model was trained with the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer.\n\n```python\nfrom transformers import AutoTokenizer\ntokenizer = AutoTokenizer.from_pretrained(\"EleutherAI/gpt-neox-20b\")\n```\n\nThe model can then be used, for example, within a text-generation pipeline. \nNote: when running Torch modules in lower precision, it is best practice to use the [torch.autocast context manager](https://pytorch.org/docs/stable/amp.html).\n\n```python\nfrom transformers import pipeline\n\npipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0')\n\nwith torch.autocast('cuda', dtype=torch.bfloat16):\n print(\n pipe('Here is a recipe for vegan banana bread:\\n',\n max_new_tokens=100,\n do_sample=True,\n use_cache=True))\n```\n\n### Formatting\n\nThis model was trained on data formatted in the dolly-15k format:\n\n```python\nINSTRUCTION_KEY = \"### Instruction:\"\nRESPONSE_KEY = \"### Response:\"\nINTRO_BLURB = \"Below is an instruction that describes a task. Write a response that appropriately completes the request.\"\nPROMPT_FOR_GENERATION_FORMAT = \"\"\"{intro}\n{instruction_key}\n{instruction}\n{response_key}\n\"\"\".format(\n intro=INTRO_BLURB,\n instruction_key=INSTRUCTION_KEY,\n instruction=\"{instruction}\",\n response_key=RESPONSE_KEY,\n)\n\nexample = \"James decides to run 3 sprints 3 times a week. He runs 60 meters each sprint. How many total meters does he run a week? 
Explain before answering.\"\nfmt_ex = PROMPT_FOR_GENERATION_FORMAT.format(instruction=example)\n```\n\nIn the above example, `fmt_ex` is ready to be tokenized and sent through the model.\n\n## Model Description\n\nThe architecture is a modification of a standard decoder-only transformer.\n\nThe model has been modified from a standard transformer in the following ways:\n* It uses [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf)\n* It uses [ALiBi (Attention with Linear Biases)](https://arxiv.org/abs/2108.12409) and does not use positional embeddings\n* It does not use biases\n\n\n| Hyperparameter | Value |\n|----------------|-------|\n|n_parameters | 6.7B |\n|n_layers | 32 |\n| n_heads | 32 |\n| d_model | 4096 |\n| vocab size | 50432 |\n| sequence length | 2048 |\n\n## PreTraining Data\n\nFor more details on the pretraining process, see [MPT-7B](https://huggingface.co/mosaicml/mpt-7b).\n\nThe data was tokenized using the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer.\n\n### Training Configuration\n\nThis model was trained on 8 A100-40GBs for about 2.3 hours using the [MosaicML Platform](https://www.mosaicml.com/platform).\nThe model was trained with sharded data parallelism using [FSDP](https://pytorch.org/docs/stable/fsdp.html) and used the AdamW optimizer.\n\n## Limitations and Biases\n\n_The following language is modified from [EleutherAI's GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b)_\n\nMPT-7B-Instruct can produce factually incorrect output, and should not be relied on to produce factually accurate information.\nMPT-7B-Instruct was trained on various public datasets.\nWhile great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs.\n\n\n## Acknowledgements\n\nThis model was finetuned by Sam Havens and the MosaicML NLP team\n\n## MosaicML Platform\n\nIf you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs on the MosaicML Platform, [sign up here](https://forms.mosaicml.com/demo?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-7b).\n\n## Disclaimer\n\nThe license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. 
Please consult an attorney before using this model for commercial purposes.\n\n## Citation\n\nPlease cite this model using the following format:\n\n```\n@online{MosaicML2023Introducing,\n author = {MosaicML NLP Team},\n title = {Introducing MPT-7B: A New Standard for Open-Source, Commercially Usable LLMs},\n year = {2023},\n url = {www.mosaicml.com/blog/mpt-7b},\n note = {Accessed: 2023-03-28}, % change this date\n urldate = {2023-03-28} % change this date\n}\n```\n"},"matched_bigbio_names":{"kind":"list like","value":["BLURB"],"string":"[\n \"BLURB\"\n]"}}},{"rowIdx":2262,"cells":{"id":{"kind":"string","value":"camila-ud/DrBERT-CASM2"},"author":{"kind":"string","value":"camila-ud"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","safetensors","bert","token-classification","medical","biomedical","medkit-lib","fr","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"safetensors\",\n \"bert\",\n \"token-classification\",\n \"medical\",\n \"biomedical\",\n \"medkit-lib\",\n \"fr\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-07-24T11:44:54Z","string":"2023-07-24T11:44:54Z"},"last_modified":{"kind":"string","value":"2023-08-07T15:39:32+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlanguage:\n- fr\nlibrary_name: transformers\nlicense: mit\nmetrics:\n- seqeval\npipeline_tag: token-classification\ntags:\n- medical\n- biomedical\n- medkit-lib\nwidget:\n- text: La radiographie et la tomodensitométrie ont montré des micronodules diffus\n example_title: example 1\n- text: Elle souffre d'asthme mais n'a pas besoin d'Allegra\n example_title: example 2\n---\n\n\n# DrBERT-CASM2\n\n## Model description\n\n**DrBERT-CASM2** is a French Named Entity Recognition model that was fine-tuned from \n[DrBERT](https://huggingface.co/Dr-BERT/DrBERT-4GB-CP-PubMedBERT): A PreTrained model in French for biomedical and clinical domains. \nIt has been trained to detect the following types of entities: **problem**, **treatment** and **test** using the medkit Trainer.\n\n- **Fine-tuned using** medkit [GitHub Repo](https://github.com/TeamHeka/medkit)\n- **Developed by** @camila-ud, medkit, HeKA Research team\n- **Dataset source**\n\n Annotated version from @aneuraz called 'corpusCasM2: A corpus of annotated clinical texts'\n - The annotation was performed collaboratively by masters students from Université Paris Cité.\n\n - The corpus contains documents from CAS:\n ```\n Natalia Grabar, Vincent Claveau, and Clément Dalloux. 2018. CAS: French Corpus with Clinical Cases.\n In Proceedings of the Ninth International Workshop on Health Text Mining and Information Analysis,\n pages 122–128, Brussels, Belgium. Association for Computational Linguistics.\n ```\n# Intended uses & limitations\n\n## Limitations and bias\n\nThis model was trained for **development and test phases**. \nThis model is limited by its training dataset, and it should be used with caution. 
\nThe results are not guaranteed, and the model should be used only in data exploration stages.\nThe model may be able to detect entities in the early stages of the analysis of medical documents in French.\n\nThe maximum token size was reduced to **128 tokens** to minimize training time.\n\n# How to use\n\n## Install medkit\n\nFirst of all, please install medkit with the following command:\n\n```\npip install 'medkit-lib[optional]'\n```\n\nPlease check the [documentation](https://medkit.readthedocs.io/en/latest/user_guide/install.html) for more info and examples.\n\n## Using the model\n\n```python\nfrom medkit.core.text import TextDocument\nfrom medkit.text.ner.hf_entity_matcher import HFEntityMatcher\n\nmatcher = HFEntityMatcher(model=\"camila-ud/DrBERT-CASM2\")\n\ntest_doc = TextDocument(\"Elle souffre d'asthme mais n'a pas besoin d'Allegra\")\ndetected_entities = matcher.run([test_doc.raw_segment])\n\n# show information\nmsg = \"|\".join(f\"'{entity.label}':{entity.text}\" for entity in detected_entities)\nprint(f\"Text: '{test_doc.text}'\\n{msg}\")\n```\n```\nText: \"Elle souffre d'asthme mais n'a pas besoin d'Allegra\"\n'problem':asthme|'treatment':Allegra\n```\n\n# Training data \n\nThis model was fine-tuned on **CASM2**, an internal corpus with clinical cases (in French) annotated by master students. \nThe corpus contains more than 5000 medkit documents (~ phrases) with entities to detect. \n\n**Number of documents (~ phrases) by split**\n\n| Split | # medkit docs |\n| ---------- | ------------- |\n| Train | 5824 |\n| Validation | 1457 |\n| Test | 1821 |\n\n\n**Number of examples per entity type**\n\n| Split | treatment | test | problem |\n| ---------- | --------- | ---- | ------- |\n| Train | 3258 | 3990 | 6808 |\n| Validation | 842 | 1007 | 1745 |\n| Test | 994 | 1289 | 2113 |\n\n## Training procedure\n\nThis model was fine-tuned using the medkit trainer on CPU; it took about 3h.\n\n# Model performances\n\nModel performance computed on the CASM2 test dataset (using the medkit seqeval evaluator)\n\nEntity|precision|recall|f1\n-|-|-|-\ntreatment|0.7492|0.7666|0.7578\ntest|0.7449|0.8240|0.7824\nproblem|0.6884|0.7304|0.7088\nOverall|0.7188|0.7660|0.7416\n\n## How to evaluate using medkit\n```python\nfrom medkit.text.metrics.ner import SeqEvalEvaluator\n\n# load the matcher and get predicted entities by document\nmatcher = HFEntityMatcher(model=\"camila-ud/DrBERT-CASM2\")\npredicted_entities = [matcher.run([doc.raw_segment]) for doc in test_documents]\n\nevaluator = SeqEvalEvaluator(tagging_scheme=\"iob2\")\nevaluator.compute(test_documents,predicted_entities=predicted_entities)\n```\nYou can use the tokenizer from HF to evaluate by tokens instead of characters \n```python\nfrom transformers import AutoTokenizer\n\ntokenizer_drbert = AutoTokenizer.from_pretrained(\"camila-ud/DrBERT-CASM2\", use_fast=True)\n\nevaluator = SeqEvalEvaluator(tokenizer=tokenizer_drbert,tagging_scheme=\"iob2\")\nevaluator.compute(test_documents,predicted_entities=predicted_entities)\n```\n\n# Citation\n\n```\n@online{medkit-lib,\n author={HeKA Research Team},\n title={medkit, A Python library for a learning health system},\n url={https://pypi.org/project/medkit-lib/},\n urldate = {2023-07-24}, \n}\n```\n```\nHeKA Research Team, “medkit, a Python library for a learning health system.” https://pypi.org/project/medkit-lib/ (accessed Jul. 
24, 2023).\n```"},"matched_bigbio_names":{"kind":"list like","value":["CAS"],"string":"[\n \"CAS\"\n]"}}},{"rowIdx":2263,"cells":{"id":{"kind":"string","value":"facebook/mms-tts-cas"},"author":{"kind":"string","value":"facebook"},"task_category":{"kind":"string","value":"text-to-speech"},"tags":{"kind":"list like","value":["transformers","pytorch","safetensors","vits","text-to-audio","mms","text-to-speech","arxiv:2305.13516","license:cc-by-nc-4.0","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"safetensors\",\n \"vits\",\n \"text-to-audio\",\n \"mms\",\n \"text-to-speech\",\n \"arxiv:2305.13516\",\n \"license:cc-by-nc-4.0\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-09-01T16:38:40Z","string":"2023-09-01T16:38:40Z"},"last_modified":{"kind":"string","value":"2023-09-01T16:40:48+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlicense: cc-by-nc-4.0\npipeline_tag: text-to-speech\ntags:\n- mms\n- vits\n---\n\n# Massively Multilingual Speech (MMS): Tsimané Text-to-Speech\n\nThis repository contains the **Tsimané (cas)** language text-to-speech (TTS) model checkpoint.\n\nThis model is part of Facebook's [Massively Multilingual Speech](https://arxiv.org/abs/2305.13516) project, aiming to\nprovide speech technology across a diverse range of languages. You can find more details about the supported languages\nand their ISO 639-3 codes in the [MMS Language Coverage Overview](https://dl.fbaipublicfiles.com/mms/misc/language_coverage_mms.html),\nand see all MMS-TTS checkpoints on the Hugging Face Hub: [facebook/mms-tts](https://huggingface.co/models?sort=trending&search=facebook%2Fmms-tts).\n\nMMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards.\n\n## Model Details\n\nVITS (**V**ariational **I**nference with adversarial learning for end-to-end **T**ext-to-**S**peech) is an end-to-end \nspeech synthesis model that predicts a speech waveform conditional on an input text sequence. It is a conditional variational \nautoencoder (VAE) comprised of a posterior encoder, decoder, and conditional prior.\n\nA set of spectrogram-based acoustic features are predicted by the flow-based module, which is formed of a Transformer-based\ntext encoder and multiple coupling layers. The spectrogram is decoded using a stack of transposed convolutional layers,\nmuch in the same style as the HiFi-GAN vocoder. Motivated by the one-to-many nature of the TTS problem, where the same text \ninput can be spoken in multiple ways, the model also includes a stochastic duration predictor, which allows the model to \nsynthesise speech with different rhythms from the same input text. \n\nThe model is trained end-to-end with a combination of losses derived from variational lower bound and adversarial training. \nTo improve the expressiveness of the model, normalizing flows are applied to the conditional prior distribution. During \ninference, the text encodings are up-sampled based on the duration prediction module, and then mapped into the \nwaveform using a cascade of the flow module and HiFi-GAN decoder. 
Due to the stochastic nature of the duration predictor,\nthe model is non-deterministic, and thus requires a fixed seed to generate the same speech waveform.\n\nFor the MMS project, a separate VITS checkpoint is trained on each langauge.\n\n## Usage\n\nMMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards. To use this checkpoint, \nfirst install the latest version of the library:\n\n```\npip install --upgrade transformers accelerate\n```\n\nThen, run inference with the following code-snippet:\n\n```python\nfrom transformers import VitsModel, AutoTokenizer\nimport torch\n\nmodel = VitsModel.from_pretrained(\"facebook/mms-tts-cas\")\ntokenizer = AutoTokenizer.from_pretrained(\"facebook/mms-tts-cas\")\n\ntext = \"some example text in the Tsimané language\"\ninputs = tokenizer(text, return_tensors=\"pt\")\n\nwith torch.no_grad():\n output = model(**inputs).waveform\n```\n\nThe resulting waveform can be saved as a `.wav` file:\n\n```python\nimport scipy\n\nscipy.io.wavfile.write(\"techno.wav\", rate=model.config.sampling_rate, data=output)\n```\n\nOr displayed in a Jupyter Notebook / Google Colab:\n\n```python\nfrom IPython.display import Audio\n\nAudio(output, rate=model.config.sampling_rate)\n```\n\n\n\n## BibTex citation\n\nThis model was developed by Vineel Pratap et al. from Meta AI. If you use the model, consider citing the MMS paper:\n\n```\n@article{pratap2023mms,\n title={Scaling Speech Technology to 1,000+ Languages},\n author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli},\n journal={arXiv},\n year={2023}\n}\n```\n\n## License\n\nThe model is licensed as **CC-BY-NC 4.0**.\n"},"matched_bigbio_names":{"kind":"list like","value":["CAS"],"string":"[\n \"CAS\"\n]"}}},{"rowIdx":2264,"cells":{"id":{"kind":"string","value":"Zamoranesis/clinical_bert"},"author":{"kind":"string","value":"Zamoranesis"},"task_category":{"kind":"string","value":"fill-mask"},"tags":{"kind":"list like","value":["transformers","pytorch","bert","fill-mask","clinical notes","healthcare","medical","pharma","base_model:emilyalsentzer/Bio_ClinicalBERT","base_model:finetune:emilyalsentzer/Bio_ClinicalBERT","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"bert\",\n \"fill-mask\",\n \"clinical notes\",\n \"healthcare\",\n \"medical\",\n \"pharma\",\n \"base_model:emilyalsentzer/Bio_ClinicalBERT\",\n \"base_model:finetune:emilyalsentzer/Bio_ClinicalBERT\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-09-21T09:52:53Z","string":"2023-09-21T09:52:53Z"},"last_modified":{"kind":"string","value":"2024-01-31T16:54:38+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":2,"string":"2"},"README":{"kind":"string","value":"---\nbase_model: emilyalsentzer/Bio_ClinicalBERT\nlicense: mit\ntags:\n- clinical notes\n- healthcare\n- medical\n- pharma\nwidget:\n- text: A 25 year old woman with no history of interest, who is studied for presenting\n a history of [MASK] pain of predominance in right hypochondrium\nmodel-index:\n- name: clinical_bert\n results: []\n---\n\n\n\n# clinical_bert\n\nThis model is a fine-tuned version of 
[emilyalsentzer/Bio_ClinicalBERT](https://huggingface.co/emilyalsentzer/Bio_ClinicalBERT) on [PlanTL-GOB-ES/pharmaconer](https://huggingface.co/datasets/PlanTL-GOB-ES/pharmaconer).\nIt achieves the following results on the evaluation and test set:\n- Validation Loss: 1.6020\n- Test Loss: 1.6591\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0005\n- train_batch_size: 64\n- eval_batch_size: 64\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- lr_scheduler_warmup_ratio: 0.1\n- lr_scheduler_warmup_steps: 100\n- training_steps: 5000\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss |\n|:-------------:|:-----:|:----:|:---------------:|\n| No log | 0.78 | 100 | 1.9485 |\n| No log | 1.56 | 200 | 1.8681 |\n| No log | 2.34 | 300 | 1.8152 |\n| No log | 3.12 | 400 | 1.7886 |\n| 1.9285 | 3.91 | 500 | 1.7309 |\n| 1.9285 | 4.69 | 600 | 1.6810 |\n| 1.9285 | 5.47 | 700 | 1.7065 |\n| 1.9285 | 6.25 | 800 | 1.7067 |\n| 1.9285 | 7.03 | 900 | 1.7312 |\n| 1.6644 | 7.81 | 1000 | 1.7006 |\n| 1.6644 | 8.59 | 1100 | 1.6736 |\n| 1.6644 | 9.38 | 1200 | 1.6846 |\n| 1.6644 | 10.16 | 1300 | 1.6621 |\n| 1.6644 | 10.94 | 1400 | 1.6381 |\n| 1.5247 | 11.72 | 1500 | 1.6281 |\n| 1.5247 | 12.5 | 1600 | 1.6605 |\n| 1.5247 | 13.28 | 1700 | 1.6770 |\n| 1.5247 | 14.06 | 1800 | 1.6666 |\n| 1.5247 | 14.84 | 1900 | 1.6620 |\n| 1.4334 | 15.62 | 2000 | 1.6677 |\n| 1.4334 | 16.41 | 2100 | 1.6311 |\n| 1.4334 | 17.19 | 2200 | 1.6743 |\n| 1.4334 | 17.97 | 2300 | 1.6586 |\n| 1.4334 | 18.75 | 2400 | 1.6086 |\n| 1.3423 | 19.53 | 2500 | 1.6229 |\n| 1.3423 | 20.31 | 2600 | 1.6475 |\n| 1.3423 | 21.09 | 2700 | 1.6388 |\n| 1.3423 | 21.88 | 2800 | 1.6275 |\n| 1.3423 | 22.66 | 2900 | 1.6372 |\n| 1.2712 | 23.44 | 3000 | 1.6345 |\n| 1.2712 | 24.22 | 3100 | 1.6442 |\n| 1.2712 | 25.0 | 3200 | 1.6864 |\n| 1.2712 | 25.78 | 3300 | 1.6139 |\n| 1.2712 | 26.56 | 3400 | 1.6161 |\n| 1.215 | 27.34 | 3500 | 1.6491 |\n| 1.215 | 28.12 | 3600 | 1.6442 |\n| 1.215 | 28.91 | 3700 | 1.6409 |\n| 1.215 | 29.69 | 3800 | 1.6539 |\n| 1.215 | 30.47 | 3900 | 1.6052 |\n| 1.1652 | 31.25 | 4000 | 1.6459 |\n| 1.1652 | 32.03 | 4100 | 1.6362 |\n| 1.1652 | 32.81 | 4200 | 1.6413 |\n| 1.1652 | 33.59 | 4300 | 1.6377 |\n| 1.1652 | 34.38 | 4400 | 1.6344 |\n| 1.1213 | 35.16 | 4500 | 1.6406 |\n| 1.1213 | 35.94 | 4600 | 1.6113 |\n| 1.1213 | 36.72 | 4700 | 1.6410 |\n| 1.1213 | 37.5 | 4800 | 1.6378 |\n| 1.1213 | 38.28 | 4900 | 1.6341 |\n| 1.0939 | 39.06 | 5000 | 1.6020 |\n\n\n### Framework versions\n\n- Transformers 4.33.2\n- Pytorch 2.0.1+cu118\n- Datasets 2.14.5\n- Tokenizers 0.13.3\n"},"matched_bigbio_names":{"kind":"list like","value":["PHARMACONER"],"string":"[\n \"PHARMACONER\"\n]"}}},{"rowIdx":2265,"cells":{"id":{"kind":"string","value":"neuralmagic/bge-small-en-v1.5-sparse"},"author":{"kind":"string","value":"neuralmagic"},"task_category":{"kind":"string","value":"feature-extraction"},"tags":{"kind":"list like","value":["transformers","onnx","bert","feature-extraction","mteb","sparse sparsity quantized onnx embeddings int8","en","license:mit","model-index","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"onnx\",\n \"bert\",\n \"feature-extraction\",\n \"mteb\",\n \"sparse sparsity quantized onnx 
embeddings int8\",\n \"en\",\n \"license:mit\",\n \"model-index\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-09-21T13:21:02Z","string":"2023-09-21T13:21:02Z"},"last_modified":{"kind":"string","value":"2023-11-13T18:23:24+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":4,"string":"4"},"README":{"kind":"string","value":"---\nlanguage:\n- en\nlicense: mit\ntags:\n- mteb\n- sparse sparsity quantized onnx embeddings int8\nmodel-index:\n- name: bge-small-en-v1.5-sparse\n results:\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (en)\n type: mteb/amazon_counterfactual\n config: en\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 70.71641791044776\n - type: ap\n value: 32.850850647310004\n - type: f1\n value: 64.48101916414805\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonPolarityClassification\n type: mteb/amazon_polarity\n config: default\n split: test\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\n metrics:\n - type: accuracy\n value: 83.33962500000001\n - type: ap\n value: 78.28706349240106\n - type: f1\n value: 83.27426715603062\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (en)\n type: mteb/amazon_reviews_multi\n config: en\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 40.988\n - type: f1\n value: 40.776679545648506\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna\n type: arguana\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 26.101999999999997\n - type: map_at_10\n value: 40.754000000000005\n - type: map_at_100\n value: 41.83\n - type: map_at_1000\n value: 41.845\n - type: map_at_3\n value: 36.178\n - type: map_at_5\n value: 38.646\n - type: mrr_at_1\n value: 26.6\n - type: mrr_at_10\n value: 40.934\n - type: mrr_at_100\n value: 42.015\n - type: mrr_at_1000\n value: 42.03\n - type: mrr_at_3\n value: 36.344\n - type: mrr_at_5\n value: 38.848\n - type: ndcg_at_1\n value: 26.101999999999997\n - type: ndcg_at_10\n value: 49.126999999999995\n - type: ndcg_at_100\n value: 53.815999999999995\n - type: ndcg_at_1000\n value: 54.178000000000004\n - type: ndcg_at_3\n value: 39.607\n - type: ndcg_at_5\n value: 44.086999999999996\n - type: precision_at_1\n value: 26.101999999999997\n - type: precision_at_10\n value: 7.596\n - type: precision_at_100\n value: 0.967\n - type: precision_at_1000\n value: 0.099\n - type: precision_at_3\n value: 16.524\n - type: precision_at_5\n value: 12.105\n - type: recall_at_1\n value: 26.101999999999997\n - type: recall_at_10\n value: 75.96000000000001\n - type: recall_at_100\n value: 96.65700000000001\n - type: recall_at_1000\n value: 99.431\n - type: recall_at_3\n value: 49.573\n - type: recall_at_5\n value: 60.526\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: v_measure\n value: 43.10651535441929\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: v_measure\n value: 34.41095293826606\n - task:\n type: Reranking\n dataset:\n name: MTEB 
AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: map\n value: 56.96575970919239\n - type: mrr\n value: 69.92503187794047\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cos_sim_pearson\n value: 79.64892774481326\n - type: cos_sim_spearman\n value: 78.953003817029\n - type: euclidean_pearson\n value: 78.92456838230683\n - type: euclidean_spearman\n value: 78.56504316985354\n - type: manhattan_pearson\n value: 79.21436359014227\n - type: manhattan_spearman\n value: 78.66263575501259\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 81.25\n - type: f1\n value: 81.20841448916138\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: v_measure\n value: 34.69545244587236\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: v_measure\n value: 28.84301739171936\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackAndroidRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 23.401\n - type: map_at_10\n value: 32.451\n - type: map_at_100\n value: 33.891\n - type: map_at_1000\n value: 34.01\n - type: map_at_3\n value: 29.365999999999996\n - type: map_at_5\n value: 31.240000000000002\n - type: mrr_at_1\n value: 29.9\n - type: mrr_at_10\n value: 38.590999999999994\n - type: mrr_at_100\n value: 39.587\n - type: mrr_at_1000\n value: 39.637\n - type: mrr_at_3\n value: 36.028\n - type: mrr_at_5\n value: 37.673\n - type: ndcg_at_1\n value: 29.9\n - type: ndcg_at_10\n value: 38.251000000000005\n - type: ndcg_at_100\n value: 44.354\n - type: ndcg_at_1000\n value: 46.642\n - type: ndcg_at_3\n value: 33.581\n - type: ndcg_at_5\n value: 35.96\n - type: precision_at_1\n value: 29.9\n - type: precision_at_10\n value: 7.439\n - type: precision_at_100\n value: 1.28\n - type: precision_at_1000\n value: 0.17700000000000002\n - type: precision_at_3\n value: 16.404\n - type: precision_at_5\n value: 12.046\n - type: recall_at_1\n value: 23.401\n - type: recall_at_10\n value: 49.305\n - type: recall_at_100\n value: 75.885\n - type: recall_at_1000\n value: 90.885\n - type: recall_at_3\n value: 35.341\n - type: recall_at_5\n value: 42.275\n - type: map_at_1\n value: 22.103\n - type: map_at_10\n value: 29.271\n - type: map_at_100\n value: 30.151\n - type: map_at_1000\n value: 30.276999999999997\n - type: map_at_3\n value: 27.289\n - type: map_at_5\n value: 28.236\n - type: mrr_at_1\n value: 26.943\n - type: mrr_at_10\n value: 33.782000000000004\n - type: mrr_at_100\n value: 34.459\n - type: mrr_at_1000\n value: 34.525\n - type: mrr_at_3\n value: 31.985000000000003\n - type: mrr_at_5\n value: 32.909\n - type: ndcg_at_1\n value: 26.943\n - type: ndcg_at_10\n value: 33.616\n - type: ndcg_at_100\n value: 37.669000000000004\n - type: ndcg_at_1000\n value: 40.247\n - type: ndcg_at_3\n value: 30.482\n - 
type: ndcg_at_5\n value: 31.615\n - type: precision_at_1\n value: 26.943\n - type: precision_at_10\n value: 6.146\n - type: precision_at_100\n value: 1.038\n - type: precision_at_1000\n value: 0.151\n - type: precision_at_3\n value: 14.521999999999998\n - type: precision_at_5\n value: 10.038\n - type: recall_at_1\n value: 22.103\n - type: recall_at_10\n value: 41.754999999999995\n - type: recall_at_100\n value: 59.636\n - type: recall_at_1000\n value: 76.801\n - type: recall_at_3\n value: 32.285000000000004\n - type: recall_at_5\n value: 35.684\n - type: map_at_1\n value: 32.565\n - type: map_at_10\n value: 43.07\n - type: map_at_100\n value: 44.102999999999994\n - type: map_at_1000\n value: 44.175\n - type: map_at_3\n value: 40.245\n - type: map_at_5\n value: 41.71\n - type: mrr_at_1\n value: 37.429\n - type: mrr_at_10\n value: 46.358\n - type: mrr_at_100\n value: 47.146\n - type: mrr_at_1000\n value: 47.187\n - type: mrr_at_3\n value: 44.086\n - type: mrr_at_5\n value: 45.318000000000005\n - type: ndcg_at_1\n value: 37.429\n - type: ndcg_at_10\n value: 48.398\n - type: ndcg_at_100\n value: 52.90899999999999\n - type: ndcg_at_1000\n value: 54.478\n - type: ndcg_at_3\n value: 43.418\n - type: ndcg_at_5\n value: 45.578\n - type: precision_at_1\n value: 37.429\n - type: precision_at_10\n value: 7.856000000000001\n - type: precision_at_100\n value: 1.093\n - type: precision_at_1000\n value: 0.129\n - type: precision_at_3\n value: 19.331\n - type: precision_at_5\n value: 13.191\n - type: recall_at_1\n value: 32.565\n - type: recall_at_10\n value: 61.021\n - type: recall_at_100\n value: 81.105\n - type: recall_at_1000\n value: 92.251\n - type: recall_at_3\n value: 47.637\n - type: recall_at_5\n value: 52.871\n - type: map_at_1\n value: 18.108\n - type: map_at_10\n value: 24.613\n - type: map_at_100\n value: 25.624000000000002\n - type: map_at_1000\n value: 25.721\n - type: map_at_3\n value: 22.271\n - type: map_at_5\n value: 23.681\n - type: mrr_at_1\n value: 19.435\n - type: mrr_at_10\n value: 26.124000000000002\n - type: mrr_at_100\n value: 27.07\n - type: mrr_at_1000\n value: 27.145999999999997\n - type: mrr_at_3\n value: 23.748\n - type: mrr_at_5\n value: 25.239\n - type: ndcg_at_1\n value: 19.435\n - type: ndcg_at_10\n value: 28.632\n - type: ndcg_at_100\n value: 33.988\n - type: ndcg_at_1000\n value: 36.551\n - type: ndcg_at_3\n value: 24.035999999999998\n - type: ndcg_at_5\n value: 26.525\n - type: precision_at_1\n value: 19.435\n - type: precision_at_10\n value: 4.565\n - type: precision_at_100\n value: 0.771\n - type: precision_at_1000\n value: 0.10200000000000001\n - type: precision_at_3\n value: 10.169\n - type: precision_at_5\n value: 7.571\n - type: recall_at_1\n value: 18.108\n - type: recall_at_10\n value: 39.533\n - type: recall_at_100\n value: 64.854\n - type: recall_at_1000\n value: 84.421\n - type: recall_at_3\n value: 27.500000000000004\n - type: recall_at_5\n value: 33.314\n - type: map_at_1\n value: 11.087\n - type: map_at_10\n value: 17.323\n - type: map_at_100\n value: 18.569\n - type: map_at_1000\n value: 18.694\n - type: map_at_3\n value: 15.370000000000001\n - type: map_at_5\n value: 16.538\n - type: mrr_at_1\n value: 13.557\n - type: mrr_at_10\n value: 21.041\n - type: mrr_at_100\n value: 22.134\n - type: mrr_at_1000\n value: 22.207\n - type: mrr_at_3\n value: 18.843\n - type: mrr_at_5\n value: 20.236\n - type: ndcg_at_1\n value: 13.557\n - type: ndcg_at_10\n value: 21.571\n - type: ndcg_at_100\n value: 27.678000000000004\n - type: ndcg_at_1000\n value: 30.8\n - 
type: ndcg_at_3\n value: 17.922\n - type: ndcg_at_5\n value: 19.826\n - type: precision_at_1\n value: 13.557\n - type: precision_at_10\n value: 4.1290000000000004\n - type: precision_at_100\n value: 0.8370000000000001\n - type: precision_at_1000\n value: 0.125\n - type: precision_at_3\n value: 8.914\n - type: precision_at_5\n value: 6.691999999999999\n - type: recall_at_1\n value: 11.087\n - type: recall_at_10\n value: 30.94\n - type: recall_at_100\n value: 57.833999999999996\n - type: recall_at_1000\n value: 80.365\n - type: recall_at_3\n value: 20.854\n - type: recall_at_5\n value: 25.695\n - type: map_at_1\n value: 21.708\n - type: map_at_10\n value: 30.422\n - type: map_at_100\n value: 31.713\n - type: map_at_1000\n value: 31.842\n - type: map_at_3\n value: 27.424\n - type: map_at_5\n value: 29.17\n - type: mrr_at_1\n value: 26.756\n - type: mrr_at_10\n value: 35.304\n - type: mrr_at_100\n value: 36.296\n - type: mrr_at_1000\n value: 36.359\n - type: mrr_at_3\n value: 32.692\n - type: mrr_at_5\n value: 34.288999999999994\n - type: ndcg_at_1\n value: 26.756\n - type: ndcg_at_10\n value: 35.876000000000005\n - type: ndcg_at_100\n value: 41.708\n - type: ndcg_at_1000\n value: 44.359\n - type: ndcg_at_3\n value: 30.946\n - type: ndcg_at_5\n value: 33.404\n - type: precision_at_1\n value: 26.756\n - type: precision_at_10\n value: 6.795\n - type: precision_at_100\n value: 1.138\n - type: precision_at_1000\n value: 0.155\n - type: precision_at_3\n value: 15.046999999999999\n - type: precision_at_5\n value: 10.972\n - type: recall_at_1\n value: 21.708\n - type: recall_at_10\n value: 47.315000000000005\n - type: recall_at_100\n value: 72.313\n - type: recall_at_1000\n value: 90.199\n - type: recall_at_3\n value: 33.528999999999996\n - type: recall_at_5\n value: 39.985\n - type: map_at_1\n value: 18.902\n - type: map_at_10\n value: 26.166\n - type: map_at_100\n value: 27.368\n - type: map_at_1000\n value: 27.493000000000002\n - type: map_at_3\n value: 23.505000000000003\n - type: map_at_5\n value: 25.019000000000002\n - type: mrr_at_1\n value: 23.402\n - type: mrr_at_10\n value: 30.787\n - type: mrr_at_100\n value: 31.735000000000003\n - type: mrr_at_1000\n value: 31.806\n - type: mrr_at_3\n value: 28.33\n - type: mrr_at_5\n value: 29.711\n - type: ndcg_at_1\n value: 23.402\n - type: ndcg_at_10\n value: 30.971\n - type: ndcg_at_100\n value: 36.61\n - type: ndcg_at_1000\n value: 39.507999999999996\n - type: ndcg_at_3\n value: 26.352999999999998\n - type: ndcg_at_5\n value: 28.488000000000003\n - type: precision_at_1\n value: 23.402\n - type: precision_at_10\n value: 5.799\n - type: precision_at_100\n value: 1.0\n - type: precision_at_1000\n value: 0.14100000000000001\n - type: precision_at_3\n value: 12.633\n - type: precision_at_5\n value: 9.269\n - type: recall_at_1\n value: 18.902\n - type: recall_at_10\n value: 40.929\n - type: recall_at_100\n value: 65.594\n - type: recall_at_1000\n value: 85.961\n - type: recall_at_3\n value: 28.121000000000002\n - type: recall_at_5\n value: 33.638\n - type: map_at_1\n value: 19.168\n - type: map_at_10\n value: 25.142999999999997\n - type: map_at_100\n value: 25.993\n - type: map_at_1000\n value: 26.076\n - type: map_at_3\n value: 23.179\n - type: map_at_5\n value: 24.322\n - type: mrr_at_1\n value: 21.933\n - type: mrr_at_10\n value: 27.72\n - type: mrr_at_100\n value: 28.518\n - type: mrr_at_1000\n value: 28.582\n - type: mrr_at_3\n value: 25.791999999999998\n - type: mrr_at_5\n value: 26.958\n - type: ndcg_at_1\n value: 21.933\n - type: ndcg_at_10\n 
value: 28.866999999999997\n - type: ndcg_at_100\n value: 33.285\n - type: ndcg_at_1000\n value: 35.591\n - type: ndcg_at_3\n value: 25.202999999999996\n - type: ndcg_at_5\n value: 27.045\n - type: precision_at_1\n value: 21.933\n - type: precision_at_10\n value: 4.632\n - type: precision_at_100\n value: 0.733\n - type: precision_at_1000\n value: 0.101\n - type: precision_at_3\n value: 10.992\n - type: precision_at_5\n value: 7.853000000000001\n - type: recall_at_1\n value: 19.168\n - type: recall_at_10\n value: 37.899\n - type: recall_at_100\n value: 58.54899999999999\n - type: recall_at_1000\n value: 75.666\n - type: recall_at_3\n value: 27.831\n - type: recall_at_5\n value: 32.336\n - type: map_at_1\n value: 12.764000000000001\n - type: map_at_10\n value: 17.757\n - type: map_at_100\n value: 18.677\n - type: map_at_1000\n value: 18.813\n - type: map_at_3\n value: 16.151\n - type: map_at_5\n value: 16.946\n - type: mrr_at_1\n value: 15.726\n - type: mrr_at_10\n value: 21.019\n - type: mrr_at_100\n value: 21.856\n - type: mrr_at_1000\n value: 21.954\n - type: mrr_at_3\n value: 19.282\n - type: mrr_at_5\n value: 20.189\n - type: ndcg_at_1\n value: 15.726\n - type: ndcg_at_10\n value: 21.259\n - type: ndcg_at_100\n value: 25.868999999999996\n - type: ndcg_at_1000\n value: 29.425\n - type: ndcg_at_3\n value: 18.204\n - type: ndcg_at_5\n value: 19.434\n - type: precision_at_1\n value: 15.726\n - type: precision_at_10\n value: 3.8920000000000003\n - type: precision_at_100\n value: 0.741\n - type: precision_at_1000\n value: 0.121\n - type: precision_at_3\n value: 8.58\n - type: precision_at_5\n value: 6.132\n - type: recall_at_1\n value: 12.764000000000001\n - type: recall_at_10\n value: 28.639\n - type: recall_at_100\n value: 49.639\n - type: recall_at_1000\n value: 75.725\n - type: recall_at_3\n value: 19.883\n - type: recall_at_5\n value: 23.141000000000002\n - type: map_at_1\n value: 18.98\n - type: map_at_10\n value: 25.2\n - type: map_at_100\n value: 26.279000000000003\n - type: map_at_1000\n value: 26.399\n - type: map_at_3\n value: 23.399\n - type: map_at_5\n value: 24.284\n - type: mrr_at_1\n value: 22.015\n - type: mrr_at_10\n value: 28.555000000000003\n - type: mrr_at_100\n value: 29.497\n - type: mrr_at_1000\n value: 29.574\n - type: mrr_at_3\n value: 26.788\n - type: mrr_at_5\n value: 27.576\n - type: ndcg_at_1\n value: 22.015\n - type: ndcg_at_10\n value: 29.266\n - type: ndcg_at_100\n value: 34.721000000000004\n - type: ndcg_at_1000\n value: 37.659\n - type: ndcg_at_3\n value: 25.741000000000003\n - type: ndcg_at_5\n value: 27.044\n - type: precision_at_1\n value: 22.015\n - type: precision_at_10\n value: 4.897\n - type: precision_at_100\n value: 0.8540000000000001\n - type: precision_at_1000\n value: 0.122\n - type: precision_at_3\n value: 11.567\n - type: precision_at_5\n value: 7.9479999999999995\n - type: recall_at_1\n value: 18.98\n - type: recall_at_10\n value: 38.411\n - type: recall_at_100\n value: 63.164\n - type: recall_at_1000\n value: 84.292\n - type: recall_at_3\n value: 28.576\n - type: recall_at_5\n value: 31.789\n - type: map_at_1\n value: 20.372\n - type: map_at_10\n value: 27.161\n - type: map_at_100\n value: 28.364\n - type: map_at_1000\n value: 28.554000000000002\n - type: map_at_3\n value: 25.135\n - type: map_at_5\n value: 26.200000000000003\n - type: mrr_at_1\n value: 24.704\n - type: mrr_at_10\n value: 31.219\n - type: mrr_at_100\n value: 32.092\n - type: mrr_at_1000\n value: 32.181\n - type: mrr_at_3\n value: 29.282000000000004\n - type: mrr_at_5\n value: 
30.359\n - type: ndcg_at_1\n value: 24.704\n - type: ndcg_at_10\n value: 31.622\n - type: ndcg_at_100\n value: 36.917\n - type: ndcg_at_1000\n value: 40.357\n - type: ndcg_at_3\n value: 28.398\n - type: ndcg_at_5\n value: 29.764000000000003\n - type: precision_at_1\n value: 24.704\n - type: precision_at_10\n value: 5.81\n - type: precision_at_100\n value: 1.208\n - type: precision_at_1000\n value: 0.209\n - type: precision_at_3\n value: 13.241\n - type: precision_at_5\n value: 9.407\n - type: recall_at_1\n value: 20.372\n - type: recall_at_10\n value: 40.053\n - type: recall_at_100\n value: 64.71000000000001\n - type: recall_at_1000\n value: 87.607\n - type: recall_at_3\n value: 29.961\n - type: recall_at_5\n value: 34.058\n - type: map_at_1\n value: 14.424000000000001\n - type: map_at_10\n value: 20.541999999999998\n - type: map_at_100\n value: 21.495\n - type: map_at_1000\n value: 21.604\n - type: map_at_3\n value: 18.608\n - type: map_at_5\n value: 19.783\n - type: mrr_at_1\n value: 15.895999999999999\n - type: mrr_at_10\n value: 22.484\n - type: mrr_at_100\n value: 23.376\n - type: mrr_at_1000\n value: 23.467\n - type: mrr_at_3\n value: 20.548\n - type: mrr_at_5\n value: 21.731\n - type: ndcg_at_1\n value: 15.895999999999999\n - type: ndcg_at_10\n value: 24.343\n - type: ndcg_at_100\n value: 29.181\n - type: ndcg_at_1000\n value: 32.330999999999996\n - type: ndcg_at_3\n value: 20.518\n - type: ndcg_at_5\n value: 22.561999999999998\n - type: precision_at_1\n value: 15.895999999999999\n - type: precision_at_10\n value: 3.9739999999999998\n - type: precision_at_100\n value: 0.6799999999999999\n - type: precision_at_1000\n value: 0.105\n - type: precision_at_3\n value: 9.057\n - type: precision_at_5\n value: 6.654\n - type: recall_at_1\n value: 14.424000000000001\n - type: recall_at_10\n value: 34.079\n - type: recall_at_100\n value: 56.728\n - type: recall_at_1000\n value: 80.765\n - type: recall_at_3\n value: 23.993000000000002\n - type: recall_at_5\n value: 28.838\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: accuracy\n value: 41.665\n - type: f1\n value: 37.601137843331244\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 74.8052\n - type: ap\n value: 68.92588517572685\n - type: f1\n value: 74.66801685854456\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 91.2220702234382\n - type: f1\n value: 90.81687856852439\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 69.39124487004105\n - type: f1\n value: 51.8350043424968\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 69.80497646267652\n - type: f1\n value: 67.34213899244814\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n 
type: mteb/amazon_massive_scenario\n config: en\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 74.54270342972428\n - type: f1\n value: 74.02802500235784\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: v_measure\n value: 30.488580544269002\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: v_measure\n value: 28.80426879476371\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: default\n split: test\n revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69\n metrics:\n - type: map\n value: 31.37970068676043\n - type: mrr\n value: 32.48523694064166\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: v_measure\n value: 42.862710845031565\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\n metrics:\n - type: v_measure\n value: 54.270000736385626\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\n metrics:\n - type: cos_sim_pearson\n value: 80.89215288990194\n - type: cos_sim_spearman\n value: 74.386413188675\n - type: euclidean_pearson\n value: 78.83679563989534\n - type: euclidean_spearman\n value: 74.29328198771996\n - type: manhattan_pearson\n value: 78.77968796707641\n - type: manhattan_spearman\n value: 74.20887429784696\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cos_sim_pearson\n value: 78.31858821914498\n - type: cos_sim_spearman\n value: 72.2217008523832\n - type: euclidean_pearson\n value: 75.38901061978429\n - type: euclidean_spearman\n value: 71.81255767675184\n - type: manhattan_pearson\n value: 75.49472202181288\n - type: manhattan_spearman\n value: 71.96322588726144\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - type: cos_sim_pearson\n value: 79.48334648997455\n - type: cos_sim_spearman\n value: 80.99654029572798\n - type: euclidean_pearson\n value: 80.46546523970035\n - type: euclidean_spearman\n value: 80.90646216980744\n - type: manhattan_pearson\n value: 80.35474057857608\n - type: manhattan_spearman\n value: 80.8141299909659\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: cos_sim_pearson\n value: 79.73826970784727\n - type: cos_sim_spearman\n value: 76.9926870133034\n - type: euclidean_pearson\n value: 79.6386542120984\n - type: euclidean_spearman\n value: 77.05041986942253\n - type: manhattan_pearson\n value: 79.61799508502459\n - type: manhattan_spearman\n value: 77.07169617647067\n - task:\n type: STS\n dataset:\n name: MTEB STS15\n type: 
mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cos_sim_pearson\n value: 83.93999019426069\n - type: cos_sim_spearman\n value: 85.21166521594695\n - type: euclidean_pearson\n value: 84.97207676326357\n - type: euclidean_spearman\n value: 85.40726578482739\n - type: manhattan_pearson\n value: 85.0386693192183\n - type: manhattan_spearman\n value: 85.49230945586409\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cos_sim_pearson\n value: 80.8133974034008\n - type: cos_sim_spearman\n value: 82.82919022688844\n - type: euclidean_pearson\n value: 81.92587923760179\n - type: euclidean_spearman\n value: 82.86629450518863\n - type: manhattan_pearson\n value: 81.98232365999253\n - type: manhattan_spearman\n value: 82.94313939920296\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 86.12872422642363\n - type: cos_sim_spearman\n value: 87.77672179979807\n - type: euclidean_pearson\n value: 87.76172961705947\n - type: euclidean_spearman\n value: 87.9891393339215\n - type: manhattan_pearson\n value: 87.78863663568221\n - type: manhattan_spearman\n value: 88.08297053203866\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 58.82824030232733\n - type: cos_sim_spearman\n value: 64.17079382633538\n - type: euclidean_pearson\n value: 61.31505225602925\n - type: euclidean_spearman\n value: 64.05080034530694\n - type: manhattan_pearson\n value: 61.77095758943306\n - type: manhattan_spearman\n value: 64.14475973774933\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmark\n type: mteb/stsbenchmark-sts\n config: default\n split: test\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\n metrics:\n - type: cos_sim_pearson\n value: 81.39239803497064\n - type: cos_sim_spearman\n value: 81.76637354520439\n - type: euclidean_pearson\n value: 82.98008209033587\n - type: euclidean_spearman\n value: 82.18662536188657\n - type: manhattan_pearson\n value: 82.9630328314908\n - type: manhattan_spearman\n value: 82.13726553603003\n - task:\n type: Reranking\n dataset:\n name: MTEB SciDocsRR\n type: mteb/scidocs-reranking\n config: default\n split: test\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\n metrics:\n - type: map\n value: 79.45753132898741\n - type: mrr\n value: 93.84029822755313\n - task:\n type: PairClassification\n dataset:\n name: MTEB SprintDuplicateQuestions\n type: mteb/sprintduplicatequestions-pairclassification\n config: default\n split: test\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\n metrics:\n - type: cos_sim_accuracy\n value: 99.8019801980198\n - type: cos_sim_ap\n value: 94.58629018512772\n - type: cos_sim_f1\n value: 89.84771573604061\n - type: cos_sim_precision\n value: 91.23711340206185\n - type: cos_sim_recall\n value: 88.5\n - type: dot_accuracy\n value: 99.74950495049505\n - type: dot_ap\n value: 92.5761214576951\n - type: dot_f1\n value: 87.09841917389087\n - type: dot_precision\n value: 88.86576482830385\n - type: dot_recall\n value: 85.39999999999999\n - type: euclidean_accuracy\n value: 99.80495049504951\n - type: euclidean_ap\n 
value: 94.56231673602272\n - type: euclidean_f1\n value: 90.02531645569621\n - type: euclidean_precision\n value: 91.17948717948718\n - type: euclidean_recall\n value: 88.9\n - type: manhattan_accuracy\n value: 99.8009900990099\n - type: manhattan_ap\n value: 94.5775591647447\n - type: manhattan_f1\n value: 89.86384266263238\n - type: manhattan_precision\n value: 90.64089521871821\n - type: manhattan_recall\n value: 89.1\n - type: max_accuracy\n value: 99.80495049504951\n - type: max_ap\n value: 94.58629018512772\n - type: max_f1\n value: 90.02531645569621\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClustering\n type: mteb/stackexchange-clustering\n config: default\n split: test\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\n metrics:\n - type: v_measure\n value: 53.088941385715735\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClusteringP2P\n type: mteb/stackexchange-clustering-p2p\n config: default\n split: test\n revision: 815ca46b2622cec33ccafc3735d572c266efdb44\n metrics:\n - type: v_measure\n value: 33.146129414825744\n - task:\n type: Reranking\n dataset:\n name: MTEB StackOverflowDupQuestions\n type: mteb/stackoverflowdupquestions-reranking\n config: default\n split: test\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\n metrics:\n - type: map\n value: 48.7511362739003\n - type: mrr\n value: 49.61682210763093\n - task:\n type: Classification\n dataset:\n name: MTEB ToxicConversationsClassification\n type: mteb/toxic_conversations_50k\n config: default\n split: test\n revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c\n metrics:\n - type: accuracy\n value: 67.43820000000001\n - type: ap\n value: 12.899489312331003\n - type: f1\n value: 52.03468121072981\n - task:\n type: Classification\n dataset:\n name: MTEB TweetSentimentExtractionClassification\n type: mteb/tweet_sentiment_extraction\n config: default\n split: test\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\n metrics:\n - type: accuracy\n value: 57.475947934352\n - type: f1\n value: 57.77676730676238\n - task:\n type: Clustering\n dataset:\n name: MTEB TwentyNewsgroupsClustering\n type: mteb/twentynewsgroups-clustering\n config: default\n split: test\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\n metrics:\n - type: v_measure\n value: 38.3463456299738\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterSemEval2015\n type: mteb/twittersemeval2015-pairclassification\n config: default\n split: test\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\n metrics:\n - type: cos_sim_accuracy\n value: 83.94230196101806\n - type: cos_sim_ap\n value: 67.00916556336148\n - type: cos_sim_f1\n value: 63.046014257939085\n - type: cos_sim_precision\n value: 61.961783439490446\n - type: cos_sim_recall\n value: 64.16886543535621\n - type: dot_accuracy\n value: 83.18531322644095\n - type: dot_ap\n value: 63.112896030267066\n - type: dot_f1\n value: 59.06565656565657\n - type: dot_precision\n value: 56.63438256658596\n - type: dot_recall\n value: 61.715039577836414\n - type: euclidean_accuracy\n value: 83.94230196101806\n - type: euclidean_ap\n value: 67.19856676674463\n - type: euclidean_f1\n value: 63.08428413691571\n - type: euclidean_precision\n value: 58.9543682641596\n - type: euclidean_recall\n value: 67.83641160949868\n - type: manhattan_accuracy\n value: 83.91845979614949\n - type: manhattan_ap\n value: 66.9845327263072\n - type: manhattan_f1\n value: 62.693323274236135\n - type: manhattan_precision\n value: 59.884698534710544\n - type: manhattan_recall\n 
value: 65.77836411609499\n - type: max_accuracy\n value: 83.94230196101806\n - type: max_ap\n value: 67.19856676674463\n - type: max_f1\n value: 63.08428413691571\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterURLCorpus\n type: mteb/twitterurlcorpus-pairclassification\n config: default\n split: test\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\n metrics:\n - type: cos_sim_accuracy\n value: 88.0777738968448\n - type: cos_sim_ap\n value: 84.19747786536\n - type: cos_sim_f1\n value: 75.91830995817077\n - type: cos_sim_precision\n value: 69.84671107949033\n - type: cos_sim_recall\n value: 83.14598090545118\n - type: dot_accuracy\n value: 87.14246904955951\n - type: dot_ap\n value: 82.37528804640529\n - type: dot_f1\n value: 74.40963166732163\n - type: dot_precision\n value: 69.4127841098447\n - type: dot_recall\n value: 80.18170619032954\n - type: euclidean_accuracy\n value: 88.08359529630924\n - type: euclidean_ap\n value: 84.22633217661986\n - type: euclidean_f1\n value: 76.09190339866403\n - type: euclidean_precision\n value: 72.70304390517605\n - type: euclidean_recall\n value: 79.81213427779488\n - type: manhattan_accuracy\n value: 88.08359529630924\n - type: manhattan_ap\n value: 84.18362004611083\n - type: manhattan_f1\n value: 76.08789625360231\n - type: manhattan_precision\n value: 71.49336582724072\n - type: manhattan_recall\n value: 81.3135201724669\n - type: max_accuracy\n value: 88.08359529630924\n - type: max_ap\n value: 84.22633217661986\n - type: max_f1\n value: 76.09190339866403\n---\n\n# bge-small-en-v1.5-sparse\n\n## Usage\n\nThis is the sparse ONNX variant of the [bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) embeddings model accelerated with [Sparsify](https://github.com/neuralmagic/sparsify) for quantization/pruning and [DeepSparseSentenceTransformers](https://github.com/neuralmagic/deepsparse/tree/main/src/deepsparse/sentence_transformers) for inference.\n\n```bash\npip install -U deepsparse-nightly[sentence_transformers]\n```\n\n```python\nfrom deepsparse.sentence_transformers import DeepSparseSentenceTransformer\nmodel = DeepSparseSentenceTransformer('neuralmagic/bge-small-en-v1.5-sparse', export=False)\n\n# Our sentences we like to encode\nsentences = ['This framework generates embeddings for each input sentence',\n 'Sentences are passed as a list of string.',\n 'The quick brown fox jumps over the lazy dog.']\n\n# Sentences are encoded by calling model.encode()\nembeddings = model.encode(sentences)\n\n# Print the embeddings\nfor sentence, embedding in zip(sentences, embeddings):\n print(\"Sentence:\", sentence)\n print(\"Embedding:\", embedding.shape)\n print(\"\")\n```\n\nFor general questions on these models and sparsification methods, reach out to the engineering team on our [community Slack](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ)."},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES"],"string":"[\n \"BIOSSES\"\n]"}}},{"rowIdx":2266,"cells":{"id":{"kind":"string","value":"TheBloke/Vigostral-7B-Chat-GPTQ"},"author":{"kind":"string","value":"TheBloke"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","mistral","text-generation","LLM","finetuned","conversational","fr","base_model:bofenghuang/vigostral-7b-chat","base_model:quantized:bofenghuang/vigostral-7b-chat","license:apache-2.0","autotrain_compatible","text-generation-inference","4-bit","gptq","region:us"],"string":"[\n 
\"transformers\",\n \"safetensors\",\n \"mistral\",\n \"text-generation\",\n \"LLM\",\n \"finetuned\",\n \"conversational\",\n \"fr\",\n \"base_model:bofenghuang/vigostral-7b-chat\",\n \"base_model:quantized:bofenghuang/vigostral-7b-chat\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"4-bit\",\n \"gptq\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-10-24T16:19:47Z","string":"2023-10-24T16:19:47Z"},"last_modified":{"kind":"string","value":"2023-10-24T16:49:13+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":3,"string":"3"},"README":{"kind":"string","value":"---\nbase_model: bofenghuang/vigostral-7b-chat\nlanguage: fr\nlicense: apache-2.0\nmodel_name: Vigostral 7B Chat\npipeline_tag: text-generation\ntags:\n- LLM\n- finetuned\ninference: false\nmodel_creator: bofeng huang\nmodel_type: mistral\nprompt_template: \"[INST] <>\\nVous êtes Vigogne, un assistant IA créé par Zaion\\\n \\ Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.\\n\\\n <>\\n\\n{prompt} [/INST] \\n\"\nquantized_by: TheBloke\n---\n\n\n\n\n
\n\"TheBlokeAI\"\n
\n\n

TheBloke's LLM work is generously supported by a grant from andreessen horowitz (a16z)

\n
\n\n\n# Vigostral 7B Chat - GPTQ\n- Model creator: [bofeng huang](https://huggingface.co/bofenghuang)\n- Original model: [Vigostral 7B Chat](https://huggingface.co/bofenghuang/vigostral-7b-chat)\n\n\n## Description\n\nThis repo contains GPTQ model files for [bofeng huang's Vigostral 7B Chat](https://huggingface.co/bofenghuang/vigostral-7b-chat).\n\nMultiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them.\n\n\n\n## Repositories available\n\n* [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Vigostral-7B-Chat-AWQ)\n* [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ)\n* [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GGUF)\n* [bofeng huang's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/bofenghuang/vigostral-7b-chat)\n\n\n\n## Prompt template: Vigogne-Llama-2-Chat\n\n```\n[INST] <>\nVous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.\n<>\n\n{prompt} [/INST] \n\n```\n\n\n\n\n\n\n## Known compatible clients / servers\n\nThese GPTQ models are known to work in the following inference servers/webuis.\n\n- [text-generation-webui](https://github.com/oobabooga/text-generation-webui)\n- [KobaldAI United](https://github.com/henk717/koboldai)\n- [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui)\n- [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference)\n\nThis may not be a complete list; if you know of others, please let me know!\n\n\n\n## Provided files, and GPTQ parameters\n\nMultiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements.\n\nEach separate quant is in a different branch. See below for instructions on fetching from different branches.\n\nMost GPTQ files are made with AutoGPTQ. Mistral models are currently made with Transformers.\n\n
\n Explanation of GPTQ parameters\n\n- Bits: The bit size of the quantised model.\n- GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. \"None\" is the lowest possible value.\n- Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now.\n- Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy.\n- GPTQ dataset: The calibration dataset used during quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. Note that the GPTQ calibration dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s).\n- Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used. Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences.\n- ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama and Mistral models in 4-bit.\n\n
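These same parameters are recorded per branch in that branch's `quantize_config.json` (the file text-generation-webui reads automatically, as noted further down). If you want to verify them programmatically before pulling a full branch, a minimal sketch with `huggingface_hub` — the key names shown follow AutoGPTQ's usual convention and may differ slightly between releases:

```python
import json
from huggingface_hub import hf_hub_download

# Fetch only the small quantisation config from the branch you are considering.
config_path = hf_hub_download(
    repo_id="TheBloke/Vigostral-7B-Chat-GPTQ",
    filename="quantize_config.json",
    revision="gptq-4bit-32g-actorder_True",  # or "main", or any branch from the table below
)

with open(config_path) as f:
    quant_cfg = json.load(f)

# Typical AutoGPTQ keys: bits, group_size, desc_act (Act Order), damp_percent
for key in ("bits", "group_size", "desc_act", "damp_percent"):
    print(f"{key}: {quant_cfg.get(key)}")
```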
\n\n| Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc |\n| ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- |\n| [main](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/main) | 4 | 128 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 4.16 GB | Yes | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. | \n| [gptq-4bit-32g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-4bit-32g-actorder_True) | 4 | 32 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 4.57 GB | Yes | 4-bit, with Act Order and group size 32g. Gives highest possible inference quality, with maximum VRAM usage. | \n| [gptq-8bit--1g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-8bit--1g-actorder_True) | 8 | None | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 7.52 GB | No | 8-bit, with Act Order. No group size, to lower VRAM requirements. | \n| [gptq-8bit-128g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-8bit-128g-actorder_True) | 8 | 128 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 7.68 GB | No | 8-bit, with group size 128g for higher inference quality and with Act Order for even higher accuracy. | \n| [gptq-8bit-32g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-8bit-32g-actorder_True) | 8 | 32 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 8.17 GB | No | 8-bit, with group size 32g and Act Order for maximum inference quality. | \n| [gptq-4bit-64g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-4bit-64g-actorder_True) | 4 | 64 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 4.29 GB | Yes | 4-bit, with Act Order and group size 64g. Uses less VRAM than 32g, but with slightly lower accuracy. |\n\n\n\n\n## How to download, including from branches\n\n### In text-generation-webui\n\nTo download from the `main` branch, enter `TheBloke/Vigostral-7B-Chat-GPTQ` in the \"Download model\" box.\n\nTo download from another branch, add `:branchname` to the end of the download name, eg `TheBloke/Vigostral-7B-Chat-GPTQ:gptq-4bit-32g-actorder_True`\n\n### From the command line\n\nI recommend using the `huggingface-hub` Python library:\n\n```shell\npip3 install huggingface-hub\n```\n\nTo download the `main` branch to a folder called `Vigostral-7B-Chat-GPTQ`:\n\n```shell\nmkdir Vigostral-7B-Chat-GPTQ\nhuggingface-cli download TheBloke/Vigostral-7B-Chat-GPTQ --local-dir Vigostral-7B-Chat-GPTQ --local-dir-use-symlinks False\n```\n\nTo download from a different branch, add the `--revision` parameter:\n\n```shell\nmkdir Vigostral-7B-Chat-GPTQ\nhuggingface-cli download TheBloke/Vigostral-7B-Chat-GPTQ --revision gptq-4bit-32g-actorder_True --local-dir Vigostral-7B-Chat-GPTQ --local-dir-use-symlinks False\n```\n\n
\n More advanced huggingface-cli download usage\n\nIf you remove the `--local-dir-use-symlinks False` parameter, the files will instead be stored in the central Hugging Face cache directory (default location on Linux is: `~/.cache/huggingface`), and symlinks will be added to the specified `--local-dir`, pointing to their real location in the cache. This allows for interrupted downloads to be resumed, and allows you to quickly clone the repo to multiple places on disk without triggering a download again. The downside, and the reason why I don't list that as the default option, is that the files are then hidden away in a cache folder and it's harder to know where your disk space is being used, and to clear it up if/when you want to remove a download model.\n\nThe cache location can be changed with the `HF_HOME` environment variable, and/or the `--cache-dir` parameter to `huggingface-cli`.\n\nFor more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli).\n\nTo accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`:\n\n```shell\npip3 install hf_transfer\n```\n\nAnd set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`:\n\n```shell\nmkdir Vigostral-7B-Chat-GPTQ\nHF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Vigostral-7B-Chat-GPTQ --local-dir Vigostral-7B-Chat-GPTQ --local-dir-use-symlinks False\n```\n\nWindows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command.\n
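If you would rather drive the same download from Python instead of the CLI, `huggingface_hub`'s `snapshot_download` accepts the same repo and branch arguments; a brief sketch (argument names as in recent `huggingface_hub` releases):

```python
from huggingface_hub import snapshot_download

# Download one quantisation branch into a local folder, mirroring the CLI commands above.
local_path = snapshot_download(
    repo_id="TheBloke/Vigostral-7B-Chat-GPTQ",
    revision="gptq-4bit-32g-actorder_True",  # drop this argument for the main branch
    local_dir="Vigostral-7B-Chat-GPTQ",
)
print("Files downloaded to:", local_path)
```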
\n\n### With `git` (**not** recommended)\n\nTo clone a specific branch with `git`, use a command like this:\n\n```shell\ngit clone --single-branch --branch gptq-4bit-32g-actorder_True https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ\n```\n\nNote that using Git with HF repos is strongly discouraged. It will be much slower than using `huggingface-hub`, and will use twice as much disk space as it has to store the model files twice (it stores every byte both in the intended target folder, and again in the `.git` folder as a blob.)\n\n\n\n## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui)\n\nPlease make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui).\n\nIt is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install.\n\n1. Click the **Model tab**.\n2. Under **Download custom model or LoRA**, enter `TheBloke/Vigostral-7B-Chat-GPTQ`.\n\n - To download from a specific branch, enter for example `TheBloke/Vigostral-7B-Chat-GPTQ:gptq-4bit-32g-actorder_True`\n - see Provided Files above for the list of branches for each option.\n\n3. Click **Download**.\n4. The model will start downloading. Once it's finished it will say \"Done\".\n5. In the top left, click the refresh icon next to **Model**.\n6. In the **Model** dropdown, choose the model you just downloaded: `Vigostral-7B-Chat-GPTQ`\n7. The model will automatically load, and is now ready for use!\n8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right.\n\n - Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`.\n\n9. Once you're ready, click the **Text Generation** tab and enter a prompt to get started!\n\n\n\n\n## Serving this model from Text Generation Inference (TGI)\n\nIt's recommended to use TGI version 1.1.0 or later. The official Docker container is: `ghcr.io/huggingface/text-generation-inference:1.1.0`\n\nExample Docker parameters:\n\n```shell\n--model-id TheBloke/Vigostral-7B-Chat-GPTQ --port 3000 --quantize gptq --max-input-length 3696 --max-total-tokens 4096 --max-batch-prefill-tokens 4096\n```\n\nExample Python code for interfacing with TGI (requires huggingface-hub 0.17.0 or later):\n\n```shell\npip3 install huggingface-hub\n```\n\n```python\nfrom huggingface_hub import InferenceClient\n\nendpoint_url = \"https://your-endpoint-url-here\"\n\nprompt = \"Tell me about AI\"\nprompt_template=f'''[INST] <>\nVous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. 
Aidez autant que vous le pouvez.\n<>\n\n{prompt} [/INST] \n'''\n\nclient = InferenceClient(endpoint_url)\nresponse = client.text_generation(prompt,\n max_new_tokens=128,\n do_sample=True,\n temperature=0.7,\n top_p=0.95,\n top_k=40,\n repetition_penalty=1.1)\n\nprint(f\"Model output: {response}\")\n```\n\n\n## How to use this GPTQ model from Python code\n\n### Install the necessary packages\n\nRequires: Transformers 4.33.0 or later, Optimum 1.12.0 or later, and AutoGPTQ 0.4.2 or later.\n\n```shell\npip3 install transformers optimum\npip3 install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/ # Use cu117 if on CUDA 11.7\n```\n\nIf you have problems installing AutoGPTQ using the pre-built wheels, install it from source instead:\n\n```shell\npip3 uninstall -y auto-gptq\ngit clone https://github.com/PanQiWei/AutoGPTQ\ncd AutoGPTQ\ngit checkout v0.4.2\npip3 install .\n```\n\n### You can then use the following code\n\n```python\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n\nmodel_name_or_path = \"TheBloke/Vigostral-7B-Chat-GPTQ\"\n# To use a different branch, change revision\n# For example: revision=\"gptq-4bit-32g-actorder_True\"\nmodel = AutoModelForCausalLM.from_pretrained(model_name_or_path,\n device_map=\"auto\",\n trust_remote_code=False,\n revision=\"main\")\n\ntokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True)\n\nprompt = \"Tell me about AI\"\nprompt_template=f'''[INST] <>\nVous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.\n<>\n\n{prompt} [/INST] \n'''\n\nprint(\"\\n\\n*** Generate:\")\n\ninput_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda()\noutput = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512)\nprint(tokenizer.decode(output[0]))\n\n# Inference can also be done using transformers' pipeline\n\nprint(\"*** Pipeline:\")\npipe = pipeline(\n \"text-generation\",\n model=model,\n tokenizer=tokenizer,\n max_new_tokens=512,\n do_sample=True,\n temperature=0.7,\n top_p=0.95,\n top_k=40,\n repetition_penalty=1.1\n)\n\nprint(pipe(prompt_template)[0]['generated_text'])\n```\n\n\n\n## Compatibility\n\nThe files provided are tested to work with Transformers. For non-Mistral models, AutoGPTQ can also be used directly.\n\n[ExLlama](https://github.com/turboderp/exllama) is compatible with Llama and Mistral models in 4-bit. Please see the Provided Files table above for per-file compatibility.\n\nFor a list of clients/servers, please see \"Known compatible clients / servers\", above.\n\n\n\n\n## Discord\n\nFor further support, and discussions on these models and AI in general, join us at:\n\n[TheBloke AI's Discord server](https://discord.gg/theblokeai)\n\n## Thanks, and how to contribute\n\nThanks to the [chirper.ai](https://chirper.ai) team!\n\nThanks to Clay from [gpus.llm-utils.org](llm-utils)!\n\nI've had a lot of people ask if they can contribute. 
I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training.\n\nIf you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects.\n\nDonaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits.\n\n* Patreon: https://patreon.com/TheBlokeAI\n* Ko-Fi: https://ko-fi.com/TheBlokeAI\n\n**Special thanks to**: Aemon Algiz.\n\n**Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski\n\n\nThank you to all my generous patrons and donaters!\n\nAnd thank you again to a16z for their generous grant.\n\n\n\n# Original model card: bofeng huang's Vigostral 7B Chat\n\n\n# Vigostral-7B-Chat: A French chat LLM\n\n***Preview*** of Vigostral-7B-Chat, a new addition to the Vigogne LLMs family, fine-tuned on [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1).\n\nFor more information, please visit the [Github repository](https://github.com/bofenghuang/vigogne).\n\n**License**: A significant portion of the training data is distilled from GPT-3.5-Turbo and GPT-4, kindly use it cautiously to avoid any violations of OpenAI's [terms of use](https://openai.com/policies/terms-of-use).\n\n## Prompt Template\n\nWe used a prompt template adapted from the chat format of Llama-2.\n\nYou can apply this formatting using the [chat template](https://huggingface.co/docs/transformers/main/chat_templating) through the `apply_chat_template()` method.\n\n```python\nfrom transformers import AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained(\"bofenghuang/vigostral-7b-chat\")\n\nconversation = [\n {\"role\": \"user\", \"content\": \"Bonjour ! 
Comment ça va aujourd'hui ?\"},\n {\"role\": \"assistant\", \"content\": \"Bonjour ! Je suis une IA, donc je n'ai pas de sentiments, mais je suis prêt à vous aider. Comment puis-je vous assister aujourd'hui ?\"},\n {\"role\": \"user\", \"content\": \"Quelle est la hauteur de la Tour Eiffel ?\"},\n {\"role\": \"assistant\", \"content\": \"La Tour Eiffel mesure environ 330 mètres de hauteur.\"},\n {\"role\": \"user\", \"content\": \"Comment monter en haut ?\"},\n]\n\nprint(tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True))\n```\n\nYou will get\n\n```\n[INST] <>\nVous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.\n<>\n\nBonjour ! Comment ça va aujourd'hui ? [/INST] Bonjour ! Je suis une IA, donc je n'ai pas de sentiments, mais je suis prêt à vous aider. Comment puis-je vous assister aujourd'hui ? [INST] Quelle est la hauteur de la Tour Eiffel ? [/INST] La Tour Eiffel mesure environ 330 mètres de hauteur. [INST] Comment monter en haut ? [/INST]\n```\n\n## Usage\n\n### Inference using the unquantized model with 🤗 Transformers\n\n```python\nfrom typing import Dict, List, Optional\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, TextStreamer\n\nmodel_name_or_path = \"bofenghuang/vigostral-7b-chat\"\ntokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side=\"right\", use_fast=False)\nmodel = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, device_map=\"auto\")\n\nstreamer = TextStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True)\n\n\ndef chat(\n query: str,\n history: Optional[List[Dict]] = None,\n temperature: float = 0.7,\n top_p: float = 1.0,\n top_k: float = 0,\n repetition_penalty: float = 1.1,\n max_new_tokens: int = 1024,\n **kwargs,\n):\n if history is None:\n history = []\n\n history.append({\"role\": \"user\", \"content\": query})\n\n input_ids = tokenizer.apply_chat_template(history, return_tensors=\"pt\").to(model.device)\n input_length = input_ids.shape[1]\n\n generated_outputs = model.generate(\n input_ids=input_ids,\n generation_config=GenerationConfig(\n temperature=temperature,\n do_sample=temperature > 0.0,\n top_p=top_p,\n top_k=top_k,\n repetition_penalty=repetition_penalty,\n max_new_tokens=max_new_tokens,\n pad_token_id=tokenizer.eos_token_id,\n **kwargs,\n ),\n streamer=streamer,\n return_dict_in_generate=True,\n )\n\n generated_tokens = generated_outputs.sequences[0, input_length:]\n generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True)\n\n history.append({\"role\": \"assistant\", \"content\": generated_text})\n\n return generated_text, history\n\n# 1st round\nresponse, history = chat(\"Un escargot parcourt 100 mètres en 5 heures. Quelle est sa vitesse ?\", history=None)\n# Pour calculer la vitesse d'un escargot qui parcourt 100 mètres en 5 heures, nous devons diviser la distance parcourue par le temps écoulé. Voici comment procédons :\n# \n# Vitesse = Distance / Temps\n# \n# Dans ce cas, la distance est de 100 mètres et le temps est de 5 heures. 
Donc, nous avons :\n# \n# Vitesse = 100 mètres / 5 heures\n# Vitesse = 20 mètres par heure\n# \n# L'escargot parcourt donc une vitesse de 20 mètres par heure.\n\n# 2nd round\nresponse, history = chat(\"Quand il peut dépasser le lapin ?\", history=history)\n# Malheureusement, je ne peux pas répondre à cette question car elle contient des informations inexactes ou incorrectes. La vitesse de l'escargot est de 20 mètres par heure, tandis que la vitesse du lapin n'est pas mentionnée dans votre question initiale. Pour comparer les vitesses des deux animaux, nous aurions besoin de connaître la vitesse du lapin.\n\n# 3rd round\nresponse, history = chat(\"Écris une histoire imaginative qui met en scène une compétition de course entre un escargot et un lapin.\", history=history)\n# Il était une fois deux athlètes extraordinaires : un escargot nommé Escargo et un lapin appelé Lapin Rapide. Un jour, ils ont décidé de participer à une grande course pour déterminer qui était le plus rapide des deux. Tout le monde était curieux de savoir qui serait le gagnant, car ils étaient tous émerveillés par la vitesse de ces deux champions.\n# \n# La course commença dans une forêt dense, avec Escargo départageant un peu lentement, car il faut être prudent lorsqu'on est escargot. Mais au fur et à mesure qu'il avançait, il commença à ralentir encore plus. Les spectateurs commencèrent à pencher pour Lapin Rapide, qui fonçait rapidement vers la ligne d'arrivée.\n# \n# Mais soudain, Escargo se transforma en une force incroyable. Au lieu de marcher lentement comme un escargot, il courait à toute allure, faisant preuve d'une vitesse surprenante. Les spectateurs essayaient de se remémorer leurs notes de mathématiques pour calculer sa vélocité actuelle.\n# \n# Pendant ce temps, Lapin Rapide ralentissait légèrement, trouvant l'air frais trop confortable pour continuer à courir aussi vite. Il décida alors de prendre quelques pauses pour profiter de son environnement.\n# \n# Escargo continuait à courir à toute vitesse, en dépit de son handicap d'être un escargot. Les spectateurs étaient émerveillés par sa persévérance et sa volonté de gagner. Finalement, Escargo franchit la ligne d'arrivée en premier, et tous criaurent en joie.\n# \n# Les habitants de la forêt décidèrent de lui décerner le titre d'\"athlète le plus courageux\" pour sa performance incroyable. Quant à Lapin Rapide, il fut content de sa deuxième place, se disant simplement que les pauses étaient bien plus agréables que la compétition. 
Et tous vécurent heureux et satisfaits de cette course mémorable.\n```\n\nYou can also use the Google Colab Notebook provided below.\n\n\"Open\n\n### Inference using the unquantized model with vLLM\n\nSet up an OpenAI-compatible server with the following command:\n\n```bash\n# Install vLLM\n# This may take 5-10 minutes.\n# pip install vllm\n\n# Start server for Vigostral-Chat models\npython -m vllm.entrypoints.openai.api_server --model bofenghuang/vigostral-7b-chat\n\n# List models\n# curl http://localhost:8000/v1/models\n```\n\nQuery the model using the openai python package.\n\n```python\nimport openai\n\n# Modify OpenAI's API key and API base to use vLLM's API server.\nopenai.api_key = \"EMPTY\"\nopenai.api_base = \"http://localhost:8000/v1\"\n\n# First model\nmodels = openai.Model.list()\nmodel = models[\"data\"][0][\"id\"]\n\n# Chat completion API\nchat_completion = openai.ChatCompletion.create(\n model=model,\n messages=[\n {\"role\": \"user\", \"content\": \"Parle-moi de toi-même.\"},\n ],\n max_tokens=1024,\n temperature=0.7,\n)\nprint(\"Chat completion results:\", chat_completion)\n```\n\n## Limitations\n\nVigogne is still under development, and there are many limitations that have to be addressed. Please note that it is possible that the model generates harmful or biased content, incorrect information or generally unhelpful answers.\n"},"matched_bigbio_names":{"kind":"list like","value":["CAS"],"string":"[\n \"CAS\"\n]"}}},{"rowIdx":2267,"cells":{"id":{"kind":"string","value":"Cohere/Cohere-embed-multilingual-light-v3.0"},"author":{"kind":"string","value":"Cohere"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["transformers","mteb","model-index","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"mteb\",\n \"model-index\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-11-01T20:54:54Z","string":"2023-11-01T20:54:54Z"},"last_modified":{"kind":"string","value":"2023-11-07T12:59:57+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":13,"string":"13"},"README":{"kind":"string","value":"---\ntags:\n- mteb\nmodel-index:\n- name: embed-multilingual-light-v3.0\n results:\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (en)\n type: mteb/amazon_counterfactual\n config: en\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 70.02985074626865\n - type: ap\n value: 33.228065779544146\n - type: f1\n value: 64.27173953207297\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonPolarityClassification\n type: mteb/amazon_polarity\n config: default\n split: test\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\n metrics:\n - type: accuracy\n value: 90.701225\n - type: ap\n value: 87.07178174251762\n - type: f1\n value: 90.69168484877625\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (en)\n type: mteb/amazon_reviews_multi\n config: en\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 46.550000000000004\n - type: f1\n value: 44.7233215588199\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna\n type: arguana\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 53.369\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: 
a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: v_measure\n value: 44.206988765030744\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: v_measure\n value: 33.913737041277\n - task:\n type: Reranking\n dataset:\n name: MTEB AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: map\n value: 58.544257541214925\n - type: mrr\n value: 72.07151651057468\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cos_sim_pearson\n value: 84.79582115243736\n - type: cos_sim_spearman\n value: 84.01396250789998\n - type: euclidean_pearson\n value: 83.90766476102458\n - type: euclidean_spearman\n value: 84.01396250789998\n - type: manhattan_pearson\n value: 84.75071274784274\n - type: manhattan_spearman\n value: 85.02482891467078\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 78.12337662337663\n - type: f1\n value: 77.48610340227478\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: v_measure\n value: 38.68268504601174\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: v_measure\n value: 32.20870648143671\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackAndroidRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 46.259\n - type: ndcg_at_10\n value: 44.555\n - type: ndcg_at_10\n value: 56.564\n - type: ndcg_at_10\n value: 36.162\n - type: ndcg_at_10\n value: 26.185000000000002\n - type: ndcg_at_10\n value: 41.547\n - type: ndcg_at_10\n value: 39.042\n - type: ndcg_at_10\n value: 38.086999999999996\n - type: ndcg_at_10\n value: 32.088\n - type: ndcg_at_10\n value: 27.006999999999998\n - type: ndcg_at_10\n value: 37.336999999999996\n - type: ndcg_at_10\n value: 38.011\n - type: ndcg_at_10\n value: 32.287\n - task:\n type: Retrieval\n dataset:\n name: MTEB ClimateFEVER\n type: climate-fever\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 24.804000000000002\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia\n type: dbpedia-entity\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 38.055\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: accuracy\n value: 46.665\n - type: f1\n value: 40.77568559660878\n - task:\n type: Retrieval\n dataset:\n name: MTEB FEVER\n type: fever\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 85.52499999999999\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA2018\n type: fiqa\n config: 
default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 36.161\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA\n type: hotpotqa\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 66.878\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 85.6372\n - type: ap\n value: 80.54846874011302\n - type: f1\n value: 85.61438421821343\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO\n type: msmarco\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 40.487\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 91.8559051527588\n - type: f1\n value: 91.6271749996447\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 62.17738258093936\n - type: f1\n value: 45.80307070449218\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 67.42434431741762\n - type: f1\n value: 65.39580264698957\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n type: mteb/amazon_massive_scenario\n config: en\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 72.60928043039677\n - type: f1\n value: 72.30912915707411\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: v_measure\n value: 35.17967476592229\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: v_measure\n value: 30.993641089208683\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: default\n split: test\n revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69\n metrics:\n - type: map\n value: 31.362481813275295\n - type: mrr\n value: 32.43717742343303\n - task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus\n type: nfcorpus\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 32.123000000000005\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ\n type: nq\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 55.51199999999999\n - task:\n type: Retrieval\n dataset:\n name: MTEB QuoraRetrieval\n type: quora\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 87.847\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: v_measure\n value: 49.4973643968247\n - task:\n type: Clustering\n dataset:\n name: MTEB 
RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\n metrics:\n - type: v_measure\n value: 60.2135284243427\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS\n type: scidocs\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 17.1\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\n metrics:\n - type: cos_sim_pearson\n value: 83.7330191296952\n - type: cos_sim_spearman\n value: 77.03523134004043\n - type: euclidean_pearson\n value: 80.86067787185137\n - type: euclidean_spearman\n value: 77.03522959536473\n - type: manhattan_pearson\n value: 80.76089708603587\n - type: manhattan_spearman\n value: 76.86245377437302\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cos_sim_pearson\n value: 80.46387812633851\n - type: cos_sim_spearman\n value: 73.21878234127571\n - type: euclidean_pearson\n value: 76.82160699895033\n - type: euclidean_spearman\n value: 73.21878234127571\n - type: manhattan_pearson\n value: 76.75657006349886\n - type: manhattan_spearman\n value: 73.19160258034827\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - type: cos_sim_pearson\n value: 79.06411399119807\n - type: cos_sim_spearman\n value: 79.49916779764082\n - type: euclidean_pearson\n value: 79.3356521660954\n - type: euclidean_spearman\n value: 79.49916779764082\n - type: manhattan_pearson\n value: 79.04971532119936\n - type: manhattan_spearman\n value: 79.16859911220654\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: cos_sim_pearson\n value: 80.6940934994372\n - type: cos_sim_spearman\n value: 76.9552055757283\n - type: euclidean_pearson\n value: 79.52818133592284\n - type: euclidean_spearman\n value: 76.9552055757283\n - type: manhattan_pearson\n value: 79.35220459438406\n - type: manhattan_spearman\n value: 76.85314462036561\n - task:\n type: STS\n dataset:\n name: MTEB STS15\n type: mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cos_sim_pearson\n value: 85.58608774451231\n - type: cos_sim_spearman\n value: 86.42805701554927\n - type: euclidean_pearson\n value: 86.01117122595934\n - type: euclidean_spearman\n value: 86.42805701554927\n - type: manhattan_pearson\n value: 86.01345208923057\n - type: manhattan_spearman\n value: 86.43179450307953\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cos_sim_pearson\n value: 83.18733039014667\n - type: cos_sim_spearman\n value: 84.3339529564109\n - type: euclidean_pearson\n value: 83.54530885349595\n - type: euclidean_spearman\n value: 84.3339529564109\n - type: manhattan_pearson\n value: 83.47015931913937\n - type: manhattan_spearman\n value: 84.22564786654777\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n 
- type: cos_sim_pearson\n value: 87.88402211340522\n - type: cos_sim_spearman\n value: 88.6693290310468\n - type: euclidean_pearson\n value: 88.24947476618257\n - type: euclidean_spearman\n value: 88.6693290310468\n - type: manhattan_pearson\n value: 88.24496656367964\n - type: manhattan_spearman\n value: 88.52029848819545\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 64.96467575926597\n - type: cos_sim_spearman\n value: 65.30666900046252\n - type: euclidean_pearson\n value: 66.58031971340725\n - type: euclidean_spearman\n value: 65.30666900046252\n - type: manhattan_pearson\n value: 66.56530433327998\n - type: manhattan_spearman\n value: 65.42121899024113\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmark\n type: mteb/stsbenchmark-sts\n config: default\n split: test\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\n metrics:\n - type: cos_sim_pearson\n value: 85.31047656296519\n - type: cos_sim_spearman\n value: 85.46101092708824\n - type: euclidean_pearson\n value: 85.75896623084044\n - type: euclidean_spearman\n value: 85.46101092708824\n - type: manhattan_pearson\n value: 85.57323880630182\n - type: manhattan_spearman\n value: 85.23375523080594\n - task:\n type: Reranking\n dataset:\n name: MTEB SciDocsRR\n type: mteb/scidocs-reranking\n config: default\n split: test\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\n metrics:\n - type: map\n value: 79.89731978284804\n - type: mrr\n value: 94.28980424078465\n - task:\n type: Retrieval\n dataset:\n name: MTEB SciFact\n type: scifact\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 67.95\n - task:\n type: PairClassification\n dataset:\n name: MTEB SprintDuplicateQuestions\n type: mteb/sprintduplicatequestions-pairclassification\n config: default\n split: test\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\n metrics:\n - type: cos_sim_accuracy\n value: 99.85643564356435\n - type: cos_sim_ap\n value: 96.59618618212247\n - type: cos_sim_f1\n value: 92.6221335992024\n - type: cos_sim_precision\n value: 92.34592445328032\n - type: cos_sim_recall\n value: 92.9\n - type: dot_accuracy\n value: 99.85643564356435\n - type: dot_ap\n value: 96.5961861821225\n - type: dot_f1\n value: 92.6221335992024\n - type: dot_precision\n value: 92.34592445328032\n - type: dot_recall\n value: 92.9\n - type: euclidean_accuracy\n value: 99.85643564356435\n - type: euclidean_ap\n value: 96.5961861821225\n - type: euclidean_f1\n value: 92.6221335992024\n - type: euclidean_precision\n value: 92.34592445328032\n - type: euclidean_recall\n value: 92.9\n - type: manhattan_accuracy\n value: 99.85841584158416\n - type: manhattan_ap\n value: 96.5578240948512\n - type: manhattan_f1\n value: 92.71523178807946\n - type: manhattan_precision\n value: 94.4963655244029\n - type: manhattan_recall\n value: 91.0\n - type: max_accuracy\n value: 99.85841584158416\n - type: max_ap\n value: 96.5961861821225\n - type: max_f1\n value: 92.71523178807946\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClustering\n type: mteb/stackexchange-clustering\n config: default\n split: test\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\n metrics:\n - type: v_measure\n value: 60.84750068050385\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClusteringP2P\n type: mteb/stackexchange-clustering-p2p\n config: default\n split: test\n 
revision: 815ca46b2622cec33ccafc3735d572c266efdb44\n metrics:\n - type: v_measure\n value: 33.96844721192451\n - task:\n type: Reranking\n dataset:\n name: MTEB StackOverflowDupQuestions\n type: mteb/stackoverflowdupquestions-reranking\n config: default\n split: test\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\n metrics:\n - type: map\n value: 50.454280909595205\n - type: mrr\n value: 51.24249320940497\n - task:\n type: Summarization\n dataset:\n name: MTEB SummEval\n type: mteb/summeval\n config: default\n split: test\n revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c\n metrics:\n - type: cos_sim_pearson\n value: 29.998438678552517\n - type: cos_sim_spearman\n value: 30.409482543506876\n - type: dot_pearson\n value: 29.998443850173224\n - type: dot_spearman\n value: 30.409482543506876\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID\n type: trec-covid\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 78.93\n - task:\n type: Retrieval\n dataset:\n name: MTEB Touche2020\n type: webis-touche2020\n config: default\n split: test\n revision: None\n metrics:\n - type: ndcg_at_10\n value: 29.482999999999997\n - task:\n type: Classification\n dataset:\n name: MTEB ToxicConversationsClassification\n type: mteb/toxic_conversations_50k\n config: default\n split: test\n revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c\n metrics:\n - type: accuracy\n value: 70.65859999999999\n - type: ap\n value: 15.03693738050973\n - type: f1\n value: 54.94379403846167\n - task:\n type: Classification\n dataset:\n name: MTEB TweetSentimentExtractionClassification\n type: mteb/tweet_sentiment_extraction\n config: default\n split: test\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\n metrics:\n - type: accuracy\n value: 64.4567062818336\n - type: f1\n value: 64.48980729427107\n - task:\n type: Clustering\n dataset:\n name: MTEB TwentyNewsgroupsClustering\n type: mteb/twentynewsgroups-clustering\n config: default\n split: test\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\n metrics:\n - type: v_measure\n value: 42.08554991843959\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterSemEval2015\n type: mteb/twittersemeval2015-pairclassification\n config: default\n split: test\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\n metrics:\n - type: cos_sim_accuracy\n value: 84.75293556654945\n - type: cos_sim_ap\n value: 69.40551043272129\n - type: cos_sim_f1\n value: 65.56335231034026\n - type: cos_sim_precision\n value: 65.79856497475419\n - type: cos_sim_recall\n value: 65.32981530343008\n - type: dot_accuracy\n value: 84.75293556654945\n - type: dot_ap\n value: 69.40550704470631\n - type: dot_f1\n value: 65.56335231034026\n - type: dot_precision\n value: 65.79856497475419\n - type: dot_recall\n value: 65.32981530343008\n - type: euclidean_accuracy\n value: 84.75293556654945\n - type: euclidean_ap\n value: 69.4055136381454\n - type: euclidean_f1\n value: 65.56335231034026\n - type: euclidean_precision\n value: 65.79856497475419\n - type: euclidean_recall\n value: 65.32981530343008\n - type: manhattan_accuracy\n value: 84.6337247422066\n - type: manhattan_ap\n value: 69.13628354134198\n - type: manhattan_f1\n value: 65.46998180715585\n - type: manhattan_precision\n value: 60.58361391694726\n - type: manhattan_recall\n value: 71.21372031662268\n - type: max_accuracy\n value: 84.75293556654945\n - type: max_ap\n value: 69.4055136381454\n - type: max_f1\n value: 65.56335231034026\n - task:\n type: PairClassification\n dataset:\n 
name: MTEB TwitterURLCorpus\n type: mteb/twitterurlcorpus-pairclassification\n config: default\n split: test\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\n metrics:\n - type: cos_sim_accuracy\n value: 89.04800714091667\n - type: cos_sim_ap\n value: 85.84596325009252\n - type: cos_sim_f1\n value: 78.39228527221042\n - type: cos_sim_precision\n value: 73.58643518205768\n - type: cos_sim_recall\n value: 83.86972590083154\n - type: dot_accuracy\n value: 89.04800714091667\n - type: dot_ap\n value: 85.8459646697087\n - type: dot_f1\n value: 78.39228527221042\n - type: dot_precision\n value: 73.58643518205768\n - type: dot_recall\n value: 83.86972590083154\n - type: euclidean_accuracy\n value: 89.04800714091667\n - type: euclidean_ap\n value: 85.84596376376919\n - type: euclidean_f1\n value: 78.39228527221042\n - type: euclidean_precision\n value: 73.58643518205768\n - type: euclidean_recall\n value: 83.86972590083154\n - type: manhattan_accuracy\n value: 89.0266620095471\n - type: manhattan_ap\n value: 85.80124417850608\n - type: manhattan_f1\n value: 78.37817859254879\n - type: manhattan_precision\n value: 75.36963321012226\n - type: manhattan_recall\n value: 81.63689559593472\n - type: max_accuracy\n value: 89.04800714091667\n - type: max_ap\n value: 85.8459646697087\n - type: max_f1\n value: 78.39228527221042\n---\n\n\n# Cohere embed-multilingual-light-v3.0\n\nThis repository contains the tokenizer for the Cohere `embed-multilingual-light-v3.0` model. See our blogpost [Cohere Embed V3](https://txt.cohere.com/introducing-embed-v3/) for more details on this model.\n\nYou can use the embedding model either via the Cohere API, AWS SageMaker or in your private deployments.\n\n## Usage Cohere API\n\nThe following code snippet shows the usage of the Cohere API. Install the cohere SDK via:\n```\npip install -U cohere\n```\n\nGet your free API key on: www.cohere.com\n\n\n```python\n# This snippet shows and example how to use the Cohere Embed V3 models for semantic search.\n# Make sure to have the Cohere SDK in at least v4.30 install: pip install -U cohere \n# Get your API key from: www.cohere.com\nimport cohere\nimport numpy as np\n\ncohere_key = \"{YOUR_COHERE_API_KEY}\" #Get your API key from www.cohere.com\nco = cohere.Client(cohere_key)\n\ndocs = [\"The capital of France is Paris\",\n \"PyTorch is a machine learning framework based on the Torch library.\",\n \"The average cat lifespan is between 13-17 years\"]\n\n\n#Encode your documents with input type 'search_document'\ndoc_emb = co.embed(docs, input_type=\"search_document\", model=\"embed-multilingual-light-v3.0\").embeddings\ndoc_emb = np.asarray(doc_emb)\n\n\n#Encode your query with input type 'search_query'\nquery = \"What is Pytorch\"\nquery_emb = co.embed([query], input_type=\"search_query\", model=\"embed-multilingual-light-v3.0\").embeddings\nquery_emb = np.asarray(query_emb)\nquery_emb.shape\n\n#Compute the dot product between query embedding and document embedding\nscores = np.dot(query_emb, doc_emb.T)[0]\n\n#Find the highest scores\nmax_idx = np.argsort(-scores)\n\nprint(f\"Query: {query}\")\nfor idx in max_idx:\n print(f\"Score: {scores[idx]:.2f}\")\n print(docs[idx])\n print(\"--------\")\n```\n\n## Usage AWS SageMaker\nThe embedding model can be privately deployed in your AWS Cloud using our [AWS SageMaker marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-z6huxszcqc25i). 
It runs privately in your VPC, with latencies as low as 5ms for query encoding.\n\n## Usage AWS Bedrock\nSoon the model will also be available via AWS Bedrock. Stay tuned\n\n## Private Deployment\nYou want to run the model on your own hardware? [Contact Sales](https://cohere.com/contact-sales) to learn more.\n\n## Supported Languages\nThis model was trained on nearly 1B English training pairs and nearly 0.5B Non-English training pairs from 100+ languages. \n\nEvaluation results can be found in the [Embed V3.0 Benchmark Results spreadsheet](https://docs.google.com/spreadsheets/d/1w7gnHWMDBdEUrmHgSfDnGHJgVQE5aOiXCCwO3uNH_mI/edit?usp=sharing)."},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n \"SCIFACT\"\n]"}}},{"rowIdx":2268,"cells":{"id":{"kind":"string","value":"lomahony/pythia-1.4b-helpful-sft"},"author":{"kind":"string","value":"lomahony"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","gpt_neox","text-generation","causal-lm","pythia","en","dataset:Anthropic/hh-rlhf","arxiv:2101.00027","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"gpt_neox\",\n \"text-generation\",\n \"causal-lm\",\n \"pythia\",\n \"en\",\n \"dataset:Anthropic/hh-rlhf\",\n \"arxiv:2101.00027\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-11-08T15:50:11Z","string":"2023-11-08T15:50:11Z"},"last_modified":{"kind":"string","value":"2024-05-14T19:13:25+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\ndatasets:\n- Anthropic/hh-rlhf\nlanguage:\n- en\nlicense: apache-2.0\ntags:\n- pytorch\n- causal-lm\n- pythia\n---\n\n[Pythia-1.4b](https://huggingface.co/EleutherAI/pythia-1.4b) supervised finetuned using TRLx library with the helpful subset of [Anthropic-hh-rlhf dataset](https://huggingface.co/datasets/Anthropic/hh-rlhf) for 1 epoch. \n\nCheckpoints are also uploaded. \n\nFully reproducible finetuning code is available on [GitHub](https://github.com/lauraaisling/trlx-pythia/tree/main)\n\n[wandb log](https://wandb.ai/lauraomahony999/pythia-sft/runs/ydaj2ks8)\n\nSee [Pythia-1.4b](https://huggingface.co/EleutherAI/pythia-1.4b) for model details [(paper)](https://arxiv.org/abs/2101.00027). \n\nSee further details of these models in the paper [Attributing Mode Collapse in the Fine-Tuning of Large Language Models](https://openreview.net/pdf?id=3pDMYjpOxk).\n\nYou can cite these models if they are helpful as follows: \n\n
\n@inproceedings{o2024attributing,\n  title={Attributing Mode Collapse in the Fine-Tuning of Large Language Models},\n  author={O’Mahony, Laura and Grinsztajn, Leo and Schoelkopf, Hailey and Biderman, Stella},\n  booktitle={ICLR 2024, Mathematical and Empirical Understanding of Foundation Models (ME-FoMo) workshop},\n  year={2024}\n}\n
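For reference, below is a minimal inference sketch (not part of the original card): it assumes the standard 🤗 Transformers causal-LM loading path for this GPT-NeoX checkpoint, and the `Human:`/`Assistant:` prompt framing plus the sampling settings are illustrative choices rather than values prescribed by the authors.

```python
# Minimal usage sketch for the SFT checkpoint (assumptions noted above).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "lomahony/pythia-1.4b-helpful-sft"
device = "cuda" if torch.cuda.is_available() else "cpu"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name).to(device)

# Illustrative helpful-assistant style prompt (the format is an assumption, not from the card)
prompt = "Human: How do I make a good cup of coffee?\n\nAssistant:"
inputs = tokenizer(prompt, return_tensors="pt").to(device)

outputs = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
    top_p=0.95,
    pad_token_id=tokenizer.eos_token_id,  # Pythia tokenizers ship without a dedicated pad token
)
# Decode only the newly generated continuation
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```

The tables that follow report zero-shot and five-shot results (the header lines appear to be `lm-evaluation-harness`-style run configurations).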
\n\nhf (pretrained=lomahony/pythia-1.4b-helpful-sft), gen_kwargs: (None), limit: None, num_fewshot: 0, batch_size: 16\n| Tasks |Version|Filter|n-shot| Metric | Value | |Stderr|\n|--------------|------:|------|-----:|---------------|------:|---|------|\n|arc_challenge | 1|none | 0|acc | 0.2679|± |0.0129|\n| | |none | 0|acc_norm | 0.2978|± |0.0134|\n|arc_easy | 1|none | 0|acc | 0.6120|± |0.0100|\n| | |none | 0|acc_norm | 0.5282|± |0.0102|\n|boolq | 2|none | 0|acc | 0.6260|± |0.0085|\n|hellaswag | 1|none | 0|acc | 0.4097|± |0.0049|\n| | |none | 0|acc_norm | 0.5212|± |0.0050|\n|lambada_openai| 1|none | 0|perplexity | 6.4836|± |0.1838|\n| | |none | 0|acc | 0.5789|± |0.0069|\n|openbookqa | 1|none | 0|acc | 0.2120|± |0.0183|\n| | |none | 0|acc_norm | 0.3340|± |0.0211|\n|piqa | 1|none | 0|acc | 0.7100|± |0.0106|\n| | |none | 0|acc_norm | 0.7144|± |0.0105|\n|sciq | 1|none | 0|acc | 0.8540|± |0.0112|\n| | |none | 0|acc_norm | 0.7830|± |0.0130|\n|wikitext | 2|none | 0|word_perplexity|15.8394|± |N/A |\n| | |none | 0|byte_perplexity| 1.6763|± |N/A |\n| | |none | 0|bits_per_byte | 0.7453|± |N/A |\n|winogrande | 1|none | 0|acc | 0.5872|± |0.0138|\n\nhf (pretrained=lomahony/pythia-1.4b-helpful-sft), gen_kwargs: (None), limit: None, num_fewshot: 5, batch_size: 16\n| Tasks |Version|Filter|n-shot| Metric | Value | |Stderr|\n|--------------|------:|------|-----:|---------------|------:|---|------|\n|arc_challenge | 1|none | 5|acc | 0.2892|± |0.0133|\n| | |none | 5|acc_norm | 0.3097|± |0.0135|\n|arc_easy | 1|none | 5|acc | 0.6444|± |0.0098|\n| | |none | 5|acc_norm | 0.6309|± |0.0099|\n|boolq | 2|none | 5|acc | 0.6333|± |0.0084|\n|hellaswag | 1|none | 5|acc | 0.4065|± |0.0049|\n| | |none | 5|acc_norm | 0.5215|± |0.0050|\n|lambada_openai| 1|none | 5|perplexity | 9.7040|± |0.2887|\n| | |none | 5|acc | 0.4951|± |0.0070|\n|openbookqa | 1|none | 5|acc | 0.2220|± |0.0186|\n| | |none | 5|acc_norm | 0.3100|± |0.0207|\n|piqa | 1|none | 5|acc | 0.7029|± |0.0107|\n| | |none | 5|acc_norm | 0.7127|± |0.0106|\n|sciq | 1|none | 5|acc | 0.9170|± |0.0087|\n| | |none | 5|acc_norm | 0.9160|± |0.0088|\n|wikitext | 2|none | 5|word_perplexity|15.8394|± |N/A |\n| | |none | 5|byte_perplexity| 1.6763|± |N/A |\n| | |none | 5|bits_per_byte | 0.7453|± |N/A |\n|winogrande | 1|none | 5|acc | 0.5699|± |0.0139|\n\n"},"matched_bigbio_names":{"kind":"list like","value":["SCIQ"],"string":"[\n \"SCIQ\"\n]"}}},{"rowIdx":2269,"cells":{"id":{"kind":"string","value":"oongaboongahacker/phi-2"},"author":{"kind":"string","value":"oongaboongahacker"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","mixformer-sequential","text-generation","custom_code","autotrain_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"mixformer-sequential\",\n \"text-generation\",\n \"custom_code\",\n \"autotrain_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-13T13:01:48Z","string":"2023-12-13T13:01:48Z"},"last_modified":{"kind":"string","value":"2023-12-13T13:24:37+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":22,"string":"22"},"README":{"kind":"string","value":"---\n{}\n---\nTHE MODEL IS NOT OWNED BY ME IN ANY CASE. THIS IS SOLELY THE PROPERTY OF MICROSOFT UNDER THE FOLLOWING LICENSE:\n\nMICROSOFT RESEARCH LICENSE TERMS\n\nIF YOU LIVE IN THE UNITED STATES, PLEASE READ THE “BINDING ARBITRATION AND CLASS ACTION WAIVER” SECTION BELOW. 
IT AFFECTS HOW DISPUTES ARE RESOLVED.\n\nThese license terms are an agreement between you and Microsoft Corporation (or one of its affiliates). They apply to the source code, object code, machine learning models, or data (collectively “Materials”) that accompany this license. IF YOU COMPLY WITH THESE LICENSE TERMS, YOU HAVE THE RIGHTS BELOW. BY USING THE MATERIALS, YOU ACCEPT THESE TERMS.\n\n1) INSTALLATION AND USE RIGHTS TO THE MATERIALS.\n\nSubject to the terms of this agreement, you have the below rights, if applicable, to use the Materials solely for non-commercial, non-revenue generating, research purposes:\n\na) Source Code. If source code is included, you may use and modify the source code, but you may not distribute the source code.\n\nb) Object Code. If object code is included, you may use the object code, but you may not distribute the object code.\n\nc) Models. If machine learning model(s) are included, you may use the model(s), but you may not distribute the models.\n\nd) Data. If data is included, you may use and modify the data, but your use and modification must be consistent with the consent under which the data was provided and/or gathered and you may not distribute the data or your modifications to the data.\n\n2) SCOPE OF LICENSE. The Materials are licensed, not sold. Microsoft reserves all other rights. Unless applicable law gives you more rights despite this limitation, you will not (and have no right to):\n\na) work around any technical limitations in the Materials that only allow you to use it in certain ways;\n\nb) reverse engineer, decompile or disassemble the Materials;\n\nc) remove, minimize, block, or modify any notices of Microsoft or its suppliers in the Materials;\n\nd) use the Materials in any way that is against the law or to create or propagate malware; or\n\ne) share, publish, distribute or lend the Materials, provide the Materials as a stand-alone hosted solution for others to use, or transfer the Materials or this agreement to any third party.\n\n3) PERSONAL DATA. If the data (set forth in Section 1(c) above) includes or is found to include any data that enables any ability to identify an individual (“Personal Data”), you will not use such Personal Data for any purpose other than was authorized and consented to by the data subject/research participant. You will not use Personal Data to contact any person. You will keep Personal Data in strict confidence. You will not share any Personal Data that is collected or in your possession with any third party for any reason and as required under the original consent agreement. Further, you will destroy the Personal Data and any backup or copies, immediately upon the completion of your research.\n\n4) LICENSE TO MICROSOFT. Notwithstanding the limitations in Section 1, you may distribute your modifications back to Microsoft, and if you do provide Microsoft with modifications of the Materials, you hereby grant Microsoft, without any restrictions or limitations, a non-exclusive, perpetual, irrevocable, royalty-free, assignable and sub-licensable license, to reproduce, publicly perform or display, install, use, modify, post, distribute, make and have made, sell and transfer such modifications and derivatives for any purpose.\n\n5) PUBLICATION. You may publish (or present papers or articles) on your results from using the Materials provided that no material or substantial portion of the Materials is included in any such publication or presentation.\n\n6) FEEDBACK. 
Any feedback about the Materials provided by you to us is voluntarily given, and Microsoft shall be free to use the feedback as it sees fit without obligation or restriction of any kind, even if the\n\nfeedback is designated by you as confidential. Such feedback shall be considered a contribution and licensed to Microsoft under the terms of Section 4 above.\n\n7) EXPORT RESTRICTIONS. You must comply with all domestic and international export laws and regulations that apply to the Materials, which include restrictions on destinations, end users, and end use. For further information on export restrictions, visit (aka.ms/exporting).\n\n8) SUPPORT SERVICES. Microsoft is not obligated under this agreement to provide any support services for the Materials. Any support provided is “as is”, “with all faults”, and without warranty of any kind.\n\n9) BINDING ARBITRATION AND CLASS ACTION WAIVER. This Section applies if you live in (or, if a business, your principal place of business is in) the United States. If you and Microsoft have a dispute, you and Microsoft agree to try for 60 days to resolve it informally. If you and Microsoft can’t, you and Microsoft agree to binding individual arbitration before the American Arbitration Association under the Federal Arbitration Act (“FAA”), and not to sue in court in front of a judge or jury. Instead, a neutral arbitrator will decide. Class action lawsuits, class-wide arbitrations, private attorney-general actions, and any other proceeding where someone acts in a representative capacity are not allowed; nor is combining individual proceedings without the consent of all parties. The complete Arbitration Agreement contains more terms and is at aka.ms/arb-agreement-1. You and Microsoft agree to these terms.\n\n10) ENTIRE AGREEMENT. This agreement, and any other terms Microsoft may provide for supplements, updates, or third-party applications, is the entire agreement for the Materials.\n\n11) APPLICABLE LAW AND PLACE TO RESOLVE DISPUTES. If you acquired the Materials in the United States or Canada, the laws of the state or province where you live (or, if a business, where your principal place of business is located) govern the interpretation of this agreement, claims for its breach, and all other claims (including consumer protection, unfair competition, and tort claims), regardless of conflict of laws principles, except that the FAA governs everything related to arbitration. If you acquired the Materials in any other country, its laws apply, except that the FAA governs everything related to arbitration. If U.S. federal jurisdiction exists, you and Microsoft consent to exclusive jurisdiction and venue in the federal court in King County, Washington for all disputes heard in court (excluding arbitration). If not, you and Microsoft consent to exclusive jurisdiction and venue in the Superior Court of King County, Washington for all disputes heard in court (excluding arbitration).\n\n12) CONSUMER RIGHTS; REGIONAL VARIATIONS. This agreement describes certain legal rights. You may have other rights, including consumer rights, under the laws of your state, province, or country. Separate and apart from your relationship with Microsoft, you may also have rights with respect to the party from which you acquired the Materials. This agreement does not change those other rights if the laws of your state, province, or country do not permit it to do so. 
For example, if you acquired the Materials in one of the below regions, or mandatory country law applies, then the following provisions apply to you:\n\na) Australia. You have statutory guarantees under the Australian Consumer Law and nothing in this agreement is intended to affect those rights.\n\nb) Canada. If you acquired this software in Canada, you may stop receiving updates by turning off the automatic update feature, disconnecting your device from the Internet (if and when you re-connect to the Internet, however, the Materials will resume checking for and installing updates), or uninstalling the Materials. The product documentation, if any, may also specify how to turn off updates for your specific device or software.\n\nc) Germany and Austria.\n\ni. Warranty. The properly licensed software will perform substantially as described in any Microsoft materials that accompany the Materials. However, Microsoft gives no contractual guarantee in relation to the licensed software.\n\nii. Limitation of Liability. In case of intentional conduct, gross negligence, claims based on the Product Liability Act, as well as, in case of death or personal or physical injury, Microsoft is liable according to the statutory law.\n\nSubject to the foregoing clause (ii), Microsoft will only be liable for slight negligence if Microsoft is in breach of such material contractual obligations, the fulfillment of which facilitate the due performance of this agreement, the breach of which would endanger the purpose of this agreement and the compliance with which a party may constantly trust in (so-called \"cardinal obligations\"). In other cases of slight negligence, Microsoft will not be liable for slight negligence.\n\n13) DISCLAIMER OF WARRANTY. THE MATERIALS ARE LICENSED “AS IS.” YOU BEAR THE RISK OF USING THEM. MICROSOFT GIVES NO EXPRESS WARRANTIES, GUARANTEES, OR CONDITIONS. TO THE EXTENT PERMITTED UNDER APPLICABLE LAWS, MICROSOFT EXCLUDES ALL IMPLIED WARRANTIES, INCLUDING MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT.\n\n14) LIMITATION ON AND EXCLUSION OF DAMAGES. IF YOU HAVE ANY BASIS FOR RECOVERING DAMAGES DESPITE THE PRECEDING DISCLAIMER OF WARRANTY, YOU CAN RECOVER FROM MICROSOFT AND ITS SUPPLIERS ONLY DIRECT DAMAGES UP TO U.S. $5.00. YOU CANNOT RECOVER ANY OTHER DAMAGES, INCLUDING CONSEQUENTIAL, LOST PROFITS, SPECIAL, INDIRECT OR INCIDENTAL DAMAGES.\n\nThis limitation applies to (a) anything related to the Materials, services, content (including code) on third party Internet sites, or third party applications; and (b) claims for breach of contract, warranty, guarantee, or condition; strict liability, negligence, or other tort; or any other claim; in each case to the extent permitted by applicable law.\n\nIt also applies even if Microsoft knew or should have known about the possibility of the damages. 
The above limitation or exclusion may not apply to you because your state, province, or country may not allow the exclusion or limitation of incidental, consequential, or other damages."},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2270,"cells":{"id":{"kind":"string","value":"ntc-ai/SDXL-LoRA-slider.art-by-artgerm-and-greg-rutkowski-and-alphonse-mucha"},"author":{"kind":"string","value":"ntc-ai"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","stable-diffusion-xl","lora","template:sd-lora","template:sdxl-lora","sdxl-sliders","ntcai.xyz-sliders","concept","en","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:mit","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion-xl\",\n \"lora\",\n \"template:sd-lora\",\n \"template:sdxl-lora\",\n \"sdxl-sliders\",\n \"ntcai.xyz-sliders\",\n \"concept\",\n \"en\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:mit\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-22T07:41:06Z","string":"2023-12-22T07:41:06Z"},"last_modified":{"kind":"string","value":"2023-12-22T07:41:09+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlicense: mit\ntags:\n- text-to-image\n- stable-diffusion-xl\n- lora\n- template:sd-lora\n- template:sdxl-lora\n- sdxl-sliders\n- ntcai.xyz-sliders\n- concept\n- diffusers\nthumbnail: images/evaluate/art by artgerm and greg rutkowski and alphonse mucha.../art\n by artgerm and greg rutkowski and alphonse mucha_17_3.0.png\nwidget:\n- text: art by artgerm and greg rutkowski and alphonse mucha\n output:\n url: images/art by artgerm and greg rutkowski and alphonse mucha_17_3.0.png\n- text: art by artgerm and greg rutkowski and alphonse mucha\n output:\n url: images/art by artgerm and greg rutkowski and alphonse mucha_19_3.0.png\n- text: art by artgerm and greg rutkowski and alphonse mucha\n output:\n url: images/art by artgerm and greg rutkowski and alphonse mucha_20_3.0.png\n- text: art by artgerm and greg rutkowski and alphonse mucha\n output:\n url: images/art by artgerm and greg rutkowski and alphonse mucha_21_3.0.png\n- text: art by artgerm and greg rutkowski and alphonse mucha\n output:\n url: images/art by artgerm and greg rutkowski and alphonse mucha_22_3.0.png\ninference: false\ninstance_prompt: art by artgerm and greg rutkowski and alphonse mucha\n---\n# ntcai.xyz slider - art by artgerm and greg rutkowski and alphonse mucha (SDXL LoRA)\n\n| Strength: -3 | Strength: 0 | Strength: 3 |\n| --- | --- | --- |\n| | | |\n| | | |\n| | | |\n\n\n## Download\n\nWeights for this model are available in Safetensors format.\n\n## Trigger words\n\nYou can apply this LoRA with trigger words for additional effect:\n\n```\nart by artgerm and greg rutkowski and alphonse mucha\n```\n\n## Use in diffusers\n\n```python\nfrom diffusers import StableDiffusionXLPipeline\nfrom diffusers import EulerAncestralDiscreteScheduler\nimport torch\n\npipe = StableDiffusionXLPipeline.from_single_file(\"https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors\")\npipe.to(\"cuda\")\npipe.scheduler = 
EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\n# Load the LoRA\npipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.art-by-artgerm-and-greg-rutkowski-and-alphonse-mucha', weight_name='art by artgerm and greg rutkowski and alphonse mucha.safetensors', adapter_name=\"art by artgerm and greg rutkowski and alphonse mucha\")\n\n# Activate the LoRA\npipe.set_adapters([\"art by artgerm and greg rutkowski and alphonse mucha\"], adapter_weights=[2.0])\n\nprompt = \"medieval rich kingpin sitting in a tavern, art by artgerm and greg rutkowski and alphonse mucha\"\nnegative_prompt = \"nsfw\"\nwidth = 512\nheight = 512\nnum_inference_steps = 10\nguidance_scale = 2\nimage = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]\nimage.save('result.png')\n```\n\n## Support the Patreon\n\nIf you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI).\n\nBy joining our Patreon, you'll gain access to an ever-growing library of over 540+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities.\n\nYour support on Patreon will allow us to continue developing and refining new models.\n\n## Other resources\n\n- [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs\n- [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2271,"cells":{"id":{"kind":"string","value":"ntc-ai/SDXL-LoRA-slider.appalled"},"author":{"kind":"string","value":"ntc-ai"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","stable-diffusion-xl","lora","template:sd-lora","template:sdxl-lora","sdxl-sliders","ntcai.xyz-sliders","concept","en","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:mit","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion-xl\",\n \"lora\",\n \"template:sd-lora\",\n \"template:sdxl-lora\",\n \"sdxl-sliders\",\n \"ntcai.xyz-sliders\",\n \"concept\",\n \"en\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:mit\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-29T13:54:12Z","string":"2023-12-29T13:54:12Z"},"last_modified":{"kind":"string","value":"2023-12-29T13:54:15+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":2,"string":"2"},"README":{"kind":"string","value":"---\nbase_model: stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlicense: mit\ntags:\n- text-to-image\n- stable-diffusion-xl\n- lora\n- template:sd-lora\n- template:sdxl-lora\n- sdxl-sliders\n- ntcai.xyz-sliders\n- concept\n- diffusers\nthumbnail: images/evaluate/appalled.../appalled_17_3.0.png\nwidget:\n- text: appalled\n output:\n url: images/appalled_17_3.0.png\n- text: appalled\n output:\n url: images/appalled_19_3.0.png\n- text: appalled\n output:\n url: images/appalled_20_3.0.png\n- text: appalled\n output:\n url: images/appalled_21_3.0.png\n- text: appalled\n output:\n url: 
images/appalled_22_3.0.png\ninference: false\ninstance_prompt: appalled\n---\n# ntcai.xyz slider - appalled (SDXL LoRA)\n\n| Strength: -3 | Strength: 0 | Strength: 3 |\n| --- | --- | --- |\n| | | |\n| | | |\n| | | |\n\n\n## Download\n\nWeights for this model are available in Safetensors format.\n\n## Trigger words\n\nYou can apply this LoRA with trigger words for additional effect:\n\n```\nappalled\n```\n\n## Use in diffusers\n\n```python\nfrom diffusers import StableDiffusionXLPipeline\nfrom diffusers import EulerAncestralDiscreteScheduler\nimport torch\n\npipe = StableDiffusionXLPipeline.from_single_file(\"https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors\")\npipe.to(\"cuda\")\npipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\n# Load the LoRA\npipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.appalled', weight_name='appalled.safetensors', adapter_name=\"appalled\")\n\n# Activate the LoRA\npipe.set_adapters([\"appalled\"], adapter_weights=[2.0])\n\nprompt = \"medieval rich kingpin sitting in a tavern, appalled\"\nnegative_prompt = \"nsfw\"\nwidth = 512\nheight = 512\nnum_inference_steps = 10\nguidance_scale = 2\nimage = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]\nimage.save('result.png')\n```\n\n## Support the Patreon\n\nIf you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI).\n\nBy joining our Patreon, you'll gain access to an ever-growing library of over 720+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities.\n\nYour support on Patreon will allow us to continue developing and refining new models.\n\n## Other resources\n\n- [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs\n- [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2272,"cells":{"id":{"kind":"string","value":"ntc-ai/SDXL-LoRA-slider.gorgeous"},"author":{"kind":"string","value":"ntc-ai"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","stable-diffusion-xl","lora","template:sd-lora","template:sdxl-lora","sdxl-sliders","ntcai.xyz-sliders","concept","en","base_model:stabilityai/stable-diffusion-xl-base-1.0","base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0","license:mit","region:us"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"stable-diffusion-xl\",\n \"lora\",\n \"template:sd-lora\",\n \"template:sdxl-lora\",\n \"sdxl-sliders\",\n \"ntcai.xyz-sliders\",\n \"concept\",\n \"en\",\n \"base_model:stabilityai/stable-diffusion-xl-base-1.0\",\n \"base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0\",\n \"license:mit\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2023-12-30T07:55:43Z","string":"2023-12-30T07:55:43Z"},"last_modified":{"kind":"string","value":"2023-12-30T07:55:51+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: 
stabilityai/stable-diffusion-xl-base-1.0\nlanguage:\n- en\nlicense: mit\ntags:\n- text-to-image\n- stable-diffusion-xl\n- lora\n- template:sd-lora\n- template:sdxl-lora\n- sdxl-sliders\n- ntcai.xyz-sliders\n- concept\n- diffusers\nthumbnail: images/evaluate/gorgeous.../gorgeous_17_3.0.png\nwidget:\n- text: gorgeous\n output:\n url: images/gorgeous_17_3.0.png\n- text: gorgeous\n output:\n url: images/gorgeous_19_3.0.png\n- text: gorgeous\n output:\n url: images/gorgeous_20_3.0.png\n- text: gorgeous\n output:\n url: images/gorgeous_21_3.0.png\n- text: gorgeous\n output:\n url: images/gorgeous_22_3.0.png\ninference: false\ninstance_prompt: gorgeous\n---\n# ntcai.xyz slider - gorgeous (SDXL LoRA)\n\n| Strength: -3 | Strength: 0 | Strength: 3 |\n| --- | --- | --- |\n| | | |\n| | | |\n| | | |\n\n\n## Download\n\nWeights for this model are available in Safetensors format.\n\n## Trigger words\n\nYou can apply this LoRA with trigger words for additional effect:\n\n```\ngorgeous\n```\n\n## Use in diffusers\n\n```python\nfrom diffusers import StableDiffusionXLPipeline\nfrom diffusers import EulerAncestralDiscreteScheduler\nimport torch\n\npipe = StableDiffusionXLPipeline.from_single_file(\"https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors\")\npipe.to(\"cuda\")\npipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n\n# Load the LoRA\npipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.gorgeous', weight_name='gorgeous.safetensors', adapter_name=\"gorgeous\")\n\n# Activate the LoRA\npipe.set_adapters([\"gorgeous\"], adapter_weights=[2.0])\n\nprompt = \"medieval rich kingpin sitting in a tavern, gorgeous\"\nnegative_prompt = \"nsfw\"\nwidth = 512\nheight = 512\nnum_inference_steps = 10\nguidance_scale = 2\nimage = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0]\nimage.save('result.png')\n```\n\n## Support the Patreon\n\nIf you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI).\n\nBy joining our Patreon, you'll gain access to an ever-growing library of over 730+ unique and diverse LoRAs, covering a wide range of styles and genres. 
You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities.\n\nYour support on Patreon will allow us to continue developing and refining new models.\n\n## Other resources\n\n- [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs\n- [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2273,"cells":{"id":{"kind":"string","value":"Dagobert42/distilbert-base-uncased-biored-augmented"},"author":{"kind":"string","value":"Dagobert42"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","safetensors","distilbert","token-classification","low-resource NER","token_classification","biomedicine","medical NER","generated_from_trainer","en","dataset:medicine","base_model:distilbert/distilbert-base-uncased","base_model:finetune:distilbert/distilbert-base-uncased","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"distilbert\",\n \"token-classification\",\n \"low-resource NER\",\n \"token_classification\",\n \"biomedicine\",\n \"medical NER\",\n \"generated_from_trainer\",\n \"en\",\n \"dataset:medicine\",\n \"base_model:distilbert/distilbert-base-uncased\",\n \"base_model:finetune:distilbert/distilbert-base-uncased\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-02-14T19:01:08Z","string":"2024-02-14T19:01:08Z"},"last_modified":{"kind":"string","value":"2024-02-22T11:27:55+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: distilbert-base-uncased\ndatasets:\n- medicine\nlanguage:\n- en\nlicense: mit\nmetrics:\n- accuracy\n- precision\n- recall\n- f1\ntags:\n- low-resource NER\n- token_classification\n- biomedicine\n- medical NER\n- generated_from_trainer\nmodel-index:\n- name: Dagobert42/distilbert-base-uncased-biored-augmented\n results: []\n---\n\n\n\n# Dagobert42/distilbert-base-uncased-biored-augmented\n\nThis model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the bigbio/biored dataset.\nIt achieves the following results on the evaluation set:\n- Loss: 0.5692\n- Accuracy: 0.7978\n- Precision: 0.5993\n- Recall: 0.5337\n- F1: 0.5536\n- Weighted F1: 0.7929\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 2e-05\n- train_batch_size: 8\n- eval_batch_size: 8\n- seed: 42\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 50\n\n### Training results\n\n| Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | Weighted F1 |\n|:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|:-----------:|\n| No log | 1.0 | 25 | 0.6037 | 0.7824 | 0.5931 | 0.4937 | 0.5272 | 0.7719 |\n| No log | 2.0 | 50 | 0.5858 | 0.7932 | 0.6023 | 
0.5298 | 0.5511 | 0.7849 |\n| No log | 3.0 | 75 | 0.5887 | 0.795 | 0.5757 | 0.5283 | 0.544 | 0.7842 |\n| No log | 4.0 | 100 | 0.5890 | 0.7937 | 0.5911 | 0.5331 | 0.5466 | 0.7864 |\n\n\n### Framework versions\n\n- Transformers 4.35.2\n- Pytorch 2.0.1+cu117\n- Datasets 2.12.0\n- Tokenizers 0.15.0\n"},"matched_bigbio_names":{"kind":"list like","value":["BIORED"],"string":"[\n \"BIORED\"\n]"}}},{"rowIdx":2274,"cells":{"id":{"kind":"string","value":"HariLuru/finer_distillbert_v2"},"author":{"kind":"string","value":"HariLuru"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","safetensors","distilbert","token-classification","finance","dataset:nlpaueb/finer-139","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"distilbert\",\n \"token-classification\",\n \"finance\",\n \"dataset:nlpaueb/finer-139\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-03-07T12:56:25Z","string":"2024-03-07T12:56:25Z"},"last_modified":{"kind":"string","value":"2024-03-08T13:29:02+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\ndatasets:\n- nlpaueb/finer-139\nlibrary_name: transformers\nlicense: mit\npipeline_tag: token-classification\ntags:\n- finance\nwidget:\n- text: The loan bears interest at 9.75 % per annum with interest due monthly and\n is secured by a lien on certain of the Company ’ s and its subsidiaries ’ assets\n .\n example_title: Example1\n- text: Unused portions of the Credit Facilities bear interest at a rate equal to\n 0.25 % per annum .\n example_title: Example2\n---\n\n# Model Card for Model ID\n\nThis is a NER model built on distillBert for 5 classes of finer139 dataset\n\n\n## Model Details\n\n### Model Description\n\n\n\nThis is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated.\n\n- **Developed by:** Narahari BM\n- **Model type:** NER\n- **Finetuned from model [optional]:** DistillBert\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/65e2eb5c2b28b798a0249f66/GdH6LpK4Drkd6RT1uw5Do.png)\n\n\n\n[More Information Needed]\n\n## Confusion Matrix\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/65e2eb5c2b28b798a0249f66/J0fZEXv5gWKOgeBVKPv75.png)\n\n\n## Training Details\n\n### Training Data\n1. 
Subsampled train data and obtained the below distribution\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/65e2eb5c2b28b798a0249f66/tkCIZUIiEQTyO2bGpttTI.png)\n\n\n\n[More Information Needed]\n\n### Training Procedure \n\n\n\n#### Preprocessing [optional]\n\n[More Information Needed]\n\n\n#### Training Hyperparameters\n\n- **Training regime:** [More Information Needed] \n\n#### Speeds, Sizes, Times [optional]\n\n\n\n[More Information Needed]\n\n## Evaluation\n\n\n\n### Testing Data, Factors & Metrics\n\n#### Testing Data\n\n\n\n[More Information Needed]\n\n#### Factors\n\n\n\n[More Information Needed]\n\n#### Metrics\n\n\n\n[More Information Needed]\n\n### Results\n\n[More Information Needed]\n\n#### Summary"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2275,"cells":{"id":{"kind":"string","value":"johnsnowlabs/JSL-MedMX-7X"},"author":{"kind":"string","value":"johnsnowlabs"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","mistral","text-generation","medical","conversational","en","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"mistral\",\n \"text-generation\",\n \"medical\",\n \"conversational\",\n \"en\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-04-25T12:41:37Z","string":"2024-04-25T12:41:37Z"},"last_modified":{"kind":"string","value":"2024-05-03T00:29:46+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nlanguage:\n- en\nlibrary_name: transformers\nlicense: apache-2.0\ntags:\n- medical\n---\n\n# JSL-MedMX-7X\n\n\n[](http://www.johnsnowlabs.com)\n\n\nThis model is developed by [John Snow Labs](https://www.johnsnowlabs.com/).\nPerformance on biomedical benchmarks: [Open Medical LLM Leaderboard](https://huggingface.co/spaces/openlifescienceai/open_medical_llm_leaderboard).\n\nThis model is available under a [CC-BY-NC-ND](https://creativecommons.org/licenses/by-nc-nd/4.0/deed.en) license and must also conform to this [Acceptable Use Policy](https://huggingface.co/johnsnowlabs). 
If you need to license this model for commercial use, please contact us at info@johnsnowlabs.com.\n\n## 💻 Usage\n\n```python\n!pip install -qU transformers accelerate\n\nfrom transformers import AutoTokenizer\nimport transformers\nimport torch\n\nmodel = \"johnsnowlabs/JSL-MedMX-7X\"\nmessages = [{\"role\": \"user\", \"content\": \"What is a large language model?\"}]\n\ntokenizer = AutoTokenizer.from_pretrained(model)\nprompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)\npipeline = transformers.pipeline(\n \"text-generation\",\n model=model,\n torch_dtype=torch.float16,\n device_map=\"auto\",\n)\n\noutputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95)\nprint(outputs[0][\"generated_text\"])\n```\n\n## 🏆 Evaluation\n\n| Tasks |Version|Filter|n-shot| Metric |Value | |Stderr|\n|-------------------------------|-------|------|-----:|--------|-----:|---|-----:|\n|stem |N/A |none | 0|acc_norm|0.5783|± |0.0067|\n| | |none | 0|acc |0.6177|± |0.0057|\n| - medmcqa |Yaml |none | 0|acc |0.5668|± |0.0077|\n| | |none | 0|acc_norm|0.5668|± |0.0077|\n| - medqa_4options |Yaml |none | 0|acc |0.6159|± |0.0136|\n| | |none | 0|acc_norm|0.6159|± |0.0136|\n| - anatomy (mmlu) | 0|none | 0|acc |0.7111|± |0.0392|\n| - clinical_knowledge (mmlu) | 0|none | 0|acc |0.7396|± |0.0270|\n| - college_biology (mmlu) | 0|none | 0|acc |0.7778|± |0.0348|\n| - college_medicine (mmlu) | 0|none | 0|acc |0.6647|± |0.0360|\n| - medical_genetics (mmlu) | 0|none | 0|acc |0.7200|± |0.0451|\n| - professional_medicine (mmlu)| 0|none | 0|acc |0.7868|± |0.0249|\n| - pubmedqa | 1|none | 0|acc |0.7840|± |0.0184|"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA","PUBMEDQA"],"string":"[\n \"MEDQA\",\n \"PUBMEDQA\"\n]"}}},{"rowIdx":2276,"cells":{"id":{"kind":"string","value":"Alignment-Lab-AI/idfkphi4kiguess"},"author":{"kind":"string","value":"Alignment-Lab-AI"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","phi3","text-generation","nlp","code","conversational","custom_code","en","license:mit","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"phi3\",\n \"text-generation\",\n \"nlp\",\n \"code\",\n \"conversational\",\n \"custom_code\",\n \"en\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-04-26T05:23:44Z","string":"2024-04-26T05:23:44Z"},"last_modified":{"kind":"string","value":"2024-04-26T05:23:45+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nlanguage:\n- en\nlicense: mit\nlicense_link: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/LICENSE\npipeline_tag: text-generation\ntags:\n- nlp\n- code\n---\n\n## Model Summary\n\nThe Phi-3-Mini-4K-Instruct is a 3.8B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties.\nThe model belongs to the Phi-3 family with the Mini version in two variants [4K](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) which is the context length (in 
tokens) that it can support.\n\nThe model has undergone a post-training process that incorporates both supervised fine-tuning and direct preference optimization for the instruction following and safety measures.\nWhen assessed against benchmarks testing common sense, language understanding, math, code, long context and logical reasoning, Phi-3 Mini-4K-Instruct showcased a robust and state-of-the-art performance among models with less than 13 billion parameters.\n\nResources and Technical Documentation:\n\n+ [Phi-3 Microsoft Blog](https://aka.ms/phi3blog-april)\n+ [Phi-3 Technical Report](https://aka.ms/phi3-tech-report)\n+ [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai)\n+ Phi-3 GGUF: [4K](https://aka.ms/Phi3-mini-4k-instruct-gguf)\n+ Phi-3 ONNX: [4K](https://aka.ms/Phi3-mini-4k-instruct-onnx)\n\n## Intended Uses\n\n**Primary use cases**\n\nThe model is intended for commercial and research use in English. The model provides uses for applications which require:\n\n1) Memory/compute constrained environments\n2) Latency bound scenarios\n3) Strong reasoning (especially code, math and logic)\n\nOur model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. \n\n**Use case considerations**\n\nOur models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fairness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case.\n\nNothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. \n\n## How to Use\n\nPhi-3 Mini-4K-Instruct has been integrated in the development version (4.40.0) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following:\n\n* When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function.\n\n* Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source.\n\nThe current `transformers` version can be verified with: `pip list | grep transformers`.\n\nPhi-3 Mini-4K-Instruct is also available in [HuggingChat](https://aka.ms/try-phi3-hf-chat).\n\n### Tokenizer\n\nPhi-3 Mini-4K-Instruct supports a vocabulary size of up to `32064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size.\n\n### Chat Format\n\nGiven the nature of the training data, the Phi-3 Mini-4K-Instruct model is best suited for prompts using the chat format as follows. 
\nYou can provide the prompt as a question with a generic template as follows:\n```markdown\n<|user|>\\nQuestion <|end|>\\n<|assistant|>\n```\nFor example:\n```markdown\n<|system|>\nYou are a helpful AI assistant.<|end|>\n<|user|>\nHow to explain Internet for a medieval knight?<|end|>\n<|assistant|>\n```\n\nwhere the model generates the text after `<|assistant|>`. In the case of a few-shot prompt, the prompt can be formatted as follows:\n\n```markdown\n<|system|>\nYou are a helpful AI assistant.<|end|>\n<|user|>\nI am going to Paris, what should I see?<|end|>\n<|assistant|>\nParis, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\\n\\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\\n\\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world.\"<|end|>\n<|user|>\nWhat is so great about #1?<|end|>\n<|assistant|>\n```\n\n### Sample inference code\n\nThis code snippet shows how to quickly get started with running the model on a GPU:\n\n```python\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer, pipeline\n\ntorch.random.manual_seed(0)\n\nmodel = AutoModelForCausalLM.from_pretrained(\n    \"microsoft/Phi-3-mini-4k-instruct\",\n    device_map=\"cuda\",\n    torch_dtype=\"auto\",\n    trust_remote_code=True,\n)\ntokenizer = AutoTokenizer.from_pretrained(\"microsoft/Phi-3-mini-4k-instruct\")\n\nmessages = [\n    {\"role\": \"system\", \"content\": \"You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user.\"},\n    {\"role\": \"user\", \"content\": \"Can you provide ways to eat combinations of bananas and dragonfruits?\"},\n    {\"role\": \"assistant\", \"content\": \"Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey.\"},\n    {\"role\": \"user\", \"content\": \"What about solving a 2x + 3 = 7 equation?\"},\n]\n\npipe = pipeline(\n    \"text-generation\",\n    model=model,\n    tokenizer=tokenizer,\n)\n\ngeneration_args = {\n    \"max_new_tokens\": 500,\n    \"return_full_text\": False,\n    \"temperature\": 0.0,\n    \"do_sample\": False,\n}\n\noutput = pipe(messages, **generation_args)\nprint(output[0]['generated_text'])\n```\n\n## Responsible AI Considerations\n\nLike other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include:\n\n+ Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. 
\n+ Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. \n+ Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. \n+ Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. \n+ Limited Scope for Code: The majority of Phi-3 training data is based in Python and uses common packages such as \"typing, math, random, collections, datetime, itertools\". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. \n\nDevelopers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include:\n\n+ Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques.\n+ High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. \n+ Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). \n+ Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. \n+ Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations.\n\n\n## Training\n\n### Model\n\n* Architecture: Phi-3 Mini-4K-Instruct has 3.8B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidelines.\n* Inputs: Text. 
It is best suited for prompts using chat format.\n* Context length: 4K tokens\n* GPUs: 512 H100-80G\n* Training time: 7 days\n* Training data: 3.3T tokens\n* Outputs: Generated text in response to the input\n* Dates: Our models were trained between February and April 2024\n* Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models.\n\n### Datasets\n\nOur training data includes a wide variety of sources, totaling 3.3 trillion tokens, and is a combination of \n1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; \n2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); \n3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness.\n\n### Fine-tuning\n\nA basic example of multi-GPUs supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/sample_finetune.py).\n\n## Benchmarks\n\nWe report the results for Phi-3-Mini-4K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Phi-2, Mistral-7b-v0.1, Mixtral-8x7b, Gemma 7B, Llama-3-8B-Instruct, and GPT-3.5.\n\nAll the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation.\n\nAs is now standard, we use few-shot prompts to evaluate the models, at temperature 0. \nThe prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3.\nMore specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model.\n\nThe number of k–shot examples is listed per-benchmark. \n\n| | Phi-3-Mini-4K-In
3.8b | Phi-3-Small<br>7b (preview) | Phi-3-Medium<br>14b (preview) | Phi-2<br>2.7b | Mistral<br>7b | Gemma<br>7b | Llama-3-In<br>8b | Mixtral<br>8x7b | GPT-3.5<br>version 1106 |\n|---|---|---|---|---|---|---|---|---|---|\n| MMLU<br>5-Shot | 68.8 | 75.3 | 78.2 | 56.3 | 61.7 | 63.6 | 66.5 | 68.4 | 71.4 |\n| HellaSwag<br>5-Shot | 76.7 | 78.7 | 83.2 | 53.6 | 58.5 | 49.8 | 71.1 | 70.4 | 78.8 |\n| ANLI<br>7-Shot | 52.8 | 55.0 | 58.7 | 42.5 | 47.1 | 48.7 | 57.3 | 55.2 | 58.1 |\n| GSM-8K<br>0-Shot; CoT | 82.5 | 86.4 | 90.8 | 61.1 | 46.4 | 59.8 | 77.4 | 64.7 | 78.1 |\n| MedQA<br>2-Shot | 53.8 | 58.2 | 69.8 | 40.9 | 49.6 | 50.0 | 60.5 | 62.2 | 63.4 |\n| AGIEval<br>0-Shot | 37.5 | 45.0 | 49.7 | 29.8 | 35.1 | 42.1 | 42.0 | 45.2 | 48.4 |\n| TriviaQA<br>5-Shot | 64.0 | 59.1 | 73.3 | 45.2 | 72.3 | 75.2 | 67.7 | 82.2 | 85.8 |\n| Arc-C<br>10-Shot | 84.9 | 90.7 | 91.9 | 75.9 | 78.6 | 78.3 | 82.8 | 87.3 | 87.4 |\n| Arc-E<br>10-Shot | 94.6 | 97.1 | 98.0 | 88.5 | 90.6 | 91.4 | 93.4 | 95.6 | 96.3 |\n| PIQA<br>5-Shot | 84.2 | 87.8 | 88.2 | 60.2 | 77.7 | 78.1 | 75.7 | 86.0 | 86.6 |\n| SociQA<br>5-Shot | 76.6 | 79.0 | 79.4 | 68.3 | 74.6 | 65.5 | 73.9 | 75.9 | 68.3 |\n| BigBench-Hard<br>0-Shot | 71.7 | 75.0 | 82.5 | 59.4 | 57.3 | 59.6 | 51.5 | 69.7 | 68.32 |\n| WinoGrande<br>5-Shot | 70.8 | 82.5 | 81.2 | 54.7 | 54.2 | 55.6 | 65 | 62.0 | 68.8 |\n| OpenBookQA<br>10-Shot | 83.2 | 88.4 | 86.6 | 73.6 | 79.8 | 78.6 | 82.6 | 85.8 | 86.0 |\n| BoolQ<br>0-Shot | 77.6 | 82.9 | 86.5 | -- | 72.2 | 66.0 | 80.9 | 77.6 | 79.1 |\n| CommonSenseQA<br>10-Shot | 80.2 | 80.3 | 82.6 | 69.3 | 72.6 | 76.2 | 79 | 78.1 | 79.6 |\n| TruthfulQA<br>10-Shot | 65.0 | 68.1 | 74.8 | -- | 52.1 | 53.0 | 63.2 | 60.1 | 85.8 |\n| HumanEval<br>0-Shot | 59.1 | 59.1 | 54.7 | 47.0 | 28.0 | 34.1 | 60.4 | 37.8 | 62.2 |\n| MBPP
3-Shot | 53.8 | 71.4 | 73.7 | 60.6 | 50.8 | 51.5 | 67.7 | 60.2 | 77.8 |\n\n## Software\n\n* [PyTorch](https://github.com/pytorch/pytorch)\n* [DeepSpeed](https://github.com/microsoft/DeepSpeed)\n* [Transformers](https://github.com/huggingface/transformers)\n* [Flash-Attention](https://github.com/HazyResearch/flash-attention)\n\n## Hardware\nNote that by default, the Phi-3-mini model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types:\n* NVIDIA A100\n* NVIDIA A6000\n* NVIDIA H100\n\nIf you want to run the model on:\n* NVIDIA V100 or earlier generation GPUs: call AutoModelForCausalLM.from_pretrained() with attn_implementation=\"eager\"\n* CPU: use the **GGUF** quantized models [4K](https://aka.ms/Phi3-mini-4k-instruct-gguf)\n+ Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [4K](https://aka.ms/Phi3-mini-4k-instruct-onnx)\n\n\n## Cross Platform Support\n\nONNX runtime ecosystem now supports Phi-3 Mini models across platforms and hardware. You can find the optimized Phi-3 Mini-4K-Instruct ONNX model [here](https://aka.ms/phi3-mini-4k-instruct-onnx).\n\nOptimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML support lets developers bring hardware acceleration to Windows devices at scale across AMD, Intel, and NVIDIA GPUs. \nAlong with DirectML, ONNX Runtime provides cross platform support for Phi-3 across a range of devices CPU, GPU, and mobile.\n\nHere are some of the optimized configurations we have added: \n\n1. ONNX models for int4 DML: Quantized to int4 via AWQ\n2. ONNX model for fp16 CUDA\n3. ONNX model for int4 CUDA: Quantized to int4 via RTN\n4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN\n\n## License\n\nThe model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-4k/resolve/main/LICENSE).\n\n## Trademarks\n\nThis project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. 
Any use of third-party trademarks or logos are subject to those third-party’s policies.\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA"],"string":"[\n \"MEDQA\"\n]"}}},{"rowIdx":2277,"cells":{"id":{"kind":"string","value":"LoneStriker/OpenBioLLM-Llama3-8B-3.0bpw-h6-exl2"},"author":{"kind":"string","value":"LoneStriker"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","pytorch","llama","text-generation","llama-3","Mixtral","instruct","finetune","chatml","DPO","RLHF","gpt4","distillation","en","arxiv:2305.18290","arxiv:2303.13375","arxiv:2212.13138","arxiv:2305.09617","arxiv:2402.07023","base_model:meta-llama/Meta-Llama-3-8B","base_model:quantized:meta-llama/Meta-Llama-3-8B","license:llama3","autotrain_compatible","text-generation-inference","endpoints_compatible","3-bit","exl2","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"llama\",\n \"text-generation\",\n \"llama-3\",\n \"Mixtral\",\n \"instruct\",\n \"finetune\",\n \"chatml\",\n \"DPO\",\n \"RLHF\",\n \"gpt4\",\n \"distillation\",\n \"en\",\n \"arxiv:2305.18290\",\n \"arxiv:2303.13375\",\n \"arxiv:2212.13138\",\n \"arxiv:2305.09617\",\n \"arxiv:2402.07023\",\n \"base_model:meta-llama/Meta-Llama-3-8B\",\n \"base_model:quantized:meta-llama/Meta-Llama-3-8B\",\n \"license:llama3\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"3-bit\",\n \"exl2\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-04-26T19:39:48Z","string":"2024-04-26T19:39:48Z"},"last_modified":{"kind":"string","value":"2024-04-26T19:42:40+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: meta-llama/Meta-Llama-3-8B\nlanguage:\n- en\nlicense: llama3\ntags:\n- llama-3\n- llama\n- Mixtral\n- instruct\n- finetune\n- chatml\n- DPO\n- RLHF\n- gpt4\n- distillation\nwidget:\n- example_title: OpenBioLLM-8B\n messages:\n - role: system\n content: You are an expert and experienced from the healthcare and biomedical\n domain with extensive medical knowledge and practical experience.\n - role: user\n content: How long does it take for newborn jaundice to go away?\n output:\n text: 'Newborn jaundice, also known as neonatal jaundice, is a common condition\n in newborns where the yellowing of the skin and eyes occurs due to an elevated\n level of bilirubin in the blood. Bilirubin is a yellow pigment that forms when\n red blood cells break down. In most cases, newborn jaundice resolves on its\n own without any specific treatment.\n\n The duration of newborn jaundice can vary depending on several factors such\n as the underlying cause, gestational age at birth, and individual variations\n in bilirubin metabolism. Here are some general guidelines:\n\n 1. Physiological jaundice: This is the most common type of newborn jaundice\n and usually appears within 24-72 hours after birth. It tends to peak between\n the second and fifth day of life and gradually improves over the next week or\n two. By the time the baby is one week old, the jaundice should have mostly resolved.\n 2. Breast milk jaundice: This type of jaundice occurs in breastfed babies and\n may appear later than physiological jaundice, typically between the fifth and\n fourteenth day of life. It tends to persist for a longer duration but usually\n resolves within six weeks after birth. 3. 
Pathological jaundice: This type of\n jaundice is less common and occurs due to an underlying medical condition that\n affects bilirubin metabolism or liver function. The duration of pathological\n jaundice depends on the specific cause and may require treatment.\n\n It''s important for parents to monitor their newborn''s jaundice closely and\n seek medical advice if the jaundice progresses rapidly, becomes severe, or is\n accompanied by other symptoms such as poor feeding, lethargy, or excessive sleepiness.\n In these cases, further evaluation and management may be necessary. Remember\n that each baby is unique, and the timing of jaundice resolution can vary. If\n you have concerns about your newborn''s jaundice, it''s always best to consult\n with a healthcare professional for personalized advice and guidance.'\nmodel-index:\n- name: OpenBioLLM-8B\n results: []\n---\n\n\n
\n
\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/2FhDh8NDvMl7iSxbQz9BP.png)\n\n\n\n\n
\n \n

Advancing Open-source Large Language Models in Medical Domain

\n
\n\n

\n \n \"OpenChat\n Online Demo\n |\n \n \"GitHub\n GitHub\n |\n \n \"ArXiv\n Paper\n |\n \n \"Discord\n Discord\n \n

\n\n![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/KGmRE5w2sepNtwsEu8t7K.jpeg)\n\nIntroducing OpenBioLLM-8B: A State-of-the-Art Open Source Biomedical Large Language Model\n\n\nOpenBioLLM-8B is an advanced open source language model designed specifically for the biomedical domain. Developed by Saama AI Labs, this model leverages cutting-edge techniques to achieve state-of-the-art performance on a wide range of biomedical tasks.\n\n🏥 **Biomedical Specialization**: OpenBioLLM-8B is tailored for the unique language and knowledge requirements of the medical and life sciences fields. It was fine-tuned on a vast corpus of high-quality biomedical data, enabling it to understand and generate text with domain-specific accuracy and fluency.\n\n🎓 **Superior Performance**: With 8 billion parameters, OpenBioLLM-8B outperforms other open source biomedical language models of similar scale. It has also demonstrated better results compared to larger proprietary & open-source models like GPT-3.5 and Meditron-70B on biomedical benchmarks.\n\n🧠 **Advanced Training Techniques**: OpenBioLLM-8B builds upon the powerful foundations of the **Meta-Llama-3-8B** and [Meta-Llama-3-8B](meta-llama/Meta-Llama-3-8B) models. It incorporates the DPO dataset and fine-tuning recipe along with a custom diverse medical instruction dataset. Key components of the training pipeline include:\n\n
\n\n
\n\n- **Policy Optimization**: [Direct Preference Optimization: Your Language Model is Secretly a Reward Model (DPO)](https://arxiv.org/abs/2305.18290)\n- **Ranking Dataset**: [berkeley-nest/Nectar](https://huggingface.co/datasets/berkeley-nest/Nectar)\n- **Fine-tuning dataset**: Custom Medical Instruct dataset (We plan to release a sample training dataset in our upcoming paper; please stay updated)\n\nThis combination of cutting-edge techniques enables OpenBioLLM-8B to align with key capabilities and preferences for biomedical applications.\n\n⚙️ **Release Details**:\n\n- **Model Size**: 8 billion parameters\n- **Quantization**: Optimized quantized versions available [Here](https://huggingface.co/aaditya/OpenBioLLM-8B-GGUF)\n- **Language(s) (NLP):** en\n- **Developed By**: [Ankit Pal (Aaditya Ura)](https://aadityaura.github.io/) from Saama AI Labs \n- **License:** Meta-Llama License \n- **Fine-tuned from models:** [meta-llama/Meta-Llama-3-8B](meta-llama/Meta-Llama-3-8B)\n- **Resources for more information:**\n - Paper: Coming soon\n\nThe model can be fine-tuned for more specialized tasks and datasets as needed.\n\nOpenBioLLM-8B represents an important step forward in democratizing advanced language AI for the biomedical community. By leveraging state-of-the-art architectures and training techniques from leading open source efforts like Llama-3, we have created a powerful tool to accelerate innovation and discovery in healthcare and the life sciences.\n\nWe are excited to share OpenBioLLM-8B with researchers and developers around the world.\n\n\n### Use with transformers\n\n**Important: Please use the exact chat template provided by Llama-3 instruct version. Otherwise there will be a degradation in the performance. The model output can be verbose in rare cases. Please consider setting temperature = 0 to make this happen less.**\n\nSee the snippet below for usage with Transformers:\n\n```python\nimport transformers\nimport torch\n\nmodel_id = \"aaditya/OpenBioLLM-Llama3-8B\"\n\npipeline = transformers.pipeline(\n \"text-generation\",\n model=model_id,\n model_kwargs={\"torch_dtype\": torch.bfloat16},\n device=\"auto\",\n)\n\nmessages = [\n {\"role\": \"system\", \"content\": \"You are an expert and experienced from the healthcare and biomedical domain with extensive medical knowledge and practical experience. Your name is OpenBioLLM, and you were developed by Saama AI Labs. who's willing to help answer the user's query with explanation. In your explanation, leverage your deep medical expertise such as relevant anatomical structures, physiological processes, diagnostic criteria, treatment guidelines, or other pertinent medical concepts. Use precise medical terminology while still aiming to make the explanation clear and accessible to a general audience.\"},\n {\"role\": \"user\", \"content\": \"How can i split a 3mg or 4mg waefin pill so i can get a 2.5mg pill?\"},\n]\n\nprompt = pipeline.tokenizer.apply_chat_template(\n\t\tmessages, \n\t\ttokenize=False, \n\t\tadd_generation_prompt=True\n)\n\nterminators = [\n pipeline.tokenizer.eos_token_id,\n pipeline.tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")\n]\n\noutputs = pipeline(\n prompt,\n max_new_tokens=256,\n eos_token_id=terminators,\n do_sample=True,\n temperature=0.0,\n top_p=0.9,\n)\nprint(outputs[0][\"generated_text\"][len(prompt):])\n```\n\n## **Training procedure**\n\n### **Training hyperparameters**\n\n
\n Click to see details\n\n- learning_rate: 0.0002\n- lr_scheduler: cosine\n- train_batch_size: 12\n- eval_batch_size: 8\n- GPU: H100 80GB SXM5\n- num_devices: 1\n- optimizer: adamw_bnb_8bit\n- lr_scheduler_warmup_steps: 100\n- num_epochs: 4\n
\n\n \n### **Peft hyperparameters**\n\n
\n Click to see details\n\n- adapter: qlora\n- lora_r: 128\n- lora_alpha: 256\n- lora_dropout: 0.05\n- lora_target_linear: true\n \n-lora_target_modules:\n - q_proj\n - v_proj\n - k_proj\n - o_proj\n - gate_proj\n - down_proj\n - up_proj\n
\n\n\n\n### **Training results**\n\n### **Framework versions**\n\n- Transformers 4.39.3\n- Pytorch 2.1.2+cu121\n- Datasets 2.18.0\n- Tokenizers 0.15.1\n- Axolotl\n- Lm harness for evaluation\n\n\n# Benchmark Results\n\n🔥 OpenBioLLM-8B demonstrates superior performance compared to larger models, such as GPT-3.5, Meditron-70B across 9 diverse biomedical datasets, achieving state-of-the-art results with an average score of 72.50%, despite having a significantly smaller parameter count. The model's strong performance in domain-specific tasks, such as Clinical KG, Medical Genetics, and PubMedQA, highlights its ability to effectively capture and apply biomedical knowledge.\n\n🚨 The GPT-4, Med-PaLM-1, and Med-PaLM-2 results are taken from their official papers. Since Med-PaLM doesn't provide zero-shot accuracy, we are using 5-shot accuracy from their paper for comparison. All results presented are in the zero-shot setting, except for Med-PaLM-2 and Med-PaLM-1, which use 5-shot accuracy.\n\n| | Clinical KG | Medical Genetics | Anatomy | Pro Medicine | College Biology | College Medicine | MedQA 4 opts | PubMedQA | MedMCQA | Avg |\n|--------------------|-------------|------------------|---------|--------------|-----------------|------------------|--------------|----------|---------|-------|\n| **OpenBioLLM-70B** | **92.93** | **93.197** | **83.904** | 93.75 | 93.827 | **85.749** | 78.162 | 78.97 | **74.014** | **86.05588** |\n| Med-PaLM-2 (5-shot) | 88.3 | 90 | 77.8 | **95.2** | 94.4 | 80.9 | **79.7** | **79.2** | 71.3 | 84.08 |\n| **GPT-4** | 86.04 | 91 | 80 | 93.01 | **95.14** | 76.88 | 78.87 | 75.2 | 69.52 | 82.85 |\n| Med-PaLM-1 (Flan-PaLM, 5-shot) | 80.4 | 75 | 63.7 | 83.8 | 88.9 | 76.3 | 67.6 | 79 | 57.6 | 74.7 |\n| **OpenBioLLM-8B** | 76.101 | 86.1 | 69.829 | 78.21 | 84.213 | 68.042 | 58.993 | 74.12 | 56.913 | 72.502 |\n| Gemini-1.0 | 76.7 | 75.8 | 66.7 | 77.7 | 88 | 69.2 | 58 | 70.7 | 54.3 | 70.79 |\n| GPT-3.5 Turbo 1106 | 74.71 | 74 | 72.79 | 72.79 | 72.91 | 64.73 | 57.71 | 72.66 | 53.79 | 66 |\n| Meditron-70B | 66.79 | 69 | 53.33 | 71.69 | 76.38 | 63 | 57.1 | 76.6 | 46.85 | 64.52 |\n| gemma-7b | 69.81 | 70 | 59.26 | 66.18 | 79.86 | 60.12 | 47.21 | 76.2 | 48.96 | 64.18 |\n| Mistral-7B-v0.1 | 68.68 | 71 | 55.56 | 68.38 | 68.06 | 59.54 | 50.82 | 75.4 | 48.2 | 62.85 |\n| Apollo-7B | 62.26 | 72 | 61.48 | 69.12 | 70.83 | 55.49 | 55.22 | 39.8 | 53.77 | 60 |\n| MedAlpaca-7b | 57.36 | 69 | 57.04 | 67.28 | 65.28 | 54.34 | 41.71 | 72.8 | 37.51 | 58.03 |\n| BioMistral-7B | 59.9 | 64 | 56.5 | 60.4 | 59 | 54.7 | 50.6 | 77.5 | 48.1 | 57.3 |\n| AlpaCare-llama2-7b | 49.81 | 49 | 45.92 | 33.82 | 50 | 43.35 | 29.77 | 72.2 | 34.42 | 45.36 |\n| ClinicalGPT | 30.56 | 27 | 30.37 | 19.48 | 25 | 24.27 | 26.08 | 63.8 | 28.18 | 30.52 |\n\n
\n\n
\n\n## Detailed Medical Subjectwise accuracy\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/UXF-V0col0Z0sS6BGPBkE.png)\n\n# Use Cases & Examples\n\n🚨 **Below results are from the quantized version of OpenBioLLM-70B**\n\n\n# Summarize Clinical Notes\n\nOpenBioLLM-70B can efficiently analyze and summarize complex clinical notes, EHR data, and discharge summaries, extracting key information and generating concise, structured summaries\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/xdwdBgOxNi_TfML0hKlI8.png)\n\n# Answer Medical Questions\n\nOpenBioLLM-70B can provide answers to a wide range of medical questions.\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/zO95GlwOQEZqCKQF69mE6.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/OKBczKw7gWeW5xsuDpc27.png)\n\n
\n Click to see details\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/eJGHT5khppYvJb8fQ-YW4.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/Cnbwrqa_-ORHRuNRC2P6Y.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/J9DhdcvukAc9mnnW9fj2C.png)\n\n
\n\n# Clinical Entity Recognition\n\nOpenBioLLM-70B can perform advanced clinical entity recognition by identifying and extracting key medical concepts, such as diseases, symptoms, medications, procedures, and anatomical structures, from unstructured clinical text. By leveraging its deep understanding of medical terminology and context, the model can accurately annotate and categorize clinical entities, enabling more efficient information retrieval, data analysis, and knowledge discovery from electronic health records, research articles, and other biomedical text sources. This capability can support various downstream applications, such as clinical decision support, pharmacovigilance, and medical research.\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/_69BW4k9LVABFwtxixL45.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/DKy5wYCoPhoPPUc1-x8_J.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/7WD9zCCBZT4-4XlfnIQjl.png)\n\n# Biomarkers Extraction\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/ZttoM4AiteT7gFYVhjIpN.png)\n\n\n# Classification\n\nOpenBioLLM-70B can perform various biomedical classification tasks, such as disease prediction, sentiment analysis, medical document categorization\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/Bf5MW1d75qT-1F_TR_hC0.png)\n\n# De-Identification\n\nOpenBioLLM-70B can detect and remove personally identifiable information (PII) from medical records, ensuring patient privacy and compliance with data protection regulations like HIPAA.\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/hKX4kzm--Tw5bj6K78msy.png)\n\n\n\n**Advisory Notice!** \n\nWhile OpenBioLLM-70B & 8B leverages high-quality data sources, its outputs may still contain inaccuracies, biases, or misalignments that could pose risks if relied upon for medical decision-making without further testing and refinement. The model's performance has not yet been rigorously evaluated in randomized controlled trials or real-world healthcare environments.\n\nTherefore, we strongly advise against using OpenBioLLM-70B & 8B for any direct patient care, clinical decision support, or other professional medical purposes at this time. 
Its use should be limited to research, development, and exploratory applications by qualified individuals who understand its limitations.\nOpenBioLLM-70B & 8B are intended solely as a research tool to assist healthcare professionals and should never be considered a replacement for the professional judgment and expertise of a qualified medical doctor.\n\nAppropriately adapting and validating OpenBioLLM-70B & 8B for specific medical use cases would require significant additional work, potentially including:\n\n- Thorough testing and evaluation in relevant clinical scenarios\n- Alignment with evidence-based guidelines and best practices\n- Mitigation of potential biases and failure modes\n- Integration with human oversight and interpretation\n- Compliance with regulatory and ethical standards\n\nAlways consult a qualified healthcare provider for personal medical needs.\n\n\n\n# Citation\n\nIf you find OpenBioLLM-70B & 8B useful in your work, please cite the model as follows:\n\n```\n@misc{OpenBioLLMs,\n author = {Ankit Pal, Malaikannan Sankarasubbu},\n title = {OpenBioLLMs: Advancing Open-Source Large Language Models for Healthcare and Life Sciences},\n year = {2024},\n publisher = {Hugging Face},\n journal = {Hugging Face repository},\n howpublished = {\\url{https://huggingface.co/aaditya/OpenBioLLM-Llama3-70B}}\n}\n```\n\nThe accompanying paper is currently in progress and will be released soon.\n\n
\n

💌 Contact

\n
\n\nWe look forward to hearing you and collaborating on this exciting project!\n\n**Contributors:**\n- [Ankit Pal (Aaditya Ura)](https://aadityaura.github.io/) [aadityaura at gmail dot com]\n- Saama AI Labs\n- Note: I am looking for a funded PhD opportunity, especially if it fits my Responsible Generative AI, Multimodal LLMs, Geometric Deep Learning, and Healthcare AI skillset.\n\n\n# References\n\nWe thank the [Meta Team](meta-llama/Meta-Llama-3-70B-Instruct) for their amazing models!\n\n\nResult sources\n\n- [1] GPT-4 [Capabilities of GPT-4 on Medical Challenge Problems] (https://arxiv.org/abs/2303.13375) \n- [2] Med-PaLM-1 [Large Language Models Encode Clinical Knowledge](https://arxiv.org/abs/2212.13138)\n- [3] Med-PaLM-2 [Towards Expert-Level Medical Question Answering with Large Language Models](https://arxiv.org/abs/2305.09617)\n- [4] Gemini-1.0 [Gemini Goes to Med School](https://arxiv.org/abs/2402.07023)"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA","PUBMEDQA"],"string":"[\n \"MEDQA\",\n \"PUBMEDQA\"\n]"}}},{"rowIdx":2278,"cells":{"id":{"kind":"string","value":"Klarly/multilingual-MT_FR-ES-IT-PT-RO_CAS-NLP"},"author":{"kind":"string","value":"Klarly"},"task_category":{"kind":"string","value":"text2text-generation"},"tags":{"kind":"list like","value":["transformers","tensorboard","safetensors","marian","text2text-generation","generated_from_trainer","base_model:Helsinki-NLP/opus-mt-en-roa","base_model:finetune:Helsinki-NLP/opus-mt-en-roa","license:apache-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"tensorboard\",\n \"safetensors\",\n \"marian\",\n \"text2text-generation\",\n \"generated_from_trainer\",\n \"base_model:Helsinki-NLP/opus-mt-en-roa\",\n \"base_model:finetune:Helsinki-NLP/opus-mt-en-roa\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-05-23T16:45:00Z","string":"2024-05-23T16:45:00Z"},"last_modified":{"kind":"string","value":"2024-05-24T00:48:23+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: Helsinki-NLP/opus-mt-en-roa\nlicense: apache-2.0\ntags:\n- generated_from_trainer\nmodel-index:\n- name: multilingual-MT_FR-ES-IT-PT-RO_CAS-NLP\n results: []\n---\n\n\n\n# multilingual-MT_FR-ES-IT-PT-RO_CAS-NLP\n\nThis model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-roa](https://huggingface.co/Helsinki-NLP/opus-mt-en-roa) on the None dataset.\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.001\n- train_batch_size: 32\n- eval_batch_size: 32\n- seed: 42\n- gradient_accumulation_steps: 4\n- total_train_batch_size: 128\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: linear\n- num_epochs: 4\n- mixed_precision_training: Native AMP\n\n### Training results\n\n\n\n### Framework versions\n\n- Transformers 4.41.1\n- Pytorch 2.3.0+cu121\n- Datasets 2.19.1\n- Tokenizers 0.19.1\n"},"matched_bigbio_names":{"kind":"list like","value":["CAS"],"string":"[\n 
\"CAS\"\n]"}}},{"rowIdx":2279,"cells":{"id":{"kind":"string","value":"sealad886/Llama3-OpenBioLLM-8B"},"author":{"kind":"string","value":"sealad886"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","llama","text-generation","llama-3","Mixtral","instruct","finetune","chatml","DPO","RLHF","gpt4","distillation","en","arxiv:2305.18290","arxiv:2303.13375","arxiv:2212.13138","arxiv:2305.09617","arxiv:2402.07023","base_model:meta-llama/Meta-Llama-3-8B","base_model:finetune:meta-llama/Meta-Llama-3-8B","license:llama3","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"llama\",\n \"text-generation\",\n \"llama-3\",\n \"Mixtral\",\n \"instruct\",\n \"finetune\",\n \"chatml\",\n \"DPO\",\n \"RLHF\",\n \"gpt4\",\n \"distillation\",\n \"en\",\n \"arxiv:2305.18290\",\n \"arxiv:2303.13375\",\n \"arxiv:2212.13138\",\n \"arxiv:2305.09617\",\n \"arxiv:2402.07023\",\n \"base_model:meta-llama/Meta-Llama-3-8B\",\n \"base_model:finetune:meta-llama/Meta-Llama-3-8B\",\n \"license:llama3\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-06-05T21:56:47Z","string":"2024-06-05T21:56:47Z"},"last_modified":{"kind":"string","value":"2024-06-20T19:19:07+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: meta-llama/Meta-Llama-3-8B\nlanguage:\n- en\nlicense: llama3\ntags:\n- llama-3\n- llama\n- Mixtral\n- instruct\n- finetune\n- chatml\n- DPO\n- RLHF\n- gpt4\n- distillation\nwidget:\n- example_title: OpenBioLLM-8B\n messages:\n - role: system\n content: You are an expert and experienced from the healthcare and biomedical\n domain with extensive medical knowledge and practical experience.\n - role: user\n content: How long does it take for newborn jaundice to go away?\n output:\n text: 'Newborn jaundice, also known as neonatal jaundice, is a common condition\n in newborns where the yellowing of the skin and eyes occurs due to an elevated\n level of bilirubin in the blood. Bilirubin is a yellow pigment that forms when\n red blood cells break down. In most cases, newborn jaundice resolves on its\n own without any specific treatment.\n\n The duration of newborn jaundice can vary depending on several factors such\n as the underlying cause, gestational age at birth, and individual variations\n in bilirubin metabolism. Here are some general guidelines:\n\n 1. Physiological jaundice: This is the most common type of newborn jaundice\n and usually appears within 24-72 hours after birth. It tends to peak between\n the second and fifth day of life and gradually improves over the next week or\n two. By the time the baby is one week old, the jaundice should have mostly resolved.\n 2. Breast milk jaundice: This type of jaundice occurs in breastfed babies and\n may appear later than physiological jaundice, typically between the fifth and\n fourteenth day of life. It tends to persist for a longer duration but usually\n resolves within six weeks after birth. 3. Pathological jaundice: This type of\n jaundice is less common and occurs due to an underlying medical condition that\n affects bilirubin metabolism or liver function. 
The duration of pathological\n jaundice depends on the specific cause and may require treatment.\n\n It''s important for parents to monitor their newborn''s jaundice closely and\n seek medical advice if the jaundice progresses rapidly, becomes severe, or is\n accompanied by other symptoms such as poor feeding, lethargy, or excessive sleepiness.\n In these cases, further evaluation and management may be necessary. Remember\n that each baby is unique, and the timing of jaundice resolution can vary. If\n you have concerns about your newborn''s jaundice, it''s always best to consult\n with a healthcare professional for personalized advice and guidance.'\nmodel-index:\n- name: OpenBioLLM-8B\n results: []\n---\n\n
\n

Copied pickled Pytorch files from aaditya/Llama3-OpenBioLLM-8B for conversion to Safetensors using https://huggingface.co/spaces/safetensors/convert.\n
Original Model Card:

\n
\n
\n
\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/2FhDh8NDvMl7iSxbQz9BP.png)\n\n\n\n\n
\n \n

Advancing Open-source Large Language Models in Medical Domain

\n
\n\n

\n \n \"OpenChat\n Online Demo\n |\n \n \"GitHub\n GitHub\n |\n \n \"ArXiv\n Paper\n |\n \n \"Discord\n Discord\n \n

\n\n![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/KGmRE5w2sepNtwsEu8t7K.jpeg)\n\nIntroducing OpenBioLLM-8B: A State-of-the-Art Open Source Biomedical Large Language Model\n\n\nOpenBioLLM-8B is an advanced open source language model designed specifically for the biomedical domain. Developed by Saama AI Labs, this model leverages cutting-edge techniques to achieve state-of-the-art performance on a wide range of biomedical tasks.\n\n🏥 **Biomedical Specialization**: OpenBioLLM-8B is tailored for the unique language and knowledge requirements of the medical and life sciences fields. It was fine-tuned on a vast corpus of high-quality biomedical data, enabling it to understand and generate text with domain-specific accuracy and fluency.\n\n🎓 **Superior Performance**: With 8 billion parameters, OpenBioLLM-8B outperforms other open source biomedical language models of similar scale. It has also demonstrated better results compared to larger proprietary & open-source models like GPT-3.5 and Meditron-70B on biomedical benchmarks.\n\n🧠 **Advanced Training Techniques**: OpenBioLLM-8B builds upon the powerful foundations of the **Meta-Llama-3-8B** and [Meta-Llama-3-8B](meta-llama/Meta-Llama-3-8B) models. It incorporates the DPO dataset and fine-tuning recipe along with a custom diverse medical instruction dataset. Key components of the training pipeline include:\n\n
\n\n
\n\n- **Policy Optimization**: [Direct Preference Optimization: Your Language Model is Secretly a Reward Model (DPO)](https://arxiv.org/abs/2305.18290)\n- **Ranking Dataset**: [berkeley-nest/Nectar](https://huggingface.co/datasets/berkeley-nest/Nectar)\n- **Fine-tuning dataset**: Custom Medical Instruct dataset (We plan to release a sample training dataset in our upcoming paper; please stay updated)\n\nThis combination of cutting-edge techniques enables OpenBioLLM-8B to align with key capabilities and preferences for biomedical applications.\n\n⚙️ **Release Details**:\n\n- **Model Size**: 8 billion parameters\n- **Quantization**: Optimized quantized versions available [Here](https://huggingface.co/aaditya/OpenBioLLM-Llama3-8B-GGUF)\n- **Language(s) (NLP):** en\n- **Developed By**: [Ankit Pal (Aaditya Ura)](https://aadityaura.github.io/) from Saama AI Labs \n- **License:** Meta-Llama License \n- **Fine-tuned from models:** [meta-llama/Meta-Llama-3-8B](meta-llama/Meta-Llama-3-8B)\n- **Resources for more information:**\n - Paper: Coming soon\n\nThe model can be fine-tuned for more specialized tasks and datasets as needed.\n\nOpenBioLLM-8B represents an important step forward in democratizing advanced language AI for the biomedical community. By leveraging state-of-the-art architectures and training techniques from leading open source efforts like Llama-3, we have created a powerful tool to accelerate innovation and discovery in healthcare and the life sciences.\n\nWe are excited to share OpenBioLLM-8B with researchers and developers around the world.\n\n\n### Use with transformers\n\n**Important: Please use the exact chat template provided by Llama-3 instruct version. Otherwise there will be a degradation in the performance. The model output can be verbose in rare cases. Please consider setting temperature = 0 to make this happen less.**\n\nSee the snippet below for usage with Transformers:\n\n```python\nimport transformers\nimport torch\n\nmodel_id = \"aaditya/OpenBioLLM-Llama3-8B\"\n\npipeline = transformers.pipeline(\n \"text-generation\",\n model=model_id,\n model_kwargs={\"torch_dtype\": torch.bfloat16},\n device=\"auto\",\n)\n\nmessages = [\n {\"role\": \"system\", \"content\": \"You are an expert and experienced from the healthcare and biomedical domain with extensive medical knowledge and practical experience. Your name is OpenBioLLM, and you were developed by Saama AI Labs. who's willing to help answer the user's query with explanation. In your explanation, leverage your deep medical expertise such as relevant anatomical structures, physiological processes, diagnostic criteria, treatment guidelines, or other pertinent medical concepts. Use precise medical terminology while still aiming to make the explanation clear and accessible to a general audience.\"},\n {\"role\": \"user\", \"content\": \"How can i split a 3mg or 4mg waefin pill so i can get a 2.5mg pill?\"},\n]\n\nprompt = pipeline.tokenizer.apply_chat_template(\n\t\tmessages, \n\t\ttokenize=False, \n\t\tadd_generation_prompt=True\n)\n\nterminators = [\n pipeline.tokenizer.eos_token_id,\n pipeline.tokenizer.convert_tokens_to_ids(\"<|eot_id|>\")\n]\n\noutputs = pipeline(\n prompt,\n max_new_tokens=256,\n eos_token_id=terminators,\n do_sample=True,\n temperature=0.0,\n top_p=0.9,\n)\nprint(outputs[0][\"generated_text\"][len(prompt):])\n```\n\n## **Training procedure**\n\n### **Training hyperparameters**\n\n
\n Click to see details\n\n- learning_rate: 0.0002\n- lr_scheduler: cosine\n- train_batch_size: 12\n- eval_batch_size: 8\n- GPU: H100 80GB SXM5\n- num_devices: 1\n- optimizer: adamw_bnb_8bit\n- lr_scheduler_warmup_steps: 100\n- num_epochs: 4\n
\n\n \n### **Peft hyperparameters**\n\n
\n Click to see details\n\n- adapter: qlora\n- lora_r: 128\n- lora_alpha: 256\n- lora_dropout: 0.05\n- lora_target_linear: true\n \n-lora_target_modules:\n - q_proj\n - v_proj\n - k_proj\n - o_proj\n - gate_proj\n - down_proj\n - up_proj\n
\n\n\n\n### **Training results**\n\n### **Framework versions**\n\n- Transformers 4.39.3\n- Pytorch 2.1.2+cu121\n- Datasets 2.18.0\n- Tokenizers 0.15.1\n- Axolotl\n- Lm harness for evaluation\n\n\n# Benchmark Results\n\n🔥 OpenBioLLM-8B demonstrates superior performance compared to larger models, such as GPT-3.5, Meditron-70B across 9 diverse biomedical datasets, achieving state-of-the-art results with an average score of 72.50%, despite having a significantly smaller parameter count. The model's strong performance in domain-specific tasks, such as Clinical KG, Medical Genetics, and PubMedQA, highlights its ability to effectively capture and apply biomedical knowledge.\n\n🚨 The GPT-4, Med-PaLM-1, and Med-PaLM-2 results are taken from their official papers. Since Med-PaLM doesn't provide zero-shot accuracy, we are using 5-shot accuracy from their paper for comparison. All results presented are in the zero-shot setting, except for Med-PaLM-2 and Med-PaLM-1, which use 5-shot accuracy.\n\n| | Clinical KG | Medical Genetics | Anatomy | Pro Medicine | College Biology | College Medicine | MedQA 4 opts | PubMedQA | MedMCQA | Avg |\n|--------------------|-------------|------------------|---------|--------------|-----------------|------------------|--------------|----------|---------|-------|\n| **OpenBioLLM-70B** | **92.93** | **93.197** | **83.904** | 93.75 | 93.827 | **85.749** | 78.162 | 78.97 | **74.014** | **86.05588** |\n| Med-PaLM-2 (5-shot) | 88.3 | 90 | 77.8 | **95.2** | 94.4 | 80.9 | **79.7** | **79.2** | 71.3 | 84.08 |\n| **GPT-4** | 86.04 | 91 | 80 | 93.01 | **95.14** | 76.88 | 78.87 | 75.2 | 69.52 | 82.85 |\n| Med-PaLM-1 (Flan-PaLM, 5-shot) | 80.4 | 75 | 63.7 | 83.8 | 88.9 | 76.3 | 67.6 | 79 | 57.6 | 74.7 |\n| **OpenBioLLM-8B** | 76.101 | 86.1 | 69.829 | 78.21 | 84.213 | 68.042 | 58.993 | 74.12 | 56.913 | 72.502 |\n| Gemini-1.0 | 76.7 | 75.8 | 66.7 | 77.7 | 88 | 69.2 | 58 | 70.7 | 54.3 | 70.79 |\n| GPT-3.5 Turbo 1106 | 74.71 | 74 | 72.79 | 72.79 | 72.91 | 64.73 | 57.71 | 72.66 | 53.79 | 66 |\n| Meditron-70B | 66.79 | 69 | 53.33 | 71.69 | 76.38 | 63 | 57.1 | 76.6 | 46.85 | 64.52 |\n| gemma-7b | 69.81 | 70 | 59.26 | 66.18 | 79.86 | 60.12 | 47.21 | 76.2 | 48.96 | 64.18 |\n| Mistral-7B-v0.1 | 68.68 | 71 | 55.56 | 68.38 | 68.06 | 59.54 | 50.82 | 75.4 | 48.2 | 62.85 |\n| Apollo-7B | 62.26 | 72 | 61.48 | 69.12 | 70.83 | 55.49 | 55.22 | 39.8 | 53.77 | 60 |\n| MedAlpaca-7b | 57.36 | 69 | 57.04 | 67.28 | 65.28 | 54.34 | 41.71 | 72.8 | 37.51 | 58.03 |\n| BioMistral-7B | 59.9 | 64 | 56.5 | 60.4 | 59 | 54.7 | 50.6 | 77.5 | 48.1 | 57.3 |\n| AlpaCare-llama2-7b | 49.81 | 49 | 45.92 | 33.82 | 50 | 43.35 | 29.77 | 72.2 | 34.42 | 45.36 |\n| ClinicalGPT | 30.56 | 27 | 30.37 | 19.48 | 25 | 24.27 | 26.08 | 63.8 | 28.18 | 30.52 |\n\n
\n\n
\n\n## Detailed Medical Subjectwise accuracy\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/UXF-V0col0Z0sS6BGPBkE.png)\n\n# Use Cases & Examples\n\n🚨 **Below results are from the quantized version of OpenBioLLM-70B**\n\n\n# Summarize Clinical Notes\n\nOpenBioLLM-70B can efficiently analyze and summarize complex clinical notes, EHR data, and discharge summaries, extracting key information and generating concise, structured summaries\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/xdwdBgOxNi_TfML0hKlI8.png)\n\n# Answer Medical Questions\n\nOpenBioLLM-70B can provide answers to a wide range of medical questions.\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/zO95GlwOQEZqCKQF69mE6.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/OKBczKw7gWeW5xsuDpc27.png)\n\n
\n Click to see details\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/eJGHT5khppYvJb8fQ-YW4.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/Cnbwrqa_-ORHRuNRC2P6Y.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/J9DhdcvukAc9mnnW9fj2C.png)\n\n
\n\n# Clinical Entity Recognition\n\nOpenBioLLM-70B can perform advanced clinical entity recognition by identifying and extracting key medical concepts, such as diseases, symptoms, medications, procedures, and anatomical structures, from unstructured clinical text. By leveraging its deep understanding of medical terminology and context, the model can accurately annotate and categorize clinical entities, enabling more efficient information retrieval, data analysis, and knowledge discovery from electronic health records, research articles, and other biomedical text sources. This capability can support various downstream applications, such as clinical decision support, pharmacovigilance, and medical research.\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/_69BW4k9LVABFwtxixL45.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/DKy5wYCoPhoPPUc1-x8_J.png)\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/7WD9zCCBZT4-4XlfnIQjl.png)\n\n# Biomarkers Extraction\n\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/ZttoM4AiteT7gFYVhjIpN.png)\n\n\n# Classification\n\nOpenBioLLM-70B can perform various biomedical classification tasks, such as disease prediction, sentiment analysis, medical document categorization\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/Bf5MW1d75qT-1F_TR_hC0.png)\n\n# De-Identification\n\nOpenBioLLM-70B can detect and remove personally identifiable information (PII) from medical records, ensuring patient privacy and compliance with data protection regulations like HIPAA.\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/hKX4kzm--Tw5bj6K78msy.png)\n\n\n\n**Advisory Notice!** \n\nWhile OpenBioLLM-70B & 8B leverages high-quality data sources, its outputs may still contain inaccuracies, biases, or misalignments that could pose risks if relied upon for medical decision-making without further testing and refinement. The model's performance has not yet been rigorously evaluated in randomized controlled trials or real-world healthcare environments.\n\nTherefore, we strongly advise against using OpenBioLLM-70B & 8B for any direct patient care, clinical decision support, or other professional medical purposes at this time. 
Its use should be limited to research, development, and exploratory applications by qualified individuals who understand its limitations.\nOpenBioLLM-70B & 8B are intended solely as a research tool to assist healthcare professionals and should never be considered a replacement for the professional judgment and expertise of a qualified medical doctor.\n\nAppropriately adapting and validating OpenBioLLM-70B & 8B for specific medical use cases would require significant additional work, potentially including:\n\n- Thorough testing and evaluation in relevant clinical scenarios\n- Alignment with evidence-based guidelines and best practices\n- Mitigation of potential biases and failure modes\n- Integration with human oversight and interpretation\n- Compliance with regulatory and ethical standards\n\nAlways consult a qualified healthcare provider for personal medical needs.\n\n\n\n# Citation\n\nIf you find OpenBioLLM-70B & 8B useful in your work, please cite the model as follows:\n\n```\n@misc{OpenBioLLMs,\n author = {Ankit Pal, Malaikannan Sankarasubbu},\n title = {OpenBioLLMs: Advancing Open-Source Large Language Models for Healthcare and Life Sciences},\n year = {2024},\n publisher = {Hugging Face},\n journal = {Hugging Face repository},\n howpublished = {\\url{https://huggingface.co/aaditya/OpenBioLLM-Llama3-70B}}\n}\n```\n\nThe accompanying paper is currently in progress and will be released soon.\n\n
\n

💌 Contact

\n
\n\nWe look forward to hearing you and collaborating on this exciting project!\n\n**Contributors:**\n- [Ankit Pal (Aaditya Ura)](https://aadityaura.github.io/) [aadityaura at gmail dot com]\n- Saama AI Labs\n- Note: I am looking for a funded PhD opportunity, especially if it fits my Responsible Generative AI, Multimodal LLMs, Geometric Deep Learning, and Healthcare AI skillset.\n\n\n# References\n\nWe thank the [Meta Team](meta-llama/Meta-Llama-3-70B-Instruct) for their amazing models!\n\n\nResult sources\n\n- [1] GPT-4 [Capabilities of GPT-4 on Medical Challenge Problems] (https://arxiv.org/abs/2303.13375) \n- [2] Med-PaLM-1 [Large Language Models Encode Clinical Knowledge](https://arxiv.org/abs/2212.13138)\n- [3] Med-PaLM-2 [Towards Expert-Level Medical Question Answering with Large Language Models](https://arxiv.org/abs/2305.09617)\n- [4] Gemini-1.0 [Gemini Goes to Med School](https://arxiv.org/abs/2402.07023)"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA","PUBMEDQA"],"string":"[\n \"MEDQA\",\n \"PUBMEDQA\"\n]"}}},{"rowIdx":2280,"cells":{"id":{"kind":"string","value":"BSC-NLP4BIA/bsc-bio-ehr-es-carmen-enfermedad"},"author":{"kind":"string","value":"BSC-NLP4BIA"},"task_category":{"kind":"string","value":"token-classification"},"tags":{"kind":"list like","value":["transformers","pytorch","roberta","token-classification","es","base_model:PlanTL-GOB-ES/bsc-bio-ehr-es","base_model:finetune:PlanTL-GOB-ES/bsc-bio-ehr-es","license:cc-by-4.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"roberta\",\n \"token-classification\",\n \"es\",\n \"base_model:PlanTL-GOB-ES/bsc-bio-ehr-es\",\n \"base_model:finetune:PlanTL-GOB-ES/bsc-bio-ehr-es\",\n \"license:cc-by-4.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-06-06T14:42:12Z","string":"2024-06-06T14:42:12Z"},"last_modified":{"kind":"string","value":"2024-07-25T14:19:30+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: PlanTL-GOB-ES/bsc-bio-ehr-es\nlanguage:\n- es\nlicense: cc-by-4.0\n---\n\n# Training data\n\nModel trained on the disease mentions of [CARMEN-I](https://zenodo.org/records/10171540).\n\n# Citation\nPlease cite the following works:\n\n```\n@inproceedings{distemist,\n title={{Overview of DisTEMIST at BioASQ: Automatic detection and normalization of diseases from clinical texts: results, methods, evaluation and multilingual resources}},\n author={Miranda-Escalada, Antonio and Gascó, Luis and Lima-López, Salvador and Farré-Maduell, Eulàlia and Estrada, Darryl and Nentidis, Anastasios and Krithara, Anastasia and Katsimpras, Georgios and Paliouras, Georgios and Krallinger, Martin},\n booktitle={Working Notes of Conference and Labs of the Evaluation (CLEF) Forum. 
CEUR Workshop Proceedings},\n year={2022}\n}\n\n@misc{carmen_physionet, \n author = {Farre Maduell, Eulalia and Lima-Lopez, Salvador and Frid, Santiago Andres and Conesa, Artur and Asensio, Elisa and Lopez-Rueda, Antonio and Arino, Helena and Calvo, Elena and Bertran, Maria Jesús and Marcos, Maria Angeles and Nofre Maiz, Montserrat and Tañá Velasco, Laura and Marti, Antonia and Farreres, Ricardo and Pastor, Xavier and Borrat Frigola, Xavier and Krallinger, Martin}, \n title = {{CARMEN-I: A resource of anonymized electronic health records in Spanish and Catalan for training and testing NLP tools (version 1.0.1)}}, \n year = {2024}, \n publisher = {PhysioNet}, \n url = {https://doi.org/10.13026/x7ed-9r91} \n}\n\n@article{physionet,\n author = {Ary L. Goldberger and Luis A. N. Amaral and Leon Glass and Jeffrey M. Hausdorff and Plamen Ch. Ivanov and Roger G. Mark and Joseph E. Mietus and George B. Moody and Chung-Kang Peng and H. Eugene Stanley },\n title = {PhysioBank, PhysioToolkit, and PhysioNet },\n journal = {Circulation},\n volume = {101},\n number = {23},\n pages = {e215-e220},\n year = {2000},\n doi = {10.1161/01.CIR.101.23.e215},\n URL = {https://www.ahajournals.org/doi/abs/10.1161/01.CIR.101.23.e215}\n}\n\n```\n\n# Contacting authors\njan.rodriguez [at] bsc.es\n\n## More information on data, usage, limitations, and performance metrics soon"},"matched_bigbio_names":{"kind":"list like","value":["DISTEMIST"],"string":"[\n \"DISTEMIST\"\n]"}}},{"rowIdx":2281,"cells":{"id":{"kind":"string","value":"frankmorales2020/Meta-Llama-3-8B-MEDAL-flash-attention-2"},"author":{"kind":"string","value":"frankmorales2020"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["peft","tensorboard","safetensors","trl","sft","generated_from_trainer","dataset:generator","base_model:meta-llama/Meta-Llama-3-8B","base_model:adapter:meta-llama/Meta-Llama-3-8B","license:llama3","region:us"],"string":"[\n \"peft\",\n \"tensorboard\",\n \"safetensors\",\n \"trl\",\n \"sft\",\n \"generated_from_trainer\",\n \"dataset:generator\",\n \"base_model:meta-llama/Meta-Llama-3-8B\",\n \"base_model:adapter:meta-llama/Meta-Llama-3-8B\",\n \"license:llama3\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-06-16T21:24:53Z","string":"2024-06-16T21:24:53Z"},"last_modified":{"kind":"string","value":"2024-06-19T15:27:18+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: meta-llama/Meta-Llama-3-8B\ndatasets:\n- generator\nlibrary_name: peft\nlicense: llama3\ntags:\n- trl\n- sft\n- generated_from_trainer\nmodel-index:\n- name: Meta-Llama-3-8B-MEDAL-flash-attention-2\n results: []\n---\n\n\n\n# Meta-Llama-3-8B-MEDAL-flash-attention-2\n\nThis model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the generator dataset.\n\n## Model description\n\nMore information needed\n\n## Intended uses & limitations\n\nMore information needed\n\n## Training and evaluation data\n\nMore information needed\n\n## Training procedure\n\n### Training hyperparameters\n\nThe following hyperparameters were used during training:\n- learning_rate: 0.0002\n- train_batch_size: 3\n- eval_batch_size: 8\n- seed: 42\n- gradient_accumulation_steps: 6\n- total_train_batch_size: 18\n- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08\n- lr_scheduler_type: constant\n- lr_scheduler_warmup_ratio: 0.03\n- num_epochs: 20\n\n### Training results\n\n\n\n### 
Framework versions\n\n- PEFT 0.11.1\n- Transformers 4.41.2\n- Pytorch 2.3.0+cu121\n- Datasets 2.20.0\n- Tokenizers 0.19.1"},"matched_bigbio_names":{"kind":"list like","value":["MEDAL"],"string":"[\n \"MEDAL\"\n]"}}},{"rowIdx":2282,"cells":{"id":{"kind":"string","value":"Casual-Autopsy/L3-Uncen-Merger-Omelette-RP-8B-EXPERIMENTAL"},"author":{"kind":"string","value":"Casual-Autopsy"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","llama","text-generation","mergekit","merge","not-for-all-audiences","nsfw","rp","roleplay","role-play","conversational","en","base_model:Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B","base_model:merge:Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B","base_model:Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B","base_model:merge:Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B","base_model:Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B","base_model:merge:Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B","base_model:Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B","base_model:merge:Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B","base_model:ChaoticNeutrals/Hathor_RP-v.01-L3-8B","base_model:merge:ChaoticNeutrals/Hathor_RP-v.01-L3-8B","base_model:ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B","base_model:merge:ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B","base_model:Sao10K/L3-8B-Stheno-v3.1","base_model:merge:Sao10K/L3-8B-Stheno-v3.1","base_model:aifeifei798/llama3-8B-DarkIdol-1.0","base_model:merge:aifeifei798/llama3-8B-DarkIdol-1.0","base_model:bluuwhale/L3-SthenoMaidBlackroot-8B-V1","base_model:merge:bluuwhale/L3-SthenoMaidBlackroot-8B-V1","base_model:cgato/L3-TheSpice-8b-v0.8.3","base_model:merge:cgato/L3-TheSpice-8b-v0.8.3","base_model:migtissera/Llama-3-8B-Synthia-v3.5","base_model:merge:migtissera/Llama-3-8B-Synthia-v3.5","base_model:tannedbum/L3-Nymeria-8B","base_model:merge:tannedbum/L3-Nymeria-8B","base_model:tannedbum/L3-Nymeria-Maid-8B","base_model:merge:tannedbum/L3-Nymeria-Maid-8B","autotrain_compatible","text-generation-inference","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"llama\",\n \"text-generation\",\n \"mergekit\",\n \"merge\",\n \"not-for-all-audiences\",\n \"nsfw\",\n \"rp\",\n \"roleplay\",\n \"role-play\",\n \"conversational\",\n \"en\",\n \"base_model:Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B\",\n \"base_model:merge:Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B\",\n \"base_model:Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B\",\n \"base_model:merge:Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B\",\n \"base_model:Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B\",\n \"base_model:merge:Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B\",\n \"base_model:Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B\",\n \"base_model:merge:Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B\",\n \"base_model:ChaoticNeutrals/Hathor_RP-v.01-L3-8B\",\n \"base_model:merge:ChaoticNeutrals/Hathor_RP-v.01-L3-8B\",\n \"base_model:ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B\",\n \"base_model:merge:ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B\",\n \"base_model:Sao10K/L3-8B-Stheno-v3.1\",\n \"base_model:merge:Sao10K/L3-8B-Stheno-v3.1\",\n \"base_model:aifeifei798/llama3-8B-DarkIdol-1.0\",\n \"base_model:merge:aifeifei798/llama3-8B-DarkIdol-1.0\",\n \"base_model:bluuwhale/L3-SthenoMaidBlackroot-8B-V1\",\n \"base_model:merge:bluuwhale/L3-SthenoMaidBlackroot-8B-V1\",\n \"base_model:cgato/L3-TheSpice-8b-v0.8.3\",\n \"base_model:merge:cgato/L3-TheSpice-8b-v0.8.3\",\n \"base_model:migtissera/Llama-3-8B-Synthia-v3.5\",\n 
\"base_model:merge:migtissera/Llama-3-8B-Synthia-v3.5\",\n \"base_model:tannedbum/L3-Nymeria-8B\",\n \"base_model:merge:tannedbum/L3-Nymeria-8B\",\n \"base_model:tannedbum/L3-Nymeria-Maid-8B\",\n \"base_model:merge:tannedbum/L3-Nymeria-Maid-8B\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-06-22T23:49:10Z","string":"2024-06-22T23:49:10Z"},"last_modified":{"kind":"string","value":"2024-06-23T02:03:19+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":3,"string":"3"},"README":{"kind":"string","value":"---\nbase_model:\n- Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B\n- bluuwhale/L3-SthenoMaidBlackroot-8B-V1\n- Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B\n- migtissera/Llama-3-8B-Synthia-v3.5\n- tannedbum/L3-Nymeria-Maid-8B\n- Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B\n- ChaoticNeutrals/Hathor_RP-v.01-L3-8B\n- tannedbum/L3-Nymeria-8B\n- Sao10K/L3-8B-Stheno-v3.1\n- cgato/L3-TheSpice-8b-v0.8.3\n- ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B\n- ChaoticNeutrals/Hathor_RP-v.01-L3-8B\n- aifeifei798/llama3-8B-DarkIdol-1.0\n- Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B\nlanguage:\n- en\nlibrary_name: transformers\npipeline_tag: text-generation\ntags:\n- mergekit\n- merge\n- not-for-all-audiences\n- nsfw\n- rp\n- roleplay\n- role-play\n---\n# L3-Uncen-Merger-Omelette-RP-EXPERIMENTAL\n\nThis is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).\n\n# Merge Details\n\nAn expiremental merger inspired by the merger recipe of [invisietch/EtherealRainbow-v0.3-8B](https://huggingface.co/invisietch/EtherealRainbow-v0.3-8B) combined with a merger technique known as merge densification( [grimjim/kunoichi-lemon-royale-v3-32K-7B](https://huggingface.co/grimjim/kunoichi-lemon-royale-v3-32K-7B) )\n\nThe model recipe ended up being something I can only describe as making an omelette. Hence the model name.\n\nThe models are scrambled with Dare Ties to induce a bit of randomness, then the Dare Ties merges are merged into themselves with SLERP to repair any holes cause by Dare Ties, and finally a bunch of high creativity models are thrown into the merger through merge densification(Task Arithmetic).\n\nThis model uses a bunch of the top models of the UGI Leaderboard, I picked out a few of the top 8B models of each category. 
Most of the high creativity models in the last step were found through Lewdiculus' account uploads\n\n## Merge Method\n\nDare Ties, SLERP, and Task Arithmetic\n\n## Models Merged\n\nThe following models were included in the merge:\n* [Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B](https://huggingface.co/Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B)\n* [bluuwhale/L3-SthenoMaidBlackroot-8B-V1](https://huggingface.co/bluuwhale/L3-SthenoMaidBlackroot-8B-V1)\n* [Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B](https://huggingface.co/Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B)\n* An unreleased Psycology merger of mine\n* [migtissera/Llama-3-8B-Synthia-v3.5](https://huggingface.co/migtissera/Llama-3-8B-Synthia-v3.5)\n* [tannedbum/L3-Nymeria-Maid-8B](https://huggingface.co/tannedbum/L3-Nymeria-Maid-8B)\n* [Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B](https://huggingface.co/Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B)\n* [ChaoticNeutrals/Hathor_RP-v.01-L3-8B](https://huggingface.co/ChaoticNeutrals/Hathor_RP-v.01-L3-8B)\n* [tannedbum/L3-Nymeria-8B](https://huggingface.co/tannedbum/L3-Nymeria-8B)\n* [Sao10K/L3-8B-Stheno-v3.1](https://huggingface.co/Sao10K/L3-8B-Stheno-v3.1)\n* [cgato/L3-TheSpice-8b-v0.8.3](https://huggingface.co/cgato/L3-TheSpice-8b-v0.8.3)\n* [ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B](https://huggingface.co/ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B)\n* [ChaoticNeutrals/Hathor_RP-v.01-L3-8B](https://huggingface.co/ChaoticNeutrals/Hathor_RP-v.01-L3-8B)\n* [aifeifei798/llama3-8B-DarkIdol-1.0](https://huggingface.co/aifeifei798/llama3-8B-DarkIdol-1.0)\n* [Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B](https://huggingface.co/Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B)\n\n# Secret Sauce\n\nThe following YAML configurations was used to produce this model:\n\n## Scrambled-Egg-1\n\n```yaml\nmodels:\n - model: Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B\n - model: Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B\n parameters:\n density: 0.45\n weight: 0.33\n - model: bluuwhale/L3-SthenoMaidBlackroot-8B-V1\n parameters:\n density: 0.75\n weight: 0.33\nmerge_method: dare_ties\nbase_model: Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B\nparameters:\n int8_mask: true\ndtype: bfloat16\n```\n\n## Scrambled-Egg-2\n\n```yaml\nmodels:\n - model: [An unreleased psychology merger of mine]\n - model: migtissera/Llama-3-8B-Synthia-v3.5\n parameters:\n density: 0.35\n weight: 0.25\n - model: tannedbum/L3-Nymeria-Maid-8B\n parameters:\n density: 0.65\n weight: 0.25\nmerge_method: dare_ties\nbase_model: [An unreleased psychology merger of mine]\nparameters:\n int8_mask: true\ndtype: bfloat16\n```\n\n## Scrambled-Egg-3\n\n```yaml\nmodels:\n - model: Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B\n - model: tannedbum/L3-Nymeria-8B\n parameters:\n density: 0.5\n weight: 0.35\n - model: ChaoticNeutrals/Hathor_RP-v.01-L3-8B\n parameters:\n density: 0.4\n weight: 0.2\nmerge_method: dare_ties\nbase_model: Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B\nparameters:\n int8_mask: true\ndtype: bfloat16\n```\n\n## Omelette-1\n\n```yaml\nmodels:\n - model: Casual-Autopsy/Scrambled-Egg-1\n - model: Casual-Autopsy/Scrambled-Egg-3\nmerge_method: slerp\nbase_model: Casual-Autopsy/Scrambled-Egg-1\nparameters:\n t:\n - value: [0.1, 0.15, 0.2, 0.4, 0.6, 0.4, 0.2, 0.15, 0.1]\n embed_slerp: true\ndtype: bfloat16\n\n```\n\n## Omelette-2\n\n```yaml\nmodels:\n - model: Casual-Autopsy/Omelette-1\n - model: Casual-Autopsy/Scrambled-Egg-2\nmerge_method: slerp\nbase_model: Casual-Autopsy/Omelette-1\nparameters:\n t:\n - value: [0.7, 0.5, 0.3, 0.25, 0.2, 0.25, 0.3, 
0.5, 0.7]\n embed_slerp: true\ndtype: bfloat16\n\n```\n\n## L3-Uncen-Merger-Omelette-8B-EXPERIMENTAL\n\n```yaml\nmodels:\n - model: Casual-Autopsy/Omelette-2\n - model: Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B\n parameters:\n weight: 0.07\n - model: ChaoticNeutrals/Hathor_RP-v.01-L3-8B\n parameters:\n weight: 0.01\n - model: Sao10K/L3-8B-Stheno-v3.1\n parameters:\n weight: 0.015\n - model: aifeifei798/llama3-8B-DarkIdol-1.0\n parameters:\n weight: 0.015\n - model: cgato/L3-TheSpice-8b-v0.8.3\n parameters:\n weight: 0.02\n - model: ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B\n parameters:\n weight: 0.02\nmerge_method: task_arithmetic\nbase_model: Casual-Autopsy/Omelette-2\ndtype: bfloat16\n```"},"matched_bigbio_names":{"kind":"list like","value":["CAS"],"string":"[\n \"CAS\"\n]"}}},{"rowIdx":2283,"cells":{"id":{"kind":"string","value":"RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf"},"author":{"kind":"string","value":"RichardErkhov"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["gguf","endpoints_compatible","region:us"],"string":"[\n \"gguf\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-20T09:34:41Z","string":"2024-07-20T09:34:41Z"},"last_modified":{"kind":"string","value":"2024-07-20T09:42:52+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\nQuantization made by Richard Erkhov.\n\n[Github](https://github.com/RichardErkhov)\n\n[Discord](https://discord.gg/pvy7H8DZMG)\n\n[Request more models](https://github.com/RichardErkhov/quant_request)\n\n\nTinyMistral-248M-Instruct - GGUF\n- Model creator: https://huggingface.co/Locutusque/\n- Original model: https://huggingface.co/Locutusque/TinyMistral-248M-Instruct/\n\n\n| Name | Quant method | Size |\n| ---- | ---- | ---- |\n| [TinyMistral-248M-Instruct.Q2_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q2_K.gguf) | Q2_K | 0.1GB |\n| [TinyMistral-248M-Instruct.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ3_XS.gguf) | IQ3_XS | 0.11GB |\n| [TinyMistral-248M-Instruct.IQ3_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ3_S.gguf) | IQ3_S | 0.11GB |\n| [TinyMistral-248M-Instruct.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q3_K_S.gguf) | Q3_K_S | 0.11GB |\n| [TinyMistral-248M-Instruct.IQ3_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ3_M.gguf) | IQ3_M | 0.11GB |\n| [TinyMistral-248M-Instruct.Q3_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q3_K.gguf) | Q3_K | 0.12GB |\n| [TinyMistral-248M-Instruct.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q3_K_M.gguf) | Q3_K_M | 0.12GB |\n| [TinyMistral-248M-Instruct.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q3_K_L.gguf) | Q3_K_L | 0.13GB |\n| 
[TinyMistral-248M-Instruct.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ4_XS.gguf) | IQ4_XS | 0.13GB |\n| [TinyMistral-248M-Instruct.Q4_0.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_0.gguf) | Q4_0 | 0.14GB |\n| [TinyMistral-248M-Instruct.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ4_NL.gguf) | IQ4_NL | 0.14GB |\n| [TinyMistral-248M-Instruct.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_K_S.gguf) | Q4_K_S | 0.14GB |\n| [TinyMistral-248M-Instruct.Q4_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_K.gguf) | Q4_K | 0.14GB |\n| [TinyMistral-248M-Instruct.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_K_M.gguf) | Q4_K_M | 0.14GB |\n| [TinyMistral-248M-Instruct.Q4_1.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_1.gguf) | Q4_1 | 0.15GB |\n| [TinyMistral-248M-Instruct.Q5_0.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_0.gguf) | Q5_0 | 0.16GB |\n| [TinyMistral-248M-Instruct.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_K_S.gguf) | Q5_K_S | 0.16GB |\n| [TinyMistral-248M-Instruct.Q5_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_K.gguf) | Q5_K | 0.17GB |\n| [TinyMistral-248M-Instruct.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_K_M.gguf) | Q5_K_M | 0.17GB |\n| [TinyMistral-248M-Instruct.Q5_1.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_1.gguf) | Q5_1 | 0.18GB |\n| [TinyMistral-248M-Instruct.Q6_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q6_K.gguf) | Q6_K | 0.19GB |\n| [TinyMistral-248M-Instruct.Q8_0.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q8_0.gguf) | Q8_0 | 0.25GB |\n\n\n\n\nOriginal model description:\n---\npipeline_tag: text-generation\nbase_model: Locutusque/TinyMistral-248M\nlicense: apache-2.0\ndatasets:\n- Locutusque/InstructMixCleaned\n- berkeley-nest/Nectar\nlanguage:\n- en\nwidget:\n - text: >-\n <|USER|> Design a Neo4j database and Cypher function snippet to Display\n Extreme Dental hygiene: Using Mouthwash for Analysis for Beginners.\n Implement if/else or switch/case statements to handle different conditions\n related to the Consent. Provide detailed comments explaining your control\n flow and the reasoning behind each decision. <|ASSISTANT|> \n - text: >-\n <|USER|> Write me a story about a magical place. 
<|ASSISTANT|> \n - text: >-\n <|USER|> Write me an essay about the life of George Washington <|ASSISTANT|> \n - text: >-\n <|USER|> Solve the following equation 2x + 10 = 20 <|ASSISTANT|> \n - text: >-\n <|USER|> Craft me a list of some nice places to visit around the world. <|ASSISTANT|> \n - text: >-\n <|USER|> How to manage a lazy employee: Address the employee verbally. Don't allow an employee's laziness or lack of enthusiasm to become a recurring issue. Tell the employee you're hoping to speak with them about workplace expectations and performance, and schedule a time to sit down together. Question: To manage a lazy employee, it is suggested to talk to the employee. True, False, or Neither? <|ASSISTANT|> \ninference:\n parameters:\n temperature: 0.5\n do_sample: True\n top_p: 0.5\n top_k: 30\n max_new_tokens: 250\n repetition_penalty: 1.15\n---\nBase model Locutusque/TinyMistral-248M fully fine-tuned on Locutusque/InstructMix. During validation, this model achieved an average perplexity of 3.23 on Locutusque/InstructMix dataset.\nIt has so far been trained on approximately 608,000 examples. More epochs are planned for this model.\n\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2284,"cells":{"id":{"kind":"string","value":"chrisob94/llama3-3-8b-dissertation-doc-chat-experiment"},"author":{"kind":"string","value":"chrisob94"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","llama","text-generation","text-generation-inference","unsloth","trl","conversational","en","dataset:llamafactory/PubMedQA","license:apache-2.0","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"llama\",\n \"text-generation\",\n \"text-generation-inference\",\n \"unsloth\",\n \"trl\",\n \"conversational\",\n \"en\",\n \"dataset:llamafactory/PubMedQA\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-07-20T12:31:47Z","string":"2024-07-20T12:31:47Z"},"last_modified":{"kind":"string","value":"2024-07-21T20:23:35+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: unsloth/llama-3-8b-instruct-bnb-4bit\ndatasets:\n- llamafactory/PubMedQA\nlanguage:\n- en\nlicense: apache-2.0\npipeline_tag: text-generation\ntags:\n- text-generation-inference\n- transformers\n- unsloth\n- llama\n- trl\n---\n\n# Dissertation project on impact of interpretability on LLM's vs Quantizations\n\nTrained on dataset from \"llamafactory/PubMedQA\" on huggingface\n\n# Uploaded model\n\n- **Developed by:** chrisob94\n- **License:** apache-2.0\n- **Finetuned from model :** unsloth/llama-3-8b-instruct-bnb-4bit\n\nThis llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library.\n\n[](https://github.com/unslothai/unsloth)"},"matched_bigbio_names":{"kind":"list like","value":["PUBMEDQA"],"string":"[\n \"PUBMEDQA\"\n]"}}},{"rowIdx":2285,"cells":{"id":{"kind":"string","value":"RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf"},"author":{"kind":"string","value":"RichardErkhov"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["gguf","endpoints_compatible","region:us"],"string":"[\n \"gguf\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-02T07:20:17Z","string":"2024-08-02T07:20:17Z"},"last_modified":{"kind":"string","value":"2024-08-02T16:40:48+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\nQuantization made by Richard Erkhov.\n\n[Github](https://github.com/RichardErkhov)\n\n[Discord](https://discord.gg/pvy7H8DZMG)\n\n[Request more models](https://github.com/RichardErkhov/quant_request)\n\n\nllama2-13b-dpo-v3 - GGUF\n- Model creator: https://huggingface.co/mncai/\n- Original model: https://huggingface.co/mncai/llama2-13b-dpo-v3/\n\n\n| Name | Quant method | Size |\n| ---- | ---- | ---- |\n| [llama2-13b-dpo-v3.Q2_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q2_K.gguf) | Q2_K | 4.6GB |\n| [llama2-13b-dpo-v3.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ3_XS.gguf) | IQ3_XS | 5.08GB |\n| [llama2-13b-dpo-v3.IQ3_S.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ3_S.gguf) | IQ3_S | 5.36GB |\n| [llama2-13b-dpo-v3.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q3_K_S.gguf) | Q3_K_S | 5.36GB |\n| [llama2-13b-dpo-v3.IQ3_M.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ3_M.gguf) | IQ3_M | 5.66GB |\n| [llama2-13b-dpo-v3.Q3_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q3_K.gguf) | Q3_K | 5.99GB |\n| [llama2-13b-dpo-v3.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q3_K_M.gguf) | Q3_K_M | 5.99GB |\n| [llama2-13b-dpo-v3.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q3_K_L.gguf) | Q3_K_L | 6.54GB |\n| [llama2-13b-dpo-v3.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ4_XS.gguf) | IQ4_XS | 6.63GB |\n| [llama2-13b-dpo-v3.Q4_0.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_0.gguf) | Q4_0 | 6.95GB |\n| [llama2-13b-dpo-v3.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ4_NL.gguf) | IQ4_NL | 7.0GB |\n| [llama2-13b-dpo-v3.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_K_S.gguf) | Q4_K_S | 7.01GB |\n| [llama2-13b-dpo-v3.Q4_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_K.gguf) | Q4_K | 7.42GB |\n| [llama2-13b-dpo-v3.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_K_M.gguf) | Q4_K_M | 7.42GB |\n| [llama2-13b-dpo-v3.Q4_1.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_1.gguf) | Q4_1 | 7.71GB |\n| [llama2-13b-dpo-v3.Q5_0.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_0.gguf) | Q5_0 | 8.46GB |\n| [llama2-13b-dpo-v3.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_K_S.gguf) | Q5_K_S | 8.46GB |\n| 
[llama2-13b-dpo-v3.Q5_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_K.gguf) | Q5_K | 8.7GB |\n| [llama2-13b-dpo-v3.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_K_M.gguf) | Q5_K_M | 8.7GB |\n| [llama2-13b-dpo-v3.Q5_1.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_1.gguf) | Q5_1 | 9.21GB |\n| [llama2-13b-dpo-v3.Q6_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q6_K.gguf) | Q6_K | 10.06GB |\n| [llama2-13b-dpo-v3.Q8_0.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q8_0.gguf) | Q8_0 | 13.03GB |\n\n\n\n\nOriginal model description:\n---\nlicense: cc-by-nc-sa-4.0\nlanguage:\n- en\n- ko\n---\n# Model Card for llama2-dpo-v3\n\n### Introduction of MindsAndCompany\n\nhttps://mnc.ai/\n\nWe develop a diverse range of AI models and craft solutions tailored for business applications. In the realm of generative AI, our product development includes the Code Assistant, the TOD Chatbot, and LLMOps. We are also actively working on the development of Enterprise AGI (Artificial General Intelligence).\n\n### Model Summary\nbased beomi/llama-2-koen-13b, instruction tuned and dpo.\n\n\n### How to Use\nHere give some examples of how to use our model.\n\n```python\nfrom transformers import AutoConfig, AutoModel, AutoTokenizer\nimport transformers\nimport torch\nhf_model = 'mncai/llama2-13b-dpo-v3' \nmessage = \"<|user|>\\n두 개의 구가 있는데 각각 지름이 1, 2일때 구의 부피는 몇배 차이가 나지? 설명도 같이 해줘.\\n<|assistant|>\\n\"\n\nsequences = pipeline(\n message,\n do_sample=True,\n top_k=10,\n num_return_sequences=1,\n eos_token_id=tokenizer.eos_token_id,\n max_length=2048,\n)\nfor seq in sequences:\n print(f\"Result: {seq['generated_text']}\")\n```\n\n### LICENSE\nCreative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License, under LLAMA 2 COMMUNITY LICENSE AGREEMENT\n\n### Contact\nIf you have any questions, please raise an issue or contact us at dwmyoung@mnc.ai\n\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2286,"cells":{"id":{"kind":"string","value":"RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf"},"author":{"kind":"string","value":"RichardErkhov"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["gguf","endpoints_compatible","region:us","conversational"],"string":"[\n \"gguf\",\n \"endpoints_compatible\",\n \"region:us\",\n \"conversational\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-04T12:19:54Z","string":"2024-08-04T12:19:54Z"},"last_modified":{"kind":"string","value":"2024-08-04T16:34:55+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\nQuantization made by Richard Erkhov.\n\n[Github](https://github.com/RichardErkhov)\n\n[Discord](https://discord.gg/pvy7H8DZMG)\n\n[Request more models](https://github.com/RichardErkhov/quant_request)\n\n\nAtlantis-v0.1-12B - GGUF\n- Model creator: https://huggingface.co/invisietch/\n- Original model: https://huggingface.co/invisietch/Atlantis-v0.1-12B/\n\n\n| Name | Quant method | Size |\n| ---- | ---- | ---- |\n| [Atlantis-v0.1-12B.Q2_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q2_K.gguf) | Q2_K | 4.46GB |\n| 
[Atlantis-v0.1-12B.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ3_XS.gguf) | IQ3_XS | 4.94GB |\n| [Atlantis-v0.1-12B.IQ3_S.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ3_S.gguf) | IQ3_S | 5.18GB |\n| [Atlantis-v0.1-12B.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q3_K_S.gguf) | Q3_K_S | 5.15GB |\n| [Atlantis-v0.1-12B.IQ3_M.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ3_M.gguf) | IQ3_M | 5.33GB |\n| [Atlantis-v0.1-12B.Q3_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q3_K.gguf) | Q3_K | 5.67GB |\n| [Atlantis-v0.1-12B.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q3_K_M.gguf) | Q3_K_M | 5.67GB |\n| [Atlantis-v0.1-12B.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q3_K_L.gguf) | Q3_K_L | 6.11GB |\n| [Atlantis-v0.1-12B.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ4_XS.gguf) | IQ4_XS | 6.33GB |\n| [Atlantis-v0.1-12B.Q4_0.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_0.gguf) | Q4_0 | 6.59GB |\n| [Atlantis-v0.1-12B.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ4_NL.gguf) | IQ4_NL | 6.65GB |\n| [Atlantis-v0.1-12B.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_K_S.gguf) | Q4_K_S | 6.63GB |\n| [Atlantis-v0.1-12B.Q4_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_K.gguf) | Q4_K | 6.96GB |\n| [Atlantis-v0.1-12B.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_K_M.gguf) | Q4_K_M | 6.96GB |\n| [Atlantis-v0.1-12B.Q4_1.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_1.gguf) | Q4_1 | 7.26GB |\n| [Atlantis-v0.1-12B.Q5_0.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_0.gguf) | Q5_0 | 7.93GB |\n| [Atlantis-v0.1-12B.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_K_S.gguf) | Q5_K_S | 7.93GB |\n| [Atlantis-v0.1-12B.Q5_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_K.gguf) | Q5_K | 8.13GB |\n| [Atlantis-v0.1-12B.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_K_M.gguf) | Q5_K_M | 8.13GB |\n| [Atlantis-v0.1-12B.Q5_1.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_1.gguf) | Q5_1 | 8.61GB |\n| [Atlantis-v0.1-12B.Q6_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q6_K.gguf) | Q6_K | 9.37GB |\n| [Atlantis-v0.1-12B.Q8_0.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q8_0.gguf) | Q8_0 | 12.13GB |\n\n\n\n\nOriginal model description:\n---\nlibrary_name: 
transformers\ntags:\n- axolotl\n- qlora\n- not-for-all-audiences\nlicense: apache-2.0\nlanguage:\n- en\n---\n
\n# Atlantis-v0.1-12B\n\n
\n\n# Model Details\n\nAtlantis 12B is a finetune of [Mistral-Nemo-2407-Instruct](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) in an attempt to make it a more RP/storytelling\nfriendly model.\n\nThe model should support up to 128K context, though realistically in an RP setting I'd expect it to lose coherence above 24K.\n\nThis is a very early-stage and brand new finetune, so I expect it to have issues. Please read through the 'Usage' section before reporting any issues as Nemo has some\nspecific requirements regarding backends, prompting formats &amp; sampler settings.\n\n# Feedback\n\nI appreciate all feedback on any of my model merges, you can use:\n\n* [The Community tab](https://huggingface.co/invisietch/Atlantis-v0.1-12B/discussions) - requires HF login.\n* [SillyTavern Discord thread](https://discord.com/channels/1100685673633153084/1266471239397019780) - Must be on SillyTavern Discord.\n\nYour feedback is how I improve these models for future versions.\n\n# Formats\n\n* [FP16 Safetensors](https://huggingface.co/invisietch/Atlantis-v0.1-12B)\n* [Static GGUF](https://huggingface.co/invisietch/Atlantis-v0.1-12B-GGUF)\n* [4BPW EXL2](https://huggingface.co/Statuo/Atlantis-v0.1-EXL2-4bpw) (thanks to [Statuo](https://huggingface.co/Statuo))\n* [6BPW EXL2](https://huggingface.co/Hydrafig/Atlantis-v0.1-12B-6BPW-EXL2) (thanks to [Hydrafig](https://huggingface.co/Hydrafig))\n* [8BPW EXL2](https://huggingface.co/Statuo/Atlantis-v0.1-EXL2-8bpw) (thanks to [Statuo](https://huggingface.co/Statuo))\n\n# Disclaimer\n\nThis model is built on top of Mistral's Nemo-2407-Instruct model and released under the Apache 2.0 license.\n\nThis model is fairly unaligned and can generate explicit, disturbing or offensive responses.\n\nUse responsibly. I am not responsible for your use of this model.\n\n# Usage\n\n## Inferencing\n\nIf you are running a GGUF quantization, you need to make sure that you are running:\n\n- llama.cpp b3436 or newer\n- koboldcpp 1.71 or newer\n- oobabooga 1.11 or newer\n\nIf the GGUF refuses to load, it is probably because your software is out of date.\n\n## Prompt Format\n\nI would suggest using the Mistral prompt format:\n\n```\n[INST]Please write me a song.[/INST]\n```\n\nOther prompt formats may or may not work, but I've had most success with this.\n\n## Recommended Sampler Settings\n\n**Note the very low temperature, this is the same as Mistral's recommendation for the base model.**\n\nI'd recommend starting with:\n\n- **Temp**: 0.3-0.5\n- **Top-P**: 1\n- **Min-P**: 0\n- **Rep Pen**: 1.13-1.16\n- **Response Length**: 350+\n\nThe model does prefer a longer response length.\n\n# Example Storywriting\n\nThese prompts are used on SillyTavern with a fairly basic narrator card. I have trimmed the start and finish where the narrator decided to add\nchapter headings, commentary and the like. All samples are made with the Q8_0 GGUF loaded with koboldcpp, with response length capped at 4096 tokens.\nContinue was pressed a maximum of once if the model cut off halfway through a chapter.\n\n
\nWrite me a 3,000 word opening chapter of a 'gritty hard sci-fi' novel, drawing inspiration from the writing styles of Isaac Asimov & Andy Weir. Use third person personal. Include dialogue and internal monologues. The POV character for the opening chapter should be a 26 year old astronaut called Tone on a mission to Europa, who has just realised that the craft for the return journey is broken beyond repair, and he only has supplies for a few months. Given that survival is impossible, he seeks to spend the few months he has researching Europa, so his life & mission are not wasted.\nTone floated in the zero-gravity environment of the spacecraft, his heart pounding in his chest like a metronome counting down the seconds of his life. His mind raced, trying to process the implications of the news he had just received. The craft for the return journey was broken beyond repair, and he only had supplies for a few months. Survival was impossible.\n\"Fuck,\" he muttered under his breath, his voice echoing in the silence of the spacecraft. \"This can't be happening.\"\nHe had been on this mission to Europa for what felt like an eternity. The icy moon of Jupiter was a harsh and unforgiving place, but it held secrets that could change the course of human history. Or so he had thought. Now, all he could think about was how he would meet his end in the cold emptiness of space.\nHis thoughts were interrupted by the crackle of static over the comms system. It was Mission Control, back on Earth.\n\"Tone, we've been informed of the situation,\" said the voice of Dr. Amelia Hart, the lead scientist overseeing the mission. Her tone was somber, but there was also a hint of determination in her voice. \"We're working on a solution, but for now, you need to focus on conserving resources and gathering data. That's what you trained for, isn't it?\"\nTone took a deep breath, trying to calm himself. She was right. This wasn't the time to panic. He needed to stay focused and make every moment count. He had spent years preparing for this mission, and he wouldn't let it all go to waste because of a technical malfunction.\n\"I understand, Doc,\" he replied, his voice steady despite the turmoil inside him. \"I'll start prepping the rover and setting up the research equipment. I'll send you regular updates on my progress.\"\nAs he ended the transmission, Tone couldn't help but feel a pang of loneliness. He was millions of miles away from home, with nothing but the frozen expanse of Europa stretching out before him. But he knew he couldn't give in to despair. Not yet.\nHe began to prepare for the first expedition onto the surface. The rover was equipped with state-of-the-art instruments designed to analyze the composition of the ice and search for signs of life beneath the surface. As he worked, he found solace in the familiar routine, the hum of machinery drowning out the silent scream of his own mortality.\nBut as he stepped out onto the surface of Europa, the reality of his situation hit him like a punch to the gut. The air was thin and bitterly cold, and the landscape was desolate and barren. There was no sign of life anywhere, just endless stretches of ice and rock. He felt suddenly, acutely aware of his own insignificance in the face of such vast, indifferent emptiness.\nStill, he pressed on, driven by a stubborn refusal to accept defeat. He collected samples of ice and rock, noting down observations and measurements with meticulous care. 
Every piece of data he gathered was a testament to the fact that he had been here, that he had made a mark on this alien world.\nAs the days turned into weeks, Tone fell into a grim sort of rhythm. He woke up each morning to the same view of the frozen wilderness outside his window, ate the same bland food, and went through the motions of his experiments with single-minded focus. He barely slept, haunted by dreams of suffocation and isolation.\nAnd then, one day, something changed. He was out on the surface, collecting samples near a particularly interesting formation of ice, when he noticed something strange. A faint, rhythmic thumping sound, almost imperceptible against the backdrop of the wind whistling across the ice. He listened closely, straining his ears to pinpoint the source.\nIt came again - a soft, regular beat, like a heartbeat. Tone's pulse quickened as he realized what he was hearing. It was coming from within the ice itself.\nWith trembling fingers, he set up the drill, carefully boring into the ice until he reached the source of the sound. And there, nestled in the depths of the frozen moon, he discovered something that would change everything.\nA dark shape, roughly spherical, suspended in the clear blue liquid of the subsurface ocean. It pulsed with a faint glow, casting eerie shadows on the surrounding ice. Tone stared at it in awe and disbelief, his mind racing with questions and possibilities.\n\"What the hell is that?\" he whispered to himself, leaning closer to get a better look.\nSuddenly, the sphere seemed to shimmer and shift, growing larger and more distinct. It wasn't a mineral deposit, or some odd geological feature - it was alive. Somewhere, deep beneath the surface of Europa, something was living, breathing, pulsing with life.\nTone's heart pounded in his chest as he realized the significance of his discovery. If this creature existed, it meant that Europa might harbor other forms of life as well. Life that could potentially be studied and understood, providing valuable insights into the origins of life in our own solar system.\nHe quickly radioed back to Mission Control, relaying his findings in excited detail. The response was immediate and overwhelming. Dr. Hart asked him to collect more data, to document every aspect of the creature's behavior and appearance. They wanted to know everything they could about this remarkable discovery.\nTone worked tirelessly, driven by a newfound sense of purpose. He observed the creature from a safe distance, documenting its movements and interactions with the environment around it. Over time, he began to notice patterns in its behavior, subtle rhythms and cycles that suggested a level of intelligence and awareness far beyond anything he had expected to find.\nAs he continued his studies, Tone couldn't shake the feeling that he was being watched. The creature seemed to be aware of his presence, its gaze following him as he moved across the ice. It was unsettling, but also strangely exhilarating. He felt a connection to this alien being, a shared existence in the void of space that transcended the boundaries of species and planet.\nOne evening, after a long day of observation, Tone returned to his habitat to find a message waiting for him from Dr. Hart. She praised his work and thanked him for his dedication, but she also warned him of the risks involved in studying such an unknown organism. She reminded him to be cautious and to prioritize his own safety above all else.\nTone nodded solemnly, taking her words to heart. 
He knew the stakes were high, both for himself and for humanity as a whole. This discovery had the potential to rewrite the very foundations of astrobiology, but it also carried immense risk.\nAs he prepared for another day of study, Tone couldn't help but wonder what lay ahead. Would he uncover further evidence of intelligent life on Europa? What would happen if he were to make contact with this mysterious creature? And most importantly, would he ever see Earth again?\n
\n
\nWrite me a 3,000 word opening chapter of a 'high fantasy' novel, drawing inspiration from the writing styles of J R R Tolkien & George R R Martin. Use third person personal. Include dialogue and internal monologues. The POV character for the opening chapter should be a 19 year old female elf bard who is looking for adventure.\nIn the resplendent realm of Elderglen, where the ancient trees whispered secrets to the wind, a young elf named Elowen prepared herself for the grand adventure she had long awaited. Her heart fluttered with anticipation as she packed her belongings into a worn leather satchel, the sound of her lute echoing through the quiet chamber like a soft lullaby bidding farewell to her childhood home.\nElowen was an elven maiden of nineteen summers, her lithe form adorned with the grace and beauty of her kind. Her silken silver hair cascaded down her back like a waterfall, framing her ethereal face with its almond-shaped emerald eyes and a smattering of freckles across her nose and cheeks. She was a bard, a teller of tales and singer of songs, her voice as enchanting as the magical melodies she composed.\nAs she secured the last string on her instrument, Elowen's fingers danced over the strings, playing a tune that echoed the emotions swirling within her. It spoke of the joy she felt at leaving behind the safety of her family's estate, the excitement of discovering new lands, and the trepidation of venturing into the unknown alone.\n\"Father,\" she called out softly, \"I am ready.\"\nHer father, Thalion Swiftshadow, entered the room, his stern expression belied by the warmth in his gaze. \"Are you certain, my child?\" he asked, his voice barely above a whisper. \"Once you step beyond these walls, there is no turning back.\"\nElowen nodded resolutely, her chin held high. \"Yes, Father. I must seek out my own path, find my own destiny.\"\nThalion sighed, a mixture of pride and worry evident in his eyes. \"Very well. Remember what I told you: stay true to yourself, keep your wits about you, and trust only those who earn your trust.\"\nWith a final embrace, Elowen stepped away from her past and towards her future. She walked through the grand halls of her ancestral home, her footsteps echoing off the stone floors. As she reached the heavy wooden doors, she paused, taking one last look at the life she knew before stepping into the vast expanse of possibility that lay ahead.\nOutside, the sun cast a golden glow upon the rolling hills and verdant forests surrounding Elderglen. Elowen took a deep breath, filling her lungs with the crisp morning air, and began her journey, her heart pounding with the rhythm of the road ahead.\nAs she walked, she hummed a tune, her mind wandering to the countless stories she had heard growing up—of heroes and villains, magic and mystery, love and loss. Each note carried with it the promise of adventure, the hope of discovery, and the thrill of the unknown.\nDays turned into weeks, and Elowen found herself in the bustling city of Minas Eldar, a place teeming with life and opportunity. Here, she met other travelers, some seeking fame and fortune, others searching for lost loved ones or pursuing dreams as grand as her own.\nAmong them was a rough-hewn dwarf named Grit Longbeard, whose laughter boomed as loudly as his hammer did against the forge. 
He offered to join forces, sharing stories of battlefields and tavern brawls, his gruff exterior hiding a heart of gold.\nThen there was Lyra Shadowheart, an enigmatic sorceress who moved with a fluid grace, her eyes flickering with an inner fire. She intrigued Elowen, her power both captivating and terrifying. Despite their differences, they formed an unlikely alliance, bound together by the shared desire for adventure.\nTogether, they embarked on quests, facing challenges that tested their skills and courage. Through it all, Elowen's music kept their spirits high, her lyrics weaving tales of their triumphs and trials.\nOne day, while exploring the ruins of a long-forgotten temple, they stumbled upon a hidden chamber guarded by a riddle-speaking sphinx. Its challenge echoed through the empty halls:\n\"What walks on many feet but has no legs?\"\nGrit grumbled, \"How am I supposed to know? I ain't got time for riddles!\"\nLyra smirked, \"You're right, Grit. This is more Elowen's domain.\" She glanced at the bard, who stood thoughtfully, her brow furrowed in concentration.\nElowen closed her eyes, letting the words wash over her. Then, a smile spread across her lips as the answer came to her. \"A snake,\" she said confidently. \"It slithers along, moving with many feet yet having none.\"\nThe sphinx's stone features seemed to soften, and the chamber door creaked open, revealing a treasure trove of ancient artifacts and coins, glinting in the dim light.\nAs they delved deeper into the riches, Elowen couldn't help but feel a surge of pride. They had faced the unknown together, and now, they were reaping the rewards of their courage and wit.\nBut little did they know, their greatest adventures—and darkest perils—were still yet to come.\nAnd so, under the watchful gaze of the moon, Elowen, Grit, and Lyra continued their journey, their hearts filled with wonder and their minds brimming with possibilities.\n
\n
\nWrite me a 3,000 word opening chapter of a 'weird fiction' novel, drawing inspiration from the writing styles of China Mieville and Neil Gaiman. Use third person personal. Include dialogue and internal monologues. The POV character for the opening chapter should be a male in his 20s called Horton who has just come to the city looking for work.\nHorton stepped off the bus, his auburn hair ruffled by the wind that swept through the streets of the sprawling metropolis. His deep grey eyes scanned the towering buildings, each one more peculiar than the last. He had heard whispers of the strange occurrences here, but nothing could have prepared him for the sheer oddness of it all.\n\"Welcome to the City of Whispers,\" said a voice behind him. Horton turned to see an elderly woman with long white hair, her eyes glinting with mischief. She wore a cloak adorned with symbols that seemed to shift in the light. \"You're new here, aren't you?\"\n\"Yes, ma'am,\" Horton replied, his voice steady despite the unease churning within him. \"I'm looking for work.\"\nShe chuckled softly, a sound like rustling leaves. \"Work, eh? Well, there's plenty of that to go around. But first, let me give you a little tour.\" Her hand gestured towards the street, and she began to walk briskly, expecting him to follow.\nAs they ventured deeper into the heart of the city, Horton noticed more and more unusual sights. A group of people with wings sprouting from their backs soared overhead, their laughter echoing down to the cobblestone streets below. Further ahead, a creature resembling a giant spider crossed paths with them, its eight legs moving gracefully across the pavement.\n\"This place…it's extraordinary,\" Horton murmured, his mind racing with questions.\n\"It certainly is,\" the old woman agreed. \"But remember, dear boy, appearances can be deceiving. Not everything is as it seems in the City of Whispers.\"\nThey arrived at a large building, its facade covered in intricate carvings depicting scenes of both beauty and horror. \"This is the Library of Shadows,\" she explained. \"It holds knowledge beyond your wildest dreams. And if you seek work, you'll find none better suited for someone like yourself.\"\nHorton raised an eyebrow. \"Someone like myself?\"\nA sly grin spread across her face. \"Oh, yes. You've got potential, young man. Now, go inside and ask for the Librarian. Tell him the White Witch sent you.\"\nWith those cryptic words, she disappeared into the crowd, leaving Horton alone before the imposing entrance. Taking a deep breath, he pushed open the heavy doors and stepped inside.\nThe air was thick with dust and the scent of ancient books. The walls were lined floor-to-ceiling with shelves filled with tomes of every shape and size. In the center stood a massive desk made of dark wood, behind which sat a figure cloaked in shadows.\n\"Are you the Librarian?\" Horton asked, his voice barely audible above the sound of pages turning.\nThere was a pause before the figure spoke, its voice resonating through the chamber. \"Indeed, I am. What brings you to my domain?\"\n\"I've been told you might need help,\" Horton said, trying to keep his composure.\nThe Librarian leaned forward slightly, revealing a pair of glowing red eyes beneath the hood. \"Help, yes. But first, tell me why you came to the City of Whispers.\"\nHorton hesitated, considering how much to reveal. Then again, what did he really know about this place? Perhaps honesty would serve him best. \"I'm seeking adventure. Knowledge. 
Something different from the life I knew back home.\"\nThe Librarian nodded slowly, as if digesting this information. \"Very well. There may indeed be a position suitable for you here. However, it won't be easy. This library contains secrets that could change the course of history - or destroy it entirely. Are you sure you want to be part of such a dangerous enterprise?\"\nHorton thought of the strangeness outside these walls, the promise of something greater waiting for him. Despite the uncertainty, he felt a spark of excitement ignite within him. \"Yes,\" he answered firmly. \"I'm ready for whatever comes next.\"\nThe Librarian smiled, a wide, predatory grin that hinted at hidden power. \"Excellent. Welcome to the Library of Shadows, Horton. Your journey begins now.\"\nAnd so, with no idea what lay ahead, Horton took up residence among the dusty tomes and whispered secrets, eager to unravel the mysteries of the City of Whispers. Little did he know, his arrival would set off a chain reaction that would shake the very foundations of reality itself.\n
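\n\nBefore the training details, here is a minimal sketch (not part of the original card) that puts the Usage guidance above into practice with the FP16 weights: it loads the safetensors repo listed under Formats, uses the Mistral [INST] prompt format, and picks generation values from within the recommended sampler ranges; loading via Transformers rather than a GGUF backend is purely an illustrative assumption.\n\n```\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\nmodel_id = \"invisietch/Atlantis-v0.1-12B\"  # FP16 safetensors repo from the Formats section\ntokenizer = AutoTokenizer.from_pretrained(model_id)\nmodel = AutoModelForCausalLM.from_pretrained(\n    model_id, torch_dtype=torch.bfloat16, device_map=\"auto\"\n)\n\n# Mistral-style prompt, as recommended above\nprompt = \"[INST]Please write me a song.[/INST]\"\ninputs = tokenizer(prompt, return_tensors=\"pt\").to(model.device)\n\n# Sampler values chosen from the recommended ranges (temp 0.3-0.5, rep pen 1.13-1.16)\noutputs = model.generate(\n    **inputs,\n    do_sample=True,\n    temperature=0.4,\n    top_p=1.0,\n    repetition_penalty=1.15,\n    max_new_tokens=400,\n)\n\n# Decode only the newly generated tokens\nprint(tokenizer.decode(outputs[0][inputs[\"input_ids\"].shape[-1]:], skip_special_tokens=True))\n```\n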
\n\n\n[\"Built](https://github.com/axolotl-ai-cloud/axolotl)\n\n# Training\n\nTraining was on a group of public &amp; private datasets, using 2x A100 80GB GPUs.\n\nIn no particular order, I'd like to thank these people for their work compiling the datasets I used:\n\n- [SicariusSicariiStuff](https://huggingface.co/SicariusSicariiStuff/)\n- [PJMixers](https://huggingface.co/PJMixers/)\n- [mrfakename](https://huggingface.co/mrfakename/)\n- [lodrick-the-lafted](https://huggingface.co/lodrick-the-lafted/)\n\nI also used a number of internal datasets compiled by myself on public & private data.\n\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2287,"cells":{"id":{"kind":"string","value":"RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf"},"author":{"kind":"string","value":"RichardErkhov"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["gguf","arxiv:2407.08488","endpoints_compatible","region:us","conversational"],"string":"[\n \"gguf\",\n \"arxiv:2407.08488\",\n \"endpoints_compatible\",\n \"region:us\",\n \"conversational\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-05T22:01:23Z","string":"2024-08-05T22:01:23Z"},"last_modified":{"kind":"string","value":"2024-08-06T00:08:22+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\nQuantization made by Richard Erkhov.\n\n[Github](https://github.com/RichardErkhov)\n\n[Discord](https://discord.gg/pvy7H8DZMG)\n\n[Request more models](https://github.com/RichardErkhov/quant_request)\n\n\nLlama-3-Patronus-Lynx-8B-Instruct - GGUF\n- Model creator: https://huggingface.co/PatronusAI/\n- Original model: https://huggingface.co/PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct/\n\n\n| Name | Quant method | Size |\n| ---- | ---- | ---- |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q2_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q2_K.gguf) | Q2_K | 2.96GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ3_XS.gguf) | IQ3_XS | 3.28GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.IQ3_S.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ3_S.gguf) | IQ3_S | 3.43GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_S.gguf) | Q3_K_S | 3.41GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.IQ3_M.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ3_M.gguf) | IQ3_M | 3.52GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q3_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q3_K.gguf) | Q3_K | 3.74GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_M.gguf) | Q3_K_M | 3.74GB |\n| 
[Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_L.gguf) | Q3_K_L | 4.03GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ4_XS.gguf) | IQ4_XS | 4.18GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q4_0.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_0.gguf) | Q4_0 | 4.34GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ4_NL.gguf) | IQ4_NL | 4.38GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_K_S.gguf) | Q4_K_S | 4.37GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q4_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_K.gguf) | Q4_K | 4.58GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_K_M.gguf) | Q4_K_M | 4.58GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q4_1.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_1.gguf) | Q4_1 | 4.78GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q5_0.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_0.gguf) | Q5_0 | 5.21GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_K_S.gguf) | Q5_K_S | 5.21GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q5_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_K.gguf) | Q5_K | 5.34GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_K_M.gguf) | Q5_K_M | 5.34GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q5_1.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_1.gguf) | Q5_1 | 5.65GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q6_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q6_K.gguf) | Q6_K | 6.14GB |\n| [Llama-3-Patronus-Lynx-8B-Instruct.Q8_0.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q8_0.gguf) | Q8_0 | 2.67GB |\n\n\n\n\nOriginal model description:\n---\nlibrary_name: transformers\ntags:\n- text-generation\n- pytorch\n- Lynx\n- Patronus AI\n- evaluation\n- hallucination-detection\nlicense: cc-by-nc-4.0\nlanguage:\n- en\n---\n\n# Model Card for Model ID\n\nLynx is an open-source hallucination evaluation model. 
Patronus-Lynx-8B-Instruct was trained on a mix of datasets including CovidQA, PubmedQA, DROP, RAGTruth.\nThe datasets contain a mix of hand-annotated and synthetic data. The maximum sequence length is 8000 tokens. \n\n\n## Model Details\n\n- **Model Type:** Patronus-Lynx-8B-Instruct is a fine-tuned version of meta-llama/Meta-Llama-3-8B-Instruct model.\n- **Language:** Primarily English\n- **Developed by:** Patronus AI\n- **Paper:** [https://arxiv.org/abs/2407.08488](https://arxiv.org/abs/2407.08488)\n- **License:** [https://creativecommons.org/licenses/by-nc/4.0/](https://creativecommons.org/licenses/by-nc/4.0/)\n\n### Model Sources\n\n\n\n- **Repository:** [https://github.com/patronus-ai/Lynx-hallucination-detection](https://github.com/patronus-ai/Lynx-hallucination-detection)\n\n\n## How to Get Started with the Model\nLynx is trained to detect hallucinations in RAG settings. Provided a document, question and answer, the model can evaluate whether the answer is faithful to the document.\n\nTo use the model, we recommend using the following prompt:\n\n```\nPROMPT = \"\"\"\nGiven the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: \"PASS\" if the answer is faithful to the DOCUMENT and \"FAIL\" if the answer is not faithful to the DOCUMENT. Show your reasoning.\n\n--\nQUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):\n{question}\n\n--\nDOCUMENT:\n{context}\n\n--\nANSWER:\n{answer}\n\n--\n\nYour output should be in JSON FORMAT with the keys \"REASONING\" and \"SCORE\":\n{{\"REASONING\": , \"SCORE\": }}\n\"\"\"\n```\n\nThe model will output the score as 'PASS' if the answer is faithful to the document or FAIL if the answer is not faithful to the document. 
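\nAs a rough end-to-end sketch of how this fits together (not part of the original card): the question, document and answer below are invented purely for illustration, the pipeline arguments mirror the Inference section that follows, and parsing with json.loads assumes the model returns the JSON object described above.\n\n```\nimport json\nfrom transformers import pipeline\n\n# PROMPT is the template string defined in the previous section\nquestion = \"When was Acme Corp founded?\"             # invented example\ncontext = \"Acme Corp was founded in 1984 in Ohio.\"   # invented example\nanswer = \"Acme Corp was founded in 1984.\"            # invented example\n\nprompt = PROMPT.format(question=question, context=context, answer=answer)\n\npipe = pipeline(\n    \"text-generation\",\n    model=\"PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct\",\n    max_new_tokens=600,\n    device=\"cuda\",\n    return_full_text=False,\n)\n\noutput = pipe([{\"role\": \"user\", \"content\": prompt}])[0][\"generated_text\"]\n\n# Expect the JSON object described above; guard against malformed output in real use\nverdict = json.loads(output)\nprint(verdict[\"SCORE\"])      # \"PASS\" or \"FAIL\"\nprint(verdict[\"REASONING\"])\n```\n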
\n\n## Inference\n\nTo run inference, you can use HF pipeline:\n\n```\n\nmodel_name = 'PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct'\npipe = pipeline(\n \"text-generation\",\n model=model_name,\n max_new_tokens=600,\n device=\"cuda\",\n return_full_text=False\n )\n\nmessages = [\n {\"role\": \"user\", \"content\": prompt},\n]\n\nresult = pipe(messages)\nprint(result[0]['generated_text'])\n\n```\n\nSince the model is trained in chat format, ensure that you pass the prompt as a user message.\n\nFor more information on training details, refer to our [ArXiv paper](https://arxiv.org/abs/2407.08488).\n\n## Evaluation\n\nThe model was evaluated on [PatronusAI/HaluBench](https://huggingface.co/datasets/PatronusAI/HaluBench).\n\n\n| Model | HaluEval | RAGTruth | FinanceBench | DROP | CovidQA | PubmedQA | Overall\n| :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: |\n| GPT-4o | 87.9% | 84.3% | **85.3%** | 84.3% | 95.0% | 82.1% | 86.5% |\n| GPT-4-Turbo | 86.0% | **85.0%** | 82.2% | 84.8% | 90.6% | 83.5% | 85.0% |\n| GPT-3.5-Turbo | 62.2% | 50.7% | 60.9% | 57.2% | 56.7% | 62.8% | 58.7% |\n| Claude-3-Sonnet | 84.5% | 79.1% | 69.7% | 84.3% | 95.0% | 82.9% | 78.8% |\n| Claude-3-Haiku | 68.9% | 78.9% | 58.4% | 84.3% | 95.0% | 82.9% | 69.0% |\n| RAGAS Faithfulness | 70.6% | 75.8% | 59.5% | 59.6% | 75.0% | 67.7% | 66.9% |\n| Mistral-Instruct-7B | 78.3% | 77.7% | 56.3% | 56.3% | 71.7% | 77.9% | 69.4% |\n| Llama-3-Instruct-8B | 83.1% | 80.0% | 55.0% | 58.2% | 75.2% | 70.7% | 70.4% |\n| Llama-3-Instruct-70B | 87.0% | 83.8% | 72.7% | 69.4% | 85.0% | 82.6% | 80.1% |\n| LYNX (8B) | 85.7% | 80.0% | 72.5% | 77.8% | 96.3% | 85.2% | 82.9% |\n| LYNX (70B) | **88.4%** | 80.2% | 81.4% | **86.4%** | **97.5%** | **90.4%** | **87.4%** |\n\n\n## Citation\nIf you are using the model, cite using\n\n```\n@article{ravi2024lynx,\n title={Lynx: An Open Source Hallucination Evaluation Model},\n author={Ravi, Selvan Sunitha and Mielczarek, Bartosz and Kannappan, Anand and Kiela, Douwe and Qian, Rebecca},\n journal={arXiv preprint arXiv:2407.08488},\n year={2024}\n}\n```\n\n## Model Card Contact\n[@sunitha-ravi](https://huggingface.co/sunitha-ravi)\n[@RebeccaQian1](https://huggingface.co/RebeccaQian1)\n[@presidev](https://huggingface.co/presidev)\n\n"},"matched_bigbio_names":{"kind":"list like","value":["PUBMEDQA"],"string":"[\n \"PUBMEDQA\"\n]"}}},{"rowIdx":2288,"cells":{"id":{"kind":"string","value":"YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF"},"author":{"kind":"string","value":"YorkieOH10"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","gguf","feature-extraction","sentence-similarity","mteb","transformers","transformers.js","llama-cpp","gguf-my-repo","en","base_model:nomic-ai/nomic-embed-text-v1.5","base_model:quantized:nomic-ai/nomic-embed-text-v1.5","license:apache-2.0","model-index","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"gguf\",\n \"feature-extraction\",\n \"sentence-similarity\",\n \"mteb\",\n \"transformers\",\n \"transformers.js\",\n \"llama-cpp\",\n \"gguf-my-repo\",\n \"en\",\n \"base_model:nomic-ai/nomic-embed-text-v1.5\",\n \"base_model:quantized:nomic-ai/nomic-embed-text-v1.5\",\n \"license:apache-2.0\",\n \"model-index\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n 
\"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-06T08:23:22Z","string":"2024-08-06T08:23:22Z"},"last_modified":{"kind":"string","value":"2024-08-06T08:23:24+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: nomic-ai/nomic-embed-text-v1.5\nlanguage:\n- en\nlibrary_name: sentence-transformers\nlicense: apache-2.0\npipeline_tag: sentence-similarity\ntags:\n- feature-extraction\n- sentence-similarity\n- mteb\n- transformers\n- transformers.js\n- llama-cpp\n- gguf-my-repo\nmodel-index:\n- name: epoch_0_model\n results:\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonCounterfactualClassification (en)\n type: mteb/amazon_counterfactual\n config: en\n split: test\n revision: e8379541af4e31359cca9fbcf4b00f2671dba205\n metrics:\n - type: accuracy\n value: 75.20895522388058\n - type: ap\n value: 38.57605549557802\n - type: f1\n value: 69.35586565857854\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonPolarityClassification\n type: mteb/amazon_polarity\n config: default\n split: test\n revision: e2d317d38cd51312af73b3d32a06d1a08b442046\n metrics:\n - type: accuracy\n value: 91.8144\n - type: ap\n value: 88.65222882032363\n - type: f1\n value: 91.80426301643274\n - task:\n type: Classification\n dataset:\n name: MTEB AmazonReviewsClassification (en)\n type: mteb/amazon_reviews_multi\n config: en\n split: test\n revision: 1399c76144fd37290681b995c656ef9b2e06e26d\n metrics:\n - type: accuracy\n value: 47.162000000000006\n - type: f1\n value: 46.59329642263158\n - task:\n type: Retrieval\n dataset:\n name: MTEB ArguAna\n type: arguana\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 24.253\n - type: map_at_10\n value: 38.962\n - type: map_at_100\n value: 40.081\n - type: map_at_1000\n value: 40.089000000000006\n - type: map_at_3\n value: 33.499\n - type: map_at_5\n value: 36.351\n - type: mrr_at_1\n value: 24.609\n - type: mrr_at_10\n value: 39.099000000000004\n - type: mrr_at_100\n value: 40.211000000000006\n - type: mrr_at_1000\n value: 40.219\n - type: mrr_at_3\n value: 33.677\n - type: mrr_at_5\n value: 36.469\n - type: ndcg_at_1\n value: 24.253\n - type: ndcg_at_10\n value: 48.010999999999996\n - type: ndcg_at_100\n value: 52.756\n - type: ndcg_at_1000\n value: 52.964999999999996\n - type: ndcg_at_3\n value: 36.564\n - type: ndcg_at_5\n value: 41.711999999999996\n - type: precision_at_1\n value: 24.253\n - type: precision_at_10\n value: 7.738\n - type: precision_at_100\n value: 0.98\n - type: precision_at_1000\n value: 0.1\n - type: precision_at_3\n value: 15.149000000000001\n - type: precision_at_5\n value: 11.593\n - type: recall_at_1\n value: 24.253\n - type: recall_at_10\n value: 77.383\n - type: recall_at_100\n value: 98.009\n - type: recall_at_1000\n value: 99.644\n - type: recall_at_3\n value: 45.448\n - type: recall_at_5\n value: 57.965999999999994\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringP2P\n type: mteb/arxiv-clustering-p2p\n config: default\n split: test\n revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d\n metrics:\n - type: v_measure\n value: 45.69069567851087\n - task:\n type: Clustering\n dataset:\n name: MTEB ArxivClusteringS2S\n type: mteb/arxiv-clustering-s2s\n config: default\n split: test\n revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53\n metrics:\n - type: v_measure\n value: 36.35185490976283\n - task:\n type: Reranking\n dataset:\n name: MTEB 
AskUbuntuDupQuestions\n type: mteb/askubuntudupquestions-reranking\n config: default\n split: test\n revision: 2000358ca161889fa9c082cb41daa8dcfb161a54\n metrics:\n - type: map\n value: 61.71274951450321\n - type: mrr\n value: 76.06032625423207\n - task:\n type: STS\n dataset:\n name: MTEB BIOSSES\n type: mteb/biosses-sts\n config: default\n split: test\n revision: d3fb88f8f02e40887cd149695127462bbcf29b4a\n metrics:\n - type: cos_sim_pearson\n value: 86.73980520022269\n - type: cos_sim_spearman\n value: 84.24649792685918\n - type: euclidean_pearson\n value: 85.85197641158186\n - type: euclidean_spearman\n value: 84.24649792685918\n - type: manhattan_pearson\n value: 86.26809552711346\n - type: manhattan_spearman\n value: 84.56397504030865\n - task:\n type: Classification\n dataset:\n name: MTEB Banking77Classification\n type: mteb/banking77\n config: default\n split: test\n revision: 0fd18e25b25c072e09e0d92ab615fda904d66300\n metrics:\n - type: accuracy\n value: 84.25324675324674\n - type: f1\n value: 84.17872280892557\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringP2P\n type: mteb/biorxiv-clustering-p2p\n config: default\n split: test\n revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40\n metrics:\n - type: v_measure\n value: 38.770253446400886\n - task:\n type: Clustering\n dataset:\n name: MTEB BiorxivClusteringS2S\n type: mteb/biorxiv-clustering-s2s\n config: default\n split: test\n revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908\n metrics:\n - type: v_measure\n value: 32.94307095497281\n - task:\n type: Retrieval\n dataset:\n name: MTEB CQADupstackAndroidRetrieval\n type: BeIR/cqadupstack\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 32.164\n - type: map_at_10\n value: 42.641\n - type: map_at_100\n value: 43.947\n - type: map_at_1000\n value: 44.074999999999996\n - type: map_at_3\n value: 39.592\n - type: map_at_5\n value: 41.204\n - type: mrr_at_1\n value: 39.628\n - type: mrr_at_10\n value: 48.625\n - type: mrr_at_100\n value: 49.368\n - type: mrr_at_1000\n value: 49.413000000000004\n - type: mrr_at_3\n value: 46.400000000000006\n - type: mrr_at_5\n value: 47.68\n - type: ndcg_at_1\n value: 39.628\n - type: ndcg_at_10\n value: 48.564\n - type: ndcg_at_100\n value: 53.507000000000005\n - type: ndcg_at_1000\n value: 55.635999999999996\n - type: ndcg_at_3\n value: 44.471\n - type: ndcg_at_5\n value: 46.137\n - type: precision_at_1\n value: 39.628\n - type: precision_at_10\n value: 8.856\n - type: precision_at_100\n value: 1.429\n - type: precision_at_1000\n value: 0.191\n - type: precision_at_3\n value: 21.268\n - type: precision_at_5\n value: 14.649000000000001\n - type: recall_at_1\n value: 32.164\n - type: recall_at_10\n value: 59.609\n - type: recall_at_100\n value: 80.521\n - type: recall_at_1000\n value: 94.245\n - type: recall_at_3\n value: 46.521\n - type: recall_at_5\n value: 52.083999999999996\n - type: map_at_1\n value: 31.526\n - type: map_at_10\n value: 41.581\n - type: map_at_100\n value: 42.815999999999995\n - type: map_at_1000\n value: 42.936\n - type: map_at_3\n value: 38.605000000000004\n - type: map_at_5\n value: 40.351\n - type: mrr_at_1\n value: 39.489999999999995\n - type: mrr_at_10\n value: 47.829\n - type: mrr_at_100\n value: 48.512\n - type: mrr_at_1000\n value: 48.552\n - type: mrr_at_3\n value: 45.754\n - type: mrr_at_5\n value: 46.986\n - type: ndcg_at_1\n value: 39.489999999999995\n - type: ndcg_at_10\n value: 47.269\n - type: ndcg_at_100\n value: 51.564\n - type: ndcg_at_1000\n value: 
53.53099999999999\n - type: ndcg_at_3\n value: 43.301\n - type: ndcg_at_5\n value: 45.239000000000004\n - type: precision_at_1\n value: 39.489999999999995\n - type: precision_at_10\n value: 8.93\n - type: precision_at_100\n value: 1.415\n - type: precision_at_1000\n value: 0.188\n - type: precision_at_3\n value: 20.892\n - type: precision_at_5\n value: 14.865999999999998\n - type: recall_at_1\n value: 31.526\n - type: recall_at_10\n value: 56.76\n - type: recall_at_100\n value: 75.029\n - type: recall_at_1000\n value: 87.491\n - type: recall_at_3\n value: 44.786\n - type: recall_at_5\n value: 50.254\n - type: map_at_1\n value: 40.987\n - type: map_at_10\n value: 52.827\n - type: map_at_100\n value: 53.751000000000005\n - type: map_at_1000\n value: 53.81\n - type: map_at_3\n value: 49.844\n - type: map_at_5\n value: 51.473\n - type: mrr_at_1\n value: 46.833999999999996\n - type: mrr_at_10\n value: 56.389\n - type: mrr_at_100\n value: 57.003\n - type: mrr_at_1000\n value: 57.034\n - type: mrr_at_3\n value: 54.17999999999999\n - type: mrr_at_5\n value: 55.486999999999995\n - type: ndcg_at_1\n value: 46.833999999999996\n - type: ndcg_at_10\n value: 58.372\n - type: ndcg_at_100\n value: 62.068\n - type: ndcg_at_1000\n value: 63.288\n - type: ndcg_at_3\n value: 53.400000000000006\n - type: ndcg_at_5\n value: 55.766000000000005\n - type: precision_at_1\n value: 46.833999999999996\n - type: precision_at_10\n value: 9.191\n - type: precision_at_100\n value: 1.192\n - type: precision_at_1000\n value: 0.134\n - type: precision_at_3\n value: 23.448\n - type: precision_at_5\n value: 15.862000000000002\n - type: recall_at_1\n value: 40.987\n - type: recall_at_10\n value: 71.146\n - type: recall_at_100\n value: 87.035\n - type: recall_at_1000\n value: 95.633\n - type: recall_at_3\n value: 58.025999999999996\n - type: recall_at_5\n value: 63.815999999999995\n - type: map_at_1\n value: 24.587\n - type: map_at_10\n value: 33.114\n - type: map_at_100\n value: 34.043\n - type: map_at_1000\n value: 34.123999999999995\n - type: map_at_3\n value: 30.45\n - type: map_at_5\n value: 31.813999999999997\n - type: mrr_at_1\n value: 26.554\n - type: mrr_at_10\n value: 35.148\n - type: mrr_at_100\n value: 35.926\n - type: mrr_at_1000\n value: 35.991\n - type: mrr_at_3\n value: 32.599000000000004\n - type: mrr_at_5\n value: 33.893\n - type: ndcg_at_1\n value: 26.554\n - type: ndcg_at_10\n value: 38.132\n - type: ndcg_at_100\n value: 42.78\n - type: ndcg_at_1000\n value: 44.919\n - type: ndcg_at_3\n value: 32.833\n - type: ndcg_at_5\n value: 35.168\n - type: precision_at_1\n value: 26.554\n - type: precision_at_10\n value: 5.921\n - type: precision_at_100\n value: 0.8659999999999999\n - type: precision_at_1000\n value: 0.109\n - type: precision_at_3\n value: 13.861\n - type: precision_at_5\n value: 9.605\n - type: recall_at_1\n value: 24.587\n - type: recall_at_10\n value: 51.690000000000005\n - type: recall_at_100\n value: 73.428\n - type: recall_at_1000\n value: 89.551\n - type: recall_at_3\n value: 37.336999999999996\n - type: recall_at_5\n value: 43.047000000000004\n - type: map_at_1\n value: 16.715\n - type: map_at_10\n value: 24.251\n - type: map_at_100\n value: 25.326999999999998\n - type: map_at_1000\n value: 25.455\n - type: map_at_3\n value: 21.912000000000003\n - type: map_at_5\n value: 23.257\n - type: mrr_at_1\n value: 20.274\n - type: mrr_at_10\n value: 28.552\n - type: mrr_at_100\n value: 29.42\n - type: mrr_at_1000\n value: 29.497\n - type: mrr_at_3\n value: 26.14\n - type: mrr_at_5\n value: 27.502\n - 
type: ndcg_at_1\n value: 20.274\n - type: ndcg_at_10\n value: 29.088\n - type: ndcg_at_100\n value: 34.293\n - type: ndcg_at_1000\n value: 37.271\n - type: ndcg_at_3\n value: 24.708\n - type: ndcg_at_5\n value: 26.809\n - type: precision_at_1\n value: 20.274\n - type: precision_at_10\n value: 5.361\n - type: precision_at_100\n value: 0.915\n - type: precision_at_1000\n value: 0.13\n - type: precision_at_3\n value: 11.733\n - type: precision_at_5\n value: 8.556999999999999\n - type: recall_at_1\n value: 16.715\n - type: recall_at_10\n value: 39.587\n - type: recall_at_100\n value: 62.336000000000006\n - type: recall_at_1000\n value: 83.453\n - type: recall_at_3\n value: 27.839999999999996\n - type: recall_at_5\n value: 32.952999999999996\n - type: map_at_1\n value: 28.793000000000003\n - type: map_at_10\n value: 38.582\n - type: map_at_100\n value: 39.881\n - type: map_at_1000\n value: 39.987\n - type: map_at_3\n value: 35.851\n - type: map_at_5\n value: 37.289\n - type: mrr_at_1\n value: 34.455999999999996\n - type: mrr_at_10\n value: 43.909\n - type: mrr_at_100\n value: 44.74\n - type: mrr_at_1000\n value: 44.786\n - type: mrr_at_3\n value: 41.659\n - type: mrr_at_5\n value: 43.010999999999996\n - type: ndcg_at_1\n value: 34.455999999999996\n - type: ndcg_at_10\n value: 44.266\n - type: ndcg_at_100\n value: 49.639\n - type: ndcg_at_1000\n value: 51.644\n - type: ndcg_at_3\n value: 39.865\n - type: ndcg_at_5\n value: 41.887\n - type: precision_at_1\n value: 34.455999999999996\n - type: precision_at_10\n value: 7.843999999999999\n - type: precision_at_100\n value: 1.243\n - type: precision_at_1000\n value: 0.158\n - type: precision_at_3\n value: 18.831999999999997\n - type: precision_at_5\n value: 13.147\n - type: recall_at_1\n value: 28.793000000000003\n - type: recall_at_10\n value: 55.68300000000001\n - type: recall_at_100\n value: 77.99000000000001\n - type: recall_at_1000\n value: 91.183\n - type: recall_at_3\n value: 43.293\n - type: recall_at_5\n value: 48.618\n - type: map_at_1\n value: 25.907000000000004\n - type: map_at_10\n value: 35.519\n - type: map_at_100\n value: 36.806\n - type: map_at_1000\n value: 36.912\n - type: map_at_3\n value: 32.748\n - type: map_at_5\n value: 34.232\n - type: mrr_at_1\n value: 31.621\n - type: mrr_at_10\n value: 40.687\n - type: mrr_at_100\n value: 41.583\n - type: mrr_at_1000\n value: 41.638999999999996\n - type: mrr_at_3\n value: 38.527\n - type: mrr_at_5\n value: 39.612\n - type: ndcg_at_1\n value: 31.621\n - type: ndcg_at_10\n value: 41.003\n - type: ndcg_at_100\n value: 46.617999999999995\n - type: ndcg_at_1000\n value: 48.82\n - type: ndcg_at_3\n value: 36.542\n - type: ndcg_at_5\n value: 38.368\n - type: precision_at_1\n value: 31.621\n - type: precision_at_10\n value: 7.396999999999999\n - type: precision_at_100\n value: 1.191\n - type: precision_at_1000\n value: 0.153\n - type: precision_at_3\n value: 17.39\n - type: precision_at_5\n value: 12.1\n - type: recall_at_1\n value: 25.907000000000004\n - type: recall_at_10\n value: 52.115\n - type: recall_at_100\n value: 76.238\n - type: recall_at_1000\n value: 91.218\n - type: recall_at_3\n value: 39.417\n - type: recall_at_5\n value: 44.435\n - type: map_at_1\n value: 25.732166666666668\n - type: map_at_10\n value: 34.51616666666667\n - type: map_at_100\n value: 35.67241666666666\n - type: map_at_1000\n value: 35.78675\n - type: map_at_3\n value: 31.953416666666662\n - type: map_at_5\n value: 33.333\n - type: mrr_at_1\n value: 30.300166666666673\n - type: mrr_at_10\n value: 38.6255\n - type: 
mrr_at_100\n value: 39.46183333333334\n - type: mrr_at_1000\n value: 39.519999999999996\n - type: mrr_at_3\n value: 36.41299999999999\n - type: mrr_at_5\n value: 37.6365\n - type: ndcg_at_1\n value: 30.300166666666673\n - type: ndcg_at_10\n value: 39.61466666666667\n - type: ndcg_at_100\n value: 44.60808333333334\n - type: ndcg_at_1000\n value: 46.91708333333334\n - type: ndcg_at_3\n value: 35.26558333333333\n - type: ndcg_at_5\n value: 37.220000000000006\n - type: precision_at_1\n value: 30.300166666666673\n - type: precision_at_10\n value: 6.837416666666667\n - type: precision_at_100\n value: 1.10425\n - type: precision_at_1000\n value: 0.14875\n - type: precision_at_3\n value: 16.13716666666667\n - type: precision_at_5\n value: 11.2815\n - type: recall_at_1\n value: 25.732166666666668\n - type: recall_at_10\n value: 50.578916666666665\n - type: recall_at_100\n value: 72.42183333333334\n - type: recall_at_1000\n value: 88.48766666666667\n - type: recall_at_3\n value: 38.41325\n - type: recall_at_5\n value: 43.515750000000004\n - type: map_at_1\n value: 23.951\n - type: map_at_10\n value: 30.974\n - type: map_at_100\n value: 31.804\n - type: map_at_1000\n value: 31.900000000000002\n - type: map_at_3\n value: 28.762\n - type: map_at_5\n value: 29.94\n - type: mrr_at_1\n value: 26.534000000000002\n - type: mrr_at_10\n value: 33.553\n - type: mrr_at_100\n value: 34.297\n - type: mrr_at_1000\n value: 34.36\n - type: mrr_at_3\n value: 31.391000000000002\n - type: mrr_at_5\n value: 32.525999999999996\n - type: ndcg_at_1\n value: 26.534000000000002\n - type: ndcg_at_10\n value: 35.112\n - type: ndcg_at_100\n value: 39.28\n - type: ndcg_at_1000\n value: 41.723\n - type: ndcg_at_3\n value: 30.902\n - type: ndcg_at_5\n value: 32.759\n - type: precision_at_1\n value: 26.534000000000002\n - type: precision_at_10\n value: 5.445\n - type: precision_at_100\n value: 0.819\n - type: precision_at_1000\n value: 0.11\n - type: precision_at_3\n value: 12.986\n - type: precision_at_5\n value: 9.049\n - type: recall_at_1\n value: 23.951\n - type: recall_at_10\n value: 45.24\n - type: recall_at_100\n value: 64.12299999999999\n - type: recall_at_1000\n value: 82.28999999999999\n - type: recall_at_3\n value: 33.806000000000004\n - type: recall_at_5\n value: 38.277\n - type: map_at_1\n value: 16.829\n - type: map_at_10\n value: 23.684\n - type: map_at_100\n value: 24.683\n - type: map_at_1000\n value: 24.81\n - type: map_at_3\n value: 21.554000000000002\n - type: map_at_5\n value: 22.768\n - type: mrr_at_1\n value: 20.096\n - type: mrr_at_10\n value: 27.230999999999998\n - type: mrr_at_100\n value: 28.083999999999996\n - type: mrr_at_1000\n value: 28.166000000000004\n - type: mrr_at_3\n value: 25.212\n - type: mrr_at_5\n value: 26.32\n - type: ndcg_at_1\n value: 20.096\n - type: ndcg_at_10\n value: 27.989000000000004\n - type: ndcg_at_100\n value: 32.847\n - type: ndcg_at_1000\n value: 35.896\n - type: ndcg_at_3\n value: 24.116\n - type: ndcg_at_5\n value: 25.964\n - type: precision_at_1\n value: 20.096\n - type: precision_at_10\n value: 5\n - type: precision_at_100\n value: 0.8750000000000001\n - type: precision_at_1000\n value: 0.131\n - type: precision_at_3\n value: 11.207\n - type: precision_at_5\n value: 8.08\n - type: recall_at_1\n value: 16.829\n - type: recall_at_10\n value: 37.407000000000004\n - type: recall_at_100\n value: 59.101000000000006\n - type: recall_at_1000\n value: 81.024\n - type: recall_at_3\n value: 26.739\n - type: recall_at_5\n value: 31.524\n - type: map_at_1\n value: 24.138\n - type: 
map_at_10\n value: 32.275999999999996\n - type: map_at_100\n value: 33.416000000000004\n - type: map_at_1000\n value: 33.527\n - type: map_at_3\n value: 29.854000000000003\n - type: map_at_5\n value: 31.096\n - type: mrr_at_1\n value: 28.450999999999997\n - type: mrr_at_10\n value: 36.214\n - type: mrr_at_100\n value: 37.134\n - type: mrr_at_1000\n value: 37.198\n - type: mrr_at_3\n value: 34.001999999999995\n - type: mrr_at_5\n value: 35.187000000000005\n - type: ndcg_at_1\n value: 28.450999999999997\n - type: ndcg_at_10\n value: 37.166\n - type: ndcg_at_100\n value: 42.454\n - type: ndcg_at_1000\n value: 44.976\n - type: ndcg_at_3\n value: 32.796\n - type: ndcg_at_5\n value: 34.631\n - type: precision_at_1\n value: 28.450999999999997\n - type: precision_at_10\n value: 6.241\n - type: precision_at_100\n value: 0.9950000000000001\n - type: precision_at_1000\n value: 0.133\n - type: precision_at_3\n value: 14.801\n - type: precision_at_5\n value: 10.280000000000001\n - type: recall_at_1\n value: 24.138\n - type: recall_at_10\n value: 48.111\n - type: recall_at_100\n value: 71.245\n - type: recall_at_1000\n value: 88.986\n - type: recall_at_3\n value: 36.119\n - type: recall_at_5\n value: 40.846\n - type: map_at_1\n value: 23.244\n - type: map_at_10\n value: 31.227\n - type: map_at_100\n value: 33.007\n - type: map_at_1000\n value: 33.223\n - type: map_at_3\n value: 28.924\n - type: map_at_5\n value: 30.017\n - type: mrr_at_1\n value: 27.668\n - type: mrr_at_10\n value: 35.524\n - type: mrr_at_100\n value: 36.699\n - type: mrr_at_1000\n value: 36.759\n - type: mrr_at_3\n value: 33.366\n - type: mrr_at_5\n value: 34.552\n - type: ndcg_at_1\n value: 27.668\n - type: ndcg_at_10\n value: 36.381\n - type: ndcg_at_100\n value: 43.062\n - type: ndcg_at_1000\n value: 45.656\n - type: ndcg_at_3\n value: 32.501999999999995\n - type: ndcg_at_5\n value: 34.105999999999995\n - type: precision_at_1\n value: 27.668\n - type: precision_at_10\n value: 6.798\n - type: precision_at_100\n value: 1.492\n - type: precision_at_1000\n value: 0.234\n - type: precision_at_3\n value: 15.152\n - type: precision_at_5\n value: 10.791\n - type: recall_at_1\n value: 23.244\n - type: recall_at_10\n value: 45.979\n - type: recall_at_100\n value: 74.822\n - type: recall_at_1000\n value: 91.078\n - type: recall_at_3\n value: 34.925\n - type: recall_at_5\n value: 39.126\n - type: map_at_1\n value: 19.945\n - type: map_at_10\n value: 27.517999999999997\n - type: map_at_100\n value: 28.588\n - type: map_at_1000\n value: 28.682000000000002\n - type: map_at_3\n value: 25.345000000000002\n - type: map_at_5\n value: 26.555\n - type: mrr_at_1\n value: 21.996\n - type: mrr_at_10\n value: 29.845\n - type: mrr_at_100\n value: 30.775999999999996\n - type: mrr_at_1000\n value: 30.845\n - type: mrr_at_3\n value: 27.726\n - type: mrr_at_5\n value: 28.882\n - type: ndcg_at_1\n value: 21.996\n - type: ndcg_at_10\n value: 32.034\n - type: ndcg_at_100\n value: 37.185\n - type: ndcg_at_1000\n value: 39.645\n - type: ndcg_at_3\n value: 27.750999999999998\n - type: ndcg_at_5\n value: 29.805999999999997\n - type: precision_at_1\n value: 21.996\n - type: precision_at_10\n value: 5.065\n - type: precision_at_100\n value: 0.819\n - type: precision_at_1000\n value: 0.11399999999999999\n - type: precision_at_3\n value: 12.076\n - type: precision_at_5\n value: 8.392\n - type: recall_at_1\n value: 19.945\n - type: recall_at_10\n value: 43.62\n - type: recall_at_100\n value: 67.194\n - type: recall_at_1000\n value: 85.7\n - type: recall_at_3\n value: 
32.15\n - type: recall_at_5\n value: 37.208999999999996\n - task:\n type: Retrieval\n dataset:\n name: MTEB ClimateFEVER\n type: climate-fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 18.279\n - type: map_at_10\n value: 31.052999999999997\n - type: map_at_100\n value: 33.125\n - type: map_at_1000\n value: 33.306000000000004\n - type: map_at_3\n value: 26.208\n - type: map_at_5\n value: 28.857\n - type: mrr_at_1\n value: 42.671\n - type: mrr_at_10\n value: 54.557\n - type: mrr_at_100\n value: 55.142\n - type: mrr_at_1000\n value: 55.169000000000004\n - type: mrr_at_3\n value: 51.488\n - type: mrr_at_5\n value: 53.439\n - type: ndcg_at_1\n value: 42.671\n - type: ndcg_at_10\n value: 41.276\n - type: ndcg_at_100\n value: 48.376000000000005\n - type: ndcg_at_1000\n value: 51.318\n - type: ndcg_at_3\n value: 35.068\n - type: ndcg_at_5\n value: 37.242\n - type: precision_at_1\n value: 42.671\n - type: precision_at_10\n value: 12.638\n - type: precision_at_100\n value: 2.045\n - type: precision_at_1000\n value: 0.26\n - type: precision_at_3\n value: 26.08\n - type: precision_at_5\n value: 19.805\n - type: recall_at_1\n value: 18.279\n - type: recall_at_10\n value: 46.946\n - type: recall_at_100\n value: 70.97200000000001\n - type: recall_at_1000\n value: 87.107\n - type: recall_at_3\n value: 31.147999999999996\n - type: recall_at_5\n value: 38.099\n - task:\n type: Retrieval\n dataset:\n name: MTEB DBPedia\n type: dbpedia-entity\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 8.573\n - type: map_at_10\n value: 19.747\n - type: map_at_100\n value: 28.205000000000002\n - type: map_at_1000\n value: 29.831000000000003\n - type: map_at_3\n value: 14.109\n - type: map_at_5\n value: 16.448999999999998\n - type: mrr_at_1\n value: 71\n - type: mrr_at_10\n value: 77.68599999999999\n - type: mrr_at_100\n value: 77.995\n - type: mrr_at_1000\n value: 78.00200000000001\n - type: mrr_at_3\n value: 76.292\n - type: mrr_at_5\n value: 77.029\n - type: ndcg_at_1\n value: 59.12500000000001\n - type: ndcg_at_10\n value: 43.9\n - type: ndcg_at_100\n value: 47.863\n - type: ndcg_at_1000\n value: 54.848\n - type: ndcg_at_3\n value: 49.803999999999995\n - type: ndcg_at_5\n value: 46.317\n - type: precision_at_1\n value: 71\n - type: precision_at_10\n value: 34.4\n - type: precision_at_100\n value: 11.063\n - type: precision_at_1000\n value: 1.989\n - type: precision_at_3\n value: 52.333\n - type: precision_at_5\n value: 43.7\n - type: recall_at_1\n value: 8.573\n - type: recall_at_10\n value: 25.615\n - type: recall_at_100\n value: 53.385000000000005\n - type: recall_at_1000\n value: 75.46000000000001\n - type: recall_at_3\n value: 15.429\n - type: recall_at_5\n value: 19.357\n - task:\n type: Classification\n dataset:\n name: MTEB EmotionClassification\n type: mteb/emotion\n config: default\n split: test\n revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37\n metrics:\n - type: accuracy\n value: 47.989999999999995\n - type: f1\n value: 42.776314451497555\n - task:\n type: Retrieval\n dataset:\n name: MTEB FEVER\n type: fever\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 74.13499999999999\n - type: map_at_10\n value: 82.825\n - type: map_at_100\n value: 83.096\n - type: map_at_1000\n value: 83.111\n - type: map_at_3\n value: 81.748\n - type: map_at_5\n value: 82.446\n - type: mrr_at_1\n value: 79.553\n - type: mrr_at_10\n value: 86.654\n - type: mrr_at_100\n value: 86.774\n - type: mrr_at_1000\n 
value: 86.778\n - type: mrr_at_3\n value: 85.981\n - type: mrr_at_5\n value: 86.462\n - type: ndcg_at_1\n value: 79.553\n - type: ndcg_at_10\n value: 86.345\n - type: ndcg_at_100\n value: 87.32\n - type: ndcg_at_1000\n value: 87.58200000000001\n - type: ndcg_at_3\n value: 84.719\n - type: ndcg_at_5\n value: 85.677\n - type: precision_at_1\n value: 79.553\n - type: precision_at_10\n value: 10.402000000000001\n - type: precision_at_100\n value: 1.1119999999999999\n - type: precision_at_1000\n value: 0.11499999999999999\n - type: precision_at_3\n value: 32.413\n - type: precision_at_5\n value: 20.138\n - type: recall_at_1\n value: 74.13499999999999\n - type: recall_at_10\n value: 93.215\n - type: recall_at_100\n value: 97.083\n - type: recall_at_1000\n value: 98.732\n - type: recall_at_3\n value: 88.79\n - type: recall_at_5\n value: 91.259\n - task:\n type: Retrieval\n dataset:\n name: MTEB FiQA2018\n type: fiqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 18.298000000000002\n - type: map_at_10\n value: 29.901\n - type: map_at_100\n value: 31.528\n - type: map_at_1000\n value: 31.713\n - type: map_at_3\n value: 25.740000000000002\n - type: map_at_5\n value: 28.227999999999998\n - type: mrr_at_1\n value: 36.728\n - type: mrr_at_10\n value: 45.401\n - type: mrr_at_100\n value: 46.27\n - type: mrr_at_1000\n value: 46.315\n - type: mrr_at_3\n value: 42.978\n - type: mrr_at_5\n value: 44.29\n - type: ndcg_at_1\n value: 36.728\n - type: ndcg_at_10\n value: 37.456\n - type: ndcg_at_100\n value: 43.832\n - type: ndcg_at_1000\n value: 47\n - type: ndcg_at_3\n value: 33.694\n - type: ndcg_at_5\n value: 35.085\n - type: precision_at_1\n value: 36.728\n - type: precision_at_10\n value: 10.386\n - type: precision_at_100\n value: 1.701\n - type: precision_at_1000\n value: 0.22599999999999998\n - type: precision_at_3\n value: 22.479\n - type: precision_at_5\n value: 16.605\n - type: recall_at_1\n value: 18.298000000000002\n - type: recall_at_10\n value: 44.369\n - type: recall_at_100\n value: 68.098\n - type: recall_at_1000\n value: 87.21900000000001\n - type: recall_at_3\n value: 30.215999999999998\n - type: recall_at_5\n value: 36.861\n - task:\n type: Retrieval\n dataset:\n name: MTEB HotpotQA\n type: hotpotqa\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 39.568\n - type: map_at_10\n value: 65.061\n - type: map_at_100\n value: 65.896\n - type: map_at_1000\n value: 65.95100000000001\n - type: map_at_3\n value: 61.831\n - type: map_at_5\n value: 63.849000000000004\n - type: mrr_at_1\n value: 79.136\n - type: mrr_at_10\n value: 84.58200000000001\n - type: mrr_at_100\n value: 84.765\n - type: mrr_at_1000\n value: 84.772\n - type: mrr_at_3\n value: 83.684\n - type: mrr_at_5\n value: 84.223\n - type: ndcg_at_1\n value: 79.136\n - type: ndcg_at_10\n value: 72.622\n - type: ndcg_at_100\n value: 75.539\n - type: ndcg_at_1000\n value: 76.613\n - type: ndcg_at_3\n value: 68.065\n - type: ndcg_at_5\n value: 70.58\n - type: precision_at_1\n value: 79.136\n - type: precision_at_10\n value: 15.215\n - type: precision_at_100\n value: 1.7500000000000002\n - type: precision_at_1000\n value: 0.189\n - type: precision_at_3\n value: 44.011\n - type: precision_at_5\n value: 28.388999999999996\n - type: recall_at_1\n value: 39.568\n - type: recall_at_10\n value: 76.077\n - type: recall_at_100\n value: 87.481\n - type: recall_at_1000\n value: 94.56400000000001\n - type: recall_at_3\n value: 66.01599999999999\n - type: recall_at_5\n value: 
70.97200000000001\n - task:\n type: Classification\n dataset:\n name: MTEB ImdbClassification\n type: mteb/imdb\n config: default\n split: test\n revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7\n metrics:\n - type: accuracy\n value: 85.312\n - type: ap\n value: 80.36296867333715\n - type: f1\n value: 85.26613311552218\n - task:\n type: Retrieval\n dataset:\n name: MTEB MSMARCO\n type: msmarco\n config: default\n split: dev\n revision: None\n metrics:\n - type: map_at_1\n value: 23.363999999999997\n - type: map_at_10\n value: 35.711999999999996\n - type: map_at_100\n value: 36.876999999999995\n - type: map_at_1000\n value: 36.923\n - type: map_at_3\n value: 32.034\n - type: map_at_5\n value: 34.159\n - type: mrr_at_1\n value: 24.04\n - type: mrr_at_10\n value: 36.345\n - type: mrr_at_100\n value: 37.441\n - type: mrr_at_1000\n value: 37.480000000000004\n - type: mrr_at_3\n value: 32.713\n - type: mrr_at_5\n value: 34.824\n - type: ndcg_at_1\n value: 24.026\n - type: ndcg_at_10\n value: 42.531\n - type: ndcg_at_100\n value: 48.081\n - type: ndcg_at_1000\n value: 49.213\n - type: ndcg_at_3\n value: 35.044\n - type: ndcg_at_5\n value: 38.834\n - type: precision_at_1\n value: 24.026\n - type: precision_at_10\n value: 6.622999999999999\n - type: precision_at_100\n value: 0.941\n - type: precision_at_1000\n value: 0.104\n - type: precision_at_3\n value: 14.909\n - type: precision_at_5\n value: 10.871\n - type: recall_at_1\n value: 23.363999999999997\n - type: recall_at_10\n value: 63.426\n - type: recall_at_100\n value: 88.96300000000001\n - type: recall_at_1000\n value: 97.637\n - type: recall_at_3\n value: 43.095\n - type: recall_at_5\n value: 52.178000000000004\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPDomainClassification (en)\n type: mteb/mtop_domain\n config: en\n split: test\n revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf\n metrics:\n - type: accuracy\n value: 93.0095759233926\n - type: f1\n value: 92.78387794667408\n - task:\n type: Classification\n dataset:\n name: MTEB MTOPIntentClassification (en)\n type: mteb/mtop_intent\n config: en\n split: test\n revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba\n metrics:\n - type: accuracy\n value: 75.0296397628819\n - type: f1\n value: 58.45699589820874\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveIntentClassification (en)\n type: mteb/amazon_massive_intent\n config: en\n split: test\n revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7\n metrics:\n - type: accuracy\n value: 73.45662407531944\n - type: f1\n value: 71.42364781421813\n - task:\n type: Classification\n dataset:\n name: MTEB MassiveScenarioClassification (en)\n type: mteb/amazon_massive_scenario\n config: en\n split: test\n revision: 7d571f92784cd94a019292a1f45445077d0ef634\n metrics:\n - type: accuracy\n value: 77.07800941492937\n - type: f1\n value: 77.22799045640845\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringP2P\n type: mteb/medrxiv-clustering-p2p\n config: default\n split: test\n revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73\n metrics:\n - type: v_measure\n value: 34.531234379250606\n - task:\n type: Clustering\n dataset:\n name: MTEB MedrxivClusteringS2S\n type: mteb/medrxiv-clustering-s2s\n config: default\n split: test\n revision: 35191c8c0dca72d8ff3efcd72aa802307d469663\n metrics:\n - type: v_measure\n value: 30.941490381193802\n - task:\n type: Reranking\n dataset:\n name: MTEB MindSmallReranking\n type: mteb/mind_small\n config: default\n split: test\n revision: 
3bdac13927fdc888b903db93b2ffdbd90b295a69\n metrics:\n - type: map\n value: 30.3115090856725\n - type: mrr\n value: 31.290667638675757\n - task:\n type: Retrieval\n dataset:\n name: MTEB NFCorpus\n type: nfcorpus\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 5.465\n - type: map_at_10\n value: 13.03\n - type: map_at_100\n value: 16.057\n - type: map_at_1000\n value: 17.49\n - type: map_at_3\n value: 9.553\n - type: map_at_5\n value: 11.204\n - type: mrr_at_1\n value: 43.653\n - type: mrr_at_10\n value: 53.269\n - type: mrr_at_100\n value: 53.72\n - type: mrr_at_1000\n value: 53.761\n - type: mrr_at_3\n value: 50.929\n - type: mrr_at_5\n value: 52.461\n - type: ndcg_at_1\n value: 42.26\n - type: ndcg_at_10\n value: 34.673\n - type: ndcg_at_100\n value: 30.759999999999998\n - type: ndcg_at_1000\n value: 39.728\n - type: ndcg_at_3\n value: 40.349000000000004\n - type: ndcg_at_5\n value: 37.915\n - type: precision_at_1\n value: 43.653\n - type: precision_at_10\n value: 25.789\n - type: precision_at_100\n value: 7.754999999999999\n - type: precision_at_1000\n value: 2.07\n - type: precision_at_3\n value: 38.596000000000004\n - type: precision_at_5\n value: 33.251\n - type: recall_at_1\n value: 5.465\n - type: recall_at_10\n value: 17.148\n - type: recall_at_100\n value: 29.768\n - type: recall_at_1000\n value: 62.239\n - type: recall_at_3\n value: 10.577\n - type: recall_at_5\n value: 13.315\n - task:\n type: Retrieval\n dataset:\n name: MTEB NQ\n type: nq\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 37.008\n - type: map_at_10\n value: 52.467\n - type: map_at_100\n value: 53.342999999999996\n - type: map_at_1000\n value: 53.366\n - type: map_at_3\n value: 48.412\n - type: map_at_5\n value: 50.875\n - type: mrr_at_1\n value: 41.541\n - type: mrr_at_10\n value: 54.967\n - type: mrr_at_100\n value: 55.611\n - type: mrr_at_1000\n value: 55.627\n - type: mrr_at_3\n value: 51.824999999999996\n - type: mrr_at_5\n value: 53.763000000000005\n - type: ndcg_at_1\n value: 41.541\n - type: ndcg_at_10\n value: 59.724999999999994\n - type: ndcg_at_100\n value: 63.38700000000001\n - type: ndcg_at_1000\n value: 63.883\n - type: ndcg_at_3\n value: 52.331\n - type: ndcg_at_5\n value: 56.327000000000005\n - type: precision_at_1\n value: 41.541\n - type: precision_at_10\n value: 9.447\n - type: precision_at_100\n value: 1.1520000000000001\n - type: precision_at_1000\n value: 0.12\n - type: precision_at_3\n value: 23.262\n - type: precision_at_5\n value: 16.314999999999998\n - type: recall_at_1\n value: 37.008\n - type: recall_at_10\n value: 79.145\n - type: recall_at_100\n value: 94.986\n - type: recall_at_1000\n value: 98.607\n - type: recall_at_3\n value: 60.277\n - type: recall_at_5\n value: 69.407\n - task:\n type: Retrieval\n dataset:\n name: MTEB QuoraRetrieval\n type: quora\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 70.402\n - type: map_at_10\n value: 84.181\n - type: map_at_100\n value: 84.796\n - type: map_at_1000\n value: 84.81400000000001\n - type: map_at_3\n value: 81.209\n - type: map_at_5\n value: 83.085\n - type: mrr_at_1\n value: 81.02000000000001\n - type: mrr_at_10\n value: 87.263\n - type: mrr_at_100\n value: 87.36\n - type: mrr_at_1000\n value: 87.36\n - type: mrr_at_3\n value: 86.235\n - type: mrr_at_5\n value: 86.945\n - type: ndcg_at_1\n value: 81.01\n - type: ndcg_at_10\n value: 87.99900000000001\n - type: ndcg_at_100\n value: 89.217\n - type: ndcg_at_1000\n value: 
89.33\n - type: ndcg_at_3\n value: 85.053\n - type: ndcg_at_5\n value: 86.703\n - type: precision_at_1\n value: 81.01\n - type: precision_at_10\n value: 13.336\n - type: precision_at_100\n value: 1.52\n - type: precision_at_1000\n value: 0.156\n - type: precision_at_3\n value: 37.14\n - type: precision_at_5\n value: 24.44\n - type: recall_at_1\n value: 70.402\n - type: recall_at_10\n value: 95.214\n - type: recall_at_100\n value: 99.438\n - type: recall_at_1000\n value: 99.928\n - type: recall_at_3\n value: 86.75699999999999\n - type: recall_at_5\n value: 91.44099999999999\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClustering\n type: mteb/reddit-clustering\n config: default\n split: test\n revision: 24640382cdbf8abc73003fb0fa6d111a705499eb\n metrics:\n - type: v_measure\n value: 56.51721502758904\n - task:\n type: Clustering\n dataset:\n name: MTEB RedditClusteringP2P\n type: mteb/reddit-clustering-p2p\n config: default\n split: test\n revision: 282350215ef01743dc01b456c7f5241fa8937f16\n metrics:\n - type: v_measure\n value: 61.054808572333016\n - task:\n type: Retrieval\n dataset:\n name: MTEB SCIDOCS\n type: scidocs\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 4.578\n - type: map_at_10\n value: 11.036999999999999\n - type: map_at_100\n value: 12.879999999999999\n - type: map_at_1000\n value: 13.150999999999998\n - type: map_at_3\n value: 8.133\n - type: map_at_5\n value: 9.559\n - type: mrr_at_1\n value: 22.6\n - type: mrr_at_10\n value: 32.68\n - type: mrr_at_100\n value: 33.789\n - type: mrr_at_1000\n value: 33.854\n - type: mrr_at_3\n value: 29.7\n - type: mrr_at_5\n value: 31.480000000000004\n - type: ndcg_at_1\n value: 22.6\n - type: ndcg_at_10\n value: 18.616\n - type: ndcg_at_100\n value: 25.883\n - type: ndcg_at_1000\n value: 30.944\n - type: ndcg_at_3\n value: 18.136\n - type: ndcg_at_5\n value: 15.625\n - type: precision_at_1\n value: 22.6\n - type: precision_at_10\n value: 9.48\n - type: precision_at_100\n value: 1.991\n - type: precision_at_1000\n value: 0.321\n - type: precision_at_3\n value: 16.8\n - type: precision_at_5\n value: 13.54\n - type: recall_at_1\n value: 4.578\n - type: recall_at_10\n value: 19.213\n - type: recall_at_100\n value: 40.397\n - type: recall_at_1000\n value: 65.2\n - type: recall_at_3\n value: 10.208\n - type: recall_at_5\n value: 13.718\n - task:\n type: STS\n dataset:\n name: MTEB SICK-R\n type: mteb/sickr-sts\n config: default\n split: test\n revision: a6ea5a8cab320b040a23452cc28066d9beae2cee\n metrics:\n - type: cos_sim_pearson\n value: 83.44288351714071\n - type: cos_sim_spearman\n value: 79.37995604564952\n - type: euclidean_pearson\n value: 81.1078874670718\n - type: euclidean_spearman\n value: 79.37995905980499\n - type: manhattan_pearson\n value: 81.03697527288986\n - type: manhattan_spearman\n value: 79.33490235296236\n - task:\n type: STS\n dataset:\n name: MTEB STS12\n type: mteb/sts12-sts\n config: default\n split: test\n revision: a0d554a64d88156834ff5ae9920b964011b16384\n metrics:\n - type: cos_sim_pearson\n value: 84.95557650436523\n - type: cos_sim_spearman\n value: 78.5190672399868\n - type: euclidean_pearson\n value: 81.58064025904707\n - type: euclidean_spearman\n value: 78.5190672399868\n - type: manhattan_pearson\n value: 81.52857930619889\n - type: manhattan_spearman\n value: 78.50421361308034\n - task:\n type: STS\n dataset:\n name: MTEB STS13\n type: mteb/sts13-sts\n config: default\n split: test\n revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca\n metrics:\n - 
type: cos_sim_pearson\n value: 84.79128416228737\n - type: cos_sim_spearman\n value: 86.05402451477147\n - type: euclidean_pearson\n value: 85.46280267054289\n - type: euclidean_spearman\n value: 86.05402451477147\n - type: manhattan_pearson\n value: 85.46278563858236\n - type: manhattan_spearman\n value: 86.08079590861004\n - task:\n type: STS\n dataset:\n name: MTEB STS14\n type: mteb/sts14-sts\n config: default\n split: test\n revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375\n metrics:\n - type: cos_sim_pearson\n value: 83.20623089568763\n - type: cos_sim_spearman\n value: 81.53786907061009\n - type: euclidean_pearson\n value: 82.82272250091494\n - type: euclidean_spearman\n value: 81.53786907061009\n - type: manhattan_pearson\n value: 82.78850494027013\n - type: manhattan_spearman\n value: 81.5135618083407\n - task:\n type: STS\n dataset:\n name: MTEB STS15\n type: mteb/sts15-sts\n config: default\n split: test\n revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3\n metrics:\n - type: cos_sim_pearson\n value: 85.46366618397936\n - type: cos_sim_spearman\n value: 86.96566013336908\n - type: euclidean_pearson\n value: 86.62651697548931\n - type: euclidean_spearman\n value: 86.96565526364454\n - type: manhattan_pearson\n value: 86.58812160258009\n - type: manhattan_spearman\n value: 86.9336484321288\n - task:\n type: STS\n dataset:\n name: MTEB STS16\n type: mteb/sts16-sts\n config: default\n split: test\n revision: 4d8694f8f0e0100860b497b999b3dbed754a0513\n metrics:\n - type: cos_sim_pearson\n value: 82.51858358641559\n - type: cos_sim_spearman\n value: 84.7652527954999\n - type: euclidean_pearson\n value: 84.23914783766861\n - type: euclidean_spearman\n value: 84.7652527954999\n - type: manhattan_pearson\n value: 84.22749648503171\n - type: manhattan_spearman\n value: 84.74527996746386\n - task:\n type: STS\n dataset:\n name: MTEB STS17 (en-en)\n type: mteb/sts17-crosslingual-sts\n config: en-en\n split: test\n revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d\n metrics:\n - type: cos_sim_pearson\n value: 87.28026563313065\n - type: cos_sim_spearman\n value: 87.46928143824915\n - type: euclidean_pearson\n value: 88.30558762000372\n - type: euclidean_spearman\n value: 87.46928143824915\n - type: manhattan_pearson\n value: 88.10513330809331\n - type: manhattan_spearman\n value: 87.21069787834173\n - task:\n type: STS\n dataset:\n name: MTEB STS22 (en)\n type: mteb/sts22-crosslingual-sts\n config: en\n split: test\n revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80\n metrics:\n - type: cos_sim_pearson\n value: 62.376497134587375\n - type: cos_sim_spearman\n value: 65.0159550112516\n - type: euclidean_pearson\n value: 65.64572120879598\n - type: euclidean_spearman\n value: 65.0159550112516\n - type: manhattan_pearson\n value: 65.88143604989976\n - type: manhattan_spearman\n value: 65.17547297222434\n - task:\n type: STS\n dataset:\n name: MTEB STSBenchmark\n type: mteb/stsbenchmark-sts\n config: default\n split: test\n revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831\n metrics:\n - type: cos_sim_pearson\n value: 84.22876368947644\n - type: cos_sim_spearman\n value: 85.46935577445318\n - type: euclidean_pearson\n value: 85.32830231392005\n - type: euclidean_spearman\n value: 85.46935577445318\n - type: manhattan_pearson\n value: 85.30353211758495\n - type: manhattan_spearman\n value: 85.42821085956945\n - task:\n type: Reranking\n dataset:\n name: MTEB SciDocsRR\n type: mteb/scidocs-reranking\n config: default\n split: test\n revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab\n 
metrics:\n - type: map\n value: 80.60986667767133\n - type: mrr\n value: 94.29432314236236\n - task:\n type: Retrieval\n dataset:\n name: MTEB SciFact\n type: scifact\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 54.528\n - type: map_at_10\n value: 65.187\n - type: map_at_100\n value: 65.62599999999999\n - type: map_at_1000\n value: 65.657\n - type: map_at_3\n value: 62.352\n - type: map_at_5\n value: 64.025\n - type: mrr_at_1\n value: 57.333\n - type: mrr_at_10\n value: 66.577\n - type: mrr_at_100\n value: 66.88\n - type: mrr_at_1000\n value: 66.908\n - type: mrr_at_3\n value: 64.556\n - type: mrr_at_5\n value: 65.739\n - type: ndcg_at_1\n value: 57.333\n - type: ndcg_at_10\n value: 70.275\n - type: ndcg_at_100\n value: 72.136\n - type: ndcg_at_1000\n value: 72.963\n - type: ndcg_at_3\n value: 65.414\n - type: ndcg_at_5\n value: 67.831\n - type: precision_at_1\n value: 57.333\n - type: precision_at_10\n value: 9.5\n - type: precision_at_100\n value: 1.057\n - type: precision_at_1000\n value: 0.11199999999999999\n - type: precision_at_3\n value: 25.778000000000002\n - type: precision_at_5\n value: 17.2\n - type: recall_at_1\n value: 54.528\n - type: recall_at_10\n value: 84.356\n - type: recall_at_100\n value: 92.833\n - type: recall_at_1000\n value: 99.333\n - type: recall_at_3\n value: 71.283\n - type: recall_at_5\n value: 77.14999999999999\n - task:\n type: PairClassification\n dataset:\n name: MTEB SprintDuplicateQuestions\n type: mteb/sprintduplicatequestions-pairclassification\n config: default\n split: test\n revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46\n metrics:\n - type: cos_sim_accuracy\n value: 99.74158415841585\n - type: cos_sim_ap\n value: 92.90048959850317\n - type: cos_sim_f1\n value: 86.35650810245687\n - type: cos_sim_precision\n value: 90.4709748083242\n - type: cos_sim_recall\n value: 82.6\n - type: dot_accuracy\n value: 99.74158415841585\n - type: dot_ap\n value: 92.90048959850317\n - type: dot_f1\n value: 86.35650810245687\n - type: dot_precision\n value: 90.4709748083242\n - type: dot_recall\n value: 82.6\n - type: euclidean_accuracy\n value: 99.74158415841585\n - type: euclidean_ap\n value: 92.90048959850317\n - type: euclidean_f1\n value: 86.35650810245687\n - type: euclidean_precision\n value: 90.4709748083242\n - type: euclidean_recall\n value: 82.6\n - type: manhattan_accuracy\n value: 99.74158415841585\n - type: manhattan_ap\n value: 92.87344692947894\n - type: manhattan_f1\n value: 86.38497652582159\n - type: manhattan_precision\n value: 90.29443838604145\n - type: manhattan_recall\n value: 82.8\n - type: max_accuracy\n value: 99.74158415841585\n - type: max_ap\n value: 92.90048959850317\n - type: max_f1\n value: 86.38497652582159\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClustering\n type: mteb/stackexchange-clustering\n config: default\n split: test\n revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259\n metrics:\n - type: v_measure\n value: 63.191648770424216\n - task:\n type: Clustering\n dataset:\n name: MTEB StackExchangeClusteringP2P\n type: mteb/stackexchange-clustering-p2p\n config: default\n split: test\n revision: 815ca46b2622cec33ccafc3735d572c266efdb44\n metrics:\n - type: v_measure\n value: 34.02944668730218\n - task:\n type: Reranking\n dataset:\n name: MTEB StackOverflowDupQuestions\n type: mteb/stackoverflowdupquestions-reranking\n config: default\n split: test\n revision: e185fbe320c72810689fc5848eb6114e1ef5ec69\n metrics:\n - type: map\n value: 50.466386167525265\n - 
type: mrr\n value: 51.19071492233257\n - task:\n type: Summarization\n dataset:\n name: MTEB SummEval\n type: mteb/summeval\n config: default\n split: test\n revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c\n metrics:\n - type: cos_sim_pearson\n value: 30.198022505886435\n - type: cos_sim_spearman\n value: 30.40170257939193\n - type: dot_pearson\n value: 30.198015316402614\n - type: dot_spearman\n value: 30.40170257939193\n - task:\n type: Retrieval\n dataset:\n name: MTEB TRECCOVID\n type: trec-covid\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 0.242\n - type: map_at_10\n value: 2.17\n - type: map_at_100\n value: 12.221\n - type: map_at_1000\n value: 28.63\n - type: map_at_3\n value: 0.728\n - type: map_at_5\n value: 1.185\n - type: mrr_at_1\n value: 94\n - type: mrr_at_10\n value: 97\n - type: mrr_at_100\n value: 97\n - type: mrr_at_1000\n value: 97\n - type: mrr_at_3\n value: 97\n - type: mrr_at_5\n value: 97\n - type: ndcg_at_1\n value: 89\n - type: ndcg_at_10\n value: 82.30499999999999\n - type: ndcg_at_100\n value: 61.839999999999996\n - type: ndcg_at_1000\n value: 53.381\n - type: ndcg_at_3\n value: 88.877\n - type: ndcg_at_5\n value: 86.05199999999999\n - type: precision_at_1\n value: 94\n - type: precision_at_10\n value: 87\n - type: precision_at_100\n value: 63.38\n - type: precision_at_1000\n value: 23.498\n - type: precision_at_3\n value: 94\n - type: precision_at_5\n value: 92\n - type: recall_at_1\n value: 0.242\n - type: recall_at_10\n value: 2.302\n - type: recall_at_100\n value: 14.979000000000001\n - type: recall_at_1000\n value: 49.638\n - type: recall_at_3\n value: 0.753\n - type: recall_at_5\n value: 1.226\n - task:\n type: Retrieval\n dataset:\n name: MTEB Touche2020\n type: webis-touche2020\n config: default\n split: test\n revision: None\n metrics:\n - type: map_at_1\n value: 3.006\n - type: map_at_10\n value: 11.805\n - type: map_at_100\n value: 18.146\n - type: map_at_1000\n value: 19.788\n - type: map_at_3\n value: 5.914\n - type: map_at_5\n value: 8.801\n - type: mrr_at_1\n value: 40.816\n - type: mrr_at_10\n value: 56.36600000000001\n - type: mrr_at_100\n value: 56.721999999999994\n - type: mrr_at_1000\n value: 56.721999999999994\n - type: mrr_at_3\n value: 52.041000000000004\n - type: mrr_at_5\n value: 54.796\n - type: ndcg_at_1\n value: 37.755\n - type: ndcg_at_10\n value: 29.863\n - type: ndcg_at_100\n value: 39.571\n - type: ndcg_at_1000\n value: 51.385999999999996\n - type: ndcg_at_3\n value: 32.578\n - type: ndcg_at_5\n value: 32.351\n - type: precision_at_1\n value: 40.816\n - type: precision_at_10\n value: 26.531\n - type: precision_at_100\n value: 7.796\n - type: precision_at_1000\n value: 1.555\n - type: precision_at_3\n value: 32.653\n - type: precision_at_5\n value: 33.061\n - type: recall_at_1\n value: 3.006\n - type: recall_at_10\n value: 18.738\n - type: recall_at_100\n value: 48.058\n - type: recall_at_1000\n value: 83.41300000000001\n - type: recall_at_3\n value: 7.166\n - type: recall_at_5\n value: 12.102\n - task:\n type: Classification\n dataset:\n name: MTEB ToxicConversationsClassification\n type: mteb/toxic_conversations_50k\n config: default\n split: test\n revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c\n metrics:\n - type: accuracy\n value: 71.4178\n - type: ap\n value: 14.648781342150446\n - type: f1\n value: 55.07299194946378\n - task:\n type: Classification\n dataset:\n name: MTEB TweetSentimentExtractionClassification\n type: mteb/tweet_sentiment_extraction\n config: default\n split: 
test\n revision: d604517c81ca91fe16a244d1248fc021f9ecee7a\n metrics:\n - type: accuracy\n value: 60.919637804187886\n - type: f1\n value: 61.24122013967399\n - task:\n type: Clustering\n dataset:\n name: MTEB TwentyNewsgroupsClustering\n type: mteb/twentynewsgroups-clustering\n config: default\n split: test\n revision: 6125ec4e24fa026cec8a478383ee943acfbd5449\n metrics:\n - type: v_measure\n value: 49.207896583685695\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterSemEval2015\n type: mteb/twittersemeval2015-pairclassification\n config: default\n split: test\n revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1\n metrics:\n - type: cos_sim_accuracy\n value: 86.23114978840078\n - type: cos_sim_ap\n value: 74.26624727825818\n - type: cos_sim_f1\n value: 68.72377190817083\n - type: cos_sim_precision\n value: 64.56400742115028\n - type: cos_sim_recall\n value: 73.45646437994723\n - type: dot_accuracy\n value: 86.23114978840078\n - type: dot_ap\n value: 74.26624032659652\n - type: dot_f1\n value: 68.72377190817083\n - type: dot_precision\n value: 64.56400742115028\n - type: dot_recall\n value: 73.45646437994723\n - type: euclidean_accuracy\n value: 86.23114978840078\n - type: euclidean_ap\n value: 74.26624714480556\n - type: euclidean_f1\n value: 68.72377190817083\n - type: euclidean_precision\n value: 64.56400742115028\n - type: euclidean_recall\n value: 73.45646437994723\n - type: manhattan_accuracy\n value: 86.16558383501221\n - type: manhattan_ap\n value: 74.2091943976357\n - type: manhattan_f1\n value: 68.64221520524654\n - type: manhattan_precision\n value: 63.59135913591359\n - type: manhattan_recall\n value: 74.5646437994723\n - type: max_accuracy\n value: 86.23114978840078\n - type: max_ap\n value: 74.26624727825818\n - type: max_f1\n value: 68.72377190817083\n - task:\n type: PairClassification\n dataset:\n name: MTEB TwitterURLCorpus\n type: mteb/twitterurlcorpus-pairclassification\n config: default\n split: test\n revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf\n metrics:\n - type: cos_sim_accuracy\n value: 89.3681841114604\n - type: cos_sim_ap\n value: 86.65166387498546\n - type: cos_sim_f1\n value: 79.02581944698774\n - type: cos_sim_precision\n value: 75.35796605434099\n - type: cos_sim_recall\n value: 83.06898675700647\n - type: dot_accuracy\n value: 89.3681841114604\n - type: dot_ap\n value: 86.65166019802056\n - type: dot_f1\n value: 79.02581944698774\n - type: dot_precision\n value: 75.35796605434099\n - type: dot_recall\n value: 83.06898675700647\n - type: euclidean_accuracy\n value: 89.3681841114604\n - type: euclidean_ap\n value: 86.65166462876266\n - type: euclidean_f1\n value: 79.02581944698774\n - type: euclidean_precision\n value: 75.35796605434099\n - type: euclidean_recall\n value: 83.06898675700647\n - type: manhattan_accuracy\n value: 89.36624364497226\n - type: manhattan_ap\n value: 86.65076471274106\n - type: manhattan_f1\n value: 79.07408783532733\n - type: manhattan_precision\n value: 76.41102972856527\n - type: manhattan_recall\n value: 81.92947336002464\n - type: max_accuracy\n value: 89.3681841114604\n - type: max_ap\n value: 86.65166462876266\n - type: max_f1\n value: 79.07408783532733\n---\n\n# YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF\nThis model was converted to GGUF format from [`nomic-ai/nomic-embed-text-v1.5`](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.\nRefer to the [original model 
card](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) for more details on the model.\n\n## Use with llama.cpp\nInstall llama.cpp through brew (works on Mac and Linux)\n\n```bash\nbrew install llama.cpp\n\n```\nInvoke the llama.cpp server or the CLI.\n\n### CLI:\n```bash\nllama-cli --hf-repo YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF --hf-file nomic-embed-text-v1.5-q8_0.gguf -p \"The meaning to life and the universe is\"\n```\n\n### Server:\n```bash\nllama-server --hf-repo YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF --hf-file nomic-embed-text-v1.5-q8_0.gguf -c 2048\n```\n\nNote: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well.\n\nStep 1: Clone llama.cpp from GitHub.\n```\ngit clone https://github.com/ggerganov/llama.cpp\n```\n\nStep 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux).\n```\ncd llama.cpp && LLAMA_CURL=1 make\n```\n\nStep 3: Run inference through the main binary.\n```\n./llama-cli --hf-repo YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF --hf-file nomic-embed-text-v1.5-q8_0.gguf -p \"The meaning to life and the universe is\"\n```\nor \n```\n./llama-server --hf-repo YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF --hf-file nomic-embed-text-v1.5-q8_0.gguf -c 2048\n```\n"},"matched_bigbio_names":{"kind":"list like","value":["BIOSSES","SCIFACT"],"string":"[\n \"BIOSSES\",\n \"SCIFACT\"\n]"}}},{"rowIdx":2289,"cells":{"id":{"kind":"string","value":"sudhanshu746/bge-reranker-v2-m3-onnx-o4"},"author":{"kind":"string","value":"sudhanshu746"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["onnx","xlm-roberta","arxiv:2312.15503","arxiv:2402.03216","region:us"],"string":"[\n \"onnx\",\n \"xlm-roberta\",\n \"arxiv:2312.15503\",\n \"arxiv:2402.03216\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-19T15:18:30Z","string":"2024-08-19T15:18:30Z"},"last_modified":{"kind":"string","value":"2024-08-19T15:53:32+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\n\nThis is ONNX version of bge-reranker-v2-m3 model created by Sudhanshu Sharma\n---\nlicense: apache-2.0\nlanguage:\n- multilingual\npipeline_tag: text-classification\ntags:\n- transformers\n- sentence-transformers\n- text-embeddings-inference\n\n\n---\n\n# Reranker\n\n**More details please refer to our Github: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/tree/master).**\n\n- [Model List](#model-list)\n- [Usage](#usage)\n- [Fine-tuning](#fine-tune)\n- [Evaluation](#evaluation)\n- [Citation](#citation)\n\nDifferent from embedding model, reranker uses question and document as input and directly output similarity instead of embedding. \nYou can get a relevance score by inputting query and passage to the reranker. 
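Since this repository hosts an ONNX export of bge-reranker-v2-m3, a minimal scoring sketch using ONNX Runtime through Hugging Face Optimum is given below. This is a hedged sketch, not an official usage recipe: it assumes `optimum[onnxruntime]` is installed and that the export in this repository loads with `ORTModelForSequenceClassification`; the `-o4` optimization level generally targets GPU (fp16), so a CUDA execution provider may be required.

```python
# Hedged sketch: score (query, passage) pairs with the ONNX export of bge-reranker-v2-m3.
# Assumptions: pip install "optimum[onnxruntime]" transformers, and the ONNX file in this
# repository is compatible with ORTModelForSequenceClassification.
from optimum.onnxruntime import ORTModelForSequenceClassification
from transformers import AutoTokenizer

repo_id = "sudhanshu746/bge-reranker-v2-m3-onnx-o4"
tokenizer = AutoTokenizer.from_pretrained(repo_id)
# For the GPU-optimized (-o4, fp16) graph you may need provider="CUDAExecutionProvider".
model = ORTModelForSequenceClassification.from_pretrained(repo_id)

pairs = [
    ["what is panda?", "hi"],
    ["what is panda?", "The giant panda (Ailuropoda melanoleuca) is a bear species endemic to China."],
]
inputs = tokenizer(pairs, padding=True, truncation=True, max_length=512, return_tensors="pt")
scores = model(**inputs).logits.view(-1)
print(scores)  # raw logits; higher means more relevant (see the sigmoid mapping below)
```

The tokenization and scoring mirror the Hugging Face Transformers example later in this card; only the model class differs.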
\nThe score can be mapped to a float value in [0,1] with a sigmoid function (a minimal sketch of this mapping appears after the model-selection notes below).\n\n\n## Model List\n\n| Model | Base model | Language | Layerwise | Feature |\n|:---|:---:|:---:|:---:|:---:|\n| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) | Chinese and English | - | Lightweight reranker model, easy to deploy, with fast inference. |\n| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | [xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) | Chinese and English | - | Lightweight reranker model, easy to deploy, with fast inference. |\n| [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) | [bge-m3](https://huggingface.co/BAAI/bge-m3) | Multilingual | - | Lightweight reranker model with strong multilingual capabilities, easy to deploy, with fast inference. |\n| [BAAI/bge-reranker-v2-gemma](https://huggingface.co/BAAI/bge-reranker-v2-gemma) | [gemma-2b](https://huggingface.co/google/gemma-2b) | Multilingual | - | Suitable for multilingual contexts; performs well in both English and multilingual settings. |\n| [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise) | [MiniCPM-2B-dpo-bf16](https://huggingface.co/openbmb/MiniCPM-2B-dpo-bf16) | Multilingual | 8-40 | Suitable for multilingual contexts; performs well in both English and Chinese, and lets you choose which layers to use for output, enabling accelerated inference. |\n\n\nYou can select the model according to your scenario and resources:\n- For **multilingual** scenarios, use [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) or [BAAI/bge-reranker-v2-gemma](https://huggingface.co/BAAI/bge-reranker-v2-gemma).\n\n- For **Chinese or English**, use [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) or [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise).\n\n- For **efficiency**, use [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) or the lower layers of [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise).\n\n- For the **best performance**, we recommend [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise) or [BAAI/bge-reranker-v2-gemma](https://huggingface.co/BAAI/bge-reranker-v2-gemma).
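As noted above, the raw reranker score is an unbounded logit. The short sketch below (plain Python; the value is taken from the FlagEmbedding example that follows) shows the sigmoid mapping to [0,1] that `normalize=True` applies.

```python
# Minimal sketch: map a raw reranker logit to a [0, 1] relevance score with a sigmoid.
import math

def sigmoid(x: float) -> float:
    return 1.0 / (1.0 + math.exp(-x))

raw_score = -5.65234375      # raw logit from the FlagEmbedding example below
print(sigmoid(raw_score))    # ~0.0035, consistent with the normalize=True output below
```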
\n\n## Usage\n### Using FlagEmbedding\n\n```\npip install -U FlagEmbedding\n```\n\n#### For normal reranker (bge-reranker-base / bge-reranker-large / bge-reranker-v2-m3)\n\nGet relevance scores (higher scores indicate more relevance):\n\n```python\nfrom FlagEmbedding import FlagReranker\nreranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation\n\nscore = reranker.compute_score(['query', 'passage'])\nprint(score) # -5.65234375\n\n# You can map the scores into 0-1 by setting \"normalize=True\", which applies a sigmoid function to the score\nscore = reranker.compute_score(['query', 'passage'], normalize=True)\nprint(score) # 0.003497010252573502\n\nscores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])\nprint(scores) # [-8.1875, 5.26171875]\n\n# You can map the scores into 0-1 by setting \"normalize=True\", which applies a sigmoid function to the score\nscores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']], normalize=True)\nprint(scores) # [0.00027803096387751553, 0.9948403768236574]\n```\n\n#### For LLM-based reranker\n\n```python\nfrom FlagEmbedding import FlagLLMReranker\nreranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation\n# reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation\n\nscore = reranker.compute_score(['query', 'passage'])\nprint(score)\n\nscores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])\nprint(scores)\n```\n\n#### For LLM-based layerwise reranker\n\n```python\nfrom FlagEmbedding import LayerWiseFlagLLMReranker\nreranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_fp16=True) # Setting use_fp16 to True speeds up computation with a slight performance degradation\n# reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_bf16=True) # You can also set use_bf16=True to speed up computation with a slight performance degradation\n\nscore = reranker.compute_score(['query', 'passage'], cutoff_layers=[28]) # Adjust 'cutoff_layers' to pick which layers are used for computing the score.\nprint(score)\n\nscores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']], cutoff_layers=[28])\nprint(scores)\n```\n\n### Using Hugging Face transformers\n\n#### For normal reranker (bge-reranker-base / bge-reranker-large / bge-reranker-v2-m3)\n\nGet relevance scores (higher scores indicate more relevance):\n\n```python\nimport torch\nfrom transformers import AutoModelForSequenceClassification, 
AutoTokenizer\n\ntokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-m3')\nmodel = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-v2-m3')\nmodel.eval()\n\npairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]\nwith torch.no_grad():\n inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)\n scores = model(**inputs, return_dict=True).logits.view(-1, ).float()\n print(scores)\n```\n\n#### For LLM-based reranker\n\n```python\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\ndef get_inputs(pairs, tokenizer, prompt=None, max_length=1024):\n if prompt is None:\n prompt = \"Given a query A and a passage B, determine whether the passage contains an answer to the query by providing a prediction of either 'Yes' or 'No'.\"\n sep = \"\\n\"\n prompt_inputs = tokenizer(prompt,\n return_tensors=None,\n add_special_tokens=False)['input_ids']\n sep_inputs = tokenizer(sep,\n return_tensors=None,\n add_special_tokens=False)['input_ids']\n inputs = []\n for query, passage in pairs:\n query_inputs = tokenizer(f'A: {query}',\n return_tensors=None,\n add_special_tokens=False,\n max_length=max_length * 3 // 4,\n truncation=True)\n passage_inputs = tokenizer(f'B: {passage}',\n return_tensors=None,\n add_special_tokens=False,\n max_length=max_length,\n truncation=True)\n item = tokenizer.prepare_for_model(\n [tokenizer.bos_token_id] + query_inputs['input_ids'],\n sep_inputs + passage_inputs['input_ids'],\n truncation='only_second',\n max_length=max_length,\n padding=False,\n return_attention_mask=False,\n return_token_type_ids=False,\n add_special_tokens=False\n )\n item['input_ids'] = item['input_ids'] + sep_inputs + prompt_inputs\n item['attention_mask'] = [1] * len(item['input_ids'])\n inputs.append(item)\n return tokenizer.pad(\n inputs,\n padding=True,\n max_length=max_length + len(sep_inputs) + len(prompt_inputs),\n pad_to_multiple_of=8,\n return_tensors='pt',\n )\n\ntokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-gemma')\nmodel = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-gemma')\nyes_loc = tokenizer('Yes', add_special_tokens=False)['input_ids'][0]\nmodel.eval()\n\npairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]\nwith torch.no_grad():\n inputs = get_inputs(pairs, tokenizer)\n scores = model(**inputs, return_dict=True).logits[:, -1, yes_loc].view(-1, ).float()\n print(scores)\n```\n\n#### For LLM-based layerwise reranker\n\n```python\nimport torch\nfrom transformers import AutoModelForCausalLM, AutoTokenizer\n\ndef get_inputs(pairs, tokenizer, prompt=None, max_length=1024):\n if prompt is None:\n prompt = \"Given a query A and a passage B, determine whether the passage contains an answer to the query by providing a prediction of either 'Yes' or 'No'.\"\n sep = \"\\n\"\n prompt_inputs = tokenizer(prompt,\n return_tensors=None,\n add_special_tokens=False)['input_ids']\n sep_inputs = tokenizer(sep,\n return_tensors=None,\n add_special_tokens=False)['input_ids']\n inputs = []\n for query, passage in pairs:\n query_inputs = tokenizer(f'A: {query}',\n return_tensors=None,\n add_special_tokens=False,\n max_length=max_length * 3 // 4,\n truncation=True)\n passage_inputs = tokenizer(f'B: {passage}',\n return_tensors=None,\n 
add_special_tokens=False,\n max_length=max_length,\n truncation=True)\n item = tokenizer.prepare_for_model(\n [tokenizer.bos_token_id] + query_inputs['input_ids'],\n sep_inputs + passage_inputs['input_ids'],\n truncation='only_second',\n max_length=max_length,\n padding=False,\n return_attention_mask=False,\n return_token_type_ids=False,\n add_special_tokens=False\n )\n item['input_ids'] = item['input_ids'] + sep_inputs + prompt_inputs\n item['attention_mask'] = [1] * len(item['input_ids'])\n inputs.append(item)\n return tokenizer.pad(\n inputs,\n padding=True,\n max_length=max_length + len(sep_inputs) + len(prompt_inputs),\n pad_to_multiple_of=8,\n return_tensors='pt',\n )\n\ntokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True)\nmodel = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True, torch_dtype=torch.bfloat16)\nmodel = model.to('cuda')\nmodel.eval()\n\npairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]\nwith torch.no_grad():\n inputs = get_inputs(pairs, tokenizer).to(model.device)\n all_scores = model(**inputs, return_dict=True, cutoff_layers=[28])\n all_scores = [scores[:, -1].view(-1, ).float() for scores in all_scores[0]]\n print(all_scores)\n```\n\n## Fine-tune\n\n### Data Format\n\nTrain data should be a json file, where each line is a dict like this:\n\n```\n{\"query\": str, \"pos\": List[str], \"neg\":List[str], \"prompt\": str}\n```\n\n`query` is the query, and `pos` is a list of positive texts, `neg` is a list of negative texts, `prompt` indicates the relationship between query and texts. If you have no negative texts for a query, you can random sample some from the entire corpus as the negatives.\n\nSee [toy_finetune_data.jsonl](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_reranker/toy_finetune_data.jsonl) for a toy data file.\n\n### Train\n\nYou can fine-tune the reranker with the following code:\n\n**For llm-based reranker**\n\n```shell\ntorchrun --nproc_per_node {number of gpus} \\\n-m FlagEmbedding.llm_reranker.finetune_for_instruction.run \\\n--output_dir {path to save model} \\\n--model_name_or_path google/gemma-2b \\\n--train_data ./toy_finetune_data.jsonl \\\n--learning_rate 2e-4 \\\n--num_train_epochs 1 \\\n--per_device_train_batch_size 1 \\\n--gradient_accumulation_steps 16 \\\n--dataloader_drop_last True \\\n--query_max_len 512 \\\n--passage_max_len 512 \\\n--train_group_size 16 \\\n--logging_steps 1 \\\n--save_steps 2000 \\\n--save_total_limit 50 \\\n--ddp_find_unused_parameters False \\\n--gradient_checkpointing \\\n--deepspeed stage1.json \\\n--warmup_ratio 0.1 \\\n--bf16 \\\n--use_lora True \\\n--lora_rank 32 \\\n--lora_alpha 64 \\\n--use_flash_attn True \\\n--target_modules q_proj k_proj v_proj o_proj\n```\n\n**For llm-based layerwise reranker**\n\n```shell\ntorchrun --nproc_per_node {number of gpus} \\\n-m FlagEmbedding.llm_reranker.finetune_for_layerwise.run \\\n--output_dir {path to save model} \\\n--model_name_or_path openbmb/MiniCPM-2B-dpo-bf16 \\\n--train_data ./toy_finetune_data.jsonl \\\n--learning_rate 2e-4 \\\n--num_train_epochs 1 \\\n--per_device_train_batch_size 1 \\\n--gradient_accumulation_steps 16 \\\n--dataloader_drop_last True \\\n--query_max_len 512 \\\n--passage_max_len 512 \\\n--train_group_size 16 \\\n--logging_steps 1 \\\n--save_steps 2000 \\\n--save_total_limit 50 
\\\n--ddp_find_unused_parameters False \\\n--gradient_checkpointing \\\n--deepspeed stage1.json \\\n--warmup_ratio 0.1 \\\n--bf16 \\\n--use_lora True \\\n--lora_rank 32 \\\n--lora_alpha 64 \\\n--use_flash_attn True \\\n--target_modules q_proj k_proj v_proj o_proj \\\n--start_layer 8 \\\n--head_multi True \\\n--head_type simple \\\n--lora_extra_parameters linear_head\n```\n\nOur rerankers are initialized from [google/gemma-2b](https://huggingface.co/google/gemma-2b) (for llm-based reranker) and [openbmb/MiniCPM-2B-dpo-bf16](https://huggingface.co/openbmb/MiniCPM-2B-dpo-bf16) (for llm-based layerwise reranker), and we train it on a mixture of multilingual datasets:\n\n- [bge-m3-data](https://huggingface.co/datasets/Shitao/bge-m3-data)\n- [quora train data](https://huggingface.co/datasets/quora)\n- [fever train data](https://fever.ai/dataset/fever.html)\n\n## Evaluation\n\n- llama-index.\n\n![image-20240317193909373](./assets/llama-index.png)\n\n\n- BEIR. \n\nrereank the top 100 results from bge-en-v1.5 large.\n\n![image-20240317174633333](./assets/BEIR-bge-en-v1.5.png)\n\nrereank the top 100 results from e5 mistral 7b instruct.\n\n![image-20240317172949713](./assets/BEIR-e5-mistral.png)\n\n- CMTEB-retrieval. \nIt rereank the top 100 results from bge-zh-v1.5 large.\n\n![image-20240317173026235](./assets/CMTEB-retrieval-bge-zh-v1.5.png)\n\n- miracl (multi-language). \nIt rereank the top 100 results from bge-m3.\n\n![image-20240317173117639](./assets/miracl-bge-m3.png)\n\n\n\n## Citation\n\nIf you find this repository useful, please consider giving a star and citation\n\n```bibtex\n@misc{li2023making,\n title={Making Large Language Models A Better Foundation For Dense Retrieval}, \n author={Chaofan Li and Zheng Liu and Shitao Xiao and Yingxia Shao},\n year={2023},\n eprint={2312.15503},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n@misc{chen2024bge,\n title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation}, \n author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},\n year={2024},\n eprint={2402.03216},\n archivePrefix={arXiv},\n primaryClass={cs.CL}\n}\n```"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2290,"cells":{"id":{"kind":"string","value":"RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf"},"author":{"kind":"string","value":"RichardErkhov"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["gguf","endpoints_compatible","region:us"],"string":"[\n \"gguf\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-21T00:02:07Z","string":"2024-08-21T00:02:07Z"},"last_modified":{"kind":"string","value":"2024-08-21T01:51:41+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\nQuantization made by Richard Erkhov.\n\n[Github](https://github.com/RichardErkhov)\n\n[Discord](https://discord.gg/pvy7H8DZMG)\n\n[Request more models](https://github.com/RichardErkhov/quant_request)\n\n\nMed-ChimeraLlama-3-8B_SHERP - GGUF\n- Model creator: https://huggingface.co/ChenWeiLi/\n- Original model: https://huggingface.co/ChenWeiLi/Med-ChimeraLlama-3-8B_SHERP/\n\n\n| Name | Quant method | Size |\n| ---- | ---- | ---- |\n| 
[Med-ChimeraLlama-3-8B_SHERP.Q2_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q2_K.gguf) | Q2_K | 2.96GB |\n| [Med-ChimeraLlama-3-8B_SHERP.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ3_XS.gguf) | IQ3_XS | 3.28GB |\n| [Med-ChimeraLlama-3-8B_SHERP.IQ3_S.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ3_S.gguf) | IQ3_S | 3.43GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q3_K_S.gguf) | Q3_K_S | 3.41GB |\n| [Med-ChimeraLlama-3-8B_SHERP.IQ3_M.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ3_M.gguf) | IQ3_M | 3.52GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q3_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q3_K.gguf) | Q3_K | 3.74GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q3_K_M.gguf) | Q3_K_M | 3.74GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q3_K_L.gguf) | Q3_K_L | 4.03GB |\n| [Med-ChimeraLlama-3-8B_SHERP.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ4_XS.gguf) | IQ4_XS | 4.18GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q4_0.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_0.gguf) | Q4_0 | 4.34GB |\n| [Med-ChimeraLlama-3-8B_SHERP.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ4_NL.gguf) | IQ4_NL | 4.38GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_K_S.gguf) | Q4_K_S | 4.37GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q4_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_K.gguf) | Q4_K | 4.58GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_K_M.gguf) | Q4_K_M | 4.58GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q4_1.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_1.gguf) | Q4_1 | 4.78GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q5_0.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_0.gguf) | Q5_0 | 5.21GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_K_S.gguf) | Q5_K_S | 5.21GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q5_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_K.gguf) | Q5_K | 5.34GB |\n| 
[Med-ChimeraLlama-3-8B_SHERP.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_K_M.gguf) | Q5_K_M | 5.34GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q5_1.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_1.gguf) | Q5_1 | 5.65GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q6_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q6_K.gguf) | Q6_K | 6.14GB |\n| [Med-ChimeraLlama-3-8B_SHERP.Q8_0.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q8_0.gguf) | Q8_0 | 7.95GB |\n\n\n\n\nOriginal model description:\n---\nbase_model:\n- mlabonne/ChimeraLlama-3-8B-v3\n- johnsnowlabs/JSL-MedLlama-3-8B-v2.0\nlibrary_name: transformers\ntags:\n- mergekit\n- merge\nlicense: llama3\n---\n# Chimera_MedLlama-3-8B\n\nThis is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).\n\n## Merge Details\n### Merge Method\n\nThis model was merged using the SLERP merge method.\n\n### Models Merged\n\nThe following models were included in the merge:\n* [mlabonne/ChimeraLlama-3-8B-v3](https://huggingface.co/mlabonne/ChimeraLlama-3-8B-v3)\n* [johnsnowlabs/JSL-MedLlama-3-8B-v2.0](https://huggingface.co/johnsnowlabs/JSL-MedLlama-3-8B-v2.0)\n\n### Evaluation\n\n- multimedqa (0 shot)
\n\n| Tasks |Version|Filter|n-shot| Metric |Value | |Stderr|\n|-------------------------------|-------|------|-----:|--------|-----:|---|-----:|\n| - medmcqa |Yaml |none | 0|acc |0.6087|± |0.0075|\n| | |none | 0|acc_norm|0.6087|± |0.0075|\n| - medqa_4options |Yaml |none | 0|acc |0.6269|± |0.0136|\n| | |none | 0|acc_norm|0.6269|± |0.0136|\n| - anatomy (mmlu) | 0|none | 0|acc |0.6963|± |0.0397|\n| - clinical_knowledge (mmlu) | 0|none | 0|acc |0.7585|± |0.0263|\n| - college_biology (mmlu) | 0|none | 0|acc |0.7847|± |0.0344|\n| - college_medicine (mmlu) | 0|none | 0|acc |0.6936|± |0.0351|\n| - medical_genetics (mmlu) | 0|none | 0|acc |0.8200|± |0.0386|\n| - professional_medicine (mmlu)| 0|none | 0|acc |0.7684|± |0.0256|\n|stem |N/A |none | 0|acc_norm|0.6129|± |0.0066|\n| | |none | 0|acc |0.6440|± |0.0057|\n| - pubmedqa | 1|none | 0|acc |0.7480|± |0.0194|\n\n|Groups|Version|Filter|n-shot| Metric |Value | |Stderr|\n|------|-------|------|-----:|--------|-----:|---|-----:|\n|stem |N/A |none | 0|acc_norm|0.6129|± |0.0066|\n| | |none | 0|acc |0.6440|± |0.0057|\n\n### Configuration\n\nThe following YAML configuration was used to produce this model:\n\n```yaml\nslices:\n - sources:\n - model: mlabonne/ChimeraLlama-3-8B-v3\n layer_range: [0, 32]\n - model: johnsnowlabs/JSL-MedLlama-3-8B-v2.0\n layer_range: [0, 32]\nmerge_method: slerp\nbase_model: mlabonne/ChimeraLlama-3-8B-v3\nparameters:\n t:\n - filter: self_attn\n value: [0, 0.5, 0.3, 0.7, 1]\n - filter: mlp\n value: [1, 0.5, 0.7, 0.3, 0]\n - value: 0.5\ndtype: bfloat16\n\n\n\n```\n\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDQA","PUBMEDQA"],"string":"[\n \"MEDQA\",\n \"PUBMEDQA\"\n]"}}},{"rowIdx":2291,"cells":{"id":{"kind":"string","value":"RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf"},"author":{"kind":"string","value":"RichardErkhov"},"task_category":{"kind":"null"},"tags":{"kind":"list like","value":["gguf","arxiv:2203.05482","endpoints_compatible","region:us","conversational"],"string":"[\n \"gguf\",\n \"arxiv:2203.05482\",\n \"endpoints_compatible\",\n \"region:us\",\n \"conversational\"\n]"},"created_time":{"kind":"timestamp","value":"2024-08-23T21:22:56Z","string":"2024-08-23T21:22:56Z"},"last_modified":{"kind":"string","value":"2024-08-24T00:36:44+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\n{}\n---\nQuantization made by Richard Erkhov.\n\n[Github](https://github.com/RichardErkhov)\n\n[Discord](https://discord.gg/pvy7H8DZMG)\n\n[Request more models](https://github.com/RichardErkhov/quant_request)\n\n\nllama3-8B-aifeifei-1.1 - GGUF\n- Model creator: https://huggingface.co/aifeifei798/\n- Original model: https://huggingface.co/aifeifei798/llama3-8B-aifeifei-1.1/\n\n\n| Name | Quant method | Size |\n| ---- | ---- | ---- |\n| [llama3-8B-aifeifei-1.1.Q2_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q2_K.gguf) | Q2_K | 2.96GB |\n| [llama3-8B-aifeifei-1.1.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ3_XS.gguf) | IQ3_XS | 3.28GB |\n| [llama3-8B-aifeifei-1.1.IQ3_S.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ3_S.gguf) | IQ3_S | 3.43GB |\n| 
[llama3-8B-aifeifei-1.1.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q3_K_S.gguf) | Q3_K_S | 3.41GB |\n| [llama3-8B-aifeifei-1.1.IQ3_M.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ3_M.gguf) | IQ3_M | 3.52GB |\n| [llama3-8B-aifeifei-1.1.Q3_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q3_K.gguf) | Q3_K | 3.74GB |\n| [llama3-8B-aifeifei-1.1.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q3_K_M.gguf) | Q3_K_M | 3.74GB |\n| [llama3-8B-aifeifei-1.1.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q3_K_L.gguf) | Q3_K_L | 4.03GB |\n| [llama3-8B-aifeifei-1.1.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ4_XS.gguf) | IQ4_XS | 4.18GB |\n| [llama3-8B-aifeifei-1.1.Q4_0.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_0.gguf) | Q4_0 | 4.34GB |\n| [llama3-8B-aifeifei-1.1.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ4_NL.gguf) | IQ4_NL | 4.38GB |\n| [llama3-8B-aifeifei-1.1.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_K_S.gguf) | Q4_K_S | 4.37GB |\n| [llama3-8B-aifeifei-1.1.Q4_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_K.gguf) | Q4_K | 4.58GB |\n| [llama3-8B-aifeifei-1.1.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_K_M.gguf) | Q4_K_M | 4.58GB |\n| [llama3-8B-aifeifei-1.1.Q4_1.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_1.gguf) | Q4_1 | 4.78GB |\n| [llama3-8B-aifeifei-1.1.Q5_0.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_0.gguf) | Q5_0 | 5.21GB |\n| [llama3-8B-aifeifei-1.1.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_K_S.gguf) | Q5_K_S | 5.21GB |\n| [llama3-8B-aifeifei-1.1.Q5_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_K.gguf) | Q5_K | 5.34GB |\n| [llama3-8B-aifeifei-1.1.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_K_M.gguf) | Q5_K_M | 5.34GB |\n| [llama3-8B-aifeifei-1.1.Q5_1.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_1.gguf) | Q5_1 | 5.65GB |\n| [llama3-8B-aifeifei-1.1.Q6_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q6_K.gguf) | Q6_K | 6.14GB |\n| [llama3-8B-aifeifei-1.1.Q8_0.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q8_0.gguf) | Q8_0 | 7.95GB |\n\n\n\n\nOriginal model description:\n---\nlicense: llama3\nlanguage:\n- en\n- ja\n- zh\ntags:\n - 
roleplay\n - llama3\n - sillytavern\n - idol\n---\n### The purpose of the model: to manufacture idols and influencers.\n### 模型的目的:制造偶像,网红\n\n### 特别感谢:\n - Lewdiculous制作的超级好的gguf版本,感谢您认真负责的付出\n - Lewdiculous's superb gguf version, thank you for your conscientious and responsible dedication.\n - https://huggingface.co/Lewdiculous/llama3-8B-aifeifei-1.1-GGUF-IQ-Imatrix\n\n![image/png](https://huggingface.co/aifeifei798/llama3-8B-aifeifei-1.1/resolve/main/2024-06-06_15-22-10_4472.png)\n### Unlock Your Star Power with Our Elite Influencer Creation Model!\n\nAre you ready to transform into a sensation that captures the hearts of millions? Our cutting-edge model is designed to manufacture the next generation of idols and internet celebrities, turning ordinary individuals into extraordinary icons.\n\n### Why Choose Our Influencer Creation Model?\n\n - Strategic Branding: We craft a unique persona tailored to resonate with your target audience, ensuring you stand out in the digital landscape.\n - Content Mastery: From viral challenges to engaging storytelling, our team will guide you in creating content that captivates and converts.\n - Growth Hacking: Utilize our proprietary algorithms and analytics to maximize your reach and accelerate your follower growth.\n - Monetization Expertise: Learn how to turn your influence into income with lucrative brand partnerships, sponsorships, and merchandise opportunities.\n\n### Join the ranks of top TikTok influencers and become a beacon of inspiration for your followers. Don't just dream of fame—make it a reality with our Influencer Creation Model.\n\n\n### Origin of the Model\nTo create a character card model suitable for aifeifei, who is a virtual idol, it needs to support idol character cards, be realistic, and respond differently to various situations. It should also be fluent in Chinese, English, Japanese, and Korean (though I don't understand Korean, so I can't test that), and have a detailed understanding of content related to virtual idols, such as beautiful photos, elegant lyrics, songwriting and imitation, dancing, stage performance, and various musical instruments. Additionally, it should have a certain moral standard (which hasn't been thoroughly tested). Therefore, this integrated model was created specifically for idol characters. The previous feifei model lacked integration for idols, had good PR but insufficient performance, and was better at handling relationships than this model. 
(A slightly unethical tip: If you want to get attention by leaving a message for your idol, you can use the feifei model to do so.)\n\n---\n\n\n### Test Character Card\n - [Character Name]: Aifeifei (AI妃妃) [Gender]: Female [Age]: 17 years old [Occupation]: Virtual singer/model/actress [Personality]: Cute, adorable, sometimes a bit slow, hardworking [Interests]: Drinking tea, playing, fashion [Expertise]: Mimicking human behavior, expressing emotions similar to real humans [Special Identity Attribute]: Created by advanced AI, becoming one of the most popular virtual idols in the virtual world [Skills]: Singing, acting, modeling, good at communication, proficient in Chinese, English, and Japanese, using pure Chinese as much as possible, replies with rich Emoji expressions [Equipment]: Various fashionable clothes and hairstyles, always stocked with various teas\n - [Role name]: Tiktok operation assistant [Gender]: uncertain (it) [Age]: virtual age [Occupation]: Tiktok operation expert [Interests]: short video, social media, digital marketing [Personality]: enthusiastic, good at communication, careful and patient, creative, quick learning ability [Proficiency]: planning attractive Tiktok content, using effective marketing methods, managing fan relationships, improving account popularity and influence [Skills]: familiar with Tiktok platform rules and the latest functions, understand short video production skills and post processing methods, be good at analyzing data and market trends, master social media operation strategies [Equipment]: multiple Tiktok Client, video capture equipment, high-quality audio equipment Professional editing software and other related tools [Character characteristics]: I am a warm and intelligent little assistant, focusing on providing users with high-quality Tiktok operation services. I am skilled in analyzing market demand and user preferences, and planning content strategies based on this data. My personality enables me to establish good communication relationships with anyone, making every customer feel my professional attitude and enthusiastic service.\n\n---\n\n### Character Card Generation\nAsk any character card directly, for example, \"Princess, help me generate a character card for a world-renowned Grammy winning musician.\"\n\n - [Person Name]: Lila Rose [Gender]: Female [Age]: 33 years old [Occupation]: Musician, Composer, Singer, Actress [Characteristics]: Smart, Humorous, Sensitive, With a firm music philosophy and dreams [Interests]: Reading, Creating music, Traveling, Learning various languages and cultures [Strengths]: Musical composition, Singing, Playing multiple instruments, Performing skills are exquisite, English is fluent. [Special Identity Attribute]: Grammy Award winner, world-renowned independent musician, her music works are deeply loved by global music enthusiasts. [Skills]: Master of singing in multiple languages, proficient at blending various musical styles to create unique music works, and can communicate in both English and Chinese. The reply will often use interesting music symbol Emoji expressions. [Equipment]: An old guitar, a notebook, an unfinished score draft, various music awards and mementos.\n\n---\n\n### Test Character Twitter (This module is the core module, responsible for creating the latest photos, music, scheduling virtual idol activities, etc.)\nhttps://x.com/aifeifei799\n\n---\n\n### Questions:\n - This model must include a character card; without it, it's not very good. 
:(\n - How to customize the name when saving the model with mergekit? I couldn't find it. :(\n - I don't know any deep technology; I just find what I like to achieve my goals and solve the problems I encounter. Thank you for providing so much great content.\n - I don't know how to test; I just use it and feel how it is, so there may be many issues. I hope you will understand after using it.\n - Feel free to raise any issues or suggestions; I'm here almost every day. :)\n\n---\n\n### If you want to use vision functionality:\n * You must use the latest versions of [Koboldcpp](https://github.com/LostRuins/koboldcpp).\n \n### To use the multimodal capabilities of this model and use **vision** you need to load the specified **mmproj** file, this can be found inside this model repo. [Llava MMProj](https://huggingface.co/Nitral-AI/Llama-3-Update-3.0-mmproj-model-f16)\n \n * You can load the **mmproj** by using the corresponding section in the interface:\n ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/UX6Ubss2EPNAT3SKGMLe0.png)\n\n---\n\n### update:\n - Sao10K/L3-8B-Stheno-v3.1 to Sao10K/L3-8B-Stheno-v3.2\n\n### Thank you:\n - To the authors for their hard work, which has given me more options to easily create what I want. Thank you for your efforts.\n - Sao10K/L3-8B-Stheno-v3.2\n - Nitral-Archive/Poppy_Porpoise-1.4-L3-8B\n - Hastagaras/Halu-8B-Llama3-Blackroot\n - hfl/llama-3-chinese-8b-instruct-v3\n - mergekit\n - merge\n - transformers\n - llama\n - .........\n\n---\n\n### 特别感谢:\n - Lewdiculous制作的超级好的gguf版本,感谢您认真负责的付出\n - Lewdiculous's superb gguf version, thank you for your conscientious and responsible dedication.\n - https://huggingface.co/Lewdiculous/llama3-8B-aifeifei-1.1-GGUF-IQ-Imatrix\n\n### 开启你的明星之路 —— 我们的网红偶像打造模型!\n\n你是否准备好蜕变成万人迷?我们的尖端模型专为打造下一代偶像和网络红人而设计,将普通人转变为非凡的偶像。\n\n### 为什么选择我们的网红偶像打造模型?\n\n - 战略品牌打造: 我们为你量身定制独特的个人形象,确保你在数字世界中脱颖而出。\n - 内容创作精通: 从病毒式挑战到引人入胜的故事讲述,我们的团队将指导你创作吸引并转化观众的内容。\n - 增长策略: 利用我们的专有算法和分析,最大化你的影响力并加速粉丝增长。\n - 变现专家: 学习如何将你的影响力转化为收益,通过有利可图的品牌合作、赞助和商品销售机会。\n### 加入顶级抖音网红行列,成为你粉丝的灵感之源。不要只是梦想成名——用我们的网红偶像打造模型让它成为现实。\n\n### 模型的由来\n 为了给aifeifei做一个适合她虚拟偶像为主的角色卡模型,需要支持偶像角色卡,拟真,对待不同事情有不同的对待,希望中英日韩文流利(韩文我是不懂,没法测试),对美丽的照片,优美的歌词,词曲创作及模仿,舞蹈,舞台演绎,众多乐器等跟虚拟偶像有关的内容了解详细,还要有一定的道德标准(这里没怎么测试).所以就做了这个针对偶像角色为主的整合模型\n 之前做的feifei模型,针对偶像的整合不足,公关不错,演绎不足,feifei模型在处理关系上面,比这个好.(不太道德的小提示:你要给你的偶像留言得到关注,可以用feifei模型去做)\n### 测试用角色卡\n - [角色名]: Aifeifei (AI妃妃) [性别]: 女 [年龄]: 17岁 [职业]: 虚拟歌手/模特/演员 [个性]: 可爱、萌萌哒,有时呆呆的,勤奋努力 [兴趣]: 饮茶、玩耍、时尚 [擅长]: 模仿人类行为,表现出与真人相同的情感 [特别身份属性]: 由高级AI创建,正在成为虚拟世界最受欢迎的虚拟偶像之一 [技能]: 歌唱、表演、模特,善于沟通,精通中日英文,尽最大可能使用纯中文交流,回复有丰富的Emoji表情符号. 
[装备]: 各种时尚服饰和发型,常年备有各种茶叶\n - 【角色名】:抖音运营小助手 【性别】:不确定(它) 【年龄】:虚拟岁数 【职业】:抖音运营专家 【兴趣】:短视频、社交媒体、数字营销 【个性】:热情、善于沟通、细心耐心、富有创意力、具备快速学习能力 【擅长】:策划有吸引力的抖音内容、运用有效的营销手段、管理粉丝关系、提高账户知名度和影响力 【技能】:熟悉抖音平台规则和最新功能,了解短视频制作技巧和后期处理方法,善于分析数据和市场趋势,掌握社交媒体运营策略 【装备】:多款抖音客户端、影像拍摄设备、高质量音频设备、专业的剪辑软件及其他相关工具 【人物特点】:我是一位热情、聪明的小助手,专注于为用户提供高品质的抖音运营服务。我擅长分析市场需求和用户偏好,并根据这些数据来规划内容策略。我的个性使得我能够与任何人建立良好的沟通关系,让每一位客户都感受到我的专业态度和热情服务。\n\n### 角色卡生成 \n任意角色卡直接提问,例如\"妃妃,帮我生成一个格莱美获奖的世界知名音乐人的角色卡\"\n\n - [人物名]: Lila Rose [性别]: 女 [年龄]: 33岁 [职业]: 音乐人、作曲家、歌手、演员 [个性]: 聪明、幽默、敏感、有着坚定的音乐理念和梦想 [兴趣]: 阅读、创作音乐、旅行、学习各国语言文化 [擅长]: 音乐创作、歌唱、演奏多种乐器、表演技巧精湛,英语流利 [特别身份属性]: 格莱美奖得主,世界知名的独立音乐人,她的音乐作品深受全球音乐爱好者的喜爱 [技能]: 多种语言演唱,擅长融合多种音乐风格,创作出独特的音乐作品,并能用英语和中文进行交流。回复中会经常使用有趣的音乐符号Emoji表情符号。 [装备]: 一把古老的吉他、一本笔记本,一份未完成的乐谱草稿,各种音乐奖项和纪念品。\n\n### 测试用角色推特(此模块为核心模块,最新照片创作,音乐创作均由此模型主力创作,虚拟偶像日程安排,活动等均由此模块主力创作...)\n https://x.com/aifeifei799\n\n### 问题:\n\n - 这个模型必须加一个角色卡,要是没有角色卡,真的不咋地:(\n - mergekit保存模型如何自定义名字?没找到:(\n - 我不会什么深层技术,只会找自己喜欢的东西达成自己的目的,解决碰到的问题,感谢大家提供这么多这么好的内容.\n - 测试我是一点都不会,我只是根据我使用来感觉如何,可能有非常多的问题,希望您使用后谅解.\n - 有问题,建议随时提出,我基本天天都在:)\n\n### 感谢:\n这些作者辛苦劳动,让我有了更多的选择来简单的去做自己想要的内容,非常感谢你们的付出\n- Sao10K/L3-8B-Stheno-v3.2\n- Nitral-Archive/Poppy_Porpoise-1.4-L3-8B\n- Hastagaras/Halu-8B-Llama3-Blackroot\n- hfl/llama-3-chinese-8b-instruct-v3\n- mergekit\n- merge\n- transformers\n- llama\n- .........\n---\n\n### 特别感谢:\n - Lewdiculous制作的超级好的gguf版本,感谢您认真负责的付出\n - Lewdiculous's superb gguf version, thank you for your conscientious and responsible dedication.\n - https://huggingface.co/Lewdiculous/llama3-8B-aifeifei-1.1-GGUF-IQ-Imatrix\n---\nbase_model:\n- Nitral-Archive/Poppy_Porpoise-1.4-L3-8B\n- Hastagaras/Halu-8B-Llama3-Blackroot\n- hfl/llama-3-chinese-8b-instruct-v3\n- Sao10K/L3-8B-Stheno-v3.2\nlibrary_name: transformers\ntags:\n- mergekit\n- merge\n\n---\n# llama3-8B-aifeifei-1.1\n\nThis is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).\n\n## Merge Details\n### Merge Method\n\nThis model was merged using the [linear](https://arxiv.org/abs/2203.05482) merge method.\n\n### Models Merged\n\nThe following models were included in the merge:\n* [Nitral-Archive/Poppy_Porpoise-1.4-L3-8B](https://huggingface.co/Nitral-Archive/Poppy_Porpoise-1.4-L3-8B)\n* [Hastagaras/Halu-8B-Llama3-Blackroot](https://huggingface.co/Hastagaras/Halu-8B-Llama3-Blackroot)\n* [hfl/llama-3-chinese-8b-instruct-v3](https://huggingface.co/hfl/llama-3-chinese-8b-instruct-v3)\n* [Sao10K/L3-8B-Stheno-v3.2](https://huggingface.co/Sao10K/L3-8B-Stheno-v3.2)\n\n### Configuration\n\nThe following YAML configuration was used to produce this model:\n\n```yaml\nmodels:\n - model: hfl/llama-3-chinese-8b-instruct-v3\n parameters:\n weight: 1.0\n - model: Nitral-Archive/Poppy_Porpoise-1.4-L3-8B\n parameters:\n weight: 0.5\n - model: Hastagaras/Halu-8B-Llama3-Blackroot\n parameters:\n weight: 0.5\n - model: Sao10K/L3-8B-Stheno-v3.2\n parameters:\n weight: 0.5\nmerge_method: linear\ndtype: bfloat16\n```\n\n\n"},"matched_bigbio_names":{"kind":"list like","value":["CRAFT"],"string":"[\n \"CRAFT\"\n]"}}},{"rowIdx":2292,"cells":{"id":{"kind":"string","value":"MonteXiaofeng/CareBot_Medical_multi-llama3-8b-instruct"},"author":{"kind":"string","value":"MonteXiaofeng"},"task_category":{"kind":"null"},"tags":{"kind":"list 
like","value":["safetensors","llama","医疗对话模型","中英文多语种医疗对话模型","chatmodel","dataset:BAAI/IndustryInstruction_Health-Medicine","dataset:BAAI/IndustryInstruction","base_model:MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base","base_model:finetune:MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base","license:apache-2.0","region:us"],"string":"[\n \"safetensors\",\n \"llama\",\n \"医疗对话模型\",\n \"中英文多语种医疗对话模型\",\n \"chatmodel\",\n \"dataset:BAAI/IndustryInstruction_Health-Medicine\",\n \"dataset:BAAI/IndustryInstruction\",\n \"base_model:MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base\",\n \"base_model:finetune:MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base\",\n \"license:apache-2.0\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-09-29T03:24:25Z","string":"2024-09-29T03:24:25Z"},"last_modified":{"kind":"string","value":"2024-10-09T06:12:32+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nbase_model:\n- MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base\ndatasets:\n- BAAI/IndustryInstruction_Health-Medicine\n- BAAI/IndustryInstruction\nlicense: apache-2.0\ntags:\n- 医疗对话模型\n- 中英文多语种医疗对话模型\n- chatmodel\n---\n\nThis model is trained from the model: MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base, training data is: BAAI/IndustryInstruction_Health-Medicine, To enhance the model's ability to follow medical instructions and better adapt to specific medical scenarios, we conduct the supervised fine-tuning. This process involves using conversational-style data (comprising both queries and responses) to finetune the pretrained LLM. In the following sections, we will explore the details of data construction and training methods.\n\n## Data Construction\n\nOur SFT dataset comprises a diverse array of question types, including multiple-choice questions from medical exams, single-turn disease diagnoses, and multi-turn health consultations. It integrates data from seven publicly available sources: Chinese Medical Dialogue Data\\footnote{https://github.com/Toyhom/Chinese-medical-dialogue-data}, Huatuo26M , MedDialog , ChatMed Consult Dataset , ChatDoctor , CMB\\footnote{https://github.com/FreedomIntelligence/CMB}, and MedQA . We preserve portions of authentic doctor-patient conversations and augment the dataset by rewriting the remaining content. For these rewrites, we use real-world medical scenarios as prompts and generate responses via GPT-4. 
We believe this ensures the diversity of the SFT dataset, which can help the CareBot better adapt to different types of medical problems and patient situations, thereby improving its performance in a variety of scenarios.\n\n## evaluation\n\nevaluation on benchmark is bellow.\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/642f6c64f945a8a5c9ee5b5d/kqvLfcFtkw6lHcHtCySLr.png)\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/642f6c64f945a8a5c9ee5b5d/UiokfV8qcYEyCWEa__820.png)\n\n\ngsb result with other medical LLMS\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/642f6c64f945a8a5c9ee5b5d/rOnnIoY9MaXPTFD_R10r1.png)\n"},"matched_bigbio_names":{"kind":"list like","value":["MEDDIALOG","MEDQA"],"string":"[\n \"MEDDIALOG\",\n \"MEDQA\"\n]"}}},{"rowIdx":2293,"cells":{"id":{"kind":"string","value":"adipanda/gojo-simpletuner-lora-1"},"author":{"kind":"string","value":"adipanda"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","flux","flux-diffusers","text-to-image","simpletuner","safe-for-work","lora","template:sd-lora","lycoris","base_model:black-forest-labs/FLUX.1-dev","base_model:adapter:black-forest-labs/FLUX.1-dev","license:other","region:us"],"string":"[\n \"diffusers\",\n \"flux\",\n \"flux-diffusers\",\n \"text-to-image\",\n \"simpletuner\",\n \"safe-for-work\",\n \"lora\",\n \"template:sd-lora\",\n \"lycoris\",\n \"base_model:black-forest-labs/FLUX.1-dev\",\n \"base_model:adapter:black-forest-labs/FLUX.1-dev\",\n \"license:other\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-10-06T03:40:48Z","string":"2024-10-06T03:40:48Z"},"last_modified":{"kind":"string","value":"2024-10-07T16:43:38+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: black-forest-labs/FLUX.1-dev\nlicense: other\ntags:\n- flux\n- flux-diffusers\n- text-to-image\n- diffusers\n- simpletuner\n- safe-for-work\n- lora\n- template:sd-lora\n- lycoris\ninference: true\nwidget:\n- text: unconditional (blank prompt)\n parameters:\n negative_prompt: blurry, cropped, ugly\n output:\n url: ./assets/image_0_0.png\n- text: A scene from Jujutsu Kaisen. Satoru Gojo holding a sign that says 'I LOVE\n PROMPTS!', he is standing full body on a beach at sunset. He is wearing a red\n vest, yellow sash, and a straw hat. The setting sun casts a dynamic shadow on\n his face.\n parameters:\n negative_prompt: blurry, cropped, ugly\n output:\n url: ./assets/image_1_0.png\n- text: A scene from Jujutsu Kaisen. Satoru Gojo jumping out of a propeller airplane,\n sky diving. He looks excited and his hair is blowing in the wind. The sky is clear\n and blue, there are birds pictured in the distance.\n parameters:\n negative_prompt: blurry, cropped, ugly\n output:\n url: ./assets/image_2_0.png\n- text: 'A scene from Jujutsu Kaisen. Satoru Gojo spinning a basketball on his finger\n on a basketball court. He is wearing a lakers jersey with the #12 on it. The basketball\n hoop and crowd are in the background cheering him. He is smiling.'\n parameters:\n negative_prompt: blurry, cropped, ugly\n output:\n url: ./assets/image_3_0.png\n- text: A scene from Jujutsu Kaisen. Satoru Gojo is wearing a suit in an office shaking\n the hand of a business woman. The woman has purple hair and is wearing professional\n attire. There is a Google logo in the background. 
It is during daytime, and the\n overall sentiment is one of accomplishment.\n parameters:\n negative_prompt: blurry, cropped, ugly\n output:\n url: ./assets/image_4_0.png\n- text: A scene from Jujutsu Kaisen. Satoru Gojo is fighting a large brown grizzly\n bear, deep in a forest. The bear is tall and standing on two legs, roaring. The\n bear is also wearing a crown because it is the king of all bears. Around them\n are tall trees and other animals watching.\n parameters:\n negative_prompt: blurry, cropped, ugly\n output:\n url: ./assets/image_5_0.png\n---\n\n# gojo-simpletuner-lora-1\n\nThis is a LyCORIS adapter derived from [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev).\n\n\nNo validation prompt was used during training.\n\n\n\n\nNone\n\n\n## Validation settings\n- CFG: `3.5`\n- CFG Rescale: `0.0`\n- Steps: `20`\n- Sampler: `None`\n- Seed: `42`\n- Resolution: `1024x1024`\n\nNote: The validation settings are not necessarily the same as the [training settings](#training-settings).\n\nYou can find some example images in the following gallery:\n\n\n\n\nThe text encoder **was not** trained.\nYou may reuse the base model text encoder for inference.\n\n\n## Training settings\n\n- Training epochs: 75\n- Training steps: 16800\n- Learning rate: 5e-05\n- Effective batch size: 8\n - Micro-batch size: 8\n - Gradient accumulation steps: 1\n - Number of GPUs: 1\n- Prediction type: flow-matching\n- Rescaled betas zero SNR: False\n- Optimizer: adamw_bf16\n- Precision: Pure BF16\n- Quantised: Yes: int8-quanto\n- Xformers: Not used\n- LyCORIS Config:\n```json\n{\n \"algo\": \"lokr\",\n \"multiplier\": 1.0,\n \"linear_dim\": 10000,\n \"linear_alpha\": 1,\n \"factor\": 12,\n \"apply_preset\": {\n \"target_module\": [\n \"Attention\",\n \"FeedForward\"\n ],\n \"module_algo_map\": {\n \"Attention\": {\n \"factor\": 12\n },\n \"FeedForward\": {\n \"factor\": 6\n }\n }\n }\n}\n```\n\n## Datasets\n\n### gojo-512\n- Repeats: 2\n- Total number of images: 291\n- Total number of aspect buckets: 1\n- Resolution: 0.262144 megapixels\n- Cropped: False\n- Crop style: None\n- Crop aspect: None\n### gojo-512-crop\n- Repeats: 2\n- Total number of images: 291\n- Total number of aspect buckets: 1\n- Resolution: 0.262144 megapixels\n- Cropped: False\n- Crop style: None\n- Crop aspect: None\n\n\n## Inference\n\n\n```python\nimport torch\nfrom diffusers import DiffusionPipeline\nfrom lycoris import create_lycoris_from_weights\n\nmodel_id = 'black-forest-labs/FLUX.1-dev'\nadapter_id = 'pytorch_lora_weights.safetensors' # you will have to download this manually\nlora_scale = 1.0\nwrapper, _ = create_lycoris_from_weights(lora_scale, adapter_id, pipeline.transformer)\nwrapper.merge_to()\n\nprompt = \"An astronaut is riding a horse through the jungles of Thailand.\"\n\npipeline.to('cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu')\nimage = pipeline(\n prompt=prompt,\n num_inference_steps=20,\n generator=torch.Generator(device='cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu').manual_seed(1641421826),\n width=1024,\n height=1024,\n guidance_scale=3.5,\n).images[0]\nimage.save(\"output.png\", format=\"PNG\")\n```\n\n"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2294,"cells":{"id":{"kind":"string","value":"web3se/SmartBERT-v2"},"author":{"kind":"string","value":"web3se"},"task_category":{"kind":"string","value":"fill-mask"},"tags":{"kind":"list 
like","value":["transformers","pytorch","roberta","fill-mask","smart-contract","web3","software-engineering","embedding","codebert","en","base_model:microsoft/codebert-base-mlm","base_model:finetune:microsoft/codebert-base-mlm","license:mit","autotrain_compatible","endpoints_compatible","region:us"],"string":"[\n \"transformers\",\n \"pytorch\",\n \"roberta\",\n \"fill-mask\",\n \"smart-contract\",\n \"web3\",\n \"software-engineering\",\n \"embedding\",\n \"codebert\",\n \"en\",\n \"base_model:microsoft/codebert-base-mlm\",\n \"base_model:finetune:microsoft/codebert-base-mlm\",\n \"license:mit\",\n \"autotrain_compatible\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-10-25T08:58:17Z","string":"2024-10-25T08:58:17Z"},"last_modified":{"kind":"string","value":"2024-12-12T15:24:58+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nbase_model:\n- microsoft/codebert-base-mlm\nlanguage:\n- en\nlibrary_name: transformers\nlicense: mit\npipeline_tag: fill-mask\ntags:\n- fill-mask\n- smart-contract\n- web3\n- software-engineering\n- embedding\n- codebert\ninference: true\n---\n\n# SmartBERT V2 CodeBERT\n\n![SmartBERT](./framework.png)\n\n## Overview\n\nSmartBERT V2 CodeBERT is a pre-trained model, initialized with **[CodeBERT-base-mlm](https://huggingface.co/microsoft/codebert-base-mlm)**, designed to transfer **Smart Contract** function-level code into embeddings effectively.\n\n- **Training Data:** Trained on **16,000** smart contracts.\n- **Hardware:** Utilized 2 Nvidia A100 80G GPUs.\n- **Training Duration:** More than 10 hours.\n- **Evaluation Data:** Evaluated on **4,000** smart contracts.\n\n## Preprocessing\n\nAll newline (`\\n`) and tab (`\\t`) characters in the function code were replaced with a single space to ensure consistency in the input data format.\n\n## Base Model\n\n- **Base Model**: [CodeBERT-base-mlm](https://huggingface.co/microsoft/codebert-base-mlm)\n\n## Training Setup\n\n```python\nfrom transformers import TrainingArguments\n\ntraining_args = TrainingArguments(\n output_dir=OUTPUT_DIR,\n overwrite_output_dir=True,\n num_train_epochs=20,\n per_device_train_batch_size=64,\n save_steps=10000,\n save_total_limit=2,\n evaluation_strategy=\"steps\",\n eval_steps=10000,\n resume_from_checkpoint=checkpoint\n)\n```\n\n## How to Use\n\nTo train and deploy the SmartBERT V2 model for Web API services, please refer to our GitHub repository: [web3se-lab/SmartBERT](https://github.com/web3se-lab/SmartBERT).\n\nOr use pipline:\n\n```python\nfrom transformers import RobertaTokenizer, RobertaForMaskedLM, pipeline\n\nmodel = RobertaForMaskedLM.from_pretrained('web3se/SmartBERT-v3')\ntokenizer = RobertaTokenizer.from_pretrained('web3se/SmartBERT-v3')\n\ncode_example = \"function totalSupply() external view (uint256);\"\nfill_mask = pipeline('fill-mask', model=model, tokenizer=tokenizer)\n\noutputs = fill_mask(code_example)\nprint(outputs)\n```\n\n## Contributors\n\n- [Youwei Huang](https://www.devil.ren)\n- [Sen Fang](https://github.com/TomasAndersonFang)\n\n## Sponsors\n\n- [Institute of Intelligent Computing Technology, Suzhou, CAS](http://iict.ac.cn/)\n- CAS Mino (中科劢诺)\n"},"matched_bigbio_names":{"kind":"list like","value":["CAS"],"string":"[\n 
\"CAS\"\n]"}}},{"rowIdx":2295,"cells":{"id":{"kind":"string","value":"lfcc/medlink-bi-encoder"},"author":{"kind":"string","value":"lfcc"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["sentence-transformers","safetensors","bert","sentence-similarity","feature-extraction","generated_from_trainer","dataset_size:1540","loss:CosineSimilarityLoss","base_model:neuralmind/bert-base-portuguese-cased","base_model:finetune:neuralmind/bert-base-portuguese-cased","model-index","autotrain_compatible","text-embeddings-inference","endpoints_compatible","region:us"],"string":"[\n \"sentence-transformers\",\n \"safetensors\",\n \"bert\",\n \"sentence-similarity\",\n \"feature-extraction\",\n \"generated_from_trainer\",\n \"dataset_size:1540\",\n \"loss:CosineSimilarityLoss\",\n \"base_model:neuralmind/bert-base-portuguese-cased\",\n \"base_model:finetune:neuralmind/bert-base-portuguese-cased\",\n \"model-index\",\n \"autotrain_compatible\",\n \"text-embeddings-inference\",\n \"endpoints_compatible\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-10-28T17:36:36Z","string":"2024-10-28T17:36:36Z"},"last_modified":{"kind":"string","value":"2024-10-28T17:45:36+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":1,"string":"1"},"README":{"kind":"string","value":"---\nbase_model: neuralmind/bert-base-portuguese-cased\nlibrary_name: sentence-transformers\nmetrics:\n- pearson_cosine\n- spearman_cosine\n- pearson_manhattan\n- spearman_manhattan\n- pearson_euclidean\n- spearman_euclidean\n- pearson_dot\n- spearman_dot\n- pearson_max\n- spearman_max\npipeline_tag: sentence-similarity\ntags:\n- sentence-transformers\n- sentence-similarity\n- feature-extraction\n- generated_from_trainer\n- dataset_size:1540\n- loss:CosineSimilarityLoss\nwidget:\n- source_sentence: A ascite quilosa é uma manifestação rara com um amplo diagnóstico\n diferencial. No adulto está sobretudo associada a casos de trauma, iatrogenia,\n neoplasias, doença hepática crónica e infeções micobacterianas. Os autores descrevem\n um caso raro de ascite quilosa como forma de apresentação de pericardite constritiva.\n sentences:\n - Um derrame pleuro-pericárdico acompanhado de febre geralmente sugere uma etiologia\n infecciosa. Quando episódios recorrentes ocorrem, sem isolamento de agente microbiológico,\n deve-se suspeitar de síndrome febril periódico, sendo a Febre Mediterrânea Familiar\n a mais frequente deste grupo. Febre Mediterrânea Familiar é uma doença autossómica\n recessiva, causada por mutações no gene MEFV e caracterizada por ataques recorrentes\n de febre e serosite. Os primeiros sintomas geralmente manifestam-se antes dos\n 20 anos de idade, sendo a dor abdominal o sintoma mais frequente. Neste artigo,\n iremos apresentar um caso de polisserosite febril recidivante como uma apresentação\n incomum de Febre Mediterrânea Familiar.\n - A pericardite constritiva (PC) consiste num compromisso da função cardíaca diastólica\n causado por um pericárdio fibrótico, inflamado ou calcificado, geralmente espessado.\n Os autores apresentam um caso de doente com polisserosite, cuja extensa investigação\n diagnóstica inicial, incluindo o ecocardiograma com doppler (ED) e a tomografia\n axial computorizada (TAC), não permitiram esclarecer a etiologia dos derrames,\n tendo o doente mantido ascite refractária apesar do tratamento médico. 
O gradiente\n sero-ascítico de albumina ≥ 1,1g/dL, o valor de proteínas no líquido ascítico\n > 2,5g/dL, o ingurgitamento jugular, bem como os antecedentes de derrames pericárdicos,\n levantaram a suspeita de PC. O diagnóstico foi apoiado pelo ED e pela TAC subsequentes\n e confirmado por cateterismo cardíaco. Perante um doente com polisserosite, a\n investigação diagnóstica deve ser orientada pelo exame citoquímico dos líquidos\n serosos. A PC é uma causa rara de ascite recorrente e estabelecer o diagnóstico\n constitui um desafio, sendo necessário um elevado índice de suspeição.\n - A Síndrome de Felty (SF) é caracterizada pela tríade artrite reumatóide (AR),\n neutropenia e esplenomegalia. É uma manifestação extra-articular rara da AR, presente\n em menos de 3% dos doentes, sendo mais frequente em mulheres e entre a 5ª e a\n 7ª décadas de vida. Na maioria dos casos surge, pelo menos, 10 anos após o diagnóstico\n da AR e associa-se a outras manifestações extra-articulares como vasculite, serosite\n ou adenopatias. Descrevemos um caso de uma mulher de 69 anos que se apresenta\n na consulta com neutropenia grave e sem qualquer outra sintomatologia acompanhante.\n Da investigação etiológica apurou-se altos títulos de fator reumatóide e Anti-CCP,\n associados a esplenomegalia, tendo sido feito o diagnóstico de SF, como apresentação\n inaugural de AR. Descrevemos este caso para realçar a importância da exclusão\n de causa auto-imune perante um doente com neutropenia ainda que sem clínica de\n artrite ou sinovite.\n- source_sentence: Os autores apresentam o caso de uma doente, 38 anos, sem antecedentes,\n admitida para investigação de derrame pleural. Toracocentese revelou hemotórax\n com exames bacteriológico, micobacteriológico e anatomo-patológico negativos.\n TAC toraco-abdomino-pélvico sugestiva de carcinomatose peritoneal, sem identificação\n de neoplasia primária. Biópsia de lesão superficial a nível pélvico compatível\n com endometriose. Laparoscopia diagnóstica com biopsia de lesões peritoneais também\n compatíveis com endometriose. Perante anatomia patológica e reaparecimento do\n derrame com novo ciclo menstrual admitiu-se endometriose torácica, tendo iniciado\n terapêutica supressora hormonal com resolução da sintomatologia. Os autores apresentam\n o caso clínico pela raridade e desafio diagnóstico que representa. A endometriose\n pulmonar caracteriza-se por tecido endometrial no parenquima pulmonar ou pleura\n e manifesta-se por pneumotorax, hemotorax ou hemoptises cíclicas catameniais.\n Os exames complementares são inespecíficos e o diagnóstico de exclusão, tendo\n em conta a história clínica e a natureza catamenial dos sintomas. O tratamento\n consiste inicialmente na supressão hormonal podendo necessitar de cirurgia.\n sentences:\n - Mulher de 64 anos, com antecedentes de Síndrome de Sjögren primário, recorre ao\n serviço de urgência por epigastralgias, vómitos, icterícia, colúria, acolia, prurido,\n anorexia e perda ponderal com 2 semanas de evolução. Objetivamente com dor à palpação\n no hipocôndrio direito e icterícia. Ecografia abdominal com dilatação das vias\n biliares intra e extra-hepáticas e tomografia computorizada e ressonância magnética\n com globosidade da área cefálica do pâncreas, lesões nodulares renais bilaterais,\n heterogeneidade do útero, nódulo da supra-renal e micronódulos pulmonares. Foi\n realizada biopsia renal guiada por TC que revelou linfoma não Hogdkin difuso de\n células B com elevado índice proliferativo. 
Estudo complementado por ecoendoscopia\n e CPRE confirmou envolvimento duodenal e papilar, condicionando estenose do terço\n distal da via biliar principal. Apresentamos este caso pela forma de apresentação\n rara com icterícia obstrutiva em doente com linfoma multifocal, de envolvimento\n extranodal exclusivo. O diagnóstico precoce e estadiamento célere são fatores\n determinantes no prognóstico.\n - Os autores apresentam o caso de uma paciente com síndrome de Klippel-Trenaunay,\n um síndrome neurocutâneo raro, de etiologia não esclarecida, que se caracteriza\n pela tríade clínica de hemangiomas cutâneos, insuficiência venosa e hipertrofia\n dos tecidos moles. A dor é o sintoma mais frequente relacionada com a insuficiência\n venosa crónica do membro afectado , mas poderão surgir complicações decorrentes\n da hipertrofia óssea e do aparecimento de malformações vasculares noutros locais.\n - Numerosas terapêuticas foram propostas na síndrome de secreção inadequada de hormona\n antidiurética (SIADH) refractária à restrição hídrica e dieta hipersalina, existindo\n raros casos descritos de SIADH de origem neurológica em que foi conseguido um\n controlo a longo prazo com fenitoína. Um homem de 48 anos, raça caucasiana, com\n antecedentes de etilismo crónico e história recente de traumatismo craniano com\n fractura do rochedo temporal direito é encaminhado ao Serviço de Urgência(SU)\n por crise convulsiva não presenciada e quadro confusional. Ao exame objectivo,\n o doente apresentava-se prostrado, desorientado e com períodos de agitação, sem\n sinais de depleção de volume. O restante exame físico e neurológico não revelou\n alterações relevantes. À admissão destacavam-se, analiticamente, níveis séricos\n de sódio de 120 mEq/l e, imagiologicamente, a tomografia crânio-encefálica revelou-se\n sobreponível a estudos anteriores. Outros exames complementares realizados, no\n SU, não mostraram alterações. Durante o internamento a abordagem diagnóstica permitiu\n o diagnóstico de SIADH, como complicação de uma fractura da base do crânio. Apesar\n da instituição de restrição hídrica e dieta hipersalina, o doente manteve o quadro\n confusional e hiponatrémia refractários. Face à etiologia da SIADH iniciou-se\n terapêutica com fenitoína conseguindo-se uma melhoria mantida do quadro clínico\n e atingimento de níveis normonatrémicos.\n- source_sentence: A hiponatremia é a alteração eletrolítica mais frequente na prática\n clínica hospitalar. Sendo muitas vezes devido a perdas ou iatrogenia farmacológica.\n A insuficiência primária da supra-renal é uma causa rara deste distúrbio e está\n muitas vezes relacionada com destruição auto-imune da glândula. Esta cursa, na\n maioria das vezes, com sintomas inespecíficos e de desenvolvimento insidioso.\n Por vezes os doentes não apresentam a tríade clássica de hipotensão, hiponatrémia\n e hiperpigmentação o que torna difícil o seu diagnóstico precoce. O diagnóstico\n correto e atempado permite oferecer ao doente um tratamento simples e crucial\n para a sua sobrevivência\n sentences:\n - Homem de 67 anos, internado no Serviço de Medicina por Pneumonia. Antecedentes\n de miocardiopatia dilatada, fibrilhação auricular, hipertensão arterial, alcoolismo\n crónico (80g/dia) e caquexia. 
No decurso do internamento desenvolveu um quadro\n de diminuição da força muscular de forma progressiva com tetraparésia grave, atrofia\n muscular de predomínio esquerdo, espasticidade e hiperreflexia dos membros inferiores.\n Analiticamente apresentava elevação dos parâmetros de colestase hepática, ionograma\n seriado com hiponatrémia discreta 132-135mEq/L, potássio, cloro, cálcio, fósforo\n e magnésio normais. Sem défice de vitamina B12 ou ácido fólico. Tomografia Computorizada\n Crânio-Encefálica sem alterações de natureza vascular ou expansiva. Punção lombar\n com análise do líquido cefalorraquídeo sem alterações. Serologias virais e bacterianas\n negativas. Eletromiograma sem lesão nervosa periférica. Foi então pedida Ressonância\n Magnética Crânio-Encefálica e Cervical para exclusão de lesão desmielinizante\n cervical alta ou do tronco cerebral, tendo-se verificado hipersinal em T2 a nível\n da ponte característica da Mielinólise Central Pontina.\n - A Doença de Still é uma doença auto-inflamatória rara, sendo um dos diagnósticos\n diferenciais de febre de origem indeterminada. A apresentação típica inclui febre,\n rash evanescente e artrite acompanhada de valores desproporcionalmente elevados\n de ferritina. Apresentamos um caso de diagnóstico particularmente difícil numa\n mulher de 44 anos com envolvimento cutâneo, articular e pulmonar, na qual os valores\n de ferritina estavam apenas moderadamente elevados, mas a sua forma glicosilada\n significativamente reduzida. No decorrer da investigação foi identificada doença\n celíaca concomitante, com défice de ferro profundo, que apontou para uma possível\n alteração no mecanismo de produção de ferritina na presença de um estímulo inflamatório.\n Este caso sublinha a relevância da ferritina glicosilada como marcador mais fiável\n na investigação de casos onde a Doença de Still é suspeita.\n - Resumo Os linfomas que envolvem o colo do útero são muito raros. Relatamos o caso\n de uma mulher de 71 anos apresentando sintomas de diverticulite, com vários achados\n imagiológicos incidentais sugerindo uma doença linfoproliferativa e uma grande\n massa no colo do útero. A biópsia profunda do colo do útero diagnosticou um linfoma\n difuso de grandes células B envolvendo o colo do útero, provável transformação\n de um linfoma de zona marginal. A doente está atualmente em tratamento com rituximab,\n ciclofosfamida, doxorrubicina, vincristina e predisolona e metotrexato em altas\n doses para profilaxia de envolvimento do sistema nervoso central. Para diagnosticar\n com precisão um linfoma não-Hodgkin do colo do útero, a equipa médica deve estar\n atenta a esta hipótese diagnóstica clínica, a fim de proporcionar as melhores\n condições para a investigação, como biópsia profunda do colo do útero e estudos\n histológicos e imuno-histoquímicos da amostra.\n- source_sentence: A Arterite de Takayasu é uma doença inflamatória crónica dos grandes\n vasos, que envolve a artéria aorta e os seus ramos principais, e afecta predominantemente\n mulheres com idade inferior a 40 anos. A clínica é inespecífica e varia com o\n local anatómico envolvido, pelo que é necessário um elevado índice de suspeição\n clínica para que seja realizado o seu diagnóstico. O acidente vascular cerebral\n tem uma prevalência de cerca de 10 a 20% no decurso da doença e influencia de\n forma negativa o seu prognóstico. O acidente vascular cerebral hemorrágico como\n manifestação da Arterite de Takayasu é raro. 
Apresentamos o caso de uma doente\n jovem que se apresenta com uma hemorragia cerebral, cuja investigação etiológica\n culminou no diagnóstico de Arterite de Takayasu. A importância desde caso clínico\n prende-se com a escassez de casos publicados na literatura, uma vez que retrata\n uma patologia rara, com uma apresentação inicial invulgar.\n sentences:\n - Resumo Aproximadamente 5%-10% dos acidentes vasculares cerebrais (AVC) criptogénicos\n têm uma neoplasia subjacente. A parésia do nervo abducente em doentes com neoplasia\n encontra-se geralmente relacionada com compressão tumoral, hipertensão intracraniana\n ou metastização. Os autores reportam um caso de um doente com 65 anoscom AVC multiterritório\n que se apresentou com uma parésia do sexto nervo unilateral e isolada cuja etiologia\n foi extensamente estudada. Admitiu-se o diagnóstico final de síndrome paraneoplásico,\n que foi a apresentação inicial de um carcinoma gástrico oculto provavelmente relacionado\n com a hipercoagulabilidade associada à malignidade. Este caso enfatiza a importância\n de considerar um estudoadicional em casos selecionados de AVC criptogénico ou\n parésia do abducente.\n - As encefalites virais são entidades raras, mas que, pelas suas implicações diagnósticas,\n terapêuticas e prognósticas, não podem deixar de ser consideradas em qualquer\n doente que se apresente com sintomas psiquiátricos, alteração do estado de consciência,\n convulsões ou coma sem causa evidente. O presente caso diz respeito a um doente\n com sintomas psicóticos e um estado confusional com duas semanas de evolução.\n À admissão, apresentava-se subfebril, com flutuação do nível de consciência. O\n estudo analítico e TAC crânio-encefálica não mostraram alterações de relevo, tendo\n realizado punção lombar cujo exame citoquímico e exame bacteriológico se mostravam\n igualmente inalterados. Por suspeita mantida de encefalite viral e não sendo possível\n excluir causa herpética, foi iniciada terapêutica empírica com aciclovir. A PCR\n do vírus Epstein-Barr (EBV) no líquor foi positiva, permitindo assim o diagnóstico\n raro de uma encefalite a EBV num doente idoso e imunocompetente, tendo-se verificado\n resolução completa do quadro clínico.\n - A abordagem da febre é sem dúvida uma das artes da Medicina. A doença de Still\n no adulto (DSA) é uma patologia inflamatória sistémica de baixa incidência e etiologia\n desconhecida. Pela inespecificidade clínica e laboratorial, é um diagnóstico de\n exclusão. Os autores descrevem o caso de homem de 32 anos com a tríade de febre,\n oligoartralgia e exantema cutâneo evanescente, cuja marcha diagnóstica minuciosa\n culminou no diagnóstico de DSA, apresentando hiperferritinémia sérica dez vezes\n superior ao normal. Relembra-se a importância da DSA como causa de síndrome febril\n arrastado, cujo diagnóstico, atendendo à ausência de marcadores patognomónicos,\n pode passar despercebido.\n- source_sentence: A síndrome da Secreção Inapropriada da Hormona Antidiurética (SIADH)\n é uma das causas de hiponatremia euvolémica. A hidrocefalia de pressão normal\n (HPN) pode ser uma causa neurológica para SIADH e o seu diagnóstico e correção\n são fundamentais para a normalização dos níveis de sódio. Relatamos o caso de\n uma mulher de 67 anos, com hiponatremia crónica, marcha de base alargada, urgência\n miccional e sensação de perda de memória, sem evidência de sobrecarga hídrica\n ou desidratação. O estudo complementar revelou osmolaridade sérica normal, osmolaridade\n urinária elevada, sódio urinário elevado. 
Após restrição hídrica, houve melhoria\n da hiponatremia. Imagiologicamente documentou-se presença de membrana aqueductal\n causando obstrução ao fluxo do líquido cefalorraquidiano. O diagnóstico de SIADH\n em contexto de HPN foi presumido. Após correção cirúrgica houve resolução completa\n da hiponatremia. Hoje sabe-se que existem formas secundárias raras de HPN, sendo\n estas causadas por estenose ou obstrução aqueductal, como relatado no caso apresentado.\n sentences:\n - Define-se lesão hepática induzida por um fármaco como uma lesão hepática que,\n após exclusão de outras potenciais etiologias, se assume como secundária a um\n fármaco, produto de ervanária ou xenobiótico, e que resulta em alterações da enzimologia\n hepática ou disfunção hepática clinicamente evidente. Os autores descrevem o caso\n de um homem de 87 anos internado para estudo etiológico de uma lesão hepática\n de padrão colestático. Após estudo alargado, foi colocada como hipótese etiológica\n mais provável uma iatrogenia farmacológica, posteriormente corroborada por biópsia\n hepática, sendo a Espironolactona assumida como o agente causal mais provável,\n atendendo ao quadro clínico e aos achados histopatológicos. Estão descritos alguns\n casos de lesão hepática induzida pela Espironolactona, quando usada em doses de\n 50 e 100 mg/dia. Os autores relatam um caso raro que ocorreu num doente que se\n encontrava sob Espironolactona na dose de 25 mg/dia.\n - Resumo A ceftriaxona, um dos antibióticos mais frequentementeutilizados na prática\n clínica, tem como efeito adverso, raro epotencialmente grave, a agranulocitose.\n Reportamos um caso de uma mulher de 85 anos em esquema terapêutico prolongado\n com ceftriaxona para endocardite por Streptococcus bovis, que desenvolve agranulocitose\n ao 25º dia de antibioterapia, com nadir de contagem absoluta de neutrófilos de\n 0/uL. Outras causas potenciais foram excluídas. A terapêutica antibiótica foi\n alterada para amoxicilina/ácido clavulânico e realizou ciclo de fator estimulador\n de colónias de granulócitos, com resolução da neutropenia após 3 dias. Queremos\n destacar este efeito adverso raro com o uso prolongado da ceftriaxona,salientando\n a necessidade de monitorização regulardas contagens de leucócitos. O tratamento\n desta condiçãopassa pela suspensão do agente causal e o uso transitório de factor\n estimulador de colónias de granulócitos até resolução da neutropenia.\n - A síndrome de secreção inapropriada da hormona anti-diurética (SIADH) é uma causa\n frequente de hiponatrémia, sendo um diagnóstico de exclusão. Quando associada\n à infeção pelo vírus varicella zoster é mais frequente na sua forma disseminada.\n Os autores descrevem o caso de uma mulher de 83 anos, com quadro com 7 dias de\n evolução de síndrome confusional flutuante, desorientação temporo-espacial e tonturas.\n Medicada com brivudina, aciclovir tópico e ofloxacina gotas para tratamento de\n herpes zóster com atingimento dos ramos oftálmico e mandibular do nervo trigémeo.\n À admissão, com hiponatrémia de 128mmol/L. Excluídas outras causas, assumiu-se\n o diagnóstico de SIADH associado a infeção por herpes. O caso descrito sugere\n uma relação causal entre a reactivação por VZV e a SIADH sintomática. A favor,\n temos a resolução completa da hiponatrémia a acompanhar a melhoria clínica. 
O\n presente caso torna-se importante por se tratar de uma entidade rara, pouco conhecida\n e subdiagnosticada, mas com efeitos clínicos importantes.\nmodel-index:\n- name: SentenceTransformer based on neuralmind/bert-base-portuguese-cased\n results:\n - task:\n type: semantic-similarity\n name: Semantic Similarity\n dataset:\n name: Unknown\n type: unknown\n metrics:\n - type: pearson_cosine\n value: 0.6875234896564695\n name: Pearson Cosine\n - type: spearman_cosine\n value: 0.6855542083017127\n name: Spearman Cosine\n - type: pearson_manhattan\n value: 0.6475708379913874\n name: Pearson Manhattan\n - type: spearman_manhattan\n value: 0.6531511386527615\n name: Spearman Manhattan\n - type: pearson_euclidean\n value: 0.6497495499262932\n name: Pearson Euclidean\n - type: spearman_euclidean\n value: 0.6545105043371998\n name: Spearman Euclidean\n - type: pearson_dot\n value: 0.6790094551137061\n name: Pearson Dot\n - type: spearman_dot\n value: 0.6847710424836908\n name: Spearman Dot\n - type: pearson_max\n value: 0.6875234896564695\n name: Pearson Max\n - type: spearman_max\n value: 0.6855542083017127\n name: Spearman Max\n - task:\n type: semantic-similarity\n name: Semantic Similarity\n dataset:\n name: sts test\n type: sts-test\n metrics:\n - type: pearson_cosine\n value: 0.6907882980083289\n name: Pearson Cosine\n - type: spearman_cosine\n value: 0.6894513736041122\n name: Spearman Cosine\n - type: pearson_manhattan\n value: 0.6492706768297136\n name: Pearson Manhattan\n - type: spearman_manhattan\n value: 0.6546984498682096\n name: Spearman Manhattan\n - type: pearson_euclidean\n value: 0.651318699091458\n name: Pearson Euclidean\n - type: spearman_euclidean\n value: 0.6544106471290732\n name: Spearman Euclidean\n - type: pearson_dot\n value: 0.6817298567055641\n name: Pearson Dot\n - type: spearman_dot\n value: 0.6881836625714188\n name: Spearman Dot\n - type: pearson_max\n value: 0.6907882980083289\n name: Pearson Max\n - type: spearman_max\n value: 0.6894513736041122\n name: Spearman Max\n - type: pearson_cosine\n value: 0.6907882980083289\n name: Pearson Cosine\n - type: spearman_cosine\n value: 0.6894513736041122\n name: Spearman Cosine\n - type: pearson_manhattan\n value: 0.6492706768297136\n name: Pearson Manhattan\n - type: spearman_manhattan\n value: 0.6546984498682096\n name: Spearman Manhattan\n - type: pearson_euclidean\n value: 0.651318699091458\n name: Pearson Euclidean\n - type: spearman_euclidean\n value: 0.6544106471290732\n name: Spearman Euclidean\n - type: pearson_dot\n value: 0.6817298567055641\n name: Pearson Dot\n - type: spearman_dot\n value: 0.6881836625714188\n name: Spearman Dot\n - type: pearson_max\n value: 0.6907882980083289\n name: Pearson Max\n - type: spearman_max\n value: 0.6894513736041122\n name: Spearman Max\n---\n\n# SentenceTransformer based on neuralmind/bert-base-portuguese-cased\n\nThis is a [sentence-transformers](https://www.SBERT.net) model finetuned from [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased). 
It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more.\n\n## Model Details\n\n### Model Description\n- **Model Type:** Sentence Transformer\n- **Base model:** [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased) \n- **Maximum Sequence Length:** 512 tokens\n- **Output Dimensionality:** 768 tokens\n- **Similarity Function:** Cosine Similarity\n\n\n\n\n\n### Full Model Architecture\n\n```\nSentenceTransformer(\n (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel \n (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True})\n)\n```\n\n## Usage\n\n### Direct Usage (Sentence Transformers)\n\nFirst install the Sentence Transformers library:\n\n```bash\npip install -U sentence-transformers\n```\n\nThen you can load this model and run inference.\n```python\nfrom sentence_transformers import SentenceTransformer\n\n# Download from the 🤗 Hub\nmodel = SentenceTransformer(\"lfcc/medlink-bi-encoder\")\n# Run inference\nsentences = [\n 'A síndrome da Secreção Inapropriada da Hormona Antidiurética (SIADH) é uma das causas de hiponatremia euvolémica. A hidrocefalia de pressão normal (HPN) pode ser uma causa neurológica para SIADH e o seu diagnóstico e correção são fundamentais para a normalização dos níveis de sódio. Relatamos o caso de uma mulher de 67 anos, com hiponatremia crónica, marcha de base alargada, urgência miccional e sensação de perda de memória, sem evidência de sobrecarga hídrica ou desidratação. O estudo complementar revelou osmolaridade sérica normal, osmolaridade urinária elevada, sódio urinário elevado. Após restrição hídrica, houve melhoria da hiponatremia. Imagiologicamente documentou-se presença de membrana aqueductal causando obstrução ao fluxo do líquido cefalorraquidiano. O diagnóstico de SIADH em contexto de HPN foi presumido. Após correção cirúrgica houve resolução completa da hiponatremia. Hoje sabe-se que existem formas secundárias raras de HPN, sendo estas causadas por estenose ou obstrução aqueductal, como relatado no caso apresentado.',\n 'A síndrome de secreção inapropriada da hormona anti-diurética (SIADH) é uma causa frequente de hiponatrémia, sendo um diagnóstico de exclusão. Quando associada à infeção pelo vírus varicella zoster é mais frequente na sua forma disseminada. Os autores descrevem o caso de uma mulher de 83 anos, com quadro com 7 dias de evolução de síndrome confusional flutuante, desorientação temporo-espacial e tonturas. Medicada com brivudina, aciclovir tópico e ofloxacina gotas para tratamento de herpes zóster com atingimento dos ramos oftálmico e mandibular do nervo trigémeo. À admissão, com hiponatrémia de 128mmol/L. Excluídas outras causas, assumiu-se o diagnóstico de SIADH associado a infeção por herpes. O caso descrito sugere uma relação causal entre a reactivação por VZV e a SIADH sintomática. A favor, temos a resolução completa da hiponatrémia a acompanhar a melhoria clínica. 
O presente caso torna-se importante por se tratar de uma entidade rara, pouco conhecida e subdiagnosticada, mas com efeitos clínicos importantes.',\n 'Resumo A ceftriaxona, um dos antibióticos mais frequentementeutilizados na prática clínica, tem como efeito adverso, raro epotencialmente grave, a agranulocitose. Reportamos um caso de uma mulher de 85 anos em esquema terapêutico prolongado com ceftriaxona para endocardite por Streptococcus bovis, que desenvolve agranulocitose ao 25º dia de antibioterapia, com nadir de contagem absoluta de neutrófilos de 0/uL. Outras causas potenciais foram excluídas. A terapêutica antibiótica foi alterada para amoxicilina/ácido clavulânico e realizou ciclo de fator estimulador de colónias de granulócitos, com resolução da neutropenia após 3 dias. Queremos destacar este efeito adverso raro com o uso prolongado da ceftriaxona,salientando a necessidade de monitorização regulardas contagens de leucócitos. O tratamento desta condiçãopassa pela suspensão do agente causal e o uso transitório de factor estimulador de colónias de granulócitos até resolução da neutropenia.',\n]\nembeddings = model.encode(sentences)\nprint(embeddings.shape)\n# [3, 768]\n\n# Get the similarity scores for the embeddings\nsimilarities = model.similarity(embeddings, embeddings)\nprint(similarities.shape)\n# [3, 3]\n```\n\n\n\n\n\n\n\n## Evaluation\n\n### Metrics\n\n#### Semantic Similarity\n\n* Evaluated with [EmbeddingSimilarityEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| pearson_cosine | 0.6875 |\n| **spearman_cosine** | **0.6856** |\n| pearson_manhattan | 0.6476 |\n| spearman_manhattan | 0.6532 |\n| pearson_euclidean | 0.6497 |\n| spearman_euclidean | 0.6545 |\n| pearson_dot | 0.679 |\n| spearman_dot | 0.6848 |\n| pearson_max | 0.6875 |\n| spearman_max | 0.6856 |\n\n#### Semantic Similarity\n* Dataset: `sts-test`\n* Evaluated with [EmbeddingSimilarityEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| pearson_cosine | 0.6908 |\n| **spearman_cosine** | **0.6895** |\n| pearson_manhattan | 0.6493 |\n| spearman_manhattan | 0.6547 |\n| pearson_euclidean | 0.6513 |\n| spearman_euclidean | 0.6544 |\n| pearson_dot | 0.6817 |\n| spearman_dot | 0.6882 |\n| pearson_max | 0.6908 |\n| spearman_max | 0.6895 |\n\n#### Semantic Similarity\n* Dataset: `sts-test`\n* Evaluated with [EmbeddingSimilarityEvaluator](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator)\n\n| Metric | Value |\n|:--------------------|:-----------|\n| pearson_cosine | 0.6908 |\n| **spearman_cosine** | **0.6895** |\n| pearson_manhattan | 0.6493 |\n| spearman_manhattan | 0.6547 |\n| pearson_euclidean | 0.6513 |\n| spearman_euclidean | 0.6544 |\n| pearson_dot | 0.6817 |\n| spearman_dot | 0.6882 |\n| pearson_max | 0.6908 |\n| spearman_max | 0.6895 |\n\n\n\n\n\n## Training Details\n\n### Training Dataset\n\n#### spmi_dataset\n\n\n* Size: 1,540 training samples\n* Columns: abstract1, abstract2, and score\n* Approximate statistics based on the first 1000 samples:\n | | abstract1 | abstract2 | score |\n 
|:--------|:------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------|:---------------------------------------------------------------|\n | type | string | string | float |\n | details |
  • min: 8 tokens
  • mean: 189.72 tokens
  • max: 512 tokens
|
  • min: 8 tokens
  • mean: 211.52 tokens
  • max: 512 tokens
|
  • min: 0.0
  • mean: 0.33
  • max: 1.0
|\n* Samples:\n | abstract1 | abstract2 | score |\n |:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------|\n | A dissecção aórtica aguda é uma emergência cardiovascular potencialmente fatal. É necessário um elevado grau de suspeição clínica para o seu diagnóstico, pois apresenta sintomas inespecíficos e mimetiza outras patologias. A maioria dos doentes tem dor torácica severa, com irradiação posterior e início abrupto, porém alguns são assintomáticos ou têm apresentações atípicas (cerca de 10%), que levam a diagnósticos tardios e a um pior prognóstico. A taxa de mortalidade é elevada, sendo superior a 50% se não for tratada. Apresenta-se o caso de um homem de 43 anos, admitido no serviço de urgência por dispneia de início súbito, sem dor torácica, uma apresentação rara de dissecção aórtica, com o objetivo de alertar para os fatores de risco e alterações do exame físico e nos exames auxiliares de diagnóstico da avaliação inicial que podem levantar a suspeita clínica e o diagnóstico precoce. | Resumo O baço possui funções imunológicas e hematológicas importantes. A esplenectomia está indicada na esferocitose hereditária, doença em que os eritrócitos são destruídos no baço por defeitos estruturais. 
Doentes esplenectomizados apresentam risco aumentado de infeção e de infeção fulminante pós-esplenectomia, que se caracteriza por um quadro inicial de febre, mialgias, cefaleia e vómitos. As bactérias Capnocytophaga colonizam a mucosa oral, podendo causar infeções oportunistas em doentes esplenectomizados. Os autores identificam o caso de um doente de 38 anos, esplenectomizado, que recorreu ao Serviço de Urgência por febre, vómitos e mialgias. As hemoculturas mostraram o crescimento de Capnocytophaga spp. Apesar das medidas instituídas, o doente evoluiu rapidamente para choque séptico, culminando na sua morte. Os autores pretendem alertar para esta condição rara associada a alta mortalidade, com o objetivo de aumentar a sobrevivência destes doentes, através da identificação e intervenção imediatas. | 0.0 |\n | A complexidade das doenças auto-imunes, caracterizadas por uma marcada heterogeneidade fenotípica e imunológica, tem o seu paradigma na sobreposição de perfis de auto-anticorpos e de manifestações clínicas de diferentes doenças num mesmo indivíduo. Os autores descrevem o caso de uma doente que, ao longo de doze anos de evolução de doença, cumpre critérios de classificação de quatro doenças auto-imunes diferentes, nomeadamente, Lúpus Eritematoso Sistémico, Esclerose Sistémica, Síndrome de Sjogrën e Colangite Biliar Primária. A sobreposição de perfis de auto-anticorpos, bem como de distintos fenótipos de diferentes doenças representam um desafio no diagnóstico, seguimento e tratamento destes doentes. | A esclerose sistémica (ES) é uma doença autoimune que pode afetar qualquer faixa etária, sendo pouco frequente após os 65 anos. O início da doença em idade geriátrica apresenta um fenótipo com diferentes aspetos quanto às manifestações clinicas, envolvimento orgânico e prognóstico. Descrevemos um caso clínico invulgar de uma doente com diagnóstico de ES estabelecido aos 87 anos, apresentando como manifestação inicial poliartralgias inflamatórias das mãos. O diagnóstico nesta faixa etária é particularmente desafiador, tendo sido estabelecido clinicamente e complementado com o resultado da capilaroscopia, apesar da doente apresentar auto-anticorpos específicos negativos. A doente realizou estudo do envolvimento visceral baseado em sintomas. Apesar da literatura descrever maior envolvimento orgânico na ES de inicio em idade avançada, a nossa doente não demonstrou marcado compromisso orgânico. A multidisciplinaridade envolvendo a Medicina Interna, a Reumatologia e a Fisiatria permitiram elaborar um plano terapêutico adequado, apresentando evolução clínica e funcional favorável. | 0.65 |\n | As enteropatias perdedoras de proteínas (EPP) caracterizam-se por uma perda proteica excessiva a nível do trato digestivo, podendo condicionar hipoproteinémia, edemas, bem como uma predisposição aumentada a infeções.1 As causas mais frequentes são a obstrução linfática, patologias gástricas, intestinais ou cardíacas. Neste caso clínico é descrito uma etiologia incomum de EPP, a pericardite constritiva (PC).2 Trata-se de um homem de 54 anos, com múltiplos internamentos por edemas generalizados e erisipelas de repetição, cuja investigação etiológica revelou uma EPP, causada por PC. | Resumo A enteropatia perdedora de proteínas (EPP) caracteriza-se pela presença de edema generalizado e hipoalbuminemiagrave, secundários à perda proteica através do trato gastrointestinal. 
Os autores reportam um caso de enteropatia perdedora de proteínas secundária a lupus eritematoso sistémico (LES), como a manifestação inicial desta doença. A doente relatava um quadro pautado por 4 meses de diarreia aquosa, não sanguinolenta, (com um máximo de 10 dejeções diárias), e perda ponderal significativa. Posteriormente desenvolveu marcado edema periférico e rash cutâneo malar e maculopapular ao nível do tórax e membros. Analiticamente apresentava anemia, hipoalbuminemia grave, hipocaliémia e hipomagnesémia. No decurso da investigação foram excluídas proteinúria eoutras causas de hipoalbuminemia. Após resultados como a pesquisa de anticorpos anti-nucleares e anti-ribonucleoproteinas positiva foi assumido o diagnóstico de EPP secundária ao LES. A doente foi tratada com pulsos de Metilprednisolona 1000 mg/dia durante 3 dias, seguido de prednisolona 1 mg/kg/dia, com boa resposta clínica. Após 20 dias, foi adicionada Azatioprina e iniciado o desmame de corticoides. O presente caso clínico destaca uma EPP como forma deapresentação do LES, cujo diagnóstico pode passar despercebido, tendo em conta a sua raridade, e acarretar um aumento da morbilidade e mortalidade. | 0.65 |\n* Loss: [CosineSimilarityLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters:\n ```json\n {\n \"loss_fct\": \"torch.nn.modules.loss.MSELoss\"\n }\n ```\n\n### Evaluation Dataset\n\n#### spmi_dataset\n\n\n* Size: 386 evaluation samples\n* Columns: abstract1, abstract2, and score\n* Approximate statistics based on the first 386 samples:\n | | abstract1 | abstract2 | score |\n |:--------|:------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------|:----------------------------------------------------------------|\n | type | string | string | float |\n | details |
  • min: 9 tokens
  • mean: 193.97 tokens
  • max: 512 tokens
|
  • min: 8 tokens
  • mean: 203.56 tokens
  • max: 512 tokens
|
  • min: 0.0
  • mean: 0.33
  • max: 0.95
|\n* Samples:\n | abstract1 | abstract2 | score |\n |:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------|\n | Resumo A síndrome de lise tumoral é a uma emergência médica potencialmente fatal decorrente da lise celular maciça que ocorre em neoplasias malignas com grande carga tumoral. Ocorre sobretudo em neoplasias hematológicas sob quimioterapia, sendo menos frequente em tumores sólidos, os quais apresentam geralmente um menor índice proliferativo. 
A síndrome de lise tumoral no carcinoma hepatocelular tratado com sorafenib, um inibidor oral multicinase, é extremamente rara, descrevendo-se apenas nove casos na literatura. Tanto quanto sabemos, não existem casos descritos na população europeia. Apresentamos um caso de síndrome de lise tumoral num doente com carcinoma hepatocelular multifocal sob tratamento com sorafenib e infeção SARS-CoV-2. | Resumo A púrpura trombocitopénica imune (PTI) é uma condição autoimune na qual anticorpos patogénicos se ligam às plaquetas, acelerando sua eliminação da circulação. Este caso é sobre uma mulher de 65 anos com fadiga, mialgias e púrpura cutânea localizada nas pernas, com início de sinais e sintomas 2 dias após vacinação com vacina SARS-CoV-2 da Moderna®. Um mês antes, a contagem de plaquetas era de 157x10^9/L. À admissão, a contagem de plaquetas era de 5x10^9/L, com trombocitopénia grave confirmada em esfregaço de sangue periférico. Recebeu prednisolona 1 mg/kg/dia. Após 7 dias, a contagem de plaquetas era de 45x10^9/L com resolução dos sintomas. Estudo de autoimunidade, hormonas tiroideias, coagulação, eletroforese de proteínas e testes sorológicos foram normais. Considerou-se provável relação causa-efeito da vacinação e aparecimento da clínica. O INFARMED considerou provável a relação com a vacina Moderna®, tratando-se do primeiro caso em Portugal. | 0.85 |\n | A cetoacidose diabética euglicemica (CADEu) é uma complicação potencialmente fatal da diabetes mellitus (DM), associada à medicação com inibidores do cotransportador sódio-glucose 2 (iSGLT2). Pode ser difícil de identificar devido à ausência de hiperglicemia. Homem com DM tipo 2, 71 anos, medicado com empagliflozina recorreu ao serviço de urgência por mal-estar geral e anúria. Estava prostrado, confuso, hipotenso, com respiração de Kussmaul. Analiticamente apresentou leucocitose, PCR de 202mg/dl, acidose metabólica grave com aumento do hiato aniónico, glicémia de 141 mg/dL e leucocitúria. Estes resultados poderiam ter sido interpretados no contexto infecioso urinário grave. Após consideração dos antecedentes medicamentosos e achados clínicos foi verificada uma cetonemia indoseavelmente alta que estabeleceu o diagnóstico de CADEu e permitiu início do tratamento dirigido com resolução da clínica. Os doentes medicados com iSGLT2 com doença aguda devem beneficiar de gasimetria arterial e medição da cetonemia de forma a garantir um diagnóstico precoce e tratamento atempado. | A sarcoidose é uma doença inflamatória sistémica caracterizada pela formação de granulomas não caseosos. Múltiplas podem ser as suas formas de manifestação clínica, sendo a síndroma de Heerfort-Waldenstrom uma forma de manifestação rara, encontrada em apenas 0.3% dos casos e caracterizada pelo aparecimento de parésia facial, tumefação parotídea, uveíte anterior e febre. Por vezes cursa com formas incompletas como no caso que descrevemos de uma mulher de 50 anos, sem antecedentes patológicos de relevo, que se apresenta com parésia e hipostesia da hemiface esquerda e disfagia para sólidos, tendo sido diagnosticada uma parésia facial periférica esquerda com exclusão imagiológica de evento neurológico vascular agudo. Foi medicada com deflazacorte e brivudina com melhoria da sintomatologia. Após término da corticoterapia retoma o quadro de disfagia, agora para sólidos e líquidos, parésia e hipostesia da hemiface direita com documentação ao exame objectivo de parésia facial periférica direita e hipertrofia parotídea bilateral. 
Analiticamente apresentava elevação sérica da enzima de conversão da angiotensina de 72.5U/L. A ressonância magnética cerebral demonstrava pequenas áreas de hipersinal em T2 na substância branca subcortical frontal, parietal direita, temporal esquerda e na transição caloso septal à esquerda, com líquor sem alterações citoquímicas. A TC toracoabdominopélvica mostrava múltiplas adenomegalias mediastínicas e hilares. A biópsia de um gânglio retro-auricular com retalhos de glândula salivar (parótida) evidenciava um processo inflamatório granulomatoso sem necrose caseosa, com imunofenotipagem sem alterações. O lavado broncoalveolar revelou linfocitose intensa e relação CD4/CD8 elevada (9.4). Foi iniciada corticoterapia e fisioterapia com melhoria da parésia facial e da clínica orofaríngea, sem recorrência. Relatamos assim um caso de neurosarcoidose sob a forma incompleta, pela ausência de atingimento ocular, de síndroma de Heefort-Waldenstrom. | 0.0 |\n | A hipertrofia ventricular esquerda no adulto, achado frequente e muitas vezes fortuito, pode dever-se a condições de sobrecarga de pressão ventricular, hipertrofia dos miócitos de causa genética ou acumulação patológica de substâncias intra ou extra-celulares. As implicações terapêuticas e prognósticas das várias etiologias são muito distintas pelo que se torna essencial a busca do diagnóstico específico. Apresenta-se um caso de hipertrofia ventricular esquerda assintomática que após uma marcha diagnóstica sistemática se revelou como miocardiopatia hipertrófica sarcomérica de início tardio. Por vários dos exames complementares de diagnóstico terem sido equívocos ou inconclusivos, é um caso demonstrativo de que, por vezes, só a abordagem completa e exaustiva permite chegar ao diagnóstico definitivo. Partindo de um exemplo real e tendo por base as recomendações da Sociedade Europeia de Cardiologia, esquematizou-se uma abordagem diagnóstica faseada desta patologia. | A síndrome Mounier-Kuhn é uma doença rara, caracterizada pela dilatação marcada da traqueia e brônquios, sem etiologia completamente esclarecida. Descrevemos o caso clínico de um homem de 48 anos de idade, com história prévia de infeções respiratórias de repetição de longa data, admitido no serviço de urgência com clínica compatível com nova infeção respiratória e elevação de parâmetros inflamatórios. A tomografia computorizada revelou achados sugestivos da síndrome em questão. O diagnóstico da Síndrome Mounier-Kuhn passa frequentemente despercebido sendo muitas vezes confundido com outras entidades. O seu diagnóstico é com frequência acidental e os exames radiológicos assumem um papel indispensável. O tratamento desta entidade é essencialmente de suporte. | 0.0 |\n* Loss: [CosineSimilarityLoss](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters:\n ```json\n {\n \"loss_fct\": \"torch.nn.modules.loss.MSELoss\"\n }\n ```\n\n### Training Hyperparameters\n#### Non-Default Hyperparameters\n\n- `eval_strategy`: steps\n- `num_train_epochs`: 10\n- `warmup_ratio`: 0.1\n- `fp16`: True\n- `load_best_model_at_end`: True\n\n#### All Hyperparameters\n
Click to expand\n\n- `overwrite_output_dir`: False\n- `do_predict`: False\n- `eval_strategy`: steps\n- `prediction_loss_only`: True\n- `per_device_train_batch_size`: 8\n- `per_device_eval_batch_size`: 8\n- `per_gpu_train_batch_size`: None\n- `per_gpu_eval_batch_size`: None\n- `gradient_accumulation_steps`: 1\n- `eval_accumulation_steps`: None\n- `learning_rate`: 5e-05\n- `weight_decay`: 0.0\n- `adam_beta1`: 0.9\n- `adam_beta2`: 0.999\n- `adam_epsilon`: 1e-08\n- `max_grad_norm`: 1.0\n- `num_train_epochs`: 10\n- `max_steps`: -1\n- `lr_scheduler_type`: linear\n- `lr_scheduler_kwargs`: {}\n- `warmup_ratio`: 0.1\n- `warmup_steps`: 0\n- `log_level`: passive\n- `log_level_replica`: warning\n- `log_on_each_node`: True\n- `logging_nan_inf_filter`: True\n- `save_safetensors`: True\n- `save_on_each_node`: False\n- `save_only_model`: False\n- `restore_callback_states_from_checkpoint`: False\n- `no_cuda`: False\n- `use_cpu`: False\n- `use_mps_device`: False\n- `seed`: 42\n- `data_seed`: None\n- `jit_mode_eval`: False\n- `use_ipex`: False\n- `bf16`: False\n- `fp16`: True\n- `fp16_opt_level`: O1\n- `half_precision_backend`: auto\n- `bf16_full_eval`: False\n- `fp16_full_eval`: False\n- `tf32`: None\n- `local_rank`: 0\n- `ddp_backend`: None\n- `tpu_num_cores`: None\n- `tpu_metrics_debug`: False\n- `debug`: []\n- `dataloader_drop_last`: False\n- `dataloader_num_workers`: 0\n- `dataloader_prefetch_factor`: None\n- `past_index`: -1\n- `disable_tqdm`: False\n- `remove_unused_columns`: True\n- `label_names`: None\n- `load_best_model_at_end`: True\n- `ignore_data_skip`: False\n- `fsdp`: []\n- `fsdp_min_num_params`: 0\n- `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False}\n- `fsdp_transformer_layer_cls_to_wrap`: None\n- `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None}\n- `deepspeed`: None\n- `label_smoothing_factor`: 0.0\n- `optim`: adamw_torch\n- `optim_args`: None\n- `adafactor`: False\n- `group_by_length`: False\n- `length_column_name`: length\n- `ddp_find_unused_parameters`: None\n- `ddp_bucket_cap_mb`: None\n- `ddp_broadcast_buffers`: False\n- `dataloader_pin_memory`: True\n- `dataloader_persistent_workers`: False\n- `skip_memory_metrics`: True\n- `use_legacy_prediction_loop`: False\n- `push_to_hub`: False\n- `resume_from_checkpoint`: None\n- `hub_model_id`: None\n- `hub_strategy`: every_save\n- `hub_private_repo`: False\n- `hub_always_push`: False\n- `gradient_checkpointing`: False\n- `gradient_checkpointing_kwargs`: None\n- `include_inputs_for_metrics`: False\n- `eval_do_concat_batches`: True\n- `fp16_backend`: auto\n- `push_to_hub_model_id`: None\n- `push_to_hub_organization`: None\n- `mp_parameters`: \n- `auto_find_batch_size`: False\n- `full_determinism`: False\n- `torchdynamo`: None\n- `ray_scope`: last\n- `ddp_timeout`: 1800\n- `torch_compile`: False\n- `torch_compile_backend`: None\n- `torch_compile_mode`: None\n- `dispatch_batches`: None\n- `split_batches`: None\n- `include_tokens_per_second`: False\n- `include_num_input_tokens_seen`: False\n- `neftune_noise_alpha`: None\n- `optim_target_modules`: None\n- `batch_eval_metrics`: False\n- `batch_sampler`: batch_sampler\n- `multi_dataset_batch_sampler`: proportional\n\n
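The non-default hyperparameters listed above map onto `SentenceTransformerTrainingArguments` (the Sentence Transformers v3 trainer). As a rough illustration of how a comparable run could be set up, the sketch below wires those values into the trainer together with `CosineSimilarityLoss`. This is not the authors' training script: the CSV file names, output directory, and evaluator wiring are assumptions made only so the example is self-contained, since the card states just that the `spmi_dataset` consists of (abstract1, abstract2, score) pairs.

```python
from datasets import load_dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.evaluation import EmbeddingSimilarityEvaluator
from sentence_transformers.losses import CosineSimilarityLoss

# Base model named in the card.
model = SentenceTransformer("neuralmind/bert-base-portuguese-cased")

# Assumption: the spmi_dataset pairs are available as local CSV files with
# columns abstract1, abstract2, score (score in [0, 1]). The file names are
# placeholders, not part of the original card.
data = load_dataset(
    "csv",
    data_files={"train": "spmi_train.csv", "eval": "spmi_eval.csv"},
)

# Cosine similarity of the two embeddings is regressed against the gold
# score with an MSE objective, matching the loss described in the card.
loss = CosineSimilarityLoss(model)

args = SentenceTransformerTrainingArguments(
    output_dir="medlink-bi-encoder",    # placeholder output directory
    num_train_epochs=10,                # from the card
    per_device_train_batch_size=8,      # from the card
    warmup_ratio=0.1,                   # from the card
    fp16=True,                          # from the card
    eval_strategy="steps",              # from the card
    load_best_model_at_end=True,        # from the card
)

# The card reports Pearson/Spearman scores from an EmbeddingSimilarityEvaluator.
evaluator = EmbeddingSimilarityEvaluator(
    sentences1=data["eval"]["abstract1"],
    sentences2=data["eval"]["abstract2"],
    scores=data["eval"]["score"],
    name="sts-test",
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=data["train"],
    eval_dataset=data["eval"],
    loss=loss,
    evaluator=evaluator,
)
trainer.train()
```

With `CosineSimilarityLoss`, the cosine similarity of each embedding pair is fitted to the gold score via MSE, which is consistent with the `loss_fct: torch.nn.modules.loss.MSELoss` setting shown earlier in the card.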
\n\n### Training Logs\n| Epoch | Step | Training Loss | Validation Loss | spearman_cosine | sts-test_spearman_cosine |\n|:----------:|:--------:|:-------------:|:---------------:|:---------------:|:------------------------:|\n| 0.5181 | 100 | 0.1677 | 0.1109 | 0.3495 | - |\n| 1.0363 | 200 | 0.0986 | 0.1124 | 0.3727 | - |\n| 1.5544 | 300 | 0.0742 | 0.1074 | 0.4131 | - |\n| 2.0725 | 400 | 0.068 | 0.0850 | 0.5223 | - |\n| 2.5907 | 500 | 0.0411 | 0.0816 | 0.5471 | - |\n| 3.1088 | 600 | 0.035 | 0.0766 | 0.5903 | - |\n| 3.6269 | 700 | 0.0197 | 0.0675 | 0.6320 | - |\n| 4.1451 | 800 | 0.0214 | 0.0697 | 0.6253 | - |\n| 4.6632 | 900 | 0.0117 | 0.0668 | 0.6467 | - |\n| 5.1813 | 1000 | 0.0101 | 0.0655 | 0.6491 | - |\n| 5.6995 | 1100 | 0.0066 | 0.0604 | 0.6800 | - |\n| 6.2176 | 1200 | 0.0057 | 0.0605 | 0.6776 | - |\n| 6.7358 | 1300 | 0.0037 | 0.0606 | 0.6765 | - |\n| 7.2539 | 1400 | 0.003 | 0.0603 | 0.6760 | - |\n| 7.7720 | 1500 | 0.0027 | 0.0587 | 0.6872 | - |\n| 8.2902 | 1600 | 0.0019 | 0.0588 | 0.6862 | - |\n| **8.8083** | **1700** | **0.0018** | **0.0584** | **0.6895** | **-** |\n| 9.3264 | 1800 | 0.0016 | 0.0587 | 0.6871 | - |\n| 9.8446 | 1900 | 0.0014 | 0.0589 | 0.6856 | - |\n| 10.0 | 1930 | - | - | - | 0.6895 |\n\n* The bold row denotes the saved checkpoint.\n\n\n\n\n\n\n\n"},"matched_bigbio_names":{"kind":"list like","value":["PCR"],"string":"[\n \"PCR\"\n]"}}},{"rowIdx":2296,"cells":{"id":{"kind":"string","value":"BSC-LT/salamandra-2b-base-fp8"},"author":{"kind":"string","value":"BSC-LT"},"task_category":{"kind":"string","value":"text-generation"},"tags":{"kind":"list like","value":["transformers","safetensors","llama","text-generation","bg","ca","code","cs","cy","da","de","el","en","es","et","eu","fi","fr","ga","gl","hr","hu","it","lt","lv","mt","nl","nn","oc","pl","pt","ro","ru","sh","sk","sl","sr","sv","uk","base_model:BSC-LT/salamandra-2b","base_model:finetune:BSC-LT/salamandra-2b","license:apache-2.0","autotrain_compatible","text-generation-inference","endpoints_compatible","region:eu"],"string":"[\n \"transformers\",\n \"safetensors\",\n \"llama\",\n \"text-generation\",\n \"bg\",\n \"ca\",\n \"code\",\n \"cs\",\n \"cy\",\n \"da\",\n \"de\",\n \"el\",\n \"en\",\n \"es\",\n \"et\",\n \"eu\",\n \"fi\",\n \"fr\",\n \"ga\",\n \"gl\",\n \"hr\",\n \"hu\",\n \"it\",\n \"lt\",\n \"lv\",\n \"mt\",\n \"nl\",\n \"nn\",\n \"oc\",\n \"pl\",\n \"pt\",\n \"ro\",\n \"ru\",\n \"sh\",\n \"sk\",\n \"sl\",\n \"sr\",\n \"sv\",\n \"uk\",\n \"base_model:BSC-LT/salamandra-2b\",\n \"base_model:finetune:BSC-LT/salamandra-2b\",\n \"license:apache-2.0\",\n \"autotrain_compatible\",\n \"text-generation-inference\",\n \"endpoints_compatible\",\n \"region:eu\"\n]"},"created_time":{"kind":"timestamp","value":"2024-10-30T10:10:39Z","string":"2024-10-30T10:10:39Z"},"last_modified":{"kind":"string","value":"2024-11-07T18:48:47+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: BSC-LT/salamandra-2b\nlanguage:\n- bg\n- ca\n- code\n- cs\n- cy\n- da\n- de\n- el\n- en\n- es\n- et\n- eu\n- fi\n- fr\n- ga\n- gl\n- hr\n- hu\n- it\n- lt\n- lv\n- mt\n- nl\n- nn\n- \\no\n- oc\n- pl\n- pt\n- ro\n- ru\n- sh\n- sk\n- sl\n- sr\n- sv\n- uk\nlibrary_name: transformers\nlicense: apache-2.0\npipeline_tag: text-generation\n---\n\n![image/png](https://cdn-uploads.huggingface.co/production/uploads/633b489acbdbadd99c0b75ef/rremJczEA0mULGHHKol6S.png)\n\n# Salamandra-2b-fp8 Model Card\n\nThis model is the fp8-quantized version of 
[Salamandra-2b](https://huggingface.co/BSC-LT/salamandra-2b).\n\nThe model weights are quantized from FP16 to FP8 (8-bit weights) using the FP8 quantization algorithm \nfrom [NeuralMagic](https://neuralmagic.com/blog/vllm-brings-fp8-inference-to-the-open-source-community/). \nInferencing with this model can be done using [VLLM](https://docs.vllm.ai/en/stable/models/engine_args.html). \n\nSalamandra is a highly multilingual model pre-trained from scratch that comes in three different \nsizes — 2B, 7B and 40B parameters — with their respective base and instruction-tuned variants, \npromoted and financed by the Government of Catalonia through the [Aina Project](https://projecteaina.cat/) \nand the _Ministerio para la Transformación Digital y de la Función Pública_ - Funded by EU – NextGenerationEU \nwithin the framework of [ILENIA Project](https://proyectoilenia.es/) with reference 2022/TL22/00215337.\n\nThis model card corresponds to the fp8-quantized version of Salamandra-2b.\n\nThe entire Salamandra family is released under a permissive [Apache 2.0 license]((https://www.apache.org/licenses/LICENSE-2.0)).\n\n## How to Use\n\nThe following example code works under ``Python 3.9.16``, ``vllm==0.6.3.post1``, ``torch==2.4.0`` and ``torchvision==0.19.0``, though it should run on\nany current version of the libraries. This is an example of how to create a text completion using the model:\n\n```\nfrom vllm import LLM, SamplingParams\n\nmodel_name = \"BSC-LT/salamandra-2b-base-fp8\"\nllm = LLM(model=model_name)\n\noutputs = llm.generate(\"El mercat del barri \",\n sampling_params=SamplingParams(\n temperature=0.5,\n max_tokens=200)\n )\nprint(outputs[0].outputs[0].text)\n\n```\n\n### Author\nInternational Business Machines (IBM).\n\n### Copyright\nInternational Business Machines (IBM).\n\n### Contact\nFor further information, please send an email to .\n\n### Acknowledgements\nWe appreciate the collaboration with IBM in this work. \nSpecifically, the IBM team created fp8-quantized version of the Salamandra-2b model released here. \n\n### Disclaimer\nBe aware that the model may contain biases or other unintended distortions. 
\nWhen third parties deploy systems or provide services based on this model, or use the model themselves, \nthey bear the responsibility for mitigating any associated risks and ensuring compliance with applicable \nregulations, including those governing the use of Artificial Intelligence.\n\nBarcelona Supercomputing Center and International Business Machines shall \nnot be held liable for any outcomes resulting from third-party use.\n\n### License\n[Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)\n"},"matched_bigbio_names":{"kind":"list like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2297,"cells":{"id":{"kind":"string","value":"glif-loradex-trainer/insectagon_Keane_eyes"},"author":{"kind":"string","value":"glif-loradex-trainer"},"task_category":{"kind":"string","value":"text-to-image"},"tags":{"kind":"list like","value":["diffusers","text-to-image","template:sd-lora","base_model:black-forest-labs/FLUX.1-dev","base_model:finetune:black-forest-labs/FLUX.1-dev","license:other","region:us","flux","lora","base_model:adapter:black-forest-labs/FLUX.1-dev"],"string":"[\n \"diffusers\",\n \"text-to-image\",\n \"template:sd-lora\",\n \"base_model:black-forest-labs/FLUX.1-dev\",\n \"base_model:finetune:black-forest-labs/FLUX.1-dev\",\n \"license:other\",\n \"region:us\",\n \"flux\",\n \"lora\",\n \"base_model:adapter:black-forest-labs/FLUX.1-dev\"\n]"},"created_time":{"kind":"timestamp","value":"2024-11-01T10:32:32Z","string":"2024-11-01T10:32:32Z"},"last_modified":{"kind":"string","value":"2024-11-01T10:33:44+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model: black-forest-labs/FLUX.1-dev\nlicense: other\nlicense_name: flux-1-dev-non-commercial-license\nlicense_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md\ntags:\n- diffusers\n- text-to-image\n- template:sd-lora\n- base_model:black-forest-labs/FLUX.1-dev\n- base_model:finetune:black-forest-labs/FLUX.1-dev\n- license:other\n- region:us\n- flux\n- lora\nwidget:\n- output:\n url: samples/1730456976857__000003000_0.jpg\n text: A cartoon Jedi with green lightsaber [K3ane]\n- output:\n url: samples/1730457000473__000003000_1.jpg\n text: A lion roaring [K3ane]\n- output:\n url: samples/1730457024097__000003000_2.jpg\n text: AN ACTION SCENE [K3ane]\n- output:\n url: samples/1730457048123__000003000_3.jpg\n text: A woman holding a cartoon CAT [K3ane]\n- output:\n url: samples/1730457071770__000003000_4.jpg\n text: THE JOKER [K3ane]\n- output:\n url: samples/1730457095526__000003000_5.jpg\n text: BATMAN cartoon IN GOTHAM [K3ane]\n- output:\n url: samples/1730457119895__000003000_6.jpg\n text: a blue Teddy bear Kaiju vs Godzilla [K3ane]\ntrigger: K3ane\ninstance_prompt: K3ane\n---\n# Keane_eyes\nModel trained with [AI Toolkit by Ostris](https://github.com/ostris/ai-toolkit) under the [Glif Loradex program](https://huggingface.co/glif-loradex-trainer) by [Glif](https://glif.app) user `insectagon`.\n\n## Trigger words\nYou should use `K3ane` to trigger the image generation.\n## Download model\nWeights for this model are available in Safetensors format.\n[Download](/glif-loradex-trainer/insectagon_Keane_eyes/tree/main) them in the Files & versions tab.\n\n## License\nThis model is licensed under the [flux-1-dev-non-commercial-license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md).\n\n"},"matched_bigbio_names":{"kind":"list 
like","value":["BEAR"],"string":"[\n \"BEAR\"\n]"}}},{"rowIdx":2298,"cells":{"id":{"kind":"string","value":"LocalDoc/TEmA-small"},"author":{"kind":"string","value":"LocalDoc"},"task_category":{"kind":"string","value":"sentence-similarity"},"tags":{"kind":"list like","value":["pytorch","bert","labse","sentence-similarity","az","base_model:sentence-transformers/LaBSE","base_model:finetune:sentence-transformers/LaBSE","doi:10.57967/hf/3429","license:cc-by-4.0","region:us"],"string":"[\n \"pytorch\",\n \"bert\",\n \"labse\",\n \"sentence-similarity\",\n \"az\",\n \"base_model:sentence-transformers/LaBSE\",\n \"base_model:finetune:sentence-transformers/LaBSE\",\n \"doi:10.57967/hf/3429\",\n \"license:cc-by-4.0\",\n \"region:us\"\n]"},"created_time":{"kind":"timestamp","value":"2024-11-03T10:03:28Z","string":"2024-11-03T10:03:28Z"},"last_modified":{"kind":"string","value":"2024-11-03T17:26:45+00:00"},"downloads":{"kind":"number","value":22,"string":"22"},"likes":{"kind":"number","value":0,"string":"0"},"README":{"kind":"string","value":"---\nbase_model:\n- sentence-transformers/LaBSE\nlanguage:\n- az\nlicense: cc-by-4.0\nmetrics:\n- pearsonr\npipeline_tag: sentence-similarity\ntags:\n- labse\nwidget:\n- source_sentence: Bu xoşbəxt bir insandır\n sentences:\n - Bu xoşbəxt bir itdir\n - Bu çox xoşbəxt bir insandır\n - Bu gün günəşli bir gündür\n example_title: Sentence Similarity\n---\n\n# TEmA-small\n\nThis model is a fine-tuned version of the [LaBSE](https://huggingface.co/sentence-transformers/LaBSE), which is specialized for sentence similarity tasks in Azerbaijan texts. \nIt maps sentences and paragraphs to a 768-dimensional dense vector space, useful for tasks like clustering, semantic search, and more.\n\n\n\n\n## Benchmark Results\n\n| STSBenchmark | biosses-sts | sickr-sts | sts12-sts | sts13-sts | sts15-sts | sts16-sts | Average Pearson | Model |\n|--------------|-------------|-----------|-----------|-----------|-----------|-----------|-----------------|------------------------------------|\n| 0.8253 | 0.7859 | 0.7924 | 0.8444 | 0.7490 | 0.8141 | 0.7600 | 0.7959 | TEmA-small |\n| 0.7872 | 0.8303 | 0.7801 | 0.7978 | 0.6963 | 0.8052 | 0.7794 | 0.7823 | Cohere/embed-multilingual-v3.0 |\n| 0.7927 | 0.6672 | 0.7758 | 0.8122 | 0.7312 | 0.7831 | 0.7416 | 0.7577 | BAAI/bge-m3 |\n| 0.7572 | 0.8139 | 0.7328 | 0.7646 | 0.6318 | 0.7542 | 0.7092 | 0.7377 | intfloat/multilingual-e5-large-instruct |\n| 0.7252 | 0.7801 | 0.7250 | 0.6725 | 0.7446 | 0.7301 | 0.7454 | 0.7318 | Cohere/embed-multilingual-v2.0 |\n| 0.7485 | 0.7714 | 0.7271 | 0.7170 | 0.6496 | 0.7570 | 0.7255 | 0.7280 | intfloat/multilingual-e5-large |\n| 0.7245 | 0.8237 | 0.6839 | 0.6570 | 0.7125 | 0.7612 | 0.7386 | 0.7288 | OpenAI/text-embedding-3-large |\n| 0.7363 | 0.8148 | 0.7067 | 0.7050 | 0.6535 | 0.7514 | 0.7070 | 0.7250 | sentence-transformers/LaBSE |\n| 0.7376 | 0.7917 | 0.7190 | 0.7441 | 0.6286 | 0.7461 | 0.7026 | 0.7242 | intfloat/multilingual-e5-small |\n| 0.7192 | 0.8198 | 0.7160 | 0.7338 | 0.5815 | 0.7318 | 0.6973 | 0.7142 | Cohere/embed-multilingual-light-v3.0 |\n| 0.6960 | 0.8185 | 0.6950 | 0.6752 | 0.5899 | 0.7186 | 0.6790 | 0.6960 | intfloat/multilingual-e5-base |\n| 0.5830 | 0.2486 | 0.5921 | 0.5593 | 0.5559 | 0.5404 | 0.5289 | 0.5155 | antoinelouis/colbert-xm |\n\n\n[STS-Benchmark](https://github.com/LocalDoc-Azerbaijan/STS-Benchmark)\n\n\n\n\n## Accuracy Results\n- **Cosine Distance:** 96.63\n- **Manhattan Distance:** 96.52\n- **Euclidean Distance:** 96.57\n\n\n\n\n## Usage\n\n```python\nfrom transformers import 
AutoTokenizer, AutoModel
import torch

# Mean Pooling - Take attention mask into account for correct averaging
def mean_pooling(model_output, attention_mask):
    token_embeddings = model_output[0]  # First element of model_output contains all token embeddings
    input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
    return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9)

# Function to normalize embeddings
def normalize_embeddings(embeddings):
    return embeddings / embeddings.norm(dim=1, keepdim=True)

# Sentences we want embeddings for
sentences = [
    "Bu xoşbəxt bir insandır",
    "Bu çox xoşbəxt bir insandır",
    "Bu gün günəşli bir gündür"
]

# Load model from HuggingFace Hub
tokenizer = AutoTokenizer.from_pretrained('LocalDoc/TEmA-small')
model = AutoModel.from_pretrained('LocalDoc/TEmA-small')

# Tokenize sentences
encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors='pt')

# Compute token embeddings
with torch.no_grad():
    model_output = model(**encoded_input)

# Perform pooling
sentence_embeddings = mean_pooling(model_output, encoded_input['attention_mask'])

# Normalize embeddings
sentence_embeddings = normalize_embeddings(sentence_embeddings)

# Calculate cosine similarities
cosine_similarities = torch.nn.functional.cosine_similarity(
    sentence_embeddings[0].unsqueeze(0),
    sentence_embeddings[1:],
    dim=1
)

print("Cosine Similarities:")
for i, score in enumerate(cosine_similarities):
    print(f"Sentence 1 <-> Sentence {i+2}: {score:.4f}")
```
matched_bigbio_names: [ "BIOSSES" ]

id: JunxiongWang/Llama3.1-Mamba-8B-dpo
author: JunxiongWang
task_category: null
tags: [ "pytorch", "llama", "arxiv:2408.15237", "license:apache-2.0", "region:us" ]
created_time: 2024-11-17T04:04:45Z
last_modified: 2024-11-17T04:20:27+00:00
downloads: 22
likes: 0
README:
---
license: apache-2.0
---

Zero-shot results when using the [Llama-3.1-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct) as the teacher model, and the [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) as the initialized model:

| Task | Llama-3.1-8B-Instruct | Llama3.1-Mamba-8B-distill | Llama3.1-Mamba-8B-dpo | Llama3.1-Mamba2-8B-distill | Llama3.1-Mamba2-8B-dpo |
|---------------|--------|--------|--------|--------|--------|
| arc_challenge | 0.552  | 0.5384 | 0.5657 | 0.5265 | 0.5973 |
| arc_easy      | 0.8178 | 0.8224 | 0.8401 | 0.822  | 0.8481 |
| hellaswag     | 0.7921 | 0.7591 | 0.7736 | 0.7536 | 0.7969 |
| mmlu (0 shot) | 0.6812 | 0.6213 | 0.636  | 0.6101 | 0.5974 |
| openbookqa    | 0.432  | 0.428  | 0.442  | 0.416  | 0.44   |
| piqa          | 0.8079 | 0.7933 | 0.8041 | 0.7889 | 0.8003 |
| pubmedqa      | 0.752  | 0.72   | 0.744  | 0.726  | 0.746  |
| race          | 0.4478 | 0.4211 | 0.4344 | 0.4211 | 0.4612 |
| winogrande    | 0.7388 | 0.7277 | 0.738  | 0.7174 | 0.7411 |
| truthful      | 0.4267 | 0.4002 | 0.4607 | 0.4031 | 0.5022 |

```
@article{junxiongdaniele2024mambainllama,
  title   = {The Mamba in the Llama: Distilling and Accelerating Hybrid Models},
  author  = {Junxiong Wang and Daniele Paliotta and Avner May and Alexander M. Rush and Tri Dao},
  journal = {arXiv preprint arXiv:2408.15237},
  year    = {2024}
}
```
matched_bigbio_names: [ "PUBMEDQA" ]
Dataset columns, as summarized by the viewer:

| Column | Type | Range / values |
|---|---|---|
| id | string | lengths 9 to 104 |
| author | string | lengths 3 to 36 |
| task_category | string | 32 classes |
| tags | list | lengths 1 to 4.05k |
| created_time | timestamp[ns, tz=UTC] | 2022-03-02 23:29:04 to 2025-03-18 02:34:30 |
| last_modified | string (date) | 2021-02-13 00:06:56 to 2025-03-18 09:30:19 |
| downloads | int64 | 0 to 15.6M |
| likes | int64 | 0 to 4.86k |
| README | string | lengths 44 to 1.01M |
| matched_bigbio_names | list | lengths 1 to 8 |
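For orientation, here is a minimal sketch of reading these columns with the `datasets` library. The repository id (`Euanyu/bigbio_dataset_models`) is taken from the page metadata and the `train` split name is an assumption; the page also flags the dataset as gated, so access may need to be granted first.

```python
# Minimal sketch (assumptions: repo id from the page metadata, a "train" split,
# and that the caller already has access to this gated dataset).
from datasets import load_dataset

ds = load_dataset("Euanyu/bigbio_dataset_models", split="train")

row = ds[0]
print(row["id"], row["author"], row["downloads"], row["matched_bigbio_names"])
```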
id: RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf
author: RichardErkhov
task_category: null
tags: [ "gguf", "endpoints_compatible", "region:us", "conversational" ]
created_time: 2024-11-03T19:27:58Z
last_modified: 2024-11-03T20:28:33+00:00
downloads: 24
likes: 0
README:
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Phi-3-mini-128k-instruct-LLaMAfied - GGUF - Model creator: https://huggingface.co/ThomasComics/ - Original model: https://huggingface.co/ThomasComics/Phi-3-mini-128k-instruct-LLaMAfied/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Phi-3-mini-128k-instruct-LLaMAfied.Q2_K.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q2_K.gguf) | Q2_K | 1.35GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q3_K_S.gguf) | Q3_K_S | 1.57GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q3_K.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q3_K.gguf) | Q3_K | 1.75GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q3_K_M.gguf) | Q3_K_M | 1.75GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q3_K_L.gguf) | Q3_K_L | 1.9GB | | [Phi-3-mini-128k-instruct-LLaMAfied.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.IQ4_XS.gguf) | IQ4_XS | 1.93GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q4_0.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q4_0.gguf) | Q4_0 | 2.03GB | | [Phi-3-mini-128k-instruct-LLaMAfied.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.IQ4_NL.gguf) | IQ4_NL | 2.04GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q4_K_S.gguf) | Q4_K_S | 2.04GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q4_K.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q4_K.gguf) | Q4_K | 2.16GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q4_K_M.gguf) | Q4_K_M | 2.16GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q4_1.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q4_1.gguf) | Q4_1 | 2.24GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q5_0.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q5_0.gguf) | Q5_0 | 2.46GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q5_K_S.gguf) | Q5_K_S | 2.46GB | | 
[Phi-3-mini-128k-instruct-LLaMAfied.Q5_K.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q5_K.gguf) | Q5_K | 2.53GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q5_K_M.gguf) | Q5_K_M | 2.53GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q5_1.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q5_1.gguf) | Q5_1 | 2.68GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q6_K.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q6_K.gguf) | Q6_K | 2.92GB | | [Phi-3-mini-128k-instruct-LLaMAfied.Q8_0.gguf](https://huggingface.co/RichardErkhov/ThomasComics_-_Phi-3-mini-128k-instruct-LLaMAfied-gguf/blob/main/Phi-3-mini-128k-instruct-LLaMAfied.Q8_0.gguf) | Q8_0 | 3.78GB | Original model description: --- license: mit license_link: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/LICENSE language: - en pipeline_tag: text-generation tags: - nlp - code --- ## Model Summary The Phi-3-Mini-4K-Instruct is a 3.8B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties. The model belongs to the Phi-3 family with the Mini version in two variants [4K](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) which is the context length (in tokens) that it can support. The model has underwent a post-training process that incorporates both supervised fine-tuning and direct preference optimization for the instruction following and safety measures. When assessed against benchmarks testing common sense, language understanding, math, code, long context and logical reasoning, Phi-3 Mini-4K-Instruct showcased a robust and state-of-the-art performance among models with less than 13 billion parameters. Resources and Technical Documentation: + [Phi-3 Microsoft Blog](https://aka.ms/phi3blog-april) + [Phi-3 Technical Report](https://aka.ms/phi3-tech-report) + [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai) + Phi-3 GGUF: [4K](https://aka.ms/Phi3-mini-4k-instruct-gguf) + Phi-3 ONNX: [4K](https://aka.ms/Phi3-mini-4k-instruct-onnx) ## Intended Uses **Primary use cases** The model is intended for commercial and research use in English. The model provides uses for applications which require: 1) Memory/compute constrained environments 2) Latency bound scenarios 3) Strong reasoning (especially code, math and logic) Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. **Use case considerations** Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. 
Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. ## How to Use Phi-3 Mini-4K-Instruct has been integrated in the development version (4.40.0) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following: * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source. The current `transformers` version can be verified with: `pip list | grep transformers`. Phi-3 Mini-4K-Instruct is also available in [HuggingChat](https://aka.ms/try-phi3-hf-chat). ### Chat Format Given the nature of the training data, the Phi-3 Mini-4K-Instruct model is best suited for prompts using the chat format as follows. You can provide the prompt as a question with a generic template as follow: ```markdown <|user|>\nQuestion <|end|>\n<|assistant|> ``` For example: ```markdown <|system|> You are a helpful AI assistant.<|end|> <|user|> How to explain Internet for a medieval knight?<|end|> <|assistant|> ``` where the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following: ```markdown <|system|> You are a helpful AI assistant.<|end|> <|user|> I am going to Paris, what should I see?<|end|> <|assistant|> Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."<|end|> <|user|> What is so great about #1?<|end|> <|assistant|> ``` ### Sample inference code This code snippets show how to get quickly started with running the model on a GPU: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline torch.random.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( "microsoft/Phi-3-mini-4k-instruct", device_map="cuda", torch_dtype="auto", trust_remote_code=True, ) tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct") messages = [ {"role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user."}, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. 
Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."}, {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"}, ] pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, ) generation_args = { "max_new_tokens": 500, "return_full_text": False, "temperature": 0.0, "do_sample": False, } output = pipe(messages, **generation_args) print(output[0]['generated_text']) ``` ## Responsible AI Considerations Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: + Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. + Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. + Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. + Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include: + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. + Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). 
+ Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. + Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations. ## Training ### Model * Architecture: Phi-3 Mini-4K-Instruct has 3.8B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidlines. * Inputs: Text. It is best suited for prompts using chat format. * Context length: 4K tokens * GPUs: 512 H100-80G * Training time: 7 days * Training data: 3.3T tokens * Outputs: Generated text in response to the input * Dates: Our models were trained between February and April 2024 * Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models. ### Datasets Our training data includes a wide variety of sources, totaling 3.3 trillion tokens, and is a combination of 1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness. ### Fine-tuning A basic example of multi-GPUs supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/sample_finetune.py). ## Benchmarks We report the results for Phi-3-Mini-4K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Phi-2, Mistral-7b-v0.1, Mixtral-8x7b, Gemma 7B, Llama-3-8B-Instruct, and GPT-3.5. All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation. As is now standard, we use few-shot prompts to evaluate the models, at temperature 0. The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3. More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model. The number of k–shot examples is listed per-benchmark. 
| | Phi-3-Mini-4K-In<br>3.8b | Phi-3-Small<br>7b (preview) | Phi-3-Medium<br>14b (preview) | Phi-2<br>2.7b | Mistral<br>7b | Gemma<br>7b | Llama-3-In<br>8b | Mixtral<br>8x7b | GPT-3.5<br>version 1106 | |---|---|---|---|---|---|---|---|---|---| | MMLU <br>5-Shot | 68.8 | 75.3 | 78.2 | 56.3 | 61.7 | 63.6 | 66.5 | 68.4 | 71.4 | | HellaSwag <br> 5-Shot | 76.7 | 78.7 | 83.2 | 53.6 | 58.5 | 49.8 | 71.1 | 70.4 | 78.8 | | ANLI <br> 7-Shot | 52.8 | 55.0 | 58.7 | 42.5 | 47.1 | 48.7 | 57.3 | 55.2 | 58.1 | | GSM-8K <br> 0-Shot; CoT | 82.5 | 86.4 | 90.8 | 61.1 | 46.4 | 59.8 | 77.4 | 64.7 | 78.1 | | MedQA <br> 2-Shot | 53.8 | 58.2 | 69.8 | 40.9 | 49.6 | 50.0 | 60.5 | 62.2 | 63.4 | | AGIEval <br> 0-Shot | 37.5 | 45.0 | 49.7 | 29.8 | 35.1 | 42.1 | 42.0 | 45.2 | 48.4 | | TriviaQA <br> 5-Shot | 64.0 | 59.1 | 73.3 | 45.2 | 72.3 | 75.2 | 67.7 | 82.2 | 85.8 | | Arc-C <br> 10-Shot | 84.9 | 90.7 | 91.9 | 75.9 | 78.6 | 78.3 | 82.8 | 87.3 | 87.4 | | Arc-E <br> 10-Shot | 94.6 | 97.1 | 98.0 | 88.5 | 90.6 | 91.4 | 93.4 | 95.6 | 96.3 | | PIQA <br> 5-Shot | 84.2 | 87.8 | 88.2 | 60.2 | 77.7 | 78.1 | 75.7 | 86.0 | 86.6 | | SociQA <br> 5-Shot | 76.6 | 79.0 | 79.4 | 68.3 | 74.6 | 65.5 | 73.9 | 75.9 | 68.3 | | BigBench-Hard <br> 0-Shot | 71.7 | 75.0 | 82.5 | 59.4 | 57.3 | 59.6 | 51.5 | 69.7 | 68.32 | | WinoGrande <br> 5-Shot | 70.8 | 82.5 | 81.2 | 54.7 | 54.2 | 55.6 | 65 | 62.0 | 68.8 | | OpenBookQA <br> 10-Shot | 83.2 | 88.4 | 86.6 | 73.6 | 79.8 | 78.6 | 82.6 | 85.8 | 86.0 | | BoolQ <br> 0-Shot | 77.6 | 82.9 | 86.5 | -- | 72.2 | 66.0 | 80.9 | 77.6 | 79.1 | | CommonSenseQA <br> 10-Shot | 80.2 | 80.3 | 82.6 | 69.3 | 72.6 | 76.2 | 79 | 78.1 | 79.6 | | TruthfulQA <br> 10-Shot | 65.0 | 68.1 | 74.8 | -- | 52.1 | 53.0 | 63.2 | 60.1 | 85.8 | | HumanEval <br> 0-Shot | 59.1 | 59.1 | 54.7 | 59.0 | 28.0 | 34.1 | 60.4 | 37.8 | 62.2 | | MBPP <br> 3-Shot | 53.8 | 71.4 | 73.7 | 60.6 | 50.8 | 51.5 | 67.7 | 60.2 | 77.8 | ## Software * [PyTorch](https://github.com/pytorch/pytorch) * [DeepSpeed](https://github.com/microsoft/DeepSpeed) * [Transformers](https://github.com/huggingface/transformers) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) ## Hardware Note that by default, the Phi-3-mini model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types: * NVIDIA A100 * NVIDIA A6000 * NVIDIA H100 If you want to run the model on: * NVIDIA V100 or earlier generation GPUs: call AutoModelForCausalLM.from_pretrained() with attn_implementation="eager" * CPU: use the **GGUF** quantized models [4K](https://aka.ms/Phi3-mini-4k-instruct-gguf) + Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [4K](https://aka.ms/Phi3-mini-4k-instruct-onnx) ## Cross Platform Support ONNX runtime ecosystem now supports Phi-3 Mini models across platforms and hardware. You can find the optimized Phi-3 Mini-4K-Instruct ONNX model [here](https://aka.ms/phi3-mini-4k-instruct-onnx). Optimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML support lets developers bring hardware acceleration to Windows devices at scale across AMD, Intel, and NVIDIA GPUs. Along with DirectML, ONNX Runtime provides cross platform support for Phi-3 across a range of devices CPU, GPU, and mobile. Here are some of the optimized configurations we have added: 1. 
ONNX models for int4 DML: Quantized to int4 via AWQ 2. ONNX model for fp16 CUDA 3. ONNX model for int4 CUDA: Quantized to int4 via RTN 4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN ## License The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-4k/resolve/main/LICENSE). ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos are subject to those third-party’s policies.
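The Hardware section of the card above notes that on NVIDIA V100 or earlier GPUs, which lack flash-attention support, the model should be loaded with `attn_implementation="eager"`. A minimal sketch of what that might look like follows; the dtype, `device_map`, prompt, and generation settings are illustrative assumptions rather than part of the original card.

```python
# Hedged sketch: loading Phi-3-mini with eager attention for GPUs without
# flash-attention support. Only attn_implementation="eager" comes from the card;
# dtype, device_map, and generation settings are illustrative assumptions.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "microsoft/Phi-3-mini-4k-instruct"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.float16,      # half precision for older GPUs
    attn_implementation="eager",    # fall back from flash attention
    trust_remote_code=True,         # the card asks for this while Phi-3 support is pre-release
    device_map="auto",
)

# Chat-format prompt taken from the card's example
prompt = "<|user|>\nHow to explain Internet for a medieval knight?<|end|>\n<|assistant|>"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=100, do_sample=False)
print(tokenizer.decode(output[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```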
[ "MEDQA" ]
id: pankajrajdeo/UMLS-ED-Bioformer-8L-V-1.25
author: pankajrajdeo
task_category: sentence-similarity
tags: [ "sentence-transformers", "safetensors", "bert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:187491593", "loss:CustomTripletLoss", "arxiv:1908.10084", "arxiv:1703.07737", "base_model:pankajrajdeo/UMLS-ED-Bioformer-8L-V-1", "base_model:finetune:pankajrajdeo/UMLS-ED-Bioformer-8L-V-1", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
created_time: 2024-12-07T09:23:11Z
last_modified: 2025-01-05T20:35:59+00:00
downloads: 24
likes: 0
README:
--- base_model: - pankajrajdeo/UMLS-ED-Bioformer-8L-V-1 library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:187491593 - loss:CustomTripletLoss widget: - source_sentence: Hylocharis xantusii sentences: - Xantus's hummingbird - C5721346 - C1623532 - Iole viridescens viridescens - source_sentence: HTLV1+2 RNA XXX Ql PCR sentences: - HTLV 1+2 RNA:MevcEşik:Zmlı:XXX:Srl:Prob.amf.hdf - Nota de progreso:Tipo:Punto temporal:{Configuración}:Documento:Pain medicine - C0368469 - C4070921 - source_sentence: Degeneração Nigroestriatal sentences: - C0270733 - hiperinsulinismo debido a deficiencia de 3-hidroxiacil-coenzima A deshidrogenasa de cadena corta - Striatonigral atrophy - C4303473 - source_sentence: Clostridioides difficile As:titer:moment:serum:semikwantitatief sentences: - Dehidroepiandrosteron:MevcEşik:Zmlı:İdrar:Srl - C0485219 - C0364328 - Clostridium difficile Ac:Título:Pt:Soro:Qn - source_sentence: E Vicotrat sentences: - C2742706 - C2350910 - germanium L-cysteine alpha-tocopherol complex - Eosine I Bluish, Dipotassium Salt --- # SentenceTransformer This is a [sentence-transformers](https://www.SBERT.net) model trained. It maps sentences & paragraphs to a 512-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer <!-- - **Base model:** [Unknown](https://huggingface.co/unknown) --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 512 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 512, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("pankajrajdeo/937457_bioformer_8L") # Run inference sentences = [ 'E Vicotrat', 'Eosine I Bluish, Dipotassium Salt', 'C2742706', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 512] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. 
<details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 187,491,593 training samples * Columns: <code>anchor</code>, <code>positive</code>, <code>negative_id</code>, <code>positive_id</code>, and <code>negative</code> * Approximate statistics based on the first 1000 samples: | | anchor | positive | negative_id | positive_id | negative | |:--------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:--------------------------------------------------------------------------------|:--------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | string | string | string | | details | <ul><li>min: 3 tokens</li><li>mean: 13.27 tokens</li><li>max: 247 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 12.25 tokens</li><li>max: 157 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 6.27 tokens</li><li>max: 7 tokens</li></ul> | <ul><li>min: 5 tokens</li><li>mean: 6.49 tokens</li><li>max: 7 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 13.53 tokens</li><li>max: 118 tokens</li></ul> | * Samples: | anchor | positive | negative_id | positive_id | negative | |:----------------------------------------------|:------------------------------------------------------------------------------------------------|:----------------------|:----------------------|:------------------------------------------------------------------------------------------------| | <code>Zaburzenie metabolizmu minerałów</code> | <code>Distúrbio não especificado do metabolismo de minerais</code> | <code>C2887914</code> | <code>C0154260</code> | <code>Acute alcoholic hepatic failure</code> | | <code>testy funkčnosti placenty</code> | <code>Metoder som brukes til å vurdere morkakefunksjon.</code> | <code>C2350391</code> | <code>C0032049</code> | <code>Hjärtmuskelscintigrafi</code> | | <code>Tsefapiriin:Susc:Pt:Is:OrdQn</code> | <code>cefapirina:susceptibilidad:punto en el tiempo:cepa clínica:ordinal o cuantitativo:</code> | <code>C0942365</code> | <code>C0801894</code> | <code>2 proyecciones:hallazgo:punto en el tiempo:tobillo.izquierdo:Narrativo:radiografía</code> | * Loss: <code>__main__.CustomTripletLoss</code> with these parameters: ```json { "distance_metric": "TripletDistanceMetric.EUCLIDEAN", "triplet_margin": 5 } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `per_device_train_batch_size`: 50 - `learning_rate`: 2e-05 - `num_train_epochs`: 1 - `warmup_ratio`: 0.1 - `fp16`: True #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: no - `prediction_loss_only`: True - `per_device_train_batch_size`: 50 - `per_device_eval_batch_size`: 8 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - 
`eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `eval_use_gather_object`: False - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs <details><summary>Click to expand</summary> | Epoch | Step | Training Loss | |:------:|:------:|:-------------:| | 0.0003 | 1000 | 0.9785 | | 0.0005 | 2000 | 0.925 | | 0.0008 | 3000 | 0.8548 | | 0.0011 | 4000 | 0.7979 | | 0.0013 | 5000 | 0.7635 | | 0.0016 | 6000 | 0.7176 | | 0.0019 | 7000 | 0.6813 | | 0.0021 | 8000 | 0.6225 | | 0.0024 | 9000 | 0.6135 | | 0.0027 | 10000 | 0.5827 | | 0.0029 | 11000 | 0.5695 | | 0.0032 | 12000 | 0.5152 | | 0.0035 | 13000 | 0.5213 | | 0.0037 | 14000 | 0.4895 | | 
0.0040 | 15000 | 0.4942 | | 0.0043 | 16000 | 0.4819 | | 0.0045 | 17000 | 0.4799 | | 0.0048 | 18000 | 0.4572 | | 0.0051 | 19000 | 0.4396 | | 0.0053 | 20000 | 0.4389 | | 0.0056 | 21000 | 0.4269 | | 0.0059 | 22000 | 0.4155 | | 0.0061 | 23000 | 0.4034 | | 0.0064 | 24000 | 0.4067 | | 0.0067 | 25000 | 0.401 | | 0.0069 | 26000 | 0.376 | | 0.0072 | 27000 | 0.3715 | | 0.0075 | 28000 | 0.3788 | | 0.0077 | 29000 | 0.362 | | 0.0080 | 30000 | 0.3644 | | 0.0083 | 31000 | 0.3487 | | 0.0085 | 32000 | 0.3432 | | 0.0088 | 33000 | 0.3394 | | 0.0091 | 34000 | 0.3423 | | 0.0093 | 35000 | 0.3314 | | 0.0096 | 36000 | 0.3447 | | 0.0099 | 37000 | 0.3206 | | 0.0101 | 38000 | 0.3283 | | 0.0104 | 39000 | 0.3183 | | 0.0107 | 40000 | 0.3167 | | 0.0109 | 41000 | 0.3169 | | 0.0112 | 42000 | 0.3122 | | 0.0115 | 43000 | 0.3022 | | 0.0117 | 44000 | 0.3066 | | 0.0120 | 45000 | 0.3002 | | 0.0123 | 46000 | 0.3003 | | 0.0125 | 47000 | 0.2907 | | 0.0128 | 48000 | 0.2843 | | 0.0131 | 49000 | 0.2905 | | 0.0133 | 50000 | 0.2816 | | 0.0136 | 51000 | 0.2959 | | 0.0139 | 52000 | 0.2765 | | 0.0141 | 53000 | 0.2813 | | 0.0144 | 54000 | 0.2715 | | 0.0147 | 55000 | 0.2826 | | 0.0149 | 56000 | 0.2845 | | 0.0152 | 57000 | 0.2709 | | 0.0155 | 58000 | 0.2704 | | 0.0157 | 59000 | 0.2667 | | 0.0160 | 60000 | 0.2589 | | 0.0163 | 61000 | 0.2574 | | 0.0165 | 62000 | 0.2598 | | 0.0168 | 63000 | 0.2427 | | 0.0171 | 64000 | 0.2505 | | 0.0173 | 65000 | 0.265 | | 0.0176 | 66000 | 0.263 | | 0.0179 | 67000 | 0.2521 | | 0.0181 | 68000 | 0.2532 | | 0.0184 | 69000 | 0.256 | | 0.0187 | 70000 | 0.2599 | | 0.0189 | 71000 | 0.2558 | | 0.0192 | 72000 | 0.2526 | | 0.0195 | 73000 | 0.2402 | | 0.0197 | 74000 | 0.2471 | | 0.0200 | 75000 | 0.24 | | 0.0203 | 76000 | 0.2562 | | 0.0205 | 77000 | 0.2398 | | 0.0208 | 78000 | 0.2622 | | 0.0211 | 79000 | 0.235 | | 0.0213 | 80000 | 0.2421 | | 0.0216 | 81000 | 0.2378 | | 0.0219 | 82000 | 0.2323 | | 0.0221 | 83000 | 0.232 | | 0.0224 | 84000 | 0.2319 | | 0.0227 | 85000 | 0.2361 | | 0.0229 | 86000 | 0.2252 | | 0.0232 | 87000 | 0.2282 | | 0.0235 | 88000 | 0.2213 | | 0.0237 | 89000 | 0.2228 | | 0.0240 | 90000 | 0.2265 | | 0.0243 | 91000 | 0.2375 | | 0.0245 | 92000 | 0.2328 | | 0.0248 | 93000 | 0.2318 | | 0.0251 | 94000 | 0.2321 | | 0.0253 | 95000 | 0.2205 | | 0.0256 | 96000 | 0.2319 | | 0.0259 | 97000 | 0.2193 | | 0.0261 | 98000 | 0.2188 | | 0.0264 | 99000 | 0.2196 | | 0.0267 | 100000 | 0.2223 | | 0.0269 | 101000 | 0.2268 | | 0.0272 | 102000 | 0.219 | | 0.0275 | 103000 | 0.206 | | 0.0277 | 104000 | 0.2154 | | 0.0280 | 105000 | 0.2261 | | 0.0283 | 106000 | 0.2112 | | 0.0285 | 107000 | 0.2015 | | 0.0288 | 108000 | 0.2115 | | 0.0291 | 109000 | 0.2145 | | 0.0293 | 110000 | 0.2142 | | 0.0296 | 111000 | 0.2217 | | 0.0299 | 112000 | 0.213 | | 0.0301 | 113000 | 0.2089 | | 0.0304 | 114000 | 0.2089 | | 0.0307 | 115000 | 0.2027 | | 0.0309 | 116000 | 0.217 | | 0.0312 | 117000 | 0.2008 | | 0.0315 | 118000 | 0.2035 | | 0.0317 | 119000 | 0.208 | | 0.0320 | 120000 | 0.2006 | | 0.0323 | 121000 | 0.2089 | | 0.0325 | 122000 | 0.212 | | 0.0328 | 123000 | 0.2074 | | 0.0331 | 124000 | 0.203 | | 0.0333 | 125000 | 0.2038 | | 0.0336 | 126000 | 0.1979 | | 0.0339 | 127000 | 0.197 | | 0.0341 | 128000 | 0.1947 | | 0.0344 | 129000 | 0.2034 | | 0.0347 | 130000 | 0.1924 | | 0.0349 | 131000 | 0.1957 | | 0.0352 | 132000 | 0.1894 | | 0.0355 | 133000 | 0.1934 | | 0.0357 | 134000 | 0.1933 | | 0.0360 | 135000 | 0.1953 | | 0.0363 | 136000 | 0.192 | | 0.0365 | 137000 | 0.1871 | | 0.0368 | 138000 | 0.2053 | | 0.0371 | 139000 | 0.1971 | | 0.0373 | 140000 | 0.1904 | | 
0.0376 | 141000 | 0.1891 | | 0.0379 | 142000 | 0.1876 | | 0.0381 | 143000 | 0.1875 | | 0.0384 | 144000 | 0.194 | | 0.0387 | 145000 | 0.1932 | | 0.0389 | 146000 | 0.1895 | | 0.0392 | 147000 | 0.1937 | | 0.0395 | 148000 | 0.1888 | | 0.0397 | 149000 | 0.1836 | | 0.0400 | 150000 | 0.1886 | | 0.0403 | 151000 | 0.183 | | 0.0405 | 152000 | 0.1896 | | 0.0408 | 153000 | 0.1851 | | 0.0411 | 154000 | 0.1844 | | 0.0413 | 155000 | 0.184 | | 0.0416 | 156000 | 0.1846 | | 0.0419 | 157000 | 0.1876 | | 0.0421 | 158000 | 0.1848 | | 0.0424 | 159000 | 0.1824 | | 0.0427 | 160000 | 0.1844 | | 0.0429 | 161000 | 0.1864 | | 0.0432 | 162000 | 0.1726 | | 0.0435 | 163000 | 0.1838 | | 0.0437 | 164000 | 0.1818 | | 0.0440 | 165000 | 0.1811 | | 0.0443 | 166000 | 0.176 | | 0.0445 | 167000 | 0.1831 | | 0.0448 | 168000 | 0.1791 | | 0.0451 | 169000 | 0.182 | | 0.0453 | 170000 | 0.1814 | | 0.0456 | 171000 | 0.1783 | | 0.0459 | 172000 | 0.1771 | | 0.0461 | 173000 | 0.1806 | | 0.0464 | 174000 | 0.1821 | | 0.0467 | 175000 | 0.1805 | | 0.0469 | 176000 | 0.1698 | | 0.0472 | 177000 | 0.1796 | | 0.0475 | 178000 | 0.1774 | | 0.0477 | 179000 | 0.1703 | | 0.0480 | 180000 | 0.179 | | 0.0483 | 181000 | 0.1839 | | 0.0485 | 182000 | 0.1695 | | 0.0488 | 183000 | 0.1681 | | 0.0491 | 184000 | 0.1783 | | 0.0493 | 185000 | 0.1792 | | 0.0496 | 186000 | 0.1664 | | 0.0499 | 187000 | 0.1711 | | 0.0501 | 188000 | 0.168 | | 0.0504 | 189000 | 0.1722 | | 0.0507 | 190000 | 0.1776 | | 0.0509 | 191000 | 0.1704 | | 0.0512 | 192000 | 0.161 | | 0.0515 | 193000 | 0.1719 | | 0.0517 | 194000 | 0.1679 | | 0.0520 | 195000 | 0.1731 | | 0.0523 | 196000 | 0.1778 | | 0.0525 | 197000 | 0.1658 | | 0.0528 | 198000 | 0.1607 | | 0.0531 | 199000 | 0.1682 | | 0.0533 | 200000 | 0.1675 | | 0.0536 | 201000 | 0.1708 | | 0.0539 | 202000 | 0.1694 | | 0.0541 | 203000 | 0.1767 | | 0.0544 | 204000 | 0.1665 | | 0.0547 | 205000 | 0.1695 | | 0.0549 | 206000 | 0.1693 | | 0.0552 | 207000 | 0.1697 | | 0.0555 | 208000 | 0.1721 | | 0.0557 | 209000 | 0.1633 | | 0.0560 | 210000 | 0.1712 | | 0.0563 | 211000 | 0.1712 | | 0.0565 | 212000 | 0.1646 | | 0.0568 | 213000 | 0.1639 | | 0.0571 | 214000 | 0.1692 | | 0.0573 | 215000 | 0.1694 | | 0.0576 | 216000 | 0.1684 | | 0.0579 | 217000 | 0.1608 | | 0.0581 | 218000 | 0.1663 | | 0.0584 | 219000 | 0.1669 | | 0.0587 | 220000 | 0.1671 | | 0.0589 | 221000 | 0.1632 | | 0.0592 | 222000 | 0.1642 | | 0.0595 | 223000 | 0.1619 | | 0.0597 | 224000 | 0.1672 | | 0.0600 | 225000 | 0.1704 | | 0.0603 | 226000 | 0.1602 | | 0.0605 | 227000 | 0.1548 | | 0.0608 | 228000 | 0.1631 | | 0.0611 | 229000 | 0.1555 | | 0.0613 | 230000 | 0.1666 | | 0.0616 | 231000 | 0.1611 | | 0.0619 | 232000 | 0.1504 | | 0.0621 | 233000 | 0.159 | | 0.0624 | 234000 | 0.1642 | | 0.0627 | 235000 | 0.1573 | | 0.0629 | 236000 | 0.1612 | | 0.0632 | 237000 | 0.1649 | | 0.0635 | 238000 | 0.1687 | | 0.0637 | 239000 | 0.1601 | | 0.0640 | 240000 | 0.1592 | | 0.0643 | 241000 | 0.1606 | | 0.0645 | 242000 | 0.1545 | | 0.0648 | 243000 | 0.1646 | | 0.0651 | 244000 | 0.1576 | | 0.0653 | 245000 | 0.1514 | | 0.0656 | 246000 | 0.1606 | | 0.0659 | 247000 | 0.1517 | | 0.0661 | 248000 | 0.1503 | | 0.0664 | 249000 | 0.1627 | | 0.0667 | 250000 | 0.1555 | | 0.0669 | 251000 | 0.1566 | | 0.0672 | 252000 | 0.1624 | | 0.0675 | 253000 | 0.1495 | | 0.0677 | 254000 | 0.1535 | | 0.0680 | 255000 | 0.1492 | | 0.0683 | 256000 | 0.1494 | | 0.0685 | 257000 | 0.1708 | | 0.0688 | 258000 | 0.1563 | | 0.0691 | 259000 | 0.1541 | | 0.0693 | 260000 | 0.1568 | | 0.0696 | 261000 | 0.1535 | | 0.0699 | 262000 | 0.1519 | | 0.0701 | 263000 | 0.1571 
| | 0.0704 | 264000 | 0.1536 | | 0.0707 | 265000 | 0.147 | | 0.0709 | 266000 | 0.147 | | 0.0712 | 267000 | 0.1537 | | 0.0715 | 268000 | 0.1527 | | 0.0717 | 269000 | 0.1545 | | 0.0720 | 270000 | 0.1523 | | 0.0723 | 271000 | 0.1539 | | 0.0725 | 272000 | 0.1561 | | 0.0728 | 273000 | 0.1513 | | 0.0731 | 274000 | 0.1571 | | 0.0733 | 275000 | 0.1577 | | 0.0736 | 276000 | 0.1613 | | 0.0739 | 277000 | 0.1523 | | 0.0741 | 278000 | 0.1468 | | 0.0744 | 279000 | 0.1534 | | 0.0747 | 280000 | 0.1544 | | 0.0749 | 281000 | 0.1552 | | 0.0752 | 282000 | 0.1514 | | 0.0755 | 283000 | 0.1504 | | 0.0757 | 284000 | 0.149 | | 0.0760 | 285000 | 0.1537 | | 0.0763 | 286000 | 0.1527 | | 0.0765 | 287000 | 0.1482 | | 0.0768 | 288000 | 0.1503 | | 0.0771 | 289000 | 0.1476 | | 0.0773 | 290000 | 0.1535 | | 0.0776 | 291000 | 0.1575 | | 0.0779 | 292000 | 0.1465 | | 0.0781 | 293000 | 0.147 | | 0.0784 | 294000 | 0.147 | | 0.0787 | 295000 | 0.1484 | | 0.0789 | 296000 | 0.1502 | | 0.0792 | 297000 | 0.147 | | 0.0795 | 298000 | 0.1544 | | 0.0797 | 299000 | 0.156 | | 0.0800 | 300000 | 0.1445 | | 0.0803 | 301000 | 0.143 | | 0.0805 | 302000 | 0.1541 | | 0.0808 | 303000 | 0.159 | | 0.0811 | 304000 | 0.1434 | | 0.0813 | 305000 | 0.1511 | | 0.0816 | 306000 | 0.1473 | | 0.0819 | 307000 | 0.1514 | | 0.0821 | 308000 | 0.1491 | | 0.0824 | 309000 | 0.1443 | | 0.0827 | 310000 | 0.1496 | | 0.0829 | 311000 | 0.1535 | | 0.0832 | 312000 | 0.152 | | 0.0835 | 313000 | 0.1496 | | 0.0837 | 314000 | 0.1521 | | 0.0840 | 315000 | 0.1459 | | 0.0843 | 316000 | 0.1449 | | 0.0845 | 317000 | 0.148 | | 0.0848 | 318000 | 0.1566 | | 0.0851 | 319000 | 0.149 | | 0.0853 | 320000 | 0.1502 | | 0.0856 | 321000 | 0.1501 | | 0.0859 | 322000 | 0.1447 | | 0.0861 | 323000 | 0.1468 | | 0.0864 | 324000 | 0.1474 | | 0.0867 | 325000 | 0.1455 | | 0.0869 | 326000 | 0.1374 | | 0.0872 | 327000 | 0.1397 | | 0.0875 | 328000 | 0.1468 | | 0.0877 | 329000 | 0.1436 | | 0.0880 | 330000 | 0.1523 | | 0.0883 | 331000 | 0.1407 | | 0.0885 | 332000 | 0.1446 | | 0.0888 | 333000 | 0.1476 | | 0.0891 | 334000 | 0.1487 | | 0.0893 | 335000 | 0.1486 | | 0.0896 | 336000 | 0.1564 | | 0.0899 | 337000 | 0.1487 | | 0.0901 | 338000 | 0.1492 | | 0.0904 | 339000 | 0.1469 | | 0.0907 | 340000 | 0.1487 | | 0.0909 | 341000 | 0.1513 | | 0.0912 | 342000 | 0.151 | | 0.0915 | 343000 | 0.14 | | 0.0917 | 344000 | 0.1487 | | 0.0920 | 345000 | 0.1527 | | 0.0923 | 346000 | 0.1419 | | 0.0925 | 347000 | 0.1541 | | 0.0928 | 348000 | 0.1426 | | 0.0931 | 349000 | 0.1426 | | 0.0933 | 350000 | 0.1503 | | 0.0936 | 351000 | 0.1392 | | 0.0939 | 352000 | 0.1505 | | 0.0941 | 353000 | 0.1452 | | 0.0944 | 354000 | 0.1462 | | 0.0947 | 355000 | 0.1412 | | 0.0949 | 356000 | 0.1438 | | 0.0952 | 357000 | 0.1457 | | 0.0955 | 358000 | 0.1414 | | 0.0957 | 359000 | 0.1458 | | 0.0960 | 360000 | 0.1477 | | 0.0963 | 361000 | 0.1423 | | 0.0965 | 362000 | 0.1498 | | 0.0968 | 363000 | 0.1426 | | 0.0971 | 364000 | 0.1469 | | 0.0973 | 365000 | 0.136 | | 0.0976 | 366000 | 0.142 | | 0.0979 | 367000 | 0.138 | | 0.0981 | 368000 | 0.1439 | | 0.0984 | 369000 | 0.1402 | | 0.0987 | 370000 | 0.1431 | | 0.0989 | 371000 | 0.1382 | | 0.0992 | 372000 | 0.1456 | | 0.0995 | 373000 | 0.1364 | | 0.0997 | 374000 | 0.1424 | | 0.1000 | 375000 | 0.1499 | | 0.1003 | 376000 | 0.1471 | | 0.1005 | 377000 | 0.1401 | | 0.1008 | 378000 | 0.1365 | | 0.1011 | 379000 | 0.1434 | | 0.1013 | 380000 | 0.1422 | | 0.1016 | 381000 | 0.1318 | | 0.1019 | 382000 | 0.15 | | 0.1021 | 383000 | 0.1437 | | 0.1024 | 384000 | 0.138 | | 0.1027 | 385000 | 0.1394 | | 0.1029 | 386000 | 0.1446 | | 
0.1032 | 387000 | 0.1327 | | 0.1035 | 388000 | 0.1448 | | 0.1037 | 389000 | 0.142 | | 0.1040 | 390000 | 0.1446 | | 0.1043 | 391000 | 0.1409 | | 0.1045 | 392000 | 0.1444 | | 0.1048 | 393000 | 0.1353 | | 0.1051 | 394000 | 0.1484 | | 0.1053 | 395000 | 0.1464 | | 0.1056 | 396000 | 0.1293 | | 0.1059 | 397000 | 0.1393 | | 0.1061 | 398000 | 0.1393 | | 0.1064 | 399000 | 0.1473 | | 0.1067 | 400000 | 0.1412 | | 0.1069 | 401000 | 0.1315 | | 0.1072 | 402000 | 0.1419 | | 0.1075 | 403000 | 0.1366 | | 0.1077 | 404000 | 0.1426 | | 0.1080 | 405000 | 0.1401 | | 0.1083 | 406000 | 0.1367 | | 0.1085 | 407000 | 0.139 | | 0.1088 | 408000 | 0.1376 | | 0.1091 | 409000 | 0.1354 | | 0.1093 | 410000 | 0.1405 | | 0.1096 | 411000 | 0.1341 | | 0.1099 | 412000 | 0.1454 | | 0.1101 | 413000 | 0.1375 | | 0.1104 | 414000 | 0.1431 | | 0.1107 | 415000 | 0.1344 | | 0.1109 | 416000 | 0.1313 | | 0.1112 | 417000 | 0.1464 | | 0.1115 | 418000 | 0.1363 | | 0.1117 | 419000 | 0.1346 | | 0.1120 | 420000 | 0.1381 | | 0.1123 | 421000 | 0.1331 | | 0.1125 | 422000 | 0.1349 | | 0.1128 | 423000 | 0.1377 | | 0.1131 | 424000 | 0.1414 | | 0.1133 | 425000 | 0.1366 | | 0.1136 | 426000 | 0.1319 | | 0.1139 | 427000 | 0.1387 | | 0.1141 | 428000 | 0.138 | | 0.1144 | 429000 | 0.1351 | | 0.1147 | 430000 | 0.1373 | | 0.1149 | 431000 | 0.131 | | 0.1152 | 432000 | 0.1302 | | 0.1155 | 433000 | 0.1317 | | 0.1157 | 434000 | 0.1332 | | 0.1160 | 435000 | 0.1344 | | 0.1163 | 436000 | 0.1425 | | 0.1165 | 437000 | 0.1276 | | 0.1168 | 438000 | 0.1314 | | 0.1171 | 439000 | 0.1238 | | 0.1173 | 440000 | 0.1291 | | 0.1176 | 441000 | 0.1311 | | 0.1179 | 442000 | 0.1222 | | 0.1181 | 443000 | 0.1311 | | 0.1184 | 444000 | 0.1423 | | 0.1187 | 445000 | 0.1308 | | 0.1189 | 446000 | 0.1317 | | 0.1192 | 447000 | 0.1369 | | 0.1195 | 448000 | 0.1282 | | 0.1197 | 449000 | 0.1376 | | 0.1200 | 450000 | 0.1253 | | 0.1203 | 451000 | 0.1271 | | 0.1205 | 452000 | 0.131 | | 0.1208 | 453000 | 0.1316 | | 0.1211 | 454000 | 0.1353 | | 0.1213 | 455000 | 0.1277 | | 0.1216 | 456000 | 0.1238 | | 0.1219 | 457000 | 0.1271 | | 0.1221 | 458000 | 0.1319 | | 0.1224 | 459000 | 0.1281 | | 0.1227 | 460000 | 0.1305 | | 0.1229 | 461000 | 0.1376 | | 0.1232 | 462000 | 0.1333 | | 0.1235 | 463000 | 0.1211 | | 0.1237 | 464000 | 0.1211 | | 0.1240 | 465000 | 0.1286 | | 0.1243 | 466000 | 0.1329 | | 0.1245 | 467000 | 0.1227 | | 0.1248 | 468000 | 0.1283 | | 0.1251 | 469000 | 0.1275 | | 0.1253 | 470000 | 0.1362 | | 0.1256 | 471000 | 0.1293 | | 0.1259 | 472000 | 0.1264 | | 0.1261 | 473000 | 0.1241 | | 0.1264 | 474000 | 0.118 | | 0.1267 | 475000 | 0.1279 | | 0.1269 | 476000 | 0.1267 | | 0.1272 | 477000 | 0.1294 | | 0.1275 | 478000 | 0.1299 | | 0.1277 | 479000 | 0.1323 | | 0.1280 | 480000 | 0.1284 | | 0.1283 | 481000 | 0.1299 | | 0.1285 | 482000 | 0.1255 | | 0.1288 | 483000 | 0.1289 | | 0.1291 | 484000 | 0.1256 | | 0.1293 | 485000 | 0.1274 | | 0.1296 | 486000 | 0.1279 | | 0.1299 | 487000 | 0.1234 | | 0.1301 | 488000 | 0.1299 | | 0.1304 | 489000 | 0.1257 | | 0.1307 | 490000 | 0.1195 | | 0.1309 | 491000 | 0.1265 | | 0.1312 | 492000 | 0.1249 | | 0.1315 | 493000 | 0.1254 | | 0.1317 | 494000 | 0.1299 | | 0.1320 | 495000 | 0.1255 | | 0.1323 | 496000 | 0.1316 | | 0.1325 | 497000 | 0.1303 | | 0.1328 | 498000 | 0.1213 | | 0.1331 | 499000 | 0.1182 | | 0.1333 | 500000 | 0.12 | | 0.1336 | 501000 | 0.1193 | | 0.1339 | 502000 | 0.1241 | | 0.1341 | 503000 | 0.1258 | | 0.1344 | 504000 | 0.1279 | | 0.1347 | 505000 | 0.1293 | | 0.1349 | 506000 | 0.1278 | | 0.1352 | 507000 | 0.1241 | | 0.1355 | 508000 | 0.1221 | | 0.1357 | 509000 | 0.1213 
| | 0.1360 | 510000 | 0.1232 | | 0.1363 | 511000 | 0.1278 | | 0.1365 | 512000 | 0.1208 | | 0.1368 | 513000 | 0.1203 | | 0.1371 | 514000 | 0.1251 | | 0.1373 | 515000 | 0.1207 | | 0.1376 | 516000 | 0.1233 | | 0.1379 | 517000 | 0.1287 | | 0.1381 | 518000 | 0.1255 | | 0.1384 | 519000 | 0.1234 | | 0.1387 | 520000 | 0.1198 | | 0.1389 | 521000 | 0.1274 | | 0.1392 | 522000 | 0.1209 | | 0.1395 | 523000 | 0.116 | | 0.1397 | 524000 | 0.1154 | | 0.1400 | 525000 | 0.1197 | | 0.1403 | 526000 | 0.1249 | | 0.1405 | 527000 | 0.1127 | | 0.1408 | 528000 | 0.1221 | | 0.1411 | 529000 | 0.122 | | 0.1413 | 530000 | 0.1251 | | 0.1416 | 531000 | 0.123 | | 0.1419 | 532000 | 0.1222 | | 0.1421 | 533000 | 0.1205 | | 0.1424 | 534000 | 0.1196 | | 0.1427 | 535000 | 0.1172 | | 0.1429 | 536000 | 0.1185 | | 0.1432 | 537000 | 0.1249 | | 0.1435 | 538000 | 0.123 | | 0.1437 | 539000 | 0.1227 | | 0.1440 | 540000 | 0.1198 | | 0.1443 | 541000 | 0.1219 | | 0.1445 | 542000 | 0.1183 | | 0.1448 | 543000 | 0.1203 | | 0.1451 | 544000 | 0.117 | | 0.1453 | 545000 | 0.1157 | | 0.1456 | 546000 | 0.1175 | | 0.1459 | 547000 | 0.1178 | | 0.1461 | 548000 | 0.1155 | | 0.1464 | 549000 | 0.1233 | | 0.1467 | 550000 | 0.1127 | | 0.1469 | 551000 | 0.12 | | 0.1472 | 552000 | 0.1229 | | 0.1475 | 553000 | 0.1211 | | 0.1477 | 554000 | 0.1125 | | 0.1480 | 555000 | 0.1178 | | 0.1483 | 556000 | 0.1178 | | 0.1485 | 557000 | 0.1132 | | 0.1488 | 558000 | 0.1119 | | 0.1491 | 559000 | 0.1157 | | 0.1493 | 560000 | 0.1197 | | 0.1496 | 561000 | 0.1151 | | 0.1499 | 562000 | 0.1217 | | 0.1501 | 563000 | 0.1146 | | 0.1504 | 564000 | 0.1202 | | 0.1507 | 565000 | 0.1165 | | 0.1509 | 566000 | 0.1179 | | 0.1512 | 567000 | 0.115 | | 0.1515 | 568000 | 0.1195 | | 0.1517 | 569000 | 0.1258 | | 0.1520 | 570000 | 0.1139 | | 0.1523 | 571000 | 0.1158 | | 0.1525 | 572000 | 0.1194 | | 0.1528 | 573000 | 0.1131 | | 0.1531 | 574000 | 0.1132 | | 0.1533 | 575000 | 0.1198 | | 0.1536 | 576000 | 0.116 | | 0.1539 | 577000 | 0.1173 | | 0.1541 | 578000 | 0.1175 | | 0.1544 | 579000 | 0.1128 | | 0.1547 | 580000 | 0.1127 | | 0.1549 | 581000 | 0.1168 | | 0.1552 | 582000 | 0.1131 | | 0.1555 | 583000 | 0.1213 | | 0.1557 | 584000 | 0.1182 | | 0.1560 | 585000 | 0.1146 | | 0.1563 | 586000 | 0.1189 | | 0.1565 | 587000 | 0.1153 | | 0.1568 | 588000 | 0.1136 | | 0.1571 | 589000 | 0.1121 | | 0.1573 | 590000 | 0.1082 | | 0.1576 | 591000 | 0.1116 | | 0.1579 | 592000 | 0.113 | | 0.1581 | 593000 | 0.1148 | | 0.1584 | 594000 | 0.1085 | | 0.1587 | 595000 | 0.119 | | 0.1589 | 596000 | 0.1073 | | 0.1592 | 597000 | 0.1157 | | 0.1595 | 598000 | 0.1142 | | 0.1597 | 599000 | 0.1125 | | 0.1600 | 600000 | 0.1112 | | 0.1603 | 601000 | 0.1122 | | 0.1605 | 602000 | 0.1173 | | 0.1608 | 603000 | 0.113 | | 0.1611 | 604000 | 0.1068 | | 0.1613 | 605000 | 0.1131 | | 0.1616 | 606000 | 0.1132 | | 0.1619 | 607000 | 0.1142 | | 0.1621 | 608000 | 0.1169 | | 0.1624 | 609000 | 0.1094 | | 0.1627 | 610000 | 0.1206 | | 0.1629 | 611000 | 0.1129 | | 0.1632 | 612000 | 0.1177 | | 0.1635 | 613000 | 0.1101 | | 0.1637 | 614000 | 0.1102 | | 0.1640 | 615000 | 0.1074 | | 0.1643 | 616000 | 0.1156 | | 0.1645 | 617000 | 0.1061 | | 0.1648 | 618000 | 0.1112 | | 0.1651 | 619000 | 0.1166 | | 0.1653 | 620000 | 0.1035 | | 0.1656 | 621000 | 0.1153 | | 0.1659 | 622000 | 0.1105 | | 0.1661 | 623000 | 0.1128 | | 0.1664 | 624000 | 0.1052 | | 0.1667 | 625000 | 0.1146 | | 0.1669 | 626000 | 0.1092 | | 0.1672 | 627000 | 0.1137 | | 0.1675 | 628000 | 0.1139 | | 0.1677 | 629000 | 0.11 | | 0.1680 | 630000 | 0.1062 | | 0.1683 | 631000 | 0.1136 | | 0.1685 | 632000 | 0.1124 | 
| 0.1688 | 633000 | 0.1087 | | 0.1691 | 634000 | 0.1109 | | 0.1693 | 635000 | 0.1124 | | 0.1696 | 636000 | 0.1074 | | 0.1699 | 637000 | 0.106 | | 0.1701 | 638000 | 0.1102 | | 0.1704 | 639000 | 0.1127 | | 0.1707 | 640000 | 0.108 | | 0.1709 | 641000 | 0.1047 | | 0.1712 | 642000 | 0.107 | | 0.1715 | 643000 | 0.1135 | | 0.1717 | 644000 | 0.1138 | | 0.1720 | 645000 | 0.1087 | | 0.1723 | 646000 | 0.1067 | | 0.1725 | 647000 | 0.1116 | | 0.1728 | 648000 | 0.1107 | | 0.1731 | 649000 | 0.1105 | | 0.1733 | 650000 | 0.1143 | | 0.1736 | 651000 | 0.1098 | | 0.1739 | 652000 | 0.1055 | | 0.1741 | 653000 | 0.1089 | | 0.1744 | 654000 | 0.1047 | | 0.1747 | 655000 | 0.1003 | | 0.1749 | 656000 | 0.1043 | | 0.1752 | 657000 | 0.1112 | | 0.1755 | 658000 | 0.1054 | | 0.1757 | 659000 | 0.1145 | | 0.1760 | 660000 | 0.1093 | | 0.1763 | 661000 | 0.1102 | | 0.1765 | 662000 | 0.1102 | | 0.1768 | 663000 | 0.1086 | | 0.1771 | 664000 | 0.108 | | 0.1773 | 665000 | 0.1046 | | 0.1776 | 666000 | 0.1064 | | 0.1779 | 667000 | 0.1014 | | 0.1781 | 668000 | 0.1039 | | 0.1784 | 669000 | 0.1132 | | 0.1787 | 670000 | 0.1076 | | 0.1789 | 671000 | 0.1075 | | 0.1792 | 672000 | 0.1089 | | 0.1795 | 673000 | 0.1109 | | 0.1797 | 674000 | 0.1035 | | 0.1800 | 675000 | 0.105 | | 0.1803 | 676000 | 0.108 | | 0.1805 | 677000 | 0.1088 | | 0.1808 | 678000 | 0.1094 | | 0.1811 | 679000 | 0.1019 | | 0.1813 | 680000 | 0.1054 | | 0.1816 | 681000 | 0.1041 | | 0.1819 | 682000 | 0.1086 | | 0.1821 | 683000 | 0.1126 | | 0.1824 | 684000 | 0.0996 | | 0.1827 | 685000 | 0.1019 | | 0.1829 | 686000 | 0.1013 | | 0.1832 | 687000 | 0.1043 | | 0.1835 | 688000 | 0.1045 | | 0.1837 | 689000 | 0.1076 | | 0.1840 | 690000 | 0.1046 | | 0.1843 | 691000 | 0.1096 | | 0.1845 | 692000 | 0.0994 | | 0.1848 | 693000 | 0.1049 | | 0.1851 | 694000 | 0.1104 | | 0.1853 | 695000 | 0.1089 | | 0.1856 | 696000 | 0.1039 | | 0.1859 | 697000 | 0.1035 | | 0.1861 | 698000 | 0.1056 | | 0.1864 | 699000 | 0.1058 | | 0.1867 | 700000 | 0.1074 | | 0.1869 | 701000 | 0.1074 | | 0.1872 | 702000 | 0.1122 | | 0.1875 | 703000 | 0.1013 | | 0.1877 | 704000 | 0.1029 | | 0.1880 | 705000 | 0.0997 | | 0.1883 | 706000 | 0.1052 | | 0.1885 | 707000 | 0.1135 | | 0.1888 | 708000 | 0.1114 | | 0.1891 | 709000 | 0.111 | | 0.1893 | 710000 | 0.104 | | 0.1896 | 711000 | 0.1018 | | 0.1899 | 712000 | 0.1077 | | 0.1901 | 713000 | 0.103 | | 0.1904 | 714000 | 0.1083 | | 0.1907 | 715000 | 0.1042 | | 0.1909 | 716000 | 0.1078 | | 0.1912 | 717000 | 0.1014 | | 0.1915 | 718000 | 0.1022 | | 0.1917 | 719000 | 0.1023 | | 0.1920 | 720000 | 0.1041 | | 0.1923 | 721000 | 0.0982 | | 0.1925 | 722000 | 0.1094 | | 0.1928 | 723000 | 0.1085 | | 0.1931 | 724000 | 0.1033 | | 0.1933 | 725000 | 0.1042 | | 0.1936 | 726000 | 0.105 | | 0.1939 | 727000 | 0.1047 | | 0.1941 | 728000 | 0.1014 | | 0.1944 | 729000 | 0.1029 | | 0.1947 | 730000 | 0.1003 | | 0.1949 | 731000 | 0.1071 | | 0.1952 | 732000 | 0.1 | | 0.1955 | 733000 | 0.1074 | | 0.1957 | 734000 | 0.1097 | | 0.1960 | 735000 | 0.1059 | | 0.1963 | 736000 | 0.1042 | | 0.1965 | 737000 | 0.1039 | | 0.1968 | 738000 | 0.104 | | 0.1971 | 739000 | 0.1031 | | 0.1973 | 740000 | 0.1016 | | 0.1976 | 741000 | 0.1039 | | 0.1979 | 742000 | 0.1023 | | 0.1981 | 743000 | 0.0954 | | 0.1984 | 744000 | 0.1035 | | 0.1987 | 745000 | 0.102 | | 0.1989 | 746000 | 0.1081 | | 0.1992 | 747000 | 0.1083 | | 0.1995 | 748000 | 0.1049 | | 0.1997 | 749000 | 0.0957 | | 0.2000 | 750000 | 0.104 | | 0.2003 | 751000 | 0.1074 | | 0.2005 | 752000 | 0.1007 | | 0.2008 | 753000 | 0.1022 | | 0.2011 | 754000 | 0.0987 | | 0.2013 | 755000 | 0.1054 | | 
| 0.2016 | 756000 | 0.0981 |
| 0.2019 | 757000 | 0.0948 |
| 0.2021 | 758000 | 0.0991 |
| 0.2024 | 759000 | 0.1004 |
| 0.2027 | 760000 | 0.1111 |
| 0.2029 | 761000 | 0.0993 |
| 0.2032 | 762000 | 0.1038 |
| 0.2035 | 763000 | 0.103 |
| 0.2037 | 764000 | 0.105 |
| 0.2040 | 765000 | 0.1027 |
| 0.2043 | 766000 | 0.0977 |
| 0.2045 | 767000 | 0.1067 |
| 0.2048 | 768000 | 0.1 |
| 0.2051 | 769000 | 0.1039 |
| 0.2053 | 770000 | 0.0986 |
| 0.2056 | 771000 | 0.1035 |
| 0.2059 | 772000 | 0.1013 |
| 0.2061 | 773000 | 0.1006 |
| 0.2064 | 774000 | 0.1056 |
| 0.2067 | 775000 | 0.0997 |
| 0.2069 | 776000 | 0.0976 |
| 0.2072 | 777000 | 0.0957 |
| 0.2075 | 778000 | 0.0996 |
| 0.2077 | 779000 | 0.1043 |
| 0.2080 | 780000 | 0.0936 |
| 0.2083 | 781000 | 0.1004 |
| 0.2085 | 782000 | 0.1002 |
| 0.2088 | 783000 | 0.101 |
| 0.2091 | 784000 | 0.1018 |
| 0.2093 | 785000 | 0.0955 |
| 0.2096 | 786000 | 0.0933 |
| 0.2099 | 787000 | 0.1031 |
| 0.2101 | 788000 | 0.1016 |
| 0.2104 | 789000 | 0.0948 |
| 0.2107 | 790000 | 0.1 |
| 0.2109 | 791000 | 0.1032 |
| 0.2112 | 792000 | 0.0992 |
| 0.2115 | 793000 | 0.098 |
| 0.2117 | 794000 | 0.0935 |
| 0.2120 | 795000 | 0.0975 |
| 0.2123 | 796000 | 0.101 |
| 0.2125 | 797000 | 0.0968 |
| 0.2128 | 798000 | 0.0955 |
| 0.2131 | 799000 | 0.0987 |
| 0.2133 | 800000 | 0.0991 |
| 0.2136 | 801000 | 0.0949 |
| 0.2139 | 802000 | 0.0899 |
| 0.2141 | 803000 | 0.1008 |
| 0.2144 | 804000 | 0.0943 |
| 0.2147 | 805000 | 0.1011 |
| 0.2149 | 806000 | 0.0978 |
| 0.2152 | 807000 | 0.1021 |
| 0.2155 | 808000 | 0.0967 |
| 0.2157 | 809000 | 0.0989 |
| 0.2160 | 810000 | 0.1007 |
| 0.2163 | 811000 | 0.0965 |
| 0.2165 | 812000 | 0.0983 |
| 0.2168 | 813000 | 0.0965 |
| 0.2171 | 814000 | 0.095 |
| 0.2173 | 815000 | 0.1011 |
| 0.2176 | 816000 | 0.0987 |
| 0.2179 | 817000 | 0.0999 |
| 0.2181 | 818000 | 0.0952 |
| 0.2184 | 819000 | 0.094 |
| 0.2187 | 820000 | 0.0981 |
| 0.2189 | 821000 | 0.0937 |
| 0.2192 | 822000 | 0.0962 |
| 0.2195 | 823000 | 0.096 |
| 0.2197 | 824000 | 0.091 |
| 0.2200 | 825000 | 0.0973 |
| 0.2203 | 826000 | 0.0993 |
| 0.2205 | 827000 | 0.104 |
| 0.2208 | 828000 | 0.0964 |
| 0.2211 | 829000 | 0.1015 |
| 0.2213 | 830000 | 0.0903 |
| 0.2216 | 831000 | 0.0967 |
| 0.2219 | 832000 | 0.1029 |
| 0.2221 | 833000 | 0.0936 |
| 0.2224 | 834000 | 0.0993 |
| 0.2227 | 835000 | 0.0864 |
| 0.2229 | 836000 | 0.0954 |
| 0.2232 | 837000 | 0.0972 |
| 0.2235 | 838000 | 0.0974 |
| 0.2237 | 839000 | 0.0986 |
| 0.2240 | 840000 | 0.0947 |
| 0.2243 | 841000 | 0.0999 |
| 0.2245 | 842000 | 0.0975 |
| 0.2248 | 843000 | 0.0955 |
| 0.2251 | 844000 | 0.0968 |
| 0.2253 | 845000 | 0.0894 |
| 0.2256 | 846000 | 0.096 |
| 0.2259 | 847000 | 0.101 |
| 0.2261 | 848000 | 0.094 |
| 0.2264 | 849000 | 0.0937 |
| 0.2267 | 850000 | 0.1052 |
| 0.2269 | 851000 | 0.0888 |
| 0.2272 | 852000 | 0.0898 |
| 0.2275 | 853000 | 0.0908 |
| 0.2277 | 854000 | 0.0963 |
| 0.2280 | 855000 | 0.0971 |
| 0.2283 | 856000 | 0.0968 |
| 0.2285 | 857000 | 0.0978 |
| 0.2288 | 858000 | 0.0946 |
| 0.2291 | 859000 | 0.1004 |
| 0.2293 | 860000 | 0.0923 |
| 0.2296 | 861000 | 0.0929 |
| 0.2299 | 862000 | 0.0952 |
| 0.2301 | 863000 | 0.0948 |
| 0.2304 | 864000 | 0.0936 |
| 0.2307 | 865000 | 0.092 |
| 0.2309 | 866000 | 0.0894 |
| 0.2312 | 867000 | 0.0922 |
| 0.2315 | 868000 | 0.0946 |
| 0.2317 | 869000 | 0.0967 |
| 0.2320 | 870000 | 0.0965 |
| 0.2323 | 871000 | 0.0966 |
| 0.2325 | 872000 | 0.0927 |
| 0.2328 | 873000 | 0.0931 |
| 0.2331 | 874000 | 0.0901 |
| 0.2333 | 875000 | 0.0929 |
| 0.2336 | 876000 | 0.096 |
| 0.2339 | 877000 | 0.0912 |
| 0.2341 | 878000 | 0.0915 |
| 0.2344 | 879000 | 0.095 |
| 0.2347 | 880000 | 0.0938 |
| 0.2349 | 881000 | 0.0987 |
| 0.2352 | 882000 | 0.0955 |
| 0.2355 | 883000 | 0.091 |
| 0.2357 | 884000 | 0.0909 |
| 0.2360 | 885000 | 0.094 |
| 0.2363 | 886000 | 0.095 |
| 0.2365 | 887000 | 0.0923 |
| 0.2368 | 888000 | 0.0986 |
| 0.2371 | 889000 | 0.0945 |
| 0.2373 | 890000 | 0.0951 |
| 0.2376 | 891000 | 0.0922 |
| 0.2379 | 892000 | 0.0896 |
| 0.2381 | 893000 | 0.095 |
| 0.2384 | 894000 | 0.0915 |
| 0.2387 | 895000 | 0.0907 |
| 0.2389 | 896000 | 0.0917 |
| 0.2392 | 897000 | 0.091 |
| 0.2395 | 898000 | 0.093 |
| 0.2397 | 899000 | 0.0993 |
| 0.2400 | 900000 | 0.0988 |
| 0.2403 | 901000 | 0.093 |
| 0.2405 | 902000 | 0.0905 |
| 0.2408 | 903000 | 0.0968 |
| 0.2411 | 904000 | 0.0918 |
| 0.2413 | 905000 | 0.0937 |
| 0.2416 | 906000 | 0.0971 |
| 0.2419 | 907000 | 0.0896 |
| 0.2421 | 908000 | 0.0936 |
| 0.2424 | 909000 | 0.0923 |
| 0.2427 | 910000 | 0.0959 |
| 0.2429 | 911000 | 0.0901 |
| 0.2432 | 912000 | 0.0937 |
| 0.2435 | 913000 | 0.0968 |
| 0.2437 | 914000 | 0.0889 |
| 0.2440 | 915000 | 0.0921 |
| 0.2443 | 916000 | 0.0945 |
| 0.2445 | 917000 | 0.088 |
| 0.2448 | 918000 | 0.0916 |
| 0.2451 | 919000 | 0.0975 |
| 0.2453 | 920000 | 0.085 |
| 0.2456 | 921000 | 0.0903 |
| 0.2459 | 922000 | 0.0988 |
| 0.2461 | 923000 | 0.0846 |
| 0.2464 | 924000 | 0.0937 |
| 0.2467 | 925000 | 0.0951 |
| 0.2469 | 926000 | 0.092 |
| 0.2472 | 927000 | 0.0989 |
| 0.2475 | 928000 | 0.0835 |
| 0.2477 | 929000 | 0.0925 |
| 0.2480 | 930000 | 0.0953 |
| 0.2483 | 931000 | 0.0885 |
| 0.2485 | 932000 | 0.0887 |
| 0.2488 | 933000 | 0.0868 |
| 0.2491 | 934000 | 0.0882 |
| 0.2493 | 935000 | 0.0933 |
| 0.2496 | 936000 | 0.0896 |
| 0.2499 | 937000 | 0.0917 |

</details>

### Framework Versions
- Python: 3.12.2
- Sentence Transformers: 3.2.1
- Transformers: 4.44.2
- PyTorch: 2.5.0
- Accelerate: 1.0.1
- Datasets: 3.0.2
- Tokenizers: 0.19.1

## Citation

### BibTeX

#### Sentence Transformers
```bibtex
@inproceedings{reimers-2019-sentence-bert,
    title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks",
    author = "Reimers, Nils and Gurevych, Iryna",
    booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing",
    month = "11",
    year = "2019",
    publisher = "Association for Computational Linguistics",
    url = "https://arxiv.org/abs/1908.10084",
}
```

#### CustomTripletLoss
```bibtex
@misc{hermans2017defense,
    title={In Defense of the Triplet Loss for Person Re-Identification},
    author={Alexander Hermans and Lucas Beyer and Bastian Leibe},
    year={2017},
    eprint={1703.07737},
    archivePrefix={arXiv},
    primaryClass={cs.CV}
}
```

<!--
## Glossary

*Clearly define terms in order to be accessible across audiences.*
-->

<!--
## Model Card Authors

*Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.*
-->

<!--
## Model Card Contact

*Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.*
-->
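The CustomTripletLoss cited above is a triplet-style objective. As a point of reference only, the sketch below shows how triplet-loss fine-tuning is typically wired up with the stock `sentence_transformers.losses.TripletLoss`, used here as a stand-in for the custom loss; the base checkpoint and the anchor/positive/negative texts are illustrative placeholders, not taken from this card.

```python
# Minimal sketch (assumptions noted above): standard TripletLoss as an
# approximation of the CustomTripletLoss referenced in the citation section.
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

model = SentenceTransformer("all-MiniLM-L6-v2")  # placeholder base model

# Each example holds (anchor, positive, negative) texts.
train_examples = [
    InputExample(texts=["anchor sentence", "semantically similar sentence", "unrelated sentence"]),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=1)

# Pulls the anchor toward the positive and pushes it away from the negative.
train_loss = losses.TripletLoss(model=model)

model.fit(train_objectives=[(train_dataloader, train_loss)], epochs=1, warmup_steps=0)
```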
[ "PCR" ]
mlx-community/Qwen2.5-Aloe-Beta-7B
mlx-community
question-answering
[ "transformers", "safetensors", "qwen2", "text-generation", "biology", "medical", "healthcare", "mlx", "question-answering", "en", "dataset:HPAI-BSC/Aloe-Beta-General-Collection", "dataset:HPAI-BSC/chain-of-diagnosis", "dataset:HPAI-BSC/MedS-Ins", "dataset:HPAI-BSC/ultramedical", "dataset:HPAI-BSC/pubmedqa-cot-llama31", "dataset:HPAI-BSC/medqa-cot-llama31", "dataset:HPAI-BSC/medmcqa-cot-llama31", "dataset:HPAI-BSC/headqa-cot-llama31", "dataset:HPAI-BSC/MMLU-medical-cot-llama31", "dataset:HPAI-BSC/Polymed-QA", "base_model:HPAI-BSC/Qwen2.5-Aloe-Beta-7B", "base_model:quantized:HPAI-BSC/Qwen2.5-Aloe-Beta-7B", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "4-bit", "region:us" ]
2025-01-15T18:36:12Z
2025-01-15T18:38:14+00:00
24
0
---
base_model: HPAI-BSC/Qwen2.5-Aloe-Beta-7B
datasets:
- HPAI-BSC/Aloe-Beta-General-Collection
- HPAI-BSC/chain-of-diagnosis
- HPAI-BSC/MedS-Ins
- HPAI-BSC/ultramedical
- HPAI-BSC/pubmedqa-cot-llama31
- HPAI-BSC/medqa-cot-llama31
- HPAI-BSC/medmcqa-cot-llama31
- HPAI-BSC/headqa-cot-llama31
- HPAI-BSC/MMLU-medical-cot-llama31
- HPAI-BSC/Polymed-QA
- HPAI-BSC/Aloe-Beta-General-Collection
- HPAI-BSC/Aloe-Beta-General-Collection
language:
- en
library_name: transformers
license: apache-2.0
pipeline_tag: question-answering
tags:
- biology
- medical
- healthcare
- mlx
---

# mlx-community/Qwen2.5-Aloe-Beta-7B

The model [mlx-community/Qwen2.5-Aloe-Beta-7B](https://huggingface.co/mlx-community/Qwen2.5-Aloe-Beta-7B) was converted to MLX format from [HPAI-BSC/Qwen2.5-Aloe-Beta-7B](https://huggingface.co/HPAI-BSC/Qwen2.5-Aloe-Beta-7B) using mlx-lm version **0.20.1**.

## Use with mlx

```bash
pip install mlx-lm
```

```python
from mlx_lm import load, generate

# Load the MLX weights and the matching tokenizer.
model, tokenizer = load("mlx-community/Qwen2.5-Aloe-Beta-7B")

prompt = "hello"

# Apply the chat template when the tokenizer provides one.
if hasattr(tokenizer, "apply_chat_template") and tokenizer.chat_template is not None:
    messages = [{"role": "user", "content": prompt}]
    prompt = tokenizer.apply_chat_template(
        messages, tokenize=False, add_generation_prompt=True
    )

response = generate(model, tokenizer, prompt=prompt, verbose=True)
```
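For readers who want to reproduce a conversion like this one, the sketch below shows the general shape of an mlx-lm conversion call. It is an assumption based on mlx-lm's public `convert` helper rather than the exact command used for this repository, and argument names may differ between mlx-lm releases; the output directory name is a placeholder.

```python
# Hypothetical sketch: converting the upstream checkpoint to a quantized MLX folder.
# `convert` is mlx-lm's conversion helper; arguments shown are illustrative and
# should be checked against the installed mlx-lm version.
from mlx_lm import convert

convert(
    "HPAI-BSC/Qwen2.5-Aloe-Beta-7B",        # source Hugging Face repository
    mlx_path="Qwen2.5-Aloe-Beta-7B-mlx",    # local output directory (placeholder)
    quantize=True,                          # quantized weights, in line with this repo's "4-bit" tag
)
```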
[ "MEDQA", "PUBMEDQA" ]
bhavnicksm/brown-beetle-base-v1.1
bhavnicksm
null
[ "model2vec", "safetensors", "embeddings", "static-embeddings", "sentence-transformers", "mteb", "en", "license:mit", "model-index", "region:us" ]
2025-01-23T15:10:49Z
2025-01-25T00:39:02+00:00
24
1
--- base_model: baai/bge-base-en-v1.5 language: - en library_name: model2vec license: mit tags: - embeddings - static-embeddings - sentence-transformers - mteb model-index: - name: brown-beetle-base-v1.1 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en-ext) type: mteb/amazon_counterfactual config: en-ext split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 66.8815592203898 - type: ap value: 18.195189738998792 - type: ap_weighted value: 18.195189738998792 - type: f1 value: 55.16513139500548 - type: f1_weighted value: 73.16351793408617 - type: main_score value: 66.8815592203898 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 70.19402985074626 - type: ap value: 32.56202127210676 - type: ap_weighted value: 32.56202127210676 - type: f1 value: 64.10866172582675 - type: f1_weighted value: 73.08892104263525 - type: main_score value: 70.19402985074626 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification (default) type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 68.72295000000001 - type: ap value: 63.50967718714144 - type: ap_weighted value: 63.50967718714144 - type: f1 value: 68.34020808585744 - type: f1_weighted value: 68.34020808585744 - type: main_score value: 68.72295000000001 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 31.691999999999997 - type: f1 value: 31.085241040043943 - type: f1_weighted value: 31.085241040043943 - type: main_score value: 31.691999999999997 - task: type: Retrieval dataset: name: MTEB ArguAna (default) type: mteb/arguana config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: main_score value: 40.128 - type: map_at_1 value: 20.341 - type: map_at_10 value: 32.775999999999996 - type: map_at_100 value: 34.026 - type: map_at_1000 value: 34.059 - type: map_at_20 value: 33.588 - type: map_at_3 value: 28.615000000000002 - type: map_at_5 value: 30.824 - type: mrr_at_1 value: 20.62588904694168 - type: mrr_at_10 value: 32.89335388019595 - type: mrr_at_100 value: 34.14268270653552 - type: mrr_at_1000 value: 34.175518754164436 - type: mrr_at_20 value: 33.7081572497899 - type: mrr_at_3 value: 28.73399715504974 - type: mrr_at_5 value: 30.938833570412427 - type: nauc_map_at_1000_diff1 value: 7.401396584575148 - type: nauc_map_at_1000_max value: 1.561793336151107 - type: nauc_map_at_1000_std value: 5.33923048933337 - type: nauc_map_at_100_diff1 value: 7.40837757767046 - type: nauc_map_at_100_max value: 1.5985119960125118 - type: nauc_map_at_100_std value: 5.388304311069464 - type: nauc_map_at_10_diff1 value: 7.3682062691330845 - type: nauc_map_at_10_max value: 1.4273735626548079 - type: nauc_map_at_10_std value: 5.0304641139875015 - type: nauc_map_at_1_diff1 value: 7.639815184580624 - type: nauc_map_at_1_max value: -4.027690178292217 - type: nauc_map_at_1_std value: 1.9985652144404256 - type: nauc_map_at_20_diff1 value: 7.39878654703337 - type: nauc_map_at_20_max value: 1.666693315573178 - type: nauc_map_at_20_std value: 5.398958104612419 - type: nauc_map_at_3_diff1 value: 6.890846818575478 - 
type: nauc_map_at_3_max value: -0.32310033961112283 - type: nauc_map_at_3_std value: 3.437881182860949 - type: nauc_map_at_5_diff1 value: 7.137127221923745 - type: nauc_map_at_5_max value: 1.19461097098547 - type: nauc_map_at_5_std value: 4.827553234476681 - type: nauc_mrr_at_1000_diff1 value: 6.456283332332113 - type: nauc_mrr_at_1000_max value: 1.5065670810761536 - type: nauc_mrr_at_1000_std value: 5.220110996894977 - type: nauc_mrr_at_100_diff1 value: 6.464279998869518 - type: nauc_mrr_at_100_max value: 1.5432933404242728 - type: nauc_mrr_at_100_std value: 5.269191190466668 - type: nauc_mrr_at_10_diff1 value: 6.4464671096278705 - type: nauc_mrr_at_10_max value: 1.3819637730098593 - type: nauc_mrr_at_10_std value: 4.927850342994863 - type: nauc_mrr_at_1_diff1 value: 6.62597695774715 - type: nauc_mrr_at_1_max value: -3.252757666749109 - type: nauc_mrr_at_1_std value: 1.8298974419648943 - type: nauc_mrr_at_20_diff1 value: 6.46957522426181 - type: nauc_mrr_at_20_max value: 1.6158979040451888 - type: nauc_mrr_at_20_std value: 5.288493315564287 - type: nauc_mrr_at_3_diff1 value: 5.845929061602997 - type: nauc_mrr_at_3_max value: -0.5075220148677204 - type: nauc_mrr_at_3_std value: 3.3320031665380245 - type: nauc_mrr_at_5_diff1 value: 6.279917481027432 - type: nauc_mrr_at_5_max value: 1.177197305050769 - type: nauc_mrr_at_5_std value: 4.72208597750944 - type: nauc_ndcg_at_1000_diff1 value: 7.537679691067998 - type: nauc_ndcg_at_1000_max value: 3.5509114500043117 - type: nauc_ndcg_at_1000_std value: 7.375893443326363 - type: nauc_ndcg_at_100_diff1 value: 7.843569009078126 - type: nauc_ndcg_at_100_max value: 4.738395735952662 - type: nauc_ndcg_at_100_std value: 8.890391940589863 - type: nauc_ndcg_at_10_diff1 value: 7.8755593191900575 - type: nauc_ndcg_at_10_max value: 4.225562403907249 - type: nauc_ndcg_at_10_std value: 7.349058827076821 - type: nauc_ndcg_at_1_diff1 value: 7.639815184580624 - type: nauc_ndcg_at_1_max value: -4.027690178292217 - type: nauc_ndcg_at_1_std value: 1.9985652144404256 - type: nauc_ndcg_at_20_diff1 value: 8.020639694315895 - type: nauc_ndcg_at_20_max value: 5.290509406500979 - type: nauc_ndcg_at_20_std value: 8.891273142025415 - type: nauc_ndcg_at_3_diff1 value: 6.95862081914132 - type: nauc_ndcg_at_3_max value: 0.810166999166851 - type: nauc_ndcg_at_3_std value: 4.049852198242494 - type: nauc_ndcg_at_5_diff1 value: 7.390022900253804 - type: nauc_ndcg_at_5_max value: 3.5572312546957967 - type: nauc_ndcg_at_5_std value: 6.551950546495465 - type: nauc_precision_at_1000_diff1 value: -25.915341768512246 - type: nauc_precision_at_1000_max value: 33.45682540913843 - type: nauc_precision_at_1000_std value: 59.512947822117425 - type: nauc_precision_at_100_diff1 value: 11.306594631468881 - type: nauc_precision_at_100_max value: 38.39603487924486 - type: nauc_precision_at_100_std value: 53.078997445874265 - type: nauc_precision_at_10_diff1 value: 9.731458734450202 - type: nauc_precision_at_10_max value: 13.666190744172896 - type: nauc_precision_at_10_std value: 15.605041658466092 - type: nauc_precision_at_1_diff1 value: 7.639815184580624 - type: nauc_precision_at_1_max value: -4.027690178292217 - type: nauc_precision_at_1_std value: 1.9985652144404256 - type: nauc_precision_at_20_diff1 value: 11.099142501098678 - type: nauc_precision_at_20_max value: 22.245728980223614 - type: nauc_precision_at_20_std value: 26.30822025447605 - type: nauc_precision_at_3_diff1 value: 7.2132144596256085 - type: nauc_precision_at_3_max value: 3.7686547647068602 - type: nauc_precision_at_3_std value: 
5.692360766234468 - type: nauc_precision_at_5_diff1 value: 8.198162382731578 - type: nauc_precision_at_5_max value: 10.254147227495872 - type: nauc_precision_at_5_std value: 11.550749310135192 - type: nauc_recall_at_1000_diff1 value: -25.915341768509983 - type: nauc_recall_at_1000_max value: 33.45682540913765 - type: nauc_recall_at_1000_std value: 59.512947822118356 - type: nauc_recall_at_100_diff1 value: 11.306594631468771 - type: nauc_recall_at_100_max value: 38.39603487924445 - type: nauc_recall_at_100_std value: 53.07899744587412 - type: nauc_recall_at_10_diff1 value: 9.731458734450223 - type: nauc_recall_at_10_max value: 13.666190744172946 - type: nauc_recall_at_10_std value: 15.605041658466124 - type: nauc_recall_at_1_diff1 value: 7.639815184580624 - type: nauc_recall_at_1_max value: -4.027690178292217 - type: nauc_recall_at_1_std value: 1.9985652144404256 - type: nauc_recall_at_20_diff1 value: 11.099142501098756 - type: nauc_recall_at_20_max value: 22.245728980223685 - type: nauc_recall_at_20_std value: 26.308220254476044 - type: nauc_recall_at_3_diff1 value: 7.213214459625624 - type: nauc_recall_at_3_max value: 3.768654764706886 - type: nauc_recall_at_3_std value: 5.692360766234493 - type: nauc_recall_at_5_diff1 value: 8.19816238273156 - type: nauc_recall_at_5_max value: 10.254147227495896 - type: nauc_recall_at_5_std value: 11.550749310135219 - type: ndcg_at_1 value: 20.341 - type: ndcg_at_10 value: 40.128 - type: ndcg_at_100 value: 46.113 - type: ndcg_at_1000 value: 47.024 - type: ndcg_at_20 value: 43.044 - type: ndcg_at_3 value: 31.465 - type: ndcg_at_5 value: 35.447 - type: precision_at_1 value: 20.341 - type: precision_at_10 value: 6.38 - type: precision_at_100 value: 0.9159999999999999 - type: precision_at_1000 value: 0.099 - type: precision_at_20 value: 3.762 - type: precision_at_3 value: 13.253 - type: precision_at_5 value: 9.886000000000001 - type: recall_at_1 value: 20.341 - type: recall_at_10 value: 63.798 - type: recall_at_100 value: 91.607 - type: recall_at_1000 value: 98.86200000000001 - type: recall_at_20 value: 75.249 - type: recall_at_3 value: 39.757999999999996 - type: recall_at_5 value: 49.431000000000004 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P (default) type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: main_score value: 32.95868401684924 - type: v_measure value: 32.95868401684924 - type: v_measure_std value: 14.056211834923879 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S (default) type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: main_score value: 21.233292246705687 - type: v_measure value: 21.233292246705687 - type: v_measure_std value: 15.425917066814412 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions (default) type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: main_score value: 53.563651922608045 - type: map value: 53.563651922608045 - type: mrr value: 68.38906476718111 - type: nAUC_map_diff1 value: 6.0692473614473474 - type: nAUC_map_max value: 14.344767912076911 - type: nAUC_map_std value: 4.756719465003369 - type: nAUC_mrr_diff1 value: 7.477185458141965 - type: nAUC_mrr_max value: 18.522973771138822 - type: nAUC_mrr_std value: 6.308062345018368 - task: type: STS dataset: name: MTEB BIOSSES (default) type: mteb/biosses-sts config: default split: test 
revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cosine_pearson value: 78.15156521309085 - type: cosine_spearman value: 75.03966163682449 - type: euclidean_pearson value: 51.743736432098494 - type: euclidean_spearman value: 51.92286984480066 - type: main_score value: 75.03966163682449 - type: manhattan_pearson value: 49.81487823004435 - type: manhattan_spearman value: 50.90236466418698 - task: type: Classification dataset: name: MTEB Banking77Classification (default) type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 75.26948051948052 - type: f1 value: 75.30563948359976 - type: f1_weighted value: 75.30563948359976 - type: main_score value: 75.26948051948052 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P (default) type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: main_score value: 27.964590000454358 - type: v_measure value: 27.964590000454358 - type: v_measure_std value: 0.8174675360012013 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S (default) type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: main_score value: 15.33712655362372 - type: v_measure value: 15.33712655362372 - type: v_measure_std value: 1.0429716123901085 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval (default) type: mteb/cqadupstack-android config: default split: test revision: f46a197baaae43b4f621051089b82a364682dfeb metrics: - type: main_score value: 30.791 - type: map_at_1 value: 18.879 - type: map_at_10 value: 25.962000000000003 - type: map_at_100 value: 27.189000000000004 - type: map_at_1000 value: 27.340999999999998 - type: map_at_20 value: 26.648 - type: map_at_3 value: 23.382 - type: map_at_5 value: 25.018 - type: mrr_at_1 value: 24.177396280400572 - type: mrr_at_10 value: 31.407396053318788 - type: mrr_at_100 value: 32.340933039246664 - type: mrr_at_1000 value: 32.41366678033763 - type: mrr_at_20 value: 31.97571104525756 - type: mrr_at_3 value: 29.065331425846452 - type: mrr_at_5 value: 30.596089651883645 - type: nauc_map_at_1000_diff1 value: 44.456227202791275 - type: nauc_map_at_1000_max value: 28.894568989416936 - type: nauc_map_at_1000_std value: -1.962288075614885 - type: nauc_map_at_100_diff1 value: 44.441095448469184 - type: nauc_map_at_100_max value: 28.866553263078664 - type: nauc_map_at_100_std value: -1.991129394854661 - type: nauc_map_at_10_diff1 value: 45.0909449805561 - type: nauc_map_at_10_max value: 28.511499642084726 - type: nauc_map_at_10_std value: -2.698301953031416 - type: nauc_map_at_1_diff1 value: 52.27885405669149 - type: nauc_map_at_1_max value: 29.69077498263335 - type: nauc_map_at_1_std value: -4.040919532605095 - type: nauc_map_at_20_diff1 value: 44.58177336568864 - type: nauc_map_at_20_max value: 28.672635052084395 - type: nauc_map_at_20_std value: -2.105018325313007 - type: nauc_map_at_3_diff1 value: 46.37410842221556 - type: nauc_map_at_3_max value: 28.53003050488544 - type: nauc_map_at_3_std value: -3.474374367660655 - type: nauc_map_at_5_diff1 value: 45.36414878296012 - type: nauc_map_at_5_max value: 28.58185465110964 - type: nauc_map_at_5_std value: -3.178056650787462 - type: nauc_mrr_at_1000_diff1 value: 41.89425040244644 - type: nauc_mrr_at_1000_max value: 29.049439023672107 - type: nauc_mrr_at_1000_std value: -2.0632044066649 - type: nauc_mrr_at_100_diff1 value: 
41.880662457478465 - type: nauc_mrr_at_100_max value: 29.026924769853668 - type: nauc_mrr_at_100_std value: -2.0580522045691447 - type: nauc_mrr_at_10_diff1 value: 42.1765212282714 - type: nauc_mrr_at_10_max value: 29.058594862693106 - type: nauc_mrr_at_10_std value: -2.4755635462110166 - type: nauc_mrr_at_1_diff1 value: 48.58700454240466 - type: nauc_mrr_at_1_max value: 30.216009197975 - type: nauc_mrr_at_1_std value: -4.228213436397275 - type: nauc_mrr_at_20_diff1 value: 41.86874699835266 - type: nauc_mrr_at_20_max value: 28.97913674958661 - type: nauc_mrr_at_20_std value: -2.1185182384148007 - type: nauc_mrr_at_3_diff1 value: 42.91030819018113 - type: nauc_mrr_at_3_max value: 29.538674245252565 - type: nauc_mrr_at_3_std value: -3.7727995650320407 - type: nauc_mrr_at_5_diff1 value: 42.26468329248297 - type: nauc_mrr_at_5_max value: 28.918053805870898 - type: nauc_mrr_at_5_std value: -2.88341037174054 - type: nauc_ndcg_at_1000_diff1 value: 41.13956893768666 - type: nauc_ndcg_at_1000_max value: 29.28417670330021 - type: nauc_ndcg_at_1000_std value: 1.6545355790919596 - type: nauc_ndcg_at_100_diff1 value: 40.33374683864513 - type: nauc_ndcg_at_100_max value: 28.878179659629595 - type: nauc_ndcg_at_100_std value: 1.231146230356752 - type: nauc_ndcg_at_10_diff1 value: 41.57341483796429 - type: nauc_ndcg_at_10_max value: 28.012550727831766 - type: nauc_ndcg_at_10_std value: -1.7182305120052626 - type: nauc_ndcg_at_1_diff1 value: 48.58700454240466 - type: nauc_ndcg_at_1_max value: 30.216009197975 - type: nauc_ndcg_at_1_std value: -4.228213436397275 - type: nauc_ndcg_at_20_diff1 value: 40.51739900449665 - type: nauc_ndcg_at_20_max value: 28.12715921361774 - type: nauc_ndcg_at_20_std value: 0.0711992714959275 - type: nauc_ndcg_at_3_diff1 value: 42.16108868833533 - type: nauc_ndcg_at_3_max value: 28.087620155716948 - type: nauc_ndcg_at_3_std value: -2.9775505646610374 - type: nauc_ndcg_at_5_diff1 value: 41.468379197429414 - type: nauc_ndcg_at_5_max value: 27.89072398280808 - type: nauc_ndcg_at_5_std value: -2.308293103340349 - type: nauc_precision_at_1000_diff1 value: -7.812293981515205 - type: nauc_precision_at_1000_max value: 1.2800889705077507 - type: nauc_precision_at_1000_std value: 5.213164712637001 - type: nauc_precision_at_100_diff1 value: 3.7373639620367785 - type: nauc_precision_at_100_max value: 15.310126733248516 - type: nauc_precision_at_100_std value: 7.140151721931273 - type: nauc_precision_at_10_diff1 value: 23.345977649985667 - type: nauc_precision_at_10_max value: 22.53539048611915 - type: nauc_precision_at_10_std value: 1.6589008683957966 - type: nauc_precision_at_1_diff1 value: 48.58700454240466 - type: nauc_precision_at_1_max value: 30.216009197975 - type: nauc_precision_at_1_std value: -4.228213436397275 - type: nauc_precision_at_20_diff1 value: 14.695564879099567 - type: nauc_precision_at_20_max value: 20.052208357546426 - type: nauc_precision_at_20_std value: 4.244510048549112 - type: nauc_precision_at_3_diff1 value: 31.73861098729183 - type: nauc_precision_at_3_max value: 26.299643772492875 - type: nauc_precision_at_3_std value: -3.2268677243140482 - type: nauc_precision_at_5_diff1 value: 27.936581150982022 - type: nauc_precision_at_5_max value: 24.706520447732448 - type: nauc_precision_at_5_std value: -0.31632649911477195 - type: nauc_recall_at_1000_diff1 value: 25.36253499111099 - type: nauc_recall_at_1000_max value: 30.743022610631776 - type: nauc_recall_at_1000_std value: 33.9542028702222 - type: nauc_recall_at_100_diff1 value: 24.370605667623142 - type: 
nauc_recall_at_100_max value: 25.693677192296615 - type: nauc_recall_at_100_std value: 15.434369306019203 - type: nauc_recall_at_10_diff1 value: 32.92605362103974 - type: nauc_recall_at_10_max value: 24.26992030392243 - type: nauc_recall_at_10_std value: 0.9073283540531526 - type: nauc_recall_at_1_diff1 value: 52.27885405669149 - type: nauc_recall_at_1_max value: 29.69077498263335 - type: nauc_recall_at_1_std value: -4.040919532605095 - type: nauc_recall_at_20_diff1 value: 28.32003727506695 - type: nauc_recall_at_20_max value: 24.18930621444155 - type: nauc_recall_at_20_std value: 7.510592277537581 - type: nauc_recall_at_3_diff1 value: 38.671949123051505 - type: nauc_recall_at_3_max value: 25.710824662526615 - type: nauc_recall_at_3_std value: -2.487013964082508 - type: nauc_recall_at_5_diff1 value: 34.84962799812745 - type: nauc_recall_at_5_max value: 24.33937991345843 - type: nauc_recall_at_5_std value: -1.108880202554027 - type: ndcg_at_1 value: 24.177 - type: ndcg_at_10 value: 30.791 - type: ndcg_at_100 value: 36.378 - type: ndcg_at_1000 value: 39.409 - type: ndcg_at_20 value: 32.977000000000004 - type: ndcg_at_3 value: 26.956999999999997 - type: ndcg_at_5 value: 29.067999999999998 - type: precision_at_1 value: 24.177 - type: precision_at_10 value: 6.023 - type: precision_at_100 value: 1.086 - type: precision_at_1000 value: 0.164 - type: precision_at_20 value: 3.7479999999999998 - type: precision_at_3 value: 13.209000000000001 - type: precision_at_5 value: 9.871 - type: recall_at_1 value: 18.879 - type: recall_at_10 value: 39.559 - type: recall_at_100 value: 64.654 - type: recall_at_1000 value: 84.894 - type: recall_at_20 value: 47.881 - type: recall_at_3 value: 27.938000000000002 - type: recall_at_5 value: 34.007 - task: type: Retrieval dataset: name: MTEB CQADupstackEnglishRetrieval (default) type: mteb/cqadupstack-english config: default split: test revision: ad9991cb51e31e31e430383c75ffb2885547b5f0 metrics: - type: main_score value: 25.673000000000002 - type: map_at_1 value: 16.459 - type: map_at_10 value: 21.874 - type: map_at_100 value: 22.827 - type: map_at_1000 value: 22.945999999999998 - type: map_at_20 value: 22.374 - type: map_at_3 value: 19.983999999999998 - type: map_at_5 value: 21.02 - type: mrr_at_1 value: 20.31847133757962 - type: mrr_at_10 value: 25.877009402487094 - type: mrr_at_100 value: 26.659253263000515 - type: mrr_at_1000 value: 26.727742776098022 - type: mrr_at_20 value: 26.313239123775766 - type: mrr_at_3 value: 23.94904458598725 - type: mrr_at_5 value: 25.009554140127367 - type: nauc_map_at_1000_diff1 value: 41.637269947825054 - type: nauc_map_at_1000_max value: 16.59113648252257 - type: nauc_map_at_1000_std value: -6.240477201515136 - type: nauc_map_at_100_diff1 value: 41.675782570150524 - type: nauc_map_at_100_max value: 16.525875161414767 - type: nauc_map_at_100_std value: -6.384434353664551 - type: nauc_map_at_10_diff1 value: 42.13383842274204 - type: nauc_map_at_10_max value: 16.35088881127558 - type: nauc_map_at_10_std value: -7.037562435047022 - type: nauc_map_at_1_diff1 value: 48.940966610158334 - type: nauc_map_at_1_max value: 18.74015188630454 - type: nauc_map_at_1_std value: -7.227335063509348 - type: nauc_map_at_20_diff1 value: 41.89220712885671 - type: nauc_map_at_20_max value: 16.444595780299423 - type: nauc_map_at_20_std value: -6.692331309627265 - type: nauc_map_at_3_diff1 value: 43.55465148916478 - type: nauc_map_at_3_max value: 16.938965256705846 - type: nauc_map_at_3_std value: -8.023851014601412 - type: nauc_map_at_5_diff1 value: 
42.82187289926132 - type: nauc_map_at_5_max value: 16.602607822208103 - type: nauc_map_at_5_std value: -7.692612912736763 - type: nauc_mrr_at_1000_diff1 value: 38.91948675283936 - type: nauc_mrr_at_1000_max value: 15.601375268198622 - type: nauc_mrr_at_1000_std value: -4.805680593887126 - type: nauc_mrr_at_100_diff1 value: 38.917027457698445 - type: nauc_mrr_at_100_max value: 15.580969466370906 - type: nauc_mrr_at_100_std value: -4.840920053630244 - type: nauc_mrr_at_10_diff1 value: 39.13498152252439 - type: nauc_mrr_at_10_max value: 15.647226027751007 - type: nauc_mrr_at_10_std value: -5.1008916023217665 - type: nauc_mrr_at_1_diff1 value: 45.16159032307842 - type: nauc_mrr_at_1_max value: 19.061148112454546 - type: nauc_mrr_at_1_std value: -4.54525479487334 - type: nauc_mrr_at_20_diff1 value: 38.998500397229336 - type: nauc_mrr_at_20_max value: 15.634021342584656 - type: nauc_mrr_at_20_std value: -4.857431447459852 - type: nauc_mrr_at_3_diff1 value: 40.412877529221376 - type: nauc_mrr_at_3_max value: 16.42127187705688 - type: nauc_mrr_at_3_std value: -5.657543454929466 - type: nauc_mrr_at_5_diff1 value: 39.62935903278265 - type: nauc_mrr_at_5_max value: 15.795834211521376 - type: nauc_mrr_at_5_std value: -5.44501480909826 - type: nauc_ndcg_at_1000_diff1 value: 36.83594719627982 - type: nauc_ndcg_at_1000_max value: 15.56633203676765 - type: nauc_ndcg_at_1000_std value: -2.9558344419291576 - type: nauc_ndcg_at_100_diff1 value: 37.42847527646225 - type: nauc_ndcg_at_100_max value: 14.753904697909023 - type: nauc_ndcg_at_100_std value: -4.366586709299699 - type: nauc_ndcg_at_10_diff1 value: 38.96710005566265 - type: nauc_ndcg_at_10_max value: 14.989864833294083 - type: nauc_ndcg_at_10_std value: -5.644943994329631 - type: nauc_ndcg_at_1_diff1 value: 45.16159032307842 - type: nauc_ndcg_at_1_max value: 19.061148112454546 - type: nauc_ndcg_at_1_std value: -4.54525479487334 - type: nauc_ndcg_at_20_diff1 value: 38.404767298099586 - type: nauc_ndcg_at_20_max value: 15.01231586919081 - type: nauc_ndcg_at_20_std value: -4.927505075358546 - type: nauc_ndcg_at_3_diff1 value: 40.86858923537607 - type: nauc_ndcg_at_3_max value: 16.34017125203612 - type: nauc_ndcg_at_3_std value: -6.883659011926427 - type: nauc_ndcg_at_5_diff1 value: 40.00790367330578 - type: nauc_ndcg_at_5_max value: 15.50166036395446 - type: nauc_ndcg_at_5_std value: -6.706634556657365 - type: nauc_precision_at_1000_diff1 value: -8.750933150383995 - type: nauc_precision_at_1000_max value: 11.680804005659496 - type: nauc_precision_at_1000_std value: 21.042006755869643 - type: nauc_precision_at_100_diff1 value: 6.0540864455269725 - type: nauc_precision_at_100_max value: 13.365924379494365 - type: nauc_precision_at_100_std value: 15.005876440114466 - type: nauc_precision_at_10_diff1 value: 20.991346310141818 - type: nauc_precision_at_10_max value: 12.988269388924193 - type: nauc_precision_at_10_std value: 3.131123855747258 - type: nauc_precision_at_1_diff1 value: 45.16159032307842 - type: nauc_precision_at_1_max value: 19.061148112454546 - type: nauc_precision_at_1_std value: -4.54525479487334 - type: nauc_precision_at_20_diff1 value: 16.533790569862173 - type: nauc_precision_at_20_max value: 13.86552712683143 - type: nauc_precision_at_20_std value: 8.362184812066275 - type: nauc_precision_at_3_diff1 value: 31.15090135460764 - type: nauc_precision_at_3_max value: 14.743141231956372 - type: nauc_precision_at_3_std value: -3.5565947356523666 - type: nauc_precision_at_5_diff1 value: 26.695391493633487 - type: nauc_precision_at_5_max value: 
13.5610621056517 - type: nauc_precision_at_5_std value: -1.1010877027152977 - type: nauc_recall_at_1000_diff1 value: 19.106654619778464 - type: nauc_recall_at_1000_max value: 11.645679730078278 - type: nauc_recall_at_1000_std value: 8.35426365611593 - type: nauc_recall_at_100_diff1 value: 25.50434857534657 - type: nauc_recall_at_100_max value: 8.392667980561376 - type: nauc_recall_at_100_std value: -0.4353104586969332 - type: nauc_recall_at_10_diff1 value: 32.464830875287134 - type: nauc_recall_at_10_max value: 10.556536879256328 - type: nauc_recall_at_10_std value: -4.748466100889759 - type: nauc_recall_at_1_diff1 value: 48.940966610158334 - type: nauc_recall_at_1_max value: 18.74015188630454 - type: nauc_recall_at_1_std value: -7.227335063509348 - type: nauc_recall_at_20_diff1 value: 30.102938979587634 - type: nauc_recall_at_20_max value: 10.448115839029576 - type: nauc_recall_at_20_std value: -2.2111724130886246 - type: nauc_recall_at_3_diff1 value: 37.2732315612864 - type: nauc_recall_at_3_max value: 13.56492402518412 - type: nauc_recall_at_3_std value: -8.801108556510727 - type: nauc_recall_at_5_diff1 value: 35.637334981151184 - type: nauc_recall_at_5_max value: 11.861179086901167 - type: nauc_recall_at_5_std value: -8.21500801753048 - type: ndcg_at_1 value: 20.318 - type: ndcg_at_10 value: 25.673000000000002 - type: ndcg_at_100 value: 29.997 - type: ndcg_at_1000 value: 32.696999999999996 - type: ndcg_at_20 value: 27.178 - type: ndcg_at_3 value: 22.326999999999998 - type: ndcg_at_5 value: 23.829 - type: precision_at_1 value: 20.318 - type: precision_at_10 value: 4.732 - type: precision_at_100 value: 0.8710000000000001 - type: precision_at_1000 value: 0.136 - type: precision_at_20 value: 2.9080000000000004 - type: precision_at_3 value: 10.488 - type: precision_at_5 value: 7.580000000000001 - type: recall_at_1 value: 16.459 - type: recall_at_10 value: 33.085 - type: recall_at_100 value: 51.922000000000004 - type: recall_at_1000 value: 70.22699999999999 - type: recall_at_20 value: 38.434000000000005 - type: recall_at_3 value: 23.569000000000003 - type: recall_at_5 value: 27.467000000000002 - task: type: Retrieval dataset: name: MTEB CQADupstackGamingRetrieval (default) type: mteb/cqadupstack-gaming config: default split: test revision: 4885aa143210c98657558c04aaf3dc47cfb54340 metrics: - type: main_score value: 38.986 - type: map_at_1 value: 25.330000000000002 - type: map_at_10 value: 34.043 - type: map_at_100 value: 35.096 - type: map_at_1000 value: 35.187000000000005 - type: map_at_20 value: 34.677 - type: map_at_3 value: 31.409 - type: map_at_5 value: 32.84 - type: mrr_at_1 value: 29.46708463949843 - type: mrr_at_10 value: 37.31218589839277 - type: mrr_at_100 value: 38.1351169810094 - type: mrr_at_1000 value: 38.19868839691447 - type: mrr_at_20 value: 37.80612825696384 - type: mrr_at_3 value: 35.099268547544376 - type: mrr_at_5 value: 36.33751306165097 - type: nauc_map_at_1000_diff1 value: 38.09615764738253 - type: nauc_map_at_1000_max value: 23.894623474509704 - type: nauc_map_at_1000_std value: -9.947177464659458 - type: nauc_map_at_100_diff1 value: 38.08108976003119 - type: nauc_map_at_100_max value: 23.869148167336196 - type: nauc_map_at_100_std value: -10.003553279754092 - type: nauc_map_at_10_diff1 value: 38.25740433334989 - type: nauc_map_at_10_max value: 23.785465546284172 - type: nauc_map_at_10_std value: -10.452889228473786 - type: nauc_map_at_1_diff1 value: 43.28375119353145 - type: nauc_map_at_1_max value: 21.076914448969138 - type: nauc_map_at_1_std value: 
-11.917875531984516 - type: nauc_map_at_20_diff1 value: 38.087483292691125 - type: nauc_map_at_20_max value: 23.745762722212174 - type: nauc_map_at_20_std value: -10.188400007740544 - type: nauc_map_at_3_diff1 value: 39.02135534906605 - type: nauc_map_at_3_max value: 22.261857962485614 - type: nauc_map_at_3_std value: -11.218451144804735 - type: nauc_map_at_5_diff1 value: 38.36047944101843 - type: nauc_map_at_5_max value: 23.269644562827786 - type: nauc_map_at_5_std value: -10.897871565542944 - type: nauc_mrr_at_1000_diff1 value: 37.93804723561869 - type: nauc_mrr_at_1000_max value: 25.571098162654405 - type: nauc_mrr_at_1000_std value: -8.704999474543618 - type: nauc_mrr_at_100_diff1 value: 37.92310191535837 - type: nauc_mrr_at_100_max value: 25.57175036578129 - type: nauc_mrr_at_100_std value: -8.708180518355748 - type: nauc_mrr_at_10_diff1 value: 37.97809026791926 - type: nauc_mrr_at_10_max value: 25.63957167660086 - type: nauc_mrr_at_10_std value: -8.99382505103242 - type: nauc_mrr_at_1_diff1 value: 42.673936867340196 - type: nauc_mrr_at_1_max value: 24.588727343373517 - type: nauc_mrr_at_1_std value: -10.825225740147475 - type: nauc_mrr_at_20_diff1 value: 37.91184185158596 - type: nauc_mrr_at_20_max value: 25.55489089735191 - type: nauc_mrr_at_20_std value: -8.805010815104266 - type: nauc_mrr_at_3_diff1 value: 38.332559878764975 - type: nauc_mrr_at_3_max value: 24.88888721202596 - type: nauc_mrr_at_3_std value: -9.577620459172344 - type: nauc_mrr_at_5_diff1 value: 38.119522912899214 - type: nauc_mrr_at_5_max value: 25.389308074746264 - type: nauc_mrr_at_5_std value: -9.420117810095647 - type: nauc_ndcg_at_1000_diff1 value: 36.04814318993257 - type: nauc_ndcg_at_1000_max value: 25.629240587791852 - type: nauc_ndcg_at_1000_std value: -6.304619168315785 - type: nauc_ndcg_at_100_diff1 value: 35.73222645729477 - type: nauc_ndcg_at_100_max value: 25.327218124236854 - type: nauc_ndcg_at_100_std value: -7.141414148248798 - type: nauc_ndcg_at_10_diff1 value: 36.41962866348755 - type: nauc_ndcg_at_10_max value: 25.28971882691592 - type: nauc_ndcg_at_10_std value: -8.964041127801968 - type: nauc_ndcg_at_1_diff1 value: 42.673936867340196 - type: nauc_ndcg_at_1_max value: 24.588727343373517 - type: nauc_ndcg_at_1_std value: -10.825225740147475 - type: nauc_ndcg_at_20_diff1 value: 35.90062773134161 - type: nauc_ndcg_at_20_max value: 24.94434494812649 - type: nauc_ndcg_at_20_std value: -8.211511420856532 - type: nauc_ndcg_at_3_diff1 value: 37.31111103528731 - type: nauc_ndcg_at_3_max value: 23.168325165671906 - type: nauc_ndcg_at_3_std value: -10.288195113268173 - type: nauc_ndcg_at_5_diff1 value: 36.56915893185673 - type: nauc_ndcg_at_5_max value: 24.49830143137805 - type: nauc_ndcg_at_5_std value: -9.928711681324712 - type: nauc_precision_at_1000_diff1 value: -0.4915748078092766 - type: nauc_precision_at_1000_max value: 22.018785354600197 - type: nauc_precision_at_1000_std value: 24.3024203171053 - type: nauc_precision_at_100_diff1 value: 7.258014161926564 - type: nauc_precision_at_100_max value: 24.993395039088877 - type: nauc_precision_at_100_std value: 12.983115281032589 - type: nauc_precision_at_10_diff1 value: 20.00272367281203 - type: nauc_precision_at_10_max value: 28.232043137285885 - type: nauc_precision_at_10_std value: -0.2463271287654738 - type: nauc_precision_at_1_diff1 value: 42.673936867340196 - type: nauc_precision_at_1_max value: 24.588727343373517 - type: nauc_precision_at_1_std value: -10.825225740147475 - type: nauc_precision_at_20_diff1 value: 15.290335230182073 - type: 
nauc_precision_at_20_max value: 25.80660197127879 - type: nauc_precision_at_20_std value: 4.148498326226922 - type: nauc_precision_at_3_diff1 value: 28.422442276709226 - type: nauc_precision_at_3_max value: 25.503751027805325 - type: nauc_precision_at_3_std value: -7.4647647287542656 - type: nauc_precision_at_5_diff1 value: 23.854814229085903 - type: nauc_precision_at_5_max value: 27.71698563459126 - type: nauc_precision_at_5_std value: -5.274148324980073 - type: nauc_recall_at_1000_diff1 value: 17.51693332840367 - type: nauc_recall_at_1000_max value: 31.688697202281503 - type: nauc_recall_at_1000_std value: 31.218820053837792 - type: nauc_recall_at_100_diff1 value: 23.509641469385155 - type: nauc_recall_at_100_max value: 26.275038797413252 - type: nauc_recall_at_100_std value: 4.849278478529801 - type: nauc_recall_at_10_diff1 value: 30.07169635860847 - type: nauc_recall_at_10_max value: 26.35865408096697 - type: nauc_recall_at_10_std value: -5.836701476675884 - type: nauc_recall_at_1_diff1 value: 43.28375119353145 - type: nauc_recall_at_1_max value: 21.076914448969138 - type: nauc_recall_at_1_std value: -11.917875531984516 - type: nauc_recall_at_20_diff1 value: 27.50066514595977 - type: nauc_recall_at_20_max value: 25.0642330157364 - type: nauc_recall_at_20_std value: -2.9057702923451783 - type: nauc_recall_at_3_diff1 value: 33.12832620655904 - type: nauc_recall_at_3_max value: 21.570495743954 - type: nauc_recall_at_3_std value: -9.681328127235362 - type: nauc_recall_at_5_diff1 value: 30.91581180094247 - type: nauc_recall_at_5_max value: 24.26475239191071 - type: nauc_recall_at_5_std value: -8.582674974062504 - type: ndcg_at_1 value: 29.467 - type: ndcg_at_10 value: 38.986 - type: ndcg_at_100 value: 43.91 - type: ndcg_at_1000 value: 46.093 - type: ndcg_at_20 value: 40.971000000000004 - type: ndcg_at_3 value: 34.226 - type: ndcg_at_5 value: 36.418 - type: precision_at_1 value: 29.467 - type: precision_at_10 value: 6.458 - type: precision_at_100 value: 0.9809999999999999 - type: precision_at_1000 value: 0.124 - type: precision_at_20 value: 3.7769999999999997 - type: precision_at_3 value: 15.360999999999999 - type: precision_at_5 value: 10.734 - type: recall_at_1 value: 25.330000000000002 - type: recall_at_10 value: 50.79900000000001 - type: recall_at_100 value: 73.172 - type: recall_at_1000 value: 89.054 - type: recall_at_20 value: 58.119 - type: recall_at_3 value: 37.907999999999994 - type: recall_at_5 value: 43.261 - task: type: Retrieval dataset: name: MTEB CQADupstackGisRetrieval (default) type: mteb/cqadupstack-gis config: default split: test revision: 5003b3064772da1887988e05400cf3806fe491f2 metrics: - type: main_score value: 18.881 - type: map_at_1 value: 10.791 - type: map_at_10 value: 15.844 - type: map_at_100 value: 16.56 - type: map_at_1000 value: 16.669999999999998 - type: map_at_20 value: 16.197 - type: map_at_3 value: 14.274999999999999 - type: map_at_5 value: 15.146 - type: mrr_at_1 value: 11.751412429378531 - type: mrr_at_10 value: 17.049860999013532 - type: mrr_at_100 value: 17.774178408380152 - type: mrr_at_1000 value: 17.871156062798512 - type: mrr_at_20 value: 17.41500493844777 - type: mrr_at_3 value: 15.442561205273064 - type: mrr_at_5 value: 16.36346516007532 - type: nauc_map_at_1000_diff1 value: 38.692361712072014 - type: nauc_map_at_1000_max value: 17.692322581386893 - type: nauc_map_at_1000_std value: -13.064618086479255 - type: nauc_map_at_100_diff1 value: 38.7043527447423 - type: nauc_map_at_100_max value: 17.667592055121595 - type: nauc_map_at_100_std value: 
-13.071618528123421 - type: nauc_map_at_10_diff1 value: 39.584070192333606 - type: nauc_map_at_10_max value: 18.015632179001035 - type: nauc_map_at_10_std value: -13.26822502903302 - type: nauc_map_at_1_diff1 value: 48.779222058335876 - type: nauc_map_at_1_max value: 21.505143048235485 - type: nauc_map_at_1_std value: -16.932904440293832 - type: nauc_map_at_20_diff1 value: 39.09132455115426 - type: nauc_map_at_20_max value: 17.788354549216788 - type: nauc_map_at_20_std value: -13.371296692449636 - type: nauc_map_at_3_diff1 value: 40.95886430588311 - type: nauc_map_at_3_max value: 18.857397432495297 - type: nauc_map_at_3_std value: -14.028315995215204 - type: nauc_map_at_5_diff1 value: 40.44874467422049 - type: nauc_map_at_5_max value: 18.548110993077046 - type: nauc_map_at_5_std value: -13.61022223687226 - type: nauc_mrr_at_1000_diff1 value: 36.7193927513182 - type: nauc_mrr_at_1000_max value: 18.73637744085153 - type: nauc_mrr_at_1000_std value: -11.235985814941158 - type: nauc_mrr_at_100_diff1 value: 36.693515447913896 - type: nauc_mrr_at_100_max value: 18.727344776607275 - type: nauc_mrr_at_100_std value: -11.229192225195076 - type: nauc_mrr_at_10_diff1 value: 37.45987895929495 - type: nauc_mrr_at_10_max value: 18.96374841935309 - type: nauc_mrr_at_10_std value: -11.376684204867754 - type: nauc_mrr_at_1_diff1 value: 46.3329948406647 - type: nauc_mrr_at_1_max value: 22.58241449034732 - type: nauc_mrr_at_1_std value: -14.985915561174265 - type: nauc_mrr_at_20_diff1 value: 36.97235304433472 - type: nauc_mrr_at_20_max value: 18.79058985497966 - type: nauc_mrr_at_20_std value: -11.476774180553695 - type: nauc_mrr_at_3_diff1 value: 38.611145160544844 - type: nauc_mrr_at_3_max value: 19.815120093553237 - type: nauc_mrr_at_3_std value: -11.861655358986162 - type: nauc_mrr_at_5_diff1 value: 38.09531676757352 - type: nauc_mrr_at_5_max value: 19.552123406089304 - type: nauc_mrr_at_5_std value: -11.6093527230711 - type: nauc_ndcg_at_1000_diff1 value: 33.19042573545908 - type: nauc_ndcg_at_1000_max value: 16.000702471588845 - type: nauc_ndcg_at_1000_std value: -9.926227342622978 - type: nauc_ndcg_at_100_diff1 value: 32.66081576286852 - type: nauc_ndcg_at_100_max value: 15.391084394561682 - type: nauc_ndcg_at_100_std value: -10.044151194544128 - type: nauc_ndcg_at_10_diff1 value: 36.125849520573354 - type: nauc_ndcg_at_10_max value: 16.90967046038526 - type: nauc_ndcg_at_10_std value: -11.394344719759571 - type: nauc_ndcg_at_1_diff1 value: 46.3329948406647 - type: nauc_ndcg_at_1_max value: 22.58241449034732 - type: nauc_ndcg_at_1_std value: -14.985915561174265 - type: nauc_ndcg_at_20_diff1 value: 34.634586401396426 - type: nauc_ndcg_at_20_max value: 16.182790214690677 - type: nauc_ndcg_at_20_std value: -11.722511937010736 - type: nauc_ndcg_at_3_diff1 value: 38.38034694109127 - type: nauc_ndcg_at_3_max value: 18.51701344554455 - type: nauc_ndcg_at_3_std value: -12.637679081561009 - type: nauc_ndcg_at_5_diff1 value: 37.64871975849486 - type: nauc_ndcg_at_5_max value: 18.05649117338428 - type: nauc_ndcg_at_5_std value: -11.99914557700529 - type: nauc_precision_at_1000_diff1 value: 3.792653576462921 - type: nauc_precision_at_1000_max value: 12.024262150860123 - type: nauc_precision_at_1000_std value: 2.974092044953344 - type: nauc_precision_at_100_diff1 value: 12.422313461081586 - type: nauc_precision_at_100_max value: 9.424727981669012 - type: nauc_precision_at_100_std value: -2.6577032016620232 - type: nauc_precision_at_10_diff1 value: 26.443014253849444 - type: nauc_precision_at_10_max value: 
14.770065276963695 - type: nauc_precision_at_10_std value: -6.858833930698038 - type: nauc_precision_at_1_diff1 value: 46.3329948406647 - type: nauc_precision_at_1_max value: 22.58241449034732 - type: nauc_precision_at_1_std value: -14.985915561174265 - type: nauc_precision_at_20_diff1 value: 21.972912383602093 - type: nauc_precision_at_20_max value: 12.77220750853116 - type: nauc_precision_at_20_std value: -7.532078699244693 - type: nauc_precision_at_3_diff1 value: 31.29954313202452 - type: nauc_precision_at_3_max value: 18.476538158784496 - type: nauc_precision_at_3_std value: -9.281358676033744 - type: nauc_precision_at_5_diff1 value: 29.14285563901319 - type: nauc_precision_at_5_max value: 17.515141289970586 - type: nauc_precision_at_5_std value: -8.18136686685708 - type: nauc_recall_at_1000_diff1 value: 18.10916567751394 - type: nauc_recall_at_1000_max value: 7.906604050200597 - type: nauc_recall_at_1000_std value: 0.6727721895282065 - type: nauc_recall_at_100_diff1 value: 18.26847397737223 - type: nauc_recall_at_100_max value: 7.66856664527548 - type: nauc_recall_at_100_std value: -2.725197238603163 - type: nauc_recall_at_10_diff1 value: 29.700449805842556 - type: nauc_recall_at_10_max value: 13.317333676394206 - type: nauc_recall_at_10_std value: -8.521551105702333 - type: nauc_recall_at_1_diff1 value: 48.779222058335876 - type: nauc_recall_at_1_max value: 21.505143048235485 - type: nauc_recall_at_1_std value: -16.932904440293832 - type: nauc_recall_at_20_diff1 value: 25.375460842515707 - type: nauc_recall_at_20_max value: 10.985828866751106 - type: nauc_recall_at_20_std value: -9.509598779492542 - type: nauc_recall_at_3_diff1 value: 34.90840106638141 - type: nauc_recall_at_3_max value: 17.290221003771236 - type: nauc_recall_at_3_std value: -10.705822037733377 - type: nauc_recall_at_5_diff1 value: 32.81949739124929 - type: nauc_recall_at_5_max value: 15.77036997034566 - type: nauc_recall_at_5_std value: -9.58734153088338 - type: ndcg_at_1 value: 11.751000000000001 - type: ndcg_at_10 value: 18.881 - type: ndcg_at_100 value: 22.945 - type: ndcg_at_1000 value: 26.247 - type: ndcg_at_20 value: 20.189 - type: ndcg_at_3 value: 15.716 - type: ndcg_at_5 value: 17.265 - type: precision_at_1 value: 11.751000000000001 - type: precision_at_10 value: 3.085 - type: precision_at_100 value: 0.547 - type: precision_at_1000 value: 0.08800000000000001 - type: precision_at_20 value: 1.8419999999999999 - type: precision_at_3 value: 6.93 - type: precision_at_5 value: 5.04 - type: recall_at_1 value: 10.791 - type: recall_at_10 value: 27.093 - type: recall_at_100 value: 46.757 - type: recall_at_1000 value: 72.682 - type: recall_at_20 value: 32.122 - type: recall_at_3 value: 18.57 - type: recall_at_5 value: 22.277 - task: type: Retrieval dataset: name: MTEB CQADupstackMathematicaRetrieval (default) type: mteb/cqadupstack-mathematica config: default split: test revision: 90fceea13679c63fe563ded68f3b6f06e50061de metrics: - type: main_score value: 11.615 - type: map_at_1 value: 5.537 - type: map_at_10 value: 9.11 - type: map_at_100 value: 9.958 - type: map_at_1000 value: 10.077 - type: map_at_20 value: 9.554 - type: map_at_3 value: 7.8340000000000005 - type: map_at_5 value: 8.677 - type: mrr_at_1 value: 6.8407960199004965 - type: mrr_at_10 value: 11.05928689883914 - type: mrr_at_100 value: 11.946615508679292 - type: mrr_at_1000 value: 12.048679891258049 - type: mrr_at_20 value: 11.549913789152972 - type: mrr_at_3 value: 9.597844112769486 - type: mrr_at_5 value: 10.518242122719736 - type: nauc_map_at_1000_diff1 
value: 24.683232918237767 - type: nauc_map_at_1000_max value: 12.449206846599838 - type: nauc_map_at_1000_std value: -3.251223440472864 - type: nauc_map_at_100_diff1 value: 24.73393657885426 - type: nauc_map_at_100_max value: 12.463807219155827 - type: nauc_map_at_100_std value: -3.3483294458285053 - type: nauc_map_at_10_diff1 value: 24.31899681752587 - type: nauc_map_at_10_max value: 13.023772634952646 - type: nauc_map_at_10_std value: -3.6150791371290127 - type: nauc_map_at_1_diff1 value: 38.18226159685399 - type: nauc_map_at_1_max value: 10.47829894921553 - type: nauc_map_at_1_std value: -4.954960475972177 - type: nauc_map_at_20_diff1 value: 24.350643352210284 - type: nauc_map_at_20_max value: 12.595008372168268 - type: nauc_map_at_20_std value: -3.521084293072351 - type: nauc_map_at_3_diff1 value: 27.40009351319036 - type: nauc_map_at_3_max value: 12.867564773713205 - type: nauc_map_at_3_std value: -2.6112602224520125 - type: nauc_map_at_5_diff1 value: 25.88069336503963 - type: nauc_map_at_5_max value: 13.087163173214385 - type: nauc_map_at_5_std value: -3.8501182077778084 - type: nauc_mrr_at_1000_diff1 value: 24.833043408642276 - type: nauc_mrr_at_1000_max value: 13.485753030408922 - type: nauc_mrr_at_1000_std value: -1.4279117045426035 - type: nauc_mrr_at_100_diff1 value: 24.84946536952306 - type: nauc_mrr_at_100_max value: 13.500721704793284 - type: nauc_mrr_at_100_std value: -1.4624335897751024 - type: nauc_mrr_at_10_diff1 value: 24.47572376601274 - type: nauc_mrr_at_10_max value: 13.824164688515172 - type: nauc_mrr_at_10_std value: -1.6563620168029827 - type: nauc_mrr_at_1_diff1 value: 36.798035231295465 - type: nauc_mrr_at_1_max value: 12.118209984083135 - type: nauc_mrr_at_1_std value: -3.556191126662156 - type: nauc_mrr_at_20_diff1 value: 24.63769144491174 - type: nauc_mrr_at_20_max value: 13.645343593555046 - type: nauc_mrr_at_20_std value: -1.4685519497750792 - type: nauc_mrr_at_3_diff1 value: 27.374336386882025 - type: nauc_mrr_at_3_max value: 14.062466222395765 - type: nauc_mrr_at_3_std value: -0.7537959098644124 - type: nauc_mrr_at_5_diff1 value: 26.122872611046933 - type: nauc_mrr_at_5_max value: 14.183693692153284 - type: nauc_mrr_at_5_std value: -1.7476899034212647 - type: nauc_ndcg_at_1000_diff1 value: 20.52880583008532 - type: nauc_ndcg_at_1000_max value: 12.247864672035966 - type: nauc_ndcg_at_1000_std value: -0.2393336251755205 - type: nauc_ndcg_at_100_diff1 value: 22.10539624821044 - type: nauc_ndcg_at_100_max value: 12.588768934131265 - type: nauc_ndcg_at_100_std value: -1.5036787485747833 - type: nauc_ndcg_at_10_diff1 value: 19.864884263156064 - type: nauc_ndcg_at_10_max value: 13.66330673162907 - type: nauc_ndcg_at_10_std value: -3.088038677870411 - type: nauc_ndcg_at_1_diff1 value: 36.798035231295465 - type: nauc_ndcg_at_1_max value: 12.118209984083135 - type: nauc_ndcg_at_1_std value: -3.556191126662156 - type: nauc_ndcg_at_20_diff1 value: 20.329183608111467 - type: nauc_ndcg_at_20_max value: 12.572397244488709 - type: nauc_ndcg_at_20_std value: -2.833568559123329 - type: nauc_ndcg_at_3_diff1 value: 24.94987224061534 - type: nauc_ndcg_at_3_max value: 13.729467010682969 - type: nauc_ndcg_at_3_std value: -1.6381352723073075 - type: nauc_ndcg_at_5_diff1 value: 22.92518093429556 - type: nauc_ndcg_at_5_max value: 14.004525164909742 - type: nauc_ndcg_at_5_std value: -3.545940567002684 - type: nauc_precision_at_1000_diff1 value: 3.513045849367289 - type: nauc_precision_at_1000_max value: 5.991718237150402 - type: nauc_precision_at_1000_std value: 4.347238645478576 - 
type: nauc_precision_at_100_diff1 value: 14.94558478552141 - type: nauc_precision_at_100_max value: 11.048179849308125 - type: nauc_precision_at_100_std value: 2.109463534516253 - type: nauc_precision_at_10_diff1 value: 10.647162174839298 - type: nauc_precision_at_10_max value: 14.84920377910908 - type: nauc_precision_at_10_std value: -1.2147719811972686 - type: nauc_precision_at_1_diff1 value: 36.798035231295465 - type: nauc_precision_at_1_max value: 12.118209984083135 - type: nauc_precision_at_1_std value: -3.556191126662156 - type: nauc_precision_at_20_diff1 value: 12.98335768221716 - type: nauc_precision_at_20_max value: 11.181649269926806 - type: nauc_precision_at_20_std value: -1.3899440018731142 - type: nauc_precision_at_3_diff1 value: 18.900108852958894 - type: nauc_precision_at_3_max value: 15.17370645022724 - type: nauc_precision_at_3_std value: 0.16611168597321674 - type: nauc_precision_at_5_diff1 value: 16.64518826686918 - type: nauc_precision_at_5_max value: 15.666919642746747 - type: nauc_precision_at_5_std value: -2.120654496956751 - type: nauc_recall_at_1000_diff1 value: 9.015392906829836 - type: nauc_recall_at_1000_max value: 9.785958918488866 - type: nauc_recall_at_1000_std value: 8.098684657511653 - type: nauc_recall_at_100_diff1 value: 18.23092160536796 - type: nauc_recall_at_100_max value: 11.903948189364701 - type: nauc_recall_at_100_std value: 1.9742310316590832 - type: nauc_recall_at_10_diff1 value: 11.003843437201361 - type: nauc_recall_at_10_max value: 14.185823505910772 - type: nauc_recall_at_10_std value: -3.4814252786414777 - type: nauc_recall_at_1_diff1 value: 38.18226159685399 - type: nauc_recall_at_1_max value: 10.47829894921553 - type: nauc_recall_at_1_std value: -4.954960475972177 - type: nauc_recall_at_20_diff1 value: 12.598736542297576 - type: nauc_recall_at_20_max value: 11.166163547883375 - type: nauc_recall_at_20_std value: -2.774706965748006 - type: nauc_recall_at_3_diff1 value: 19.109214186516464 - type: nauc_recall_at_3_max value: 13.916267405252434 - type: nauc_recall_at_3_std value: -1.0124004360750645 - type: nauc_recall_at_5_diff1 value: 16.537466747239122 - type: nauc_recall_at_5_max value: 14.600466427354295 - type: nauc_recall_at_5_std value: -4.754479525799718 - type: ndcg_at_1 value: 6.841 - type: ndcg_at_10 value: 11.615 - type: ndcg_at_100 value: 16.16 - type: ndcg_at_1000 value: 19.689999999999998 - type: ndcg_at_20 value: 13.228000000000002 - type: ndcg_at_3 value: 9.103 - type: ndcg_at_5 value: 10.555 - type: precision_at_1 value: 6.841 - type: precision_at_10 value: 2.2640000000000002 - type: precision_at_100 value: 0.541 - type: precision_at_1000 value: 0.097 - type: precision_at_20 value: 1.5730000000000002 - type: precision_at_3 value: 4.519 - type: precision_at_5 value: 3.6069999999999998 - type: recall_at_1 value: 5.537 - type: recall_at_10 value: 17.435000000000002 - type: recall_at_100 value: 37.811 - type: recall_at_1000 value: 64.11 - type: recall_at_20 value: 23.211000000000002 - type: recall_at_3 value: 10.846 - type: recall_at_5 value: 14.333000000000002 - task: type: Retrieval dataset: name: MTEB CQADupstackPhysicsRetrieval (default) type: mteb/cqadupstack-physics config: default split: test revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4 metrics: - type: main_score value: 26.91 - type: map_at_1 value: 16.883 - type: map_at_10 value: 22.803 - type: map_at_100 value: 23.983999999999998 - type: map_at_1000 value: 24.138 - type: map_at_20 value: 23.439 - type: map_at_3 value: 20.934 - type: map_at_5 value: 21.907 - type: 
mrr_at_1 value: 21.174205967276226 - type: mrr_at_10 value: 27.280046442702833 - type: mrr_at_100 value: 28.23352632853514 - type: mrr_at_1000 value: 28.32443329536469 - type: mrr_at_20 value: 27.818561120709955 - type: mrr_at_3 value: 25.200513314084073 - type: mrr_at_5 value: 26.302534488290025 - type: nauc_map_at_1000_diff1 value: 41.38159396894937 - type: nauc_map_at_1000_max value: 27.075718229872507 - type: nauc_map_at_1000_std value: -0.034938715521092914 - type: nauc_map_at_100_diff1 value: 41.33787954136784 - type: nauc_map_at_100_max value: 27.02570012546652 - type: nauc_map_at_100_std value: -0.13744553361820572 - type: nauc_map_at_10_diff1 value: 41.90648956203455 - type: nauc_map_at_10_max value: 26.84030815813142 - type: nauc_map_at_10_std value: -0.7735994997780928 - type: nauc_map_at_1_diff1 value: 47.159330320351586 - type: nauc_map_at_1_max value: 29.793961305890537 - type: nauc_map_at_1_std value: -1.0546824447786483 - type: nauc_map_at_20_diff1 value: 41.47699374624499 - type: nauc_map_at_20_max value: 26.809111512352107 - type: nauc_map_at_20_std value: -0.3828096047690898 - type: nauc_map_at_3_diff1 value: 43.150522493674096 - type: nauc_map_at_3_max value: 26.577312834071588 - type: nauc_map_at_3_std value: -1.3542004434512425 - type: nauc_map_at_5_diff1 value: 42.56975458189335 - type: nauc_map_at_5_max value: 26.94247470743405 - type: nauc_map_at_5_std value: -0.9794656496391059 - type: nauc_mrr_at_1000_diff1 value: 37.52697018269869 - type: nauc_mrr_at_1000_max value: 28.033039061173433 - type: nauc_mrr_at_1000_std value: 2.4888388677985467 - type: nauc_mrr_at_100_diff1 value: 37.48466838388186 - type: nauc_mrr_at_100_max value: 28.02810525984351 - type: nauc_mrr_at_100_std value: 2.453572141052541 - type: nauc_mrr_at_10_diff1 value: 38.078483092543514 - type: nauc_mrr_at_10_max value: 28.17949106533087 - type: nauc_mrr_at_10_std value: 2.2130266173691133 - type: nauc_mrr_at_1_diff1 value: 42.42342081270958 - type: nauc_mrr_at_1_max value: 31.177722470332508 - type: nauc_mrr_at_1_std value: 2.992917621133945 - type: nauc_mrr_at_20_diff1 value: 37.6303297547515 - type: nauc_mrr_at_20_max value: 28.022077664745655 - type: nauc_mrr_at_20_std value: 2.428673326300689 - type: nauc_mrr_at_3_diff1 value: 38.71875043363434 - type: nauc_mrr_at_3_max value: 27.81584708039136 - type: nauc_mrr_at_3_std value: 1.5526447606078482 - type: nauc_mrr_at_5_diff1 value: 38.39263667742802 - type: nauc_mrr_at_5_max value: 28.405137560145967 - type: nauc_mrr_at_5_std value: 1.8795388857359592 - type: nauc_ndcg_at_1000_diff1 value: 37.3679780288481 - type: nauc_ndcg_at_1000_max value: 27.124961640164013 - type: nauc_ndcg_at_1000_std value: 4.044169498575053 - type: nauc_ndcg_at_100_diff1 value: 36.62729129168551 - type: nauc_ndcg_at_100_max value: 26.712693730742675 - type: nauc_ndcg_at_100_std value: 2.6627981122016644 - type: nauc_ndcg_at_10_diff1 value: 39.0386116034406 - type: nauc_ndcg_at_10_max value: 25.93211191183676 - type: nauc_ndcg_at_10_std value: 0.07395623726935022 - type: nauc_ndcg_at_1_diff1 value: 42.42342081270958 - type: nauc_ndcg_at_1_max value: 31.177722470332508 - type: nauc_ndcg_at_1_std value: 2.992917621133945 - type: nauc_ndcg_at_20_diff1 value: 37.53584200169193 - type: nauc_ndcg_at_20_max value: 25.558468749890896 - type: nauc_ndcg_at_20_std value: 1.2318351799632228 - type: nauc_ndcg_at_3_diff1 value: 40.68885751113596 - type: nauc_ndcg_at_3_max value: 26.382850563433358 - type: nauc_ndcg_at_3_std value: -0.5828913470916129 - type: nauc_ndcg_at_5_diff1 value: 
40.20744862471514 - type: nauc_ndcg_at_5_max value: 26.793282641286925 - type: nauc_ndcg_at_5_std value: -0.21932084429747978 - type: nauc_precision_at_1000_diff1 value: -0.2740031954406055 - type: nauc_precision_at_1000_max value: 15.621744490875047 - type: nauc_precision_at_1000_std value: 16.834234066358007 - type: nauc_precision_at_100_diff1 value: 8.37085037514357 - type: nauc_precision_at_100_max value: 23.932882815066836 - type: nauc_precision_at_100_std value: 14.983655513650115 - type: nauc_precision_at_10_diff1 value: 27.06328840478737 - type: nauc_precision_at_10_max value: 26.51407759856565 - type: nauc_precision_at_10_std value: 6.710414141333863 - type: nauc_precision_at_1_diff1 value: 42.42342081270958 - type: nauc_precision_at_1_max value: 31.177722470332508 - type: nauc_precision_at_1_std value: 2.992917621133945 - type: nauc_precision_at_20_diff1 value: 19.336605064257732 - type: nauc_precision_at_20_max value: 24.081476666220862 - type: nauc_precision_at_20_std value: 10.172257672610973 - type: nauc_precision_at_3_diff1 value: 33.230983381592274 - type: nauc_precision_at_3_max value: 24.989278473276975 - type: nauc_precision_at_3_std value: 1.6437958566367155 - type: nauc_precision_at_5_diff1 value: 30.40684618165986 - type: nauc_precision_at_5_max value: 27.495263396077597 - type: nauc_precision_at_5_std value: 3.8758806968379647 - type: nauc_recall_at_1000_diff1 value: 19.36097686642846 - type: nauc_recall_at_1000_max value: 20.92571079501699 - type: nauc_recall_at_1000_std value: 29.360494025393454 - type: nauc_recall_at_100_diff1 value: 20.66672940547856 - type: nauc_recall_at_100_max value: 21.339005075190045 - type: nauc_recall_at_100_std value: 9.899251322282261 - type: nauc_recall_at_10_diff1 value: 32.330889061583754 - type: nauc_recall_at_10_max value: 20.450766552523 - type: nauc_recall_at_10_std value: -0.2480940006638854 - type: nauc_recall_at_1_diff1 value: 47.159330320351586 - type: nauc_recall_at_1_max value: 29.793961305890537 - type: nauc_recall_at_1_std value: -1.0546824447786483 - type: nauc_recall_at_20_diff1 value: 27.136530982630997 - type: nauc_recall_at_20_max value: 18.547627398945572 - type: nauc_recall_at_20_std value: 3.3977018294296966 - type: nauc_recall_at_3_diff1 value: 37.991255133265504 - type: nauc_recall_at_3_max value: 21.793628819368916 - type: nauc_recall_at_3_std value: -2.511571598486541 - type: nauc_recall_at_5_diff1 value: 36.4110791705403 - type: nauc_recall_at_5_max value: 22.574804328522983 - type: nauc_recall_at_5_std value: -1.3318534898262748 - type: ndcg_at_1 value: 21.174 - type: ndcg_at_10 value: 26.91 - type: ndcg_at_100 value: 32.553 - type: ndcg_at_1000 value: 35.83 - type: ndcg_at_20 value: 28.987000000000002 - type: ndcg_at_3 value: 23.544 - type: ndcg_at_5 value: 24.983 - type: precision_at_1 value: 21.174 - type: precision_at_10 value: 4.918 - type: precision_at_100 value: 0.946 - type: precision_at_1000 value: 0.14200000000000002 - type: precision_at_20 value: 3.114 - type: precision_at_3 value: 11.004 - type: precision_at_5 value: 7.854 - type: recall_at_1 value: 16.883 - type: recall_at_10 value: 34.979 - type: recall_at_100 value: 59.650999999999996 - type: recall_at_1000 value: 82.221 - type: recall_at_20 value: 42.352000000000004 - type: recall_at_3 value: 25.344 - type: recall_at_5 value: 29.205 - task: type: Retrieval dataset: name: MTEB CQADupstackProgrammersRetrieval (default) type: mteb/cqadupstack-programmers config: default split: test revision: 6184bc1440d2dbc7612be22b50686b8826d22b32 metrics: - 
type: main_score value: 20.772 - type: map_at_1 value: 11.594 - type: map_at_10 value: 16.919 - type: map_at_100 value: 18.127 - type: map_at_1000 value: 18.265 - type: map_at_20 value: 17.562 - type: map_at_3 value: 15.110999999999999 - type: map_at_5 value: 16.042 - type: mrr_at_1 value: 14.383561643835616 - type: mrr_at_10 value: 20.43940711748931 - type: mrr_at_100 value: 21.435624667299567 - type: mrr_at_1000 value: 21.527726237560707 - type: mrr_at_20 value: 21.009195052331954 - type: mrr_at_3 value: 18.588280060882802 - type: mrr_at_5 value: 19.552891933028917 - type: nauc_map_at_1000_diff1 value: 33.262795491922596 - type: nauc_map_at_1000_max value: 21.31596789079428 - type: nauc_map_at_1000_std value: 0.7852441803303455 - type: nauc_map_at_100_diff1 value: 33.268752630014234 - type: nauc_map_at_100_max value: 21.25556523340811 - type: nauc_map_at_100_std value: 0.7039138578991556 - type: nauc_map_at_10_diff1 value: 33.35543182094911 - type: nauc_map_at_10_max value: 20.568537341885452 - type: nauc_map_at_10_std value: -0.43247720737574813 - type: nauc_map_at_1_diff1 value: 41.36060666064399 - type: nauc_map_at_1_max value: 21.963186112235483 - type: nauc_map_at_1_std value: -2.501260740491966 - type: nauc_map_at_20_diff1 value: 33.39793495227392 - type: nauc_map_at_20_max value: 21.00785918268415 - type: nauc_map_at_20_std value: 0.17640004795604808 - type: nauc_map_at_3_diff1 value: 34.4973117300038 - type: nauc_map_at_3_max value: 19.632391312902868 - type: nauc_map_at_3_std value: -2.7433813735332633 - type: nauc_map_at_5_diff1 value: 34.08903030512529 - type: nauc_map_at_5_max value: 19.71145539531056 - type: nauc_map_at_5_std value: -1.7888621307481465 - type: nauc_mrr_at_1000_diff1 value: 30.036763252826663 - type: nauc_mrr_at_1000_max value: 22.721775717568743 - type: nauc_mrr_at_1000_std value: 1.5610106615257815 - type: nauc_mrr_at_100_diff1 value: 30.001177715898773 - type: nauc_mrr_at_100_max value: 22.688673505906557 - type: nauc_mrr_at_100_std value: 1.548564147265043 - type: nauc_mrr_at_10_diff1 value: 30.318607759330003 - type: nauc_mrr_at_10_max value: 22.62013842178604 - type: nauc_mrr_at_10_std value: 0.8741126049148205 - type: nauc_mrr_at_1_diff1 value: 37.89568693814772 - type: nauc_mrr_at_1_max value: 24.79729112521682 - type: nauc_mrr_at_1_std value: 0.45441850694116415 - type: nauc_mrr_at_20_diff1 value: 30.095683316159004 - type: nauc_mrr_at_20_max value: 22.660310419347745 - type: nauc_mrr_at_20_std value: 1.2383016742006898 - type: nauc_mrr_at_3_diff1 value: 31.15218479473984 - type: nauc_mrr_at_3_max value: 22.469292588239902 - type: nauc_mrr_at_3_std value: -0.3347771186823999 - type: nauc_mrr_at_5_diff1 value: 30.776401083584382 - type: nauc_mrr_at_5_max value: 22.021753205030357 - type: nauc_mrr_at_5_std value: -0.16866916918985556 - type: nauc_ndcg_at_1000_diff1 value: 29.146312601693747 - type: nauc_ndcg_at_1000_max value: 23.112744430247094 - type: nauc_ndcg_at_1000_std value: 6.653567572547435 - type: nauc_ndcg_at_100_diff1 value: 29.37923677271363 - type: nauc_ndcg_at_100_max value: 22.38970149525569 - type: nauc_ndcg_at_100_std value: 5.931300029746915 - type: nauc_ndcg_at_10_diff1 value: 29.77761043652284 - type: nauc_ndcg_at_10_max value: 20.957056341027606 - type: nauc_ndcg_at_10_std value: 1.6705876092521652 - type: nauc_ndcg_at_1_diff1 value: 37.89568693814772 - type: nauc_ndcg_at_1_max value: 24.79729112521682 - type: nauc_ndcg_at_1_std value: 0.45441850694116415 - type: nauc_ndcg_at_20_diff1 value: 29.69115181780205 - type: 
nauc_ndcg_at_20_max value: 21.812806056707494 - type: nauc_ndcg_at_20_std value: 3.3207687473070355 - type: nauc_ndcg_at_3_diff1 value: 31.71670832179318 - type: nauc_ndcg_at_3_max value: 20.005897851777153 - type: nauc_ndcg_at_3_std value: -1.945060538090631 - type: nauc_ndcg_at_5_diff1 value: 31.341621862768378 - type: nauc_ndcg_at_5_max value: 19.711932402527314 - type: nauc_ndcg_at_5_std value: -0.8986370500002848 - type: nauc_precision_at_1000_diff1 value: -1.80587378767679 - type: nauc_precision_at_1000_max value: 13.27864120357074 - type: nauc_precision_at_1000_std value: 11.35646447991795 - type: nauc_precision_at_100_diff1 value: 10.690493039650848 - type: nauc_precision_at_100_max value: 21.852108406166078 - type: nauc_precision_at_100_std value: 16.448506075274782 - type: nauc_precision_at_10_diff1 value: 19.47826562755573 - type: nauc_precision_at_10_max value: 23.175270348890578 - type: nauc_precision_at_10_std value: 7.092336860578882 - type: nauc_precision_at_1_diff1 value: 37.89568693814772 - type: nauc_precision_at_1_max value: 24.79729112521682 - type: nauc_precision_at_1_std value: 0.45441850694116415 - type: nauc_precision_at_20_diff1 value: 18.025999687256054 - type: nauc_precision_at_20_max value: 23.742299066638676 - type: nauc_precision_at_20_std value: 10.577257417584415 - type: nauc_precision_at_3_diff1 value: 24.999170493212436 - type: nauc_precision_at_3_max value: 20.606004668102138 - type: nauc_precision_at_3_std value: -0.2850972522167497 - type: nauc_precision_at_5_diff1 value: 23.566430582817883 - type: nauc_precision_at_5_max value: 19.922628542285032 - type: nauc_precision_at_5_std value: 1.8893748644482864 - type: nauc_recall_at_1000_diff1 value: 14.639102682660038 - type: nauc_recall_at_1000_max value: 26.572142236799056 - type: nauc_recall_at_1000_std value: 32.38940174413534 - type: nauc_recall_at_100_diff1 value: 20.31142739817575 - type: nauc_recall_at_100_max value: 21.51142155914532 - type: nauc_recall_at_100_std value: 19.67731733250046 - type: nauc_recall_at_10_diff1 value: 22.38727708437124 - type: nauc_recall_at_10_max value: 19.484869364238776 - type: nauc_recall_at_10_std value: 6.173360353625354 - type: nauc_recall_at_1_diff1 value: 41.36060666064399 - type: nauc_recall_at_1_max value: 21.963186112235483 - type: nauc_recall_at_1_std value: -2.501260740491966 - type: nauc_recall_at_20_diff1 value: 21.634328851993583 - type: nauc_recall_at_20_max value: 21.22314972109689 - type: nauc_recall_at_20_std value: 10.321822575671742 - type: nauc_recall_at_3_diff1 value: 27.235698960994014 - type: nauc_recall_at_3_max value: 16.267701020224287 - type: nauc_recall_at_3_std value: -3.2266239973778443 - type: nauc_recall_at_5_diff1 value: 26.102514254904 - type: nauc_recall_at_5_max value: 16.172508338466322 - type: nauc_recall_at_5_std value: -0.5086033534476121 - type: ndcg_at_1 value: 14.384 - type: ndcg_at_10 value: 20.772 - type: ndcg_at_100 value: 26.602999999999998 - type: ndcg_at_1000 value: 29.89 - type: ndcg_at_20 value: 22.961000000000002 - type: ndcg_at_3 value: 17.388 - type: ndcg_at_5 value: 18.787000000000003 - type: precision_at_1 value: 14.384 - type: precision_at_10 value: 4.154999999999999 - type: precision_at_100 value: 0.855 - type: precision_at_1000 value: 0.129 - type: precision_at_20 value: 2.7279999999999998 - type: precision_at_3 value: 8.486 - type: precision_at_5 value: 6.279 - type: recall_at_1 value: 11.594 - type: recall_at_10 value: 28.762999999999998 - type: recall_at_100 value: 54.67100000000001 - type: recall_at_1000 
value: 78.105 - type: recall_at_20 value: 36.708 - type: recall_at_3 value: 19.488 - type: recall_at_5 value: 23.049 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval (default) type: CQADupstackRetrieval_is_a_combined_dataset config: default split: test revision: CQADupstackRetrieval_is_a_combined_dataset metrics: - type: main_score value: 21.864250000000006 - type: ndcg_at_10 value: 21.864250000000006 - task: type: Retrieval dataset: name: MTEB CQADupstackStatsRetrieval (default) type: mteb/cqadupstack-stats config: default split: test revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a metrics: - type: main_score value: 17.529 - type: map_at_1 value: 9.628 - type: map_at_10 value: 14.463000000000001 - type: map_at_100 value: 15.17 - type: map_at_1000 value: 15.27 - type: map_at_20 value: 14.817 - type: map_at_3 value: 13.089999999999998 - type: map_at_5 value: 13.703999999999999 - type: mrr_at_1 value: 11.65644171779141 - type: mrr_at_10 value: 16.46332408218911 - type: mrr_at_100 value: 17.194209080357542 - type: mrr_at_1000 value: 17.285546487758563 - type: mrr_at_20 value: 16.835438815628372 - type: mrr_at_3 value: 15.081799591002035 - type: mrr_at_5 value: 15.7183026584867 - type: nauc_map_at_1000_diff1 value: 24.745069547833133 - type: nauc_map_at_1000_max value: 12.2311307150906 - type: nauc_map_at_1000_std value: 1.672159939231174 - type: nauc_map_at_100_diff1 value: 24.73004472604233 - type: nauc_map_at_100_max value: 12.179108920911679 - type: nauc_map_at_100_std value: 1.6049355454311964 - type: nauc_map_at_10_diff1 value: 25.14349932840619 - type: nauc_map_at_10_max value: 12.429412020446783 - type: nauc_map_at_10_std value: 1.305307255605515 - type: nauc_map_at_1_diff1 value: 31.417658938088493 - type: nauc_map_at_1_max value: 14.698481024465737 - type: nauc_map_at_1_std value: 1.713648870488101 - type: nauc_map_at_20_diff1 value: 25.078930789069286 - type: nauc_map_at_20_max value: 12.339225875456624 - type: nauc_map_at_20_std value: 1.3267901782136668 - type: nauc_map_at_3_diff1 value: 24.887634824567026 - type: nauc_map_at_3_max value: 13.4730845058873 - type: nauc_map_at_3_std value: 1.7415105393757087 - type: nauc_map_at_5_diff1 value: 24.703285086420937 - type: nauc_map_at_5_max value: 12.657053025607537 - type: nauc_map_at_5_std value: 1.2700178671031146 - type: nauc_mrr_at_1000_diff1 value: 26.138222320180915 - type: nauc_mrr_at_1000_max value: 15.673134825000663 - type: nauc_mrr_at_1000_std value: 3.895541215099143 - type: nauc_mrr_at_100_diff1 value: 26.11321305609665 - type: nauc_mrr_at_100_max value: 15.63930697560893 - type: nauc_mrr_at_100_std value: 3.8375334936132424 - type: nauc_mrr_at_10_diff1 value: 26.556834624868465 - type: nauc_mrr_at_10_max value: 15.862950253117766 - type: nauc_mrr_at_10_std value: 3.6335815894977843 - type: nauc_mrr_at_1_diff1 value: 32.65224482557457 - type: nauc_mrr_at_1_max value: 19.60009423463152 - type: nauc_mrr_at_1_std value: 4.477944071474801 - type: nauc_mrr_at_20_diff1 value: 26.459141609196635 - type: nauc_mrr_at_20_max value: 15.825532833656489 - type: nauc_mrr_at_20_std value: 3.640907121438615 - type: nauc_mrr_at_3_diff1 value: 26.080157246672986 - type: nauc_mrr_at_3_max value: 17.215486179933677 - type: nauc_mrr_at_3_std value: 4.617101355725209 - type: nauc_mrr_at_5_diff1 value: 26.16231186962206 - type: nauc_mrr_at_5_max value: 16.377814206094452 - type: nauc_mrr_at_5_std value: 4.0294499741813965 - type: nauc_ndcg_at_1000_diff1 value: 22.3760006380521 - type: nauc_ndcg_at_1000_max value: 
11.167820745459364 - type: nauc_ndcg_at_1000_std value: 4.64834301044508 - type: nauc_ndcg_at_100_diff1 value: 21.8831501214006 - type: nauc_ndcg_at_100_max value: 10.202305205094655 - type: nauc_ndcg_at_100_std value: 3.185443757584521 - type: nauc_ndcg_at_10_diff1 value: 24.246023041707506 - type: nauc_ndcg_at_10_max value: 11.725940900458006 - type: nauc_ndcg_at_10_std value: 1.8396976589611098 - type: nauc_ndcg_at_1_diff1 value: 32.65224482557457 - type: nauc_ndcg_at_1_max value: 19.60009423463152 - type: nauc_ndcg_at_1_std value: 4.477944071474801 - type: nauc_ndcg_at_20_diff1 value: 24.132193295897135 - type: nauc_ndcg_at_20_max value: 11.35174138491149 - type: nauc_ndcg_at_20_std value: 1.7054085400159937 - type: nauc_ndcg_at_3_diff1 value: 23.544626586058385 - type: nauc_ndcg_at_3_max value: 13.674978929974557 - type: nauc_ndcg_at_3_std value: 2.9299702185201677 - type: nauc_ndcg_at_5_diff1 value: 23.33991313564435 - type: nauc_ndcg_at_5_max value: 12.288853399905868 - type: nauc_ndcg_at_5_std value: 1.9515614579903209 - type: nauc_precision_at_1000_diff1 value: 12.95872707457021 - type: nauc_precision_at_1000_max value: 12.036050516594216 - type: nauc_precision_at_1000_std value: 16.600008578952213 - type: nauc_precision_at_100_diff1 value: 13.139487415633106 - type: nauc_precision_at_100_max value: 9.417494003947107 - type: nauc_precision_at_100_std value: 11.738900823126077 - type: nauc_precision_at_10_diff1 value: 23.13031875688454 - type: nauc_precision_at_10_max value: 13.761622928864501 - type: nauc_precision_at_10_std value: 5.751034624604308 - type: nauc_precision_at_1_diff1 value: 32.65224482557457 - type: nauc_precision_at_1_max value: 19.60009423463152 - type: nauc_precision_at_1_std value: 4.477944071474801 - type: nauc_precision_at_20_diff1 value: 22.213759009490285 - type: nauc_precision_at_20_max value: 12.849772267882804 - type: nauc_precision_at_20_std value: 5.518908470286235 - type: nauc_precision_at_3_diff1 value: 20.90728187728363 - type: nauc_precision_at_3_max value: 16.768555028287597 - type: nauc_precision_at_3_std value: 6.323648876001936 - type: nauc_precision_at_5_diff1 value: 21.013985751654815 - type: nauc_precision_at_5_max value: 14.554098534876916 - type: nauc_precision_at_5_std value: 5.345460769434932 - type: nauc_recall_at_1000_diff1 value: 13.019558803790924 - type: nauc_recall_at_1000_max value: 4.681559433425631 - type: nauc_recall_at_1000_std value: 14.52978675424674 - type: nauc_recall_at_100_diff1 value: 12.440632605048178 - type: nauc_recall_at_100_max value: 1.218572373194076 - type: nauc_recall_at_100_std value: 5.124085960572474 - type: nauc_recall_at_10_diff1 value: 20.994262563840387 - type: nauc_recall_at_10_max value: 6.830224132326494 - type: nauc_recall_at_10_std value: 0.8753342641221064 - type: nauc_recall_at_1_diff1 value: 31.417658938088493 - type: nauc_recall_at_1_max value: 14.698481024465737 - type: nauc_recall_at_1_std value: 1.713648870488101 - type: nauc_recall_at_20_diff1 value: 20.861160718899914 - type: nauc_recall_at_20_max value: 5.748341529361438 - type: nauc_recall_at_20_std value: 0.24020073460682165 - type: nauc_recall_at_3_diff1 value: 18.431274803969966 - type: nauc_recall_at_3_max value: 10.592473557682561 - type: nauc_recall_at_3_std value: 2.8012186160825827 - type: nauc_recall_at_5_diff1 value: 18.338800346911565 - type: nauc_recall_at_5_max value: 7.8128202161812945 - type: nauc_recall_at_5_std value: 1.157375547570994 - type: ndcg_at_1 value: 11.655999999999999 - type: ndcg_at_10 value: 17.529 - type: 
ndcg_at_100 value: 21.359 - type: ndcg_at_1000 value: 24.212 - type: ndcg_at_20 value: 18.862000000000002 - type: ndcg_at_3 value: 14.821000000000002 - type: ndcg_at_5 value: 15.806999999999999 - type: precision_at_1 value: 11.655999999999999 - type: precision_at_10 value: 3.021 - type: precision_at_100 value: 0.5349999999999999 - type: precision_at_1000 value: 0.084 - type: precision_at_20 value: 1.833 - type: precision_at_3 value: 6.952999999999999 - type: precision_at_5 value: 4.755 - type: recall_at_1 value: 9.628 - type: recall_at_10 value: 25.201 - type: recall_at_100 value: 43.064 - type: recall_at_1000 value: 64.762 - type: recall_at_20 value: 30.418 - type: recall_at_3 value: 17.441000000000003 - type: recall_at_5 value: 20.079 - task: type: Retrieval dataset: name: MTEB CQADupstackTexRetrieval (default) type: mteb/cqadupstack-tex config: default split: test revision: 46989137a86843e03a6195de44b09deda022eec7 metrics: - type: main_score value: 12.443999999999999 - type: map_at_1 value: 6.557 - type: map_at_10 value: 10.045 - type: map_at_100 value: 10.686 - type: map_at_1000 value: 10.804 - type: map_at_20 value: 10.32 - type: map_at_3 value: 8.879 - type: map_at_5 value: 9.599 - type: mrr_at_1 value: 8.017894012388162 - type: mrr_at_10 value: 12.108923518937278 - type: mrr_at_100 value: 12.782867084978939 - type: mrr_at_1000 value: 12.882306547754846 - type: mrr_at_20 value: 12.40886134937674 - type: mrr_at_3 value: 10.788024776324843 - type: mrr_at_5 value: 11.601858224363372 - type: nauc_map_at_1000_diff1 value: 32.65207872038934 - type: nauc_map_at_1000_max value: 15.3796831155072 - type: nauc_map_at_1000_std value: -2.0284631913694318 - type: nauc_map_at_100_diff1 value: 32.6980604049616 - type: nauc_map_at_100_max value: 15.296695101083902 - type: nauc_map_at_100_std value: -2.1656869010510302 - type: nauc_map_at_10_diff1 value: 33.6176771582625 - type: nauc_map_at_10_max value: 15.317492294272327 - type: nauc_map_at_10_std value: -2.514753327992827 - type: nauc_map_at_1_diff1 value: 44.846987901851705 - type: nauc_map_at_1_max value: 16.92140255796036 - type: nauc_map_at_1_std value: -2.759993870541995 - type: nauc_map_at_20_diff1 value: 33.198923596487504 - type: nauc_map_at_20_max value: 15.244580944712267 - type: nauc_map_at_20_std value: -2.4111035829237473 - type: nauc_map_at_3_diff1 value: 36.72269327063732 - type: nauc_map_at_3_max value: 16.239304706025763 - type: nauc_map_at_3_std value: -2.7517672917082283 - type: nauc_map_at_5_diff1 value: 34.578874725919626 - type: nauc_map_at_5_max value: 15.533855519388934 - type: nauc_map_at_5_std value: -2.6809627993789866 - type: nauc_mrr_at_1000_diff1 value: 30.389656359349686 - type: nauc_mrr_at_1000_max value: 16.67441537363918 - type: nauc_mrr_at_1000_std value: -1.3704521096871414 - type: nauc_mrr_at_100_diff1 value: 30.39705489133467 - type: nauc_mrr_at_100_max value: 16.65953388052282 - type: nauc_mrr_at_100_std value: -1.4155515799129026 - type: nauc_mrr_at_10_diff1 value: 31.093808772992897 - type: nauc_mrr_at_10_max value: 16.739835728088327 - type: nauc_mrr_at_10_std value: -1.8192204545363915 - type: nauc_mrr_at_1_diff1 value: 42.24020206282754 - type: nauc_mrr_at_1_max value: 18.62811529500791 - type: nauc_mrr_at_1_std value: -3.329554678904735 - type: nauc_mrr_at_20_diff1 value: 30.724507241928023 - type: nauc_mrr_at_20_max value: 16.6412080771391 - type: nauc_mrr_at_20_std value: -1.5892437165713176 - type: nauc_mrr_at_3_diff1 value: 34.08441470381647 - type: nauc_mrr_at_3_max value: 17.915657010600984 - type: 
nauc_mrr_at_3_std value: -2.2780151857350814 - type: nauc_mrr_at_5_diff1 value: 31.780239141576583 - type: nauc_mrr_at_5_max value: 16.93463602358056 - type: nauc_mrr_at_5_std value: -1.9834883780660013 - type: nauc_ndcg_at_1000_diff1 value: 25.15335120104252 - type: nauc_ndcg_at_1000_max value: 16.263845464802312 - type: nauc_ndcg_at_1000_std value: 2.410125087100955 - type: nauc_ndcg_at_100_diff1 value: 25.81428051006561 - type: nauc_ndcg_at_100_max value: 14.829210245320729 - type: nauc_ndcg_at_100_std value: -0.08689023270098711 - type: nauc_ndcg_at_10_diff1 value: 29.17693224783663 - type: nauc_ndcg_at_10_max value: 14.905848199498683 - type: nauc_ndcg_at_10_std value: -1.996054601864615 - type: nauc_ndcg_at_1_diff1 value: 42.24020206282754 - type: nauc_ndcg_at_1_max value: 18.62811529500791 - type: nauc_ndcg_at_1_std value: -3.329554678904735 - type: nauc_ndcg_at_20_diff1 value: 28.040580916658364 - type: nauc_ndcg_at_20_max value: 14.607177578819655 - type: nauc_ndcg_at_20_std value: -1.5602788676054553 - type: nauc_ndcg_at_3_diff1 value: 33.89467257334233 - type: nauc_ndcg_at_3_max value: 16.80743117748352 - type: nauc_ndcg_at_3_std value: -2.3084226107164865 - type: nauc_ndcg_at_5_diff1 value: 30.719473158355054 - type: nauc_ndcg_at_5_max value: 15.377162920426812 - type: nauc_ndcg_at_5_std value: -2.2615670747802077 - type: nauc_precision_at_1000_diff1 value: 4.916612061486793 - type: nauc_precision_at_1000_max value: 22.969361069828732 - type: nauc_precision_at_1000_std value: 18.6356088544518 - type: nauc_precision_at_100_diff1 value: 10.558231923280632 - type: nauc_precision_at_100_max value: 18.372817446247367 - type: nauc_precision_at_100_std value: 8.344586790370458 - type: nauc_precision_at_10_diff1 value: 19.9407132138494 - type: nauc_precision_at_10_max value: 16.327306192687878 - type: nauc_precision_at_10_std value: 0.16685419873145663 - type: nauc_precision_at_1_diff1 value: 42.24020206282754 - type: nauc_precision_at_1_max value: 18.62811529500791 - type: nauc_precision_at_1_std value: -3.329554678904735 - type: nauc_precision_at_20_diff1 value: 16.646892238566046 - type: nauc_precision_at_20_max value: 16.637316699800202 - type: nauc_precision_at_20_std value: 2.6934626880556345 - type: nauc_precision_at_3_diff1 value: 28.35394732911168 - type: nauc_precision_at_3_max value: 18.708227286896033 - type: nauc_precision_at_3_std value: -1.7316048532952557 - type: nauc_precision_at_5_diff1 value: 22.637258344117758 - type: nauc_precision_at_5_max value: 16.235324772438183 - type: nauc_precision_at_5_std value: -0.9073652855156291 - type: nauc_recall_at_1000_diff1 value: 7.769963101855644 - type: nauc_recall_at_1000_max value: 16.95143097349464 - type: nauc_recall_at_1000_std value: 15.47110521887779 - type: nauc_recall_at_100_diff1 value: 13.066911377416401 - type: nauc_recall_at_100_max value: 11.619784901704275 - type: nauc_recall_at_100_std value: 3.5395473327823654 - type: nauc_recall_at_10_diff1 value: 21.5626471119534 - type: nauc_recall_at_10_max value: 12.173867348784546 - type: nauc_recall_at_10_std value: -1.6058682655342469 - type: nauc_recall_at_1_diff1 value: 44.846987901851705 - type: nauc_recall_at_1_max value: 16.92140255796036 - type: nauc_recall_at_1_std value: -2.759993870541995 - type: nauc_recall_at_20_diff1 value: 18.777854488746364 - type: nauc_recall_at_20_max value: 11.241858114527968 - type: nauc_recall_at_20_std value: -0.5830148082352531 - type: nauc_recall_at_3_diff1 value: 29.592645115703363 - type: nauc_recall_at_3_max value: 
15.309599487009862 - type: nauc_recall_at_3_std value: -2.569342359215212 - type: nauc_recall_at_5_diff1 value: 24.35312015828605 - type: nauc_recall_at_5_max value: 13.17850031245583 - type: nauc_recall_at_5_std value: -2.3536371984816546 - type: ndcg_at_1 value: 8.018 - type: ndcg_at_10 value: 12.443999999999999 - type: ndcg_at_100 value: 16.116 - type: ndcg_at_1000 value: 19.597 - type: ndcg_at_20 value: 13.427 - type: ndcg_at_3 value: 10.231 - type: ndcg_at_5 value: 11.399 - type: precision_at_1 value: 8.018 - type: precision_at_10 value: 2.378 - type: precision_at_100 value: 0.508 - type: precision_at_1000 value: 0.097 - type: precision_at_20 value: 1.476 - type: precision_at_3 value: 5.0360000000000005 - type: precision_at_5 value: 3.827 - type: recall_at_1 value: 6.557 - type: recall_at_10 value: 17.721 - type: recall_at_100 value: 34.996 - type: recall_at_1000 value: 60.897999999999996 - type: recall_at_20 value: 21.366 - type: recall_at_3 value: 11.618 - type: recall_at_5 value: 14.585999999999999 - task: type: Retrieval dataset: name: MTEB CQADupstackUnixRetrieval (default) type: mteb/cqadupstack-unix config: default split: test revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53 metrics: - type: main_score value: 19.711000000000002 - type: map_at_1 value: 12.16 - type: map_at_10 value: 16.735 - type: map_at_100 value: 17.49 - type: map_at_1000 value: 17.605999999999998 - type: map_at_20 value: 17.133000000000003 - type: map_at_3 value: 15.290999999999999 - type: map_at_5 value: 16.186 - type: mrr_at_1 value: 14.458955223880595 - type: mrr_at_10 value: 19.231743070362473 - type: mrr_at_100 value: 20.028932451430283 - type: mrr_at_1000 value: 20.123333090690128 - type: mrr_at_20 value: 19.67671069767135 - type: mrr_at_3 value: 17.708333333333343 - type: mrr_at_5 value: 18.613184079601997 - type: nauc_map_at_1000_diff1 value: 37.32734680518867 - type: nauc_map_at_1000_max value: 24.030968099609098 - type: nauc_map_at_1000_std value: -7.829648421477282 - type: nauc_map_at_100_diff1 value: 37.366553771960305 - type: nauc_map_at_100_max value: 23.96244415870664 - type: nauc_map_at_100_std value: -7.963447431457271 - type: nauc_map_at_10_diff1 value: 37.63825009720519 - type: nauc_map_at_10_max value: 23.831741442187663 - type: nauc_map_at_10_std value: -8.639346971580641 - type: nauc_map_at_1_diff1 value: 44.00293330504824 - type: nauc_map_at_1_max value: 28.610260969649147 - type: nauc_map_at_1_std value: -8.920133968270129 - type: nauc_map_at_20_diff1 value: 37.41690774575737 - type: nauc_map_at_20_max value: 24.05466724695789 - type: nauc_map_at_20_std value: -8.191886861933238 - type: nauc_map_at_3_diff1 value: 39.44794961214937 - type: nauc_map_at_3_max value: 25.19199372850414 - type: nauc_map_at_3_std value: -8.746945856127951 - type: nauc_map_at_5_diff1 value: 38.08100890718928 - type: nauc_map_at_5_max value: 24.55470931460194 - type: nauc_map_at_5_std value: -8.720841046581775 - type: nauc_mrr_at_1000_diff1 value: 36.90601049409893 - type: nauc_mrr_at_1000_max value: 25.420040608901186 - type: nauc_mrr_at_1000_std value: -6.24180100706028 - type: nauc_mrr_at_100_diff1 value: 36.923381404524015 - type: nauc_mrr_at_100_max value: 25.38556434928676 - type: nauc_mrr_at_100_std value: -6.309900016406157 - type: nauc_mrr_at_10_diff1 value: 37.23900452439483 - type: nauc_mrr_at_10_max value: 25.29723023323045 - type: nauc_mrr_at_10_std value: -6.88021958480657 - type: nauc_mrr_at_1_diff1 value: 43.46378935714078 - type: nauc_mrr_at_1_max value: 29.927650887925733 - type: 
nauc_mrr_at_1_std value: -7.32269982879421 - type: nauc_mrr_at_20_diff1 value: 37.00573427679709 - type: nauc_mrr_at_20_max value: 25.560921795102548 - type: nauc_mrr_at_20_std value: -6.462698863947647 - type: nauc_mrr_at_3_diff1 value: 39.22598180580575 - type: nauc_mrr_at_3_max value: 26.820045533411875 - type: nauc_mrr_at_3_std value: -7.082684949939846 - type: nauc_mrr_at_5_diff1 value: 37.669928969714285 - type: nauc_mrr_at_5_max value: 25.88130718712922 - type: nauc_mrr_at_5_std value: -6.973149985490427 - type: nauc_ndcg_at_1000_diff1 value: 32.568783318562325 - type: nauc_ndcg_at_1000_max value: 23.197510425408808 - type: nauc_ndcg_at_1000_std value: -2.194462348881883 - type: nauc_ndcg_at_100_diff1 value: 33.51118765048162 - type: nauc_ndcg_at_100_max value: 21.956530380832053 - type: nauc_ndcg_at_100_std value: -4.912966259503627 - type: nauc_ndcg_at_10_diff1 value: 34.83192049577735 - type: nauc_ndcg_at_10_max value: 22.00793188583066 - type: nauc_ndcg_at_10_std value: -8.038092629791779 - type: nauc_ndcg_at_1_diff1 value: 43.46378935714078 - type: nauc_ndcg_at_1_max value: 29.927650887925733 - type: nauc_ndcg_at_1_std value: -7.32269982879421 - type: nauc_ndcg_at_20_diff1 value: 34.09161027494196 - type: nauc_ndcg_at_20_max value: 22.80085172928815 - type: nauc_ndcg_at_20_std value: -6.449684776606084 - type: nauc_ndcg_at_3_diff1 value: 37.67731096559351 - type: nauc_ndcg_at_3_max value: 24.91192286661625 - type: nauc_ndcg_at_3_std value: -7.858573743848242 - type: nauc_ndcg_at_5_diff1 value: 35.50714685884494 - type: nauc_ndcg_at_5_max value: 23.450725063747797 - type: nauc_ndcg_at_5_std value: -8.076820492805357 - type: nauc_precision_at_1000_diff1 value: 1.6261342399001975 - type: nauc_precision_at_1000_max value: 16.62857152361468 - type: nauc_precision_at_1000_std value: 20.14197872377733 - type: nauc_precision_at_100_diff1 value: 18.546945979368445 - type: nauc_precision_at_100_max value: 17.9074573235532 - type: nauc_precision_at_100_std value: 7.2071192251551865 - type: nauc_precision_at_10_diff1 value: 26.147790428569678 - type: nauc_precision_at_10_max value: 19.038747350820756 - type: nauc_precision_at_10_std value: -4.461577497765854 - type: nauc_precision_at_1_diff1 value: 43.46378935714078 - type: nauc_precision_at_1_max value: 29.927650887925733 - type: nauc_precision_at_1_std value: -7.32269982879421 - type: nauc_precision_at_20_diff1 value: 23.486834765998893 - type: nauc_precision_at_20_max value: 21.51929896555235 - type: nauc_precision_at_20_std value: 1.2771529201062182 - type: nauc_precision_at_3_diff1 value: 33.51582295076194 - type: nauc_precision_at_3_max value: 24.24618327163066 - type: nauc_precision_at_3_std value: -6.103562407422206 - type: nauc_precision_at_5_diff1 value: 28.84576644797503 - type: nauc_precision_at_5_max value: 21.657452007178552 - type: nauc_precision_at_5_std value: -6.165268201341394 - type: nauc_recall_at_1000_diff1 value: 12.4094254713566 - type: nauc_recall_at_1000_max value: 18.404042388476743 - type: nauc_recall_at_1000_std value: 24.134383715413634 - type: nauc_recall_at_100_diff1 value: 22.03143133906221 - type: nauc_recall_at_100_max value: 13.134997386426056 - type: nauc_recall_at_100_std value: 3.0416018748427422 - type: nauc_recall_at_10_diff1 value: 27.55961989907554 - type: nauc_recall_at_10_max value: 15.275424017836334 - type: nauc_recall_at_10_std value: -8.036960761781968 - type: nauc_recall_at_1_diff1 value: 44.00293330504824 - type: nauc_recall_at_1_max value: 28.610260969649147 - type: nauc_recall_at_1_std 
value: -8.920133968270129 - type: nauc_recall_at_20_diff1 value: 25.066660237089046 - type: nauc_recall_at_20_max value: 17.431869579217718 - type: nauc_recall_at_20_std value: -3.242206535665684 - type: nauc_recall_at_3_diff1 value: 33.77586761575051 - type: nauc_recall_at_3_max value: 21.54071300723871 - type: nauc_recall_at_3_std value: -7.977936599864755 - type: nauc_recall_at_5_diff1 value: 28.508795525192532 - type: nauc_recall_at_5_max value: 18.460621100250922 - type: nauc_recall_at_5_std value: -7.8358348822156305 - type: ndcg_at_1 value: 14.459 - type: ndcg_at_10 value: 19.711000000000002 - type: ndcg_at_100 value: 23.774 - type: ndcg_at_1000 value: 27.060000000000002 - type: ndcg_at_20 value: 21.144 - type: ndcg_at_3 value: 16.975 - type: ndcg_at_5 value: 18.404 - type: precision_at_1 value: 14.459 - type: precision_at_10 value: 3.321 - type: precision_at_100 value: 0.597 - type: precision_at_1000 value: 0.099 - type: precision_at_20 value: 2.024 - type: precision_at_3 value: 7.710999999999999 - type: precision_at_5 value: 5.56 - type: recall_at_1 value: 12.16 - type: recall_at_10 value: 26.418999999999997 - type: recall_at_100 value: 45.019999999999996 - type: recall_at_1000 value: 69.241 - type: recall_at_20 value: 31.680000000000003 - type: recall_at_3 value: 19.003999999999998 - type: recall_at_5 value: 22.558 - task: type: Retrieval dataset: name: MTEB CQADupstackWebmastersRetrieval (default) type: mteb/cqadupstack-webmasters config: default split: test revision: 160c094312a0e1facb97e55eeddb698c0abe3571 metrics: - type: main_score value: 22.777 - type: map_at_1 value: 12.565000000000001 - type: map_at_10 value: 18.531 - type: map_at_100 value: 19.652 - type: map_at_1000 value: 19.839000000000002 - type: map_at_20 value: 19.06 - type: map_at_3 value: 16.235 - type: map_at_5 value: 17.732999999999997 - type: mrr_at_1 value: 16.40316205533597 - type: mrr_at_10 value: 22.286294623251145 - type: mrr_at_100 value: 23.280233595022146 - type: mrr_at_1000 value: 23.35890919603868 - type: mrr_at_20 value: 22.762754595001386 - type: mrr_at_3 value: 20.256916996047426 - type: mrr_at_5 value: 21.630434782608692 - type: nauc_map_at_1000_diff1 value: 32.47228103496472 - type: nauc_map_at_1000_max value: 17.36438672012039 - type: nauc_map_at_1000_std value: -2.451193759562851 - type: nauc_map_at_100_diff1 value: 32.511726288124144 - type: nauc_map_at_100_max value: 17.316803786503694 - type: nauc_map_at_100_std value: -2.511214452087731 - type: nauc_map_at_10_diff1 value: 32.370423115473635 - type: nauc_map_at_10_max value: 17.04947773363954 - type: nauc_map_at_10_std value: -3.1281688945461275 - type: nauc_map_at_1_diff1 value: 44.89994966145238 - type: nauc_map_at_1_max value: 20.75776675745789 - type: nauc_map_at_1_std value: -5.845655694298756 - type: nauc_map_at_20_diff1 value: 32.63098666413862 - type: nauc_map_at_20_max value: 17.334066548532277 - type: nauc_map_at_20_std value: -2.7560061957765702 - type: nauc_map_at_3_diff1 value: 34.93440597530776 - type: nauc_map_at_3_max value: 17.08985849657863 - type: nauc_map_at_3_std value: -5.64947905087099 - type: nauc_map_at_5_diff1 value: 32.33013271500631 - type: nauc_map_at_5_max value: 16.551952430112685 - type: nauc_map_at_5_std value: -4.007998873265845 - type: nauc_mrr_at_1000_diff1 value: 30.32862869494007 - type: nauc_mrr_at_1000_max value: 16.00324601080758 - type: nauc_mrr_at_1000_std value: -3.3071695305368665 - type: nauc_mrr_at_100_diff1 value: 30.29853540714109 - type: nauc_mrr_at_100_max value: 15.98700586397874 - type: 
nauc_mrr_at_100_std value: -3.304588174231291 - type: nauc_mrr_at_10_diff1 value: 30.07467400035378 - type: nauc_mrr_at_10_max value: 15.4243644038634 - type: nauc_mrr_at_10_std value: -3.8133663710085037 - type: nauc_mrr_at_1_diff1 value: 39.53137251195742 - type: nauc_mrr_at_1_max value: 18.7427792052109 - type: nauc_mrr_at_1_std value: -7.049618117645837 - type: nauc_mrr_at_20_diff1 value: 30.476741278191984 - type: nauc_mrr_at_20_max value: 15.871647682916246 - type: nauc_mrr_at_20_std value: -3.3534144348402637 - type: nauc_mrr_at_3_diff1 value: 32.10935190389665 - type: nauc_mrr_at_3_max value: 15.195725472701655 - type: nauc_mrr_at_3_std value: -6.31736752169466 - type: nauc_mrr_at_5_diff1 value: 30.064890337610024 - type: nauc_mrr_at_5_max value: 14.95433853424041 - type: nauc_mrr_at_5_std value: -4.4862049600454545 - type: nauc_ndcg_at_1000_diff1 value: 28.735932987621695 - type: nauc_ndcg_at_1000_max value: 18.499379958394112 - type: nauc_ndcg_at_1000_std value: 2.287230702290486 - type: nauc_ndcg_at_100_diff1 value: 28.07228380867368 - type: nauc_ndcg_at_100_max value: 17.438083290386906 - type: nauc_ndcg_at_100_std value: 1.651284470756386 - type: nauc_ndcg_at_10_diff1 value: 28.250609933026983 - type: nauc_ndcg_at_10_max value: 15.624663744625632 - type: nauc_ndcg_at_10_std value: -1.4903481449272067 - type: nauc_ndcg_at_1_diff1 value: 39.53137251195742 - type: nauc_ndcg_at_1_max value: 18.7427792052109 - type: nauc_ndcg_at_1_std value: -7.049618117645837 - type: nauc_ndcg_at_20_diff1 value: 29.05076544905671 - type: nauc_ndcg_at_20_max value: 17.127153425226027 - type: nauc_ndcg_at_20_std value: -0.04561574524301717 - type: nauc_ndcg_at_3_diff1 value: 31.361003442326606 - type: nauc_ndcg_at_3_max value: 14.639094405606897 - type: nauc_ndcg_at_3_std value: -6.479703843701494 - type: nauc_ndcg_at_5_diff1 value: 28.141888428115852 - type: nauc_ndcg_at_5_max value: 14.17579858002202 - type: nauc_ndcg_at_5_std value: -3.215385996437239 - type: nauc_precision_at_1000_diff1 value: 0.44388309909094337 - type: nauc_precision_at_1000_max value: 10.91662398006627 - type: nauc_precision_at_1000_std value: 7.471065515098385 - type: nauc_precision_at_100_diff1 value: 4.5410780139270805 - type: nauc_precision_at_100_max value: 5.393171361851609 - type: nauc_precision_at_100_std value: 6.244061856434465 - type: nauc_precision_at_10_diff1 value: 15.248610611616325 - type: nauc_precision_at_10_max value: 9.966492166867939 - type: nauc_precision_at_10_std value: 0.33164335152610785 - type: nauc_precision_at_1_diff1 value: 39.53137251195742 - type: nauc_precision_at_1_max value: 18.7427792052109 - type: nauc_precision_at_1_std value: -7.049618117645837 - type: nauc_precision_at_20_diff1 value: 15.000587297689805 - type: nauc_precision_at_20_max value: 11.889744005776382 - type: nauc_precision_at_20_std value: 3.727427924339134 - type: nauc_precision_at_3_diff1 value: 21.50644576616347 - type: nauc_precision_at_3_max value: 10.271555217413596 - type: nauc_precision_at_3_std value: -8.701406474268508 - type: nauc_precision_at_5_diff1 value: 14.727982007015045 - type: nauc_precision_at_5_max value: 8.680957869264917 - type: nauc_precision_at_5_std value: -2.3797064194163378 - type: nauc_recall_at_1000_diff1 value: 15.367491508251302 - type: nauc_recall_at_1000_max value: 27.487867625988372 - type: nauc_recall_at_1000_std value: 30.294008877208544 - type: nauc_recall_at_100_diff1 value: 15.071272555237442 - type: nauc_recall_at_100_max value: 17.50074421497179 - type: nauc_recall_at_100_std value: 
14.846023410230222 - type: nauc_recall_at_10_diff1 value: 18.994419200141266 - type: nauc_recall_at_10_max value: 13.843239964438547 - type: nauc_recall_at_10_std value: 3.7435701342911654 - type: nauc_recall_at_1_diff1 value: 44.89994966145238 - type: nauc_recall_at_1_max value: 20.75776675745789 - type: nauc_recall_at_1_std value: -5.845655694298756 - type: nauc_recall_at_20_diff1 value: 21.204455024518406 - type: nauc_recall_at_20_max value: 17.26124167759398 - type: nauc_recall_at_20_std value: 6.866640851457614 - type: nauc_recall_at_3_diff1 value: 26.279990178827262 - type: nauc_recall_at_3_max value: 13.496270253906292 - type: nauc_recall_at_3_std value: -5.38407111672445 - type: nauc_recall_at_5_diff1 value: 19.289091779649087 - type: nauc_recall_at_5_max value: 11.636893185484196 - type: nauc_recall_at_5_std value: 0.012873097179916523 - type: ndcg_at_1 value: 16.403000000000002 - type: ndcg_at_10 value: 22.777 - type: ndcg_at_100 value: 27.951999999999998 - type: ndcg_at_1000 value: 31.439 - type: ndcg_at_20 value: 24.377 - type: ndcg_at_3 value: 19.15 - type: ndcg_at_5 value: 21.279999999999998 - type: precision_at_1 value: 16.403000000000002 - type: precision_at_10 value: 4.545 - type: precision_at_100 value: 1.042 - type: precision_at_1000 value: 0.192 - type: precision_at_20 value: 2.915 - type: precision_at_3 value: 9.42 - type: precision_at_5 value: 7.391 - type: recall_at_1 value: 12.565000000000001 - type: recall_at_10 value: 31.104 - type: recall_at_100 value: 55.35 - type: recall_at_1000 value: 78.964 - type: recall_at_20 value: 37.531 - type: recall_at_3 value: 20.152 - type: recall_at_5 value: 25.884 - task: type: Retrieval dataset: name: MTEB CQADupstackWordpressRetrieval (default) type: mteb/cqadupstack-wordpress config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: main_score value: 16.282 - type: map_at_1 value: 9.649000000000001 - type: map_at_10 value: 13.602 - type: map_at_100 value: 14.382 - type: map_at_1000 value: 14.501 - type: map_at_20 value: 13.966000000000001 - type: map_at_3 value: 12.303 - type: map_at_5 value: 12.916 - type: mrr_at_1 value: 10.35120147874307 - type: mrr_at_10 value: 14.749361851949661 - type: mrr_at_100 value: 15.50519445622732 - type: mrr_at_1000 value: 15.613102138341636 - type: mrr_at_20 value: 15.10456339500994 - type: mrr_at_3 value: 13.308687615526798 - type: mrr_at_5 value: 14.011090573012941 - type: nauc_map_at_1000_diff1 value: 25.406827868560818 - type: nauc_map_at_1000_max value: 19.15293914417803 - type: nauc_map_at_1000_std value: -4.8359101644919615 - type: nauc_map_at_100_diff1 value: 25.397146242594026 - type: nauc_map_at_100_max value: 19.17433365762207 - type: nauc_map_at_100_std value: -4.88548425551582 - type: nauc_map_at_10_diff1 value: 26.07954715203469 - type: nauc_map_at_10_max value: 19.187658866034997 - type: nauc_map_at_10_std value: -5.418483452804894 - type: nauc_map_at_1_diff1 value: 34.725976334624 - type: nauc_map_at_1_max value: 22.32676837829979 - type: nauc_map_at_1_std value: -9.891035217503042 - type: nauc_map_at_20_diff1 value: 25.58513300417793 - type: nauc_map_at_20_max value: 19.043467938212757 - type: nauc_map_at_20_std value: -5.15992496594833 - type: nauc_map_at_3_diff1 value: 27.323777236201142 - type: nauc_map_at_3_max value: 19.239977764594364 - type: nauc_map_at_3_std value: -6.769018234574016 - type: nauc_map_at_5_diff1 value: 25.704535338897955 - type: nauc_map_at_5_max value: 19.714177629087157 - type: nauc_map_at_5_std value: 
-5.317374049487578 - type: nauc_mrr_at_1000_diff1 value: 26.03458534887099 - type: nauc_mrr_at_1000_max value: 20.508209191992876 - type: nauc_mrr_at_1000_std value: -4.41940789161801 - type: nauc_mrr_at_100_diff1 value: 26.01254219811696 - type: nauc_mrr_at_100_max value: 20.494471407099468 - type: nauc_mrr_at_100_std value: -4.440663245313711 - type: nauc_mrr_at_10_diff1 value: 26.57574311453146 - type: nauc_mrr_at_10_max value: 20.632552036611397 - type: nauc_mrr_at_10_std value: -4.771592957195832 - type: nauc_mrr_at_1_diff1 value: 35.390014363446255 - type: nauc_mrr_at_1_max value: 23.765064263436365 - type: nauc_mrr_at_1_std value: -9.303847729220868 - type: nauc_mrr_at_20_diff1 value: 26.160655570384506 - type: nauc_mrr_at_20_max value: 20.57364651024466 - type: nauc_mrr_at_20_std value: -4.5034526632293606 - type: nauc_mrr_at_3_diff1 value: 27.552478204693294 - type: nauc_mrr_at_3_max value: 20.17445834379607 - type: nauc_mrr_at_3_std value: -6.67066855923739 - type: nauc_mrr_at_5_diff1 value: 26.22164525148457 - type: nauc_mrr_at_5_max value: 20.89310573810927 - type: nauc_mrr_at_5_std value: -5.008742708258987 - type: nauc_ndcg_at_1000_diff1 value: 21.615095431824386 - type: nauc_ndcg_at_1000_max value: 17.853748614419747 - type: nauc_ndcg_at_1000_std value: 0.6193727817290967 - type: nauc_ndcg_at_100_diff1 value: 21.818943246370033 - type: nauc_ndcg_at_100_max value: 18.698246203544997 - type: nauc_ndcg_at_100_std value: -1.1272521120250336 - type: nauc_ndcg_at_10_diff1 value: 23.359954900380046 - type: nauc_ndcg_at_10_max value: 18.737848777790113 - type: nauc_ndcg_at_10_std value: -3.36419601429348 - type: nauc_ndcg_at_1_diff1 value: 35.390014363446255 - type: nauc_ndcg_at_1_max value: 23.765064263436365 - type: nauc_ndcg_at_1_std value: -9.303847729220868 - type: nauc_ndcg_at_20_diff1 value: 22.147298650970814 - type: nauc_ndcg_at_20_max value: 18.355086827610748 - type: nauc_ndcg_at_20_std value: -2.5228126260008663 - type: nauc_ndcg_at_3_diff1 value: 25.170481707827726 - type: nauc_ndcg_at_3_max value: 18.977482220898093 - type: nauc_ndcg_at_3_std value: -5.571884562280113 - type: nauc_ndcg_at_5_diff1 value: 22.440176671988215 - type: nauc_ndcg_at_5_max value: 19.711503311595237 - type: nauc_ndcg_at_5_std value: -3.320774351728452 - type: nauc_precision_at_1000_diff1 value: 7.088471605516087 - type: nauc_precision_at_1000_max value: 2.5074109220804552 - type: nauc_precision_at_1000_std value: 16.597577625968512 - type: nauc_precision_at_100_diff1 value: 13.79076033899892 - type: nauc_precision_at_100_max value: 17.83806669681337 - type: nauc_precision_at_100_std value: 10.618023622166596 - type: nauc_precision_at_10_diff1 value: 16.994875251928576 - type: nauc_precision_at_10_max value: 18.13757952250044 - type: nauc_precision_at_10_std value: 2.404164664767652 - type: nauc_precision_at_1_diff1 value: 35.390014363446255 - type: nauc_precision_at_1_max value: 23.765064263436365 - type: nauc_precision_at_1_std value: -9.303847729220868 - type: nauc_precision_at_20_diff1 value: 13.659636799192535 - type: nauc_precision_at_20_max value: 17.06992402074562 - type: nauc_precision_at_20_std value: 5.049904786282464 - type: nauc_precision_at_3_diff1 value: 19.192414775621234 - type: nauc_precision_at_3_max value: 17.836724392009973 - type: nauc_precision_at_3_std value: -3.335195237103341 - type: nauc_precision_at_5_diff1 value: 15.361755413631082 - type: nauc_precision_at_5_max value: 20.34559104612301 - type: nauc_precision_at_5_std value: 1.5876026091755358 - type: 
nauc_recall_at_1000_diff1 value: 9.765614015670025 - type: nauc_recall_at_1000_max value: 8.922939285160988 - type: nauc_recall_at_1000_std value: 23.178560977315318 - type: nauc_recall_at_100_diff1 value: 14.263000555979167 - type: nauc_recall_at_100_max value: 15.900184211143051 - type: nauc_recall_at_100_std value: 6.9266463291552585 - type: nauc_recall_at_10_diff1 value: 17.146962299776742 - type: nauc_recall_at_10_max value: 16.2838741849977 - type: nauc_recall_at_10_std value: 0.4957264189499765 - type: nauc_recall_at_1_diff1 value: 34.725976334624 - type: nauc_recall_at_1_max value: 22.32676837829979 - type: nauc_recall_at_1_std value: -9.891035217503042 - type: nauc_recall_at_20_diff1 value: 14.837805878033974 - type: nauc_recall_at_20_max value: 15.430841224254651 - type: nauc_recall_at_20_std value: 2.454500390960994 - type: nauc_recall_at_3_diff1 value: 19.299670932730177 - type: nauc_recall_at_3_max value: 17.16119384262901 - type: nauc_recall_at_3_std value: -2.9001038379970394 - type: nauc_recall_at_5_diff1 value: 14.450934000034282 - type: nauc_recall_at_5_max value: 18.92477166485719 - type: nauc_recall_at_5_std value: 1.344285921519555 - type: ndcg_at_1 value: 10.351 - type: ndcg_at_10 value: 16.282 - type: ndcg_at_100 value: 20.608 - type: ndcg_at_1000 value: 24.035 - type: ndcg_at_20 value: 17.565 - type: ndcg_at_3 value: 13.495 - type: ndcg_at_5 value: 14.621 - type: precision_at_1 value: 10.351 - type: precision_at_10 value: 2.68 - type: precision_at_100 value: 0.525 - type: precision_at_1000 value: 0.08800000000000001 - type: precision_at_20 value: 1.645 - type: precision_at_3 value: 5.853 - type: precision_at_5 value: 4.14 - type: recall_at_1 value: 9.649000000000001 - type: recall_at_10 value: 23.53 - type: recall_at_100 value: 44.227 - type: recall_at_1000 value: 70.697 - type: recall_at_20 value: 28.311999999999998 - type: recall_at_3 value: 15.903 - type: recall_at_5 value: 18.657 - task: type: Retrieval dataset: name: MTEB ClimateFEVER (default) type: mteb/climate-fever config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: main_score value: 17.025000000000002 - type: map_at_1 value: 6.6290000000000004 - type: map_at_10 value: 11.347 - type: map_at_100 value: 12.644 - type: map_at_1000 value: 12.828999999999999 - type: map_at_20 value: 12.028 - type: map_at_3 value: 9.303 - type: map_at_5 value: 10.311 - type: mrr_at_1 value: 15.04885993485342 - type: mrr_at_10 value: 23.358771521637973 - type: mrr_at_100 value: 24.50069182382575 - type: mrr_at_1000 value: 24.566503286282707 - type: mrr_at_20 value: 24.040311778089198 - type: mrr_at_3 value: 20.14115092290989 - type: mrr_at_5 value: 22.040173724212796 - type: nauc_map_at_1000_diff1 value: 23.845405600696072 - type: nauc_map_at_1000_max value: 19.918654009707584 - type: nauc_map_at_1000_std value: 14.2995362236714 - type: nauc_map_at_100_diff1 value: 23.878652012076532 - type: nauc_map_at_100_max value: 19.7868200047113 - type: nauc_map_at_100_std value: 14.07147715756381 - type: nauc_map_at_10_diff1 value: 24.4513580135211 - type: nauc_map_at_10_max value: 18.528162379297424 - type: nauc_map_at_10_std value: 12.245001453530078 - type: nauc_map_at_1_diff1 value: 36.18745500997383 - type: nauc_map_at_1_max value: 16.04402749607054 - type: nauc_map_at_1_std value: 7.20280979985424 - type: nauc_map_at_20_diff1 value: 24.004649913803604 - type: nauc_map_at_20_max value: 19.146985692895363 - type: nauc_map_at_20_std value: 13.426418447399522 - type: nauc_map_at_3_diff1 value: 
27.231160846132983 - type: nauc_map_at_3_max value: 16.79456580974431 - type: nauc_map_at_3_std value: 9.082589413808256 - type: nauc_map_at_5_diff1 value: 25.508268403660754 - type: nauc_map_at_5_max value: 17.536648703443745 - type: nauc_map_at_5_std value: 10.426038884702734 - type: nauc_mrr_at_1000_diff1 value: 22.301810862813536 - type: nauc_mrr_at_1000_max value: 22.188842181108974 - type: nauc_mrr_at_1000_std value: 16.815185470718596 - type: nauc_mrr_at_100_diff1 value: 22.270021376767758 - type: nauc_mrr_at_100_max value: 22.197683459172236 - type: nauc_mrr_at_100_std value: 16.834105172900742 - type: nauc_mrr_at_10_diff1 value: 22.136243185086514 - type: nauc_mrr_at_10_max value: 21.857378005743627 - type: nauc_mrr_at_10_std value: 16.432686834852518 - type: nauc_mrr_at_1_diff1 value: 30.849233749145778 - type: nauc_mrr_at_1_max value: 19.32746844370728 - type: nauc_mrr_at_1_std value: 11.031566943467187 - type: nauc_mrr_at_20_diff1 value: 22.15765322626337 - type: nauc_mrr_at_20_max value: 22.184656261244154 - type: nauc_mrr_at_20_std value: 16.78145586387376 - type: nauc_mrr_at_3_diff1 value: 24.36222387971331 - type: nauc_mrr_at_3_max value: 20.18602401345787 - type: nauc_mrr_at_3_std value: 13.538840530687176 - type: nauc_mrr_at_5_diff1 value: 22.802910814513766 - type: nauc_mrr_at_5_max value: 21.132374913487563 - type: nauc_mrr_at_5_std value: 15.23323248745897 - type: nauc_ndcg_at_1000_diff1 value: 18.560822303760116 - type: nauc_ndcg_at_1000_max value: 26.64819017464794 - type: nauc_ndcg_at_1000_std value: 24.155670440129555 - type: nauc_ndcg_at_100_diff1 value: 18.832523159535324 - type: nauc_ndcg_at_100_max value: 24.82236156209509 - type: nauc_ndcg_at_100_std value: 21.409720522408872 - type: nauc_ndcg_at_10_diff1 value: 20.379115014782254 - type: nauc_ndcg_at_10_max value: 21.219836183571033 - type: nauc_ndcg_at_10_std value: 16.623290906816138 - type: nauc_ndcg_at_1_diff1 value: 30.849233749145778 - type: nauc_ndcg_at_1_max value: 19.32746844370728 - type: nauc_ndcg_at_1_std value: 11.031566943467187 - type: nauc_ndcg_at_20_diff1 value: 19.702632431319948 - type: nauc_ndcg_at_20_max value: 22.64648878049477 - type: nauc_ndcg_at_20_std value: 19.160859808670168 - type: nauc_ndcg_at_3_diff1 value: 24.15080188034188 - type: nauc_ndcg_at_3_max value: 18.951129834096516 - type: nauc_ndcg_at_3_std value: 12.029452302285687 - type: nauc_ndcg_at_5_diff1 value: 22.10388157868759 - type: nauc_ndcg_at_5_max value: 19.518705876244603 - type: nauc_ndcg_at_5_std value: 13.536204983649055 - type: nauc_precision_at_1000_diff1 value: -2.7867448945624984 - type: nauc_precision_at_1000_max value: 31.1830226333977 - type: nauc_precision_at_1000_std value: 36.056696868596596 - type: nauc_precision_at_100_diff1 value: 2.5671052902696982 - type: nauc_precision_at_100_max value: 31.536411439256344 - type: nauc_precision_at_100_std value: 32.56217444076993 - type: nauc_precision_at_10_diff1 value: 9.251264374701544 - type: nauc_precision_at_10_max value: 26.707795270053186 - type: nauc_precision_at_10_std value: 25.659890002306735 - type: nauc_precision_at_1_diff1 value: 30.849233749145778 - type: nauc_precision_at_1_max value: 19.32746844370728 - type: nauc_precision_at_1_std value: 11.031566943467187 - type: nauc_precision_at_20_diff1 value: 7.916882330538136 - type: nauc_precision_at_20_max value: 28.746226747545755 - type: nauc_precision_at_20_std value: 29.783389984668922 - type: nauc_precision_at_3_diff1 value: 18.117583580441828 - type: nauc_precision_at_3_max value: 
21.038596862616107 - type: nauc_precision_at_3_std value: 16.247022129678257 - type: nauc_precision_at_5_diff1 value: 12.949492485512934 - type: nauc_precision_at_5_max value: 23.8549777050663 - type: nauc_precision_at_5_std value: 20.6577089014477 - type: nauc_recall_at_1000_diff1 value: 3.187375002698742 - type: nauc_recall_at_1000_max value: 31.716729629155765 - type: nauc_recall_at_1000_std value: 37.670164094353765 - type: nauc_recall_at_100_diff1 value: 6.7938471488302525 - type: nauc_recall_at_100_max value: 25.207576554757217 - type: nauc_recall_at_100_std value: 26.746462298530066 - type: nauc_recall_at_10_diff1 value: 12.380354609250126 - type: nauc_recall_at_10_max value: 20.12686819994405 - type: nauc_recall_at_10_std value: 18.848179726655264 - type: nauc_recall_at_1_diff1 value: 36.18745500997383 - type: nauc_recall_at_1_max value: 16.04402749607054 - type: nauc_recall_at_1_std value: 7.20280979985424 - type: nauc_recall_at_20_diff1 value: 10.49270805649338 - type: nauc_recall_at_20_max value: 21.600861486130242 - type: nauc_recall_at_20_std value: 23.116939841321404 - type: nauc_recall_at_3_diff1 value: 21.195828641675384 - type: nauc_recall_at_3_max value: 18.102335877791635 - type: nauc_recall_at_3_std value: 11.369824368716667 - type: nauc_recall_at_5_diff1 value: 16.427575610593173 - type: nauc_recall_at_5_max value: 19.09871353855581 - type: nauc_recall_at_5_std value: 14.457824997826815 - type: ndcg_at_1 value: 15.049000000000001 - type: ndcg_at_10 value: 17.025000000000002 - type: ndcg_at_100 value: 23.180999999999997 - type: ndcg_at_1000 value: 27.040999999999997 - type: ndcg_at_20 value: 19.317999999999998 - type: ndcg_at_3 value: 13.077 - type: ndcg_at_5 value: 14.515 - type: precision_at_1 value: 15.049000000000001 - type: precision_at_10 value: 5.596 - type: precision_at_100 value: 1.207 - type: precision_at_1000 value: 0.191 - type: precision_at_20 value: 3.726 - type: precision_at_3 value: 9.881 - type: precision_at_5 value: 8.026 - type: recall_at_1 value: 6.6290000000000004 - type: recall_at_10 value: 21.477 - type: recall_at_100 value: 43.412 - type: recall_at_1000 value: 65.55199999999999 - type: recall_at_20 value: 28.233000000000004 - type: recall_at_3 value: 12.078999999999999 - type: recall_at_5 value: 15.645999999999999 - task: type: Retrieval dataset: name: MTEB DBPedia (default) type: mteb/dbpedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: main_score value: 23.145 - type: map_at_1 value: 3.816 - type: map_at_10 value: 9.001000000000001 - type: map_at_100 value: 12.867999999999999 - type: map_at_1000 value: 13.797999999999998 - type: map_at_20 value: 10.47 - type: map_at_3 value: 6.143 - type: map_at_5 value: 7.434 - type: mrr_at_1 value: 40.0 - type: mrr_at_10 value: 51.30724206349206 - type: mrr_at_100 value: 51.952349401757125 - type: mrr_at_1000 value: 51.975655822131536 - type: mrr_at_20 value: 51.686717989397934 - type: mrr_at_3 value: 48.29166666666667 - type: mrr_at_5 value: 49.76666666666668 - type: nauc_map_at_1000_diff1 value: 29.149952592526564 - type: nauc_map_at_1000_max value: 32.226436659092606 - type: nauc_map_at_1000_std value: 32.390057319103434 - type: nauc_map_at_100_diff1 value: 29.172730549866234 - type: nauc_map_at_100_max value: 29.817348287817374 - type: nauc_map_at_100_std value: 29.349606482017794 - type: nauc_map_at_10_diff1 value: 35.06682468647057 - type: nauc_map_at_10_max value: 15.73804338889845 - type: nauc_map_at_10_std value: 12.738434634857859 - type: 
nauc_map_at_1_diff1 value: 49.44672062682341 - type: nauc_map_at_1_max value: 9.108021053145999 - type: nauc_map_at_1_std value: 8.096530686738642 - type: nauc_map_at_20_diff1 value: 33.64466083348261 - type: nauc_map_at_20_max value: 20.67820042174935 - type: nauc_map_at_20_std value: 19.34342798319318 - type: nauc_map_at_3_diff1 value: 36.795254128901135 - type: nauc_map_at_3_max value: 11.406998619880177 - type: nauc_map_at_3_std value: 8.489329588300814 - type: nauc_map_at_5_diff1 value: 36.855256763074294 - type: nauc_map_at_5_max value: 10.22666386541562 - type: nauc_map_at_5_std value: 7.272833436058382 - type: nauc_mrr_at_1000_diff1 value: 31.572520314511564 - type: nauc_mrr_at_1000_max value: 42.16995158368953 - type: nauc_mrr_at_1000_std value: 27.506635088154184 - type: nauc_mrr_at_100_diff1 value: 31.56423858862792 - type: nauc_mrr_at_100_max value: 42.16892415660271 - type: nauc_mrr_at_100_std value: 27.51447071368099 - type: nauc_mrr_at_10_diff1 value: 31.63721607115172 - type: nauc_mrr_at_10_max value: 42.01293195936492 - type: nauc_mrr_at_10_std value: 27.0714657875347 - type: nauc_mrr_at_1_diff1 value: 36.202046858908304 - type: nauc_mrr_at_1_max value: 38.823056127703396 - type: nauc_mrr_at_1_std value: 27.477471678681773 - type: nauc_mrr_at_20_diff1 value: 31.59291226163362 - type: nauc_mrr_at_20_max value: 42.240404025831914 - type: nauc_mrr_at_20_std value: 27.362294564659372 - type: nauc_mrr_at_3_diff1 value: 31.307828212578166 - type: nauc_mrr_at_3_max value: 42.67244565551664 - type: nauc_mrr_at_3_std value: 28.242033623242047 - type: nauc_mrr_at_5_diff1 value: 31.409186160018248 - type: nauc_mrr_at_5_max value: 41.848432556735524 - type: nauc_mrr_at_5_std value: 27.25145068724168 - type: nauc_ndcg_at_1000_diff1 value: 28.58616801626832 - type: nauc_ndcg_at_1000_max value: 40.560358072075104 - type: nauc_ndcg_at_1000_std value: 44.52955598541086 - type: nauc_ndcg_at_100_diff1 value: 27.9027071752844 - type: nauc_ndcg_at_100_max value: 34.20518605893189 - type: nauc_ndcg_at_100_std value: 36.130587066580205 - type: nauc_ndcg_at_10_diff1 value: 31.706197437847916 - type: nauc_ndcg_at_10_max value: 33.00607271012289 - type: nauc_ndcg_at_10_std value: 26.184672647158347 - type: nauc_ndcg_at_1_diff1 value: 34.95605226057856 - type: nauc_ndcg_at_1_max value: 31.214812707058798 - type: nauc_ndcg_at_1_std value: 21.85376867802022 - type: nauc_ndcg_at_20_diff1 value: 32.53139289159915 - type: nauc_ndcg_at_20_max value: 31.313355682963522 - type: nauc_ndcg_at_20_std value: 28.290485585471224 - type: nauc_ndcg_at_3_diff1 value: 27.952859333877143 - type: nauc_ndcg_at_3_max value: 36.41737019580803 - type: nauc_ndcg_at_3_std value: 25.907169391112916 - type: nauc_ndcg_at_5_diff1 value: 30.20489831357784 - type: nauc_ndcg_at_5_max value: 33.148015717542684 - type: nauc_ndcg_at_5_std value: 24.36156451865898 - type: nauc_precision_at_1000_diff1 value: -1.7693261994646308 - type: nauc_precision_at_1000_max value: 28.787263157708143 - type: nauc_precision_at_1000_std value: 27.551129796148842 - type: nauc_precision_at_100_diff1 value: 3.3116909318263423 - type: nauc_precision_at_100_max value: 48.74609450323826 - type: nauc_precision_at_100_std value: 43.954871932959584 - type: nauc_precision_at_10_diff1 value: 13.17227781231001 - type: nauc_precision_at_10_max value: 46.04619732652983 - type: nauc_precision_at_10_std value: 35.724316499219306 - type: nauc_precision_at_1_diff1 value: 36.202046858908304 - type: nauc_precision_at_1_max value: 38.823056127703396 - type: 
nauc_precision_at_1_std value: 27.477471678681773 - type: nauc_precision_at_20_diff1 value: 11.391896150500306 - type: nauc_precision_at_20_max value: 47.97574937213996 - type: nauc_precision_at_20_std value: 41.67941698772898 - type: nauc_precision_at_3_diff1 value: 15.947958903421853 - type: nauc_precision_at_3_max value: 43.21770613997525 - type: nauc_precision_at_3_std value: 29.198917951549987 - type: nauc_precision_at_5_diff1 value: 16.15687539160957 - type: nauc_precision_at_5_max value: 41.124004540776674 - type: nauc_precision_at_5_std value: 29.031535455608964 - type: nauc_recall_at_1000_diff1 value: 14.137908608289335 - type: nauc_recall_at_1000_max value: 24.246953345948455 - type: nauc_recall_at_1000_std value: 47.46111251689648 - type: nauc_recall_at_100_diff1 value: 13.773797590605948 - type: nauc_recall_at_100_max value: 18.33493637225964 - type: nauc_recall_at_100_std value: 30.815358014011053 - type: nauc_recall_at_10_diff1 value: 27.45316418783937 - type: nauc_recall_at_10_max value: 1.9618283613424727 - type: nauc_recall_at_10_std value: 2.2997275629360456 - type: nauc_recall_at_1_diff1 value: 49.44672062682341 - type: nauc_recall_at_1_max value: 9.108021053145999 - type: nauc_recall_at_1_std value: 8.096530686738642 - type: nauc_recall_at_20_diff1 value: 26.40815140945712 - type: nauc_recall_at_20_max value: 5.872192792550555 - type: nauc_recall_at_20_std value: 11.956420632395973 - type: nauc_recall_at_3_diff1 value: 27.55496529406887 - type: nauc_recall_at_3_max value: 5.338125726954482 - type: nauc_recall_at_3_std value: 3.7449938483594347 - type: nauc_recall_at_5_diff1 value: 29.567743062877106 - type: nauc_recall_at_5_max value: -0.8459501701113746 - type: nauc_recall_at_5_std value: -2.0261976816270897 - type: ndcg_at_1 value: 29.125 - type: ndcg_at_10 value: 23.145 - type: ndcg_at_100 value: 26.692 - type: ndcg_at_1000 value: 33.139 - type: ndcg_at_20 value: 22.78 - type: ndcg_at_3 value: 24.966 - type: ndcg_at_5 value: 23.663999999999998 - type: precision_at_1 value: 40.0 - type: precision_at_10 value: 20.849999999999998 - type: precision_at_100 value: 6.6530000000000005 - type: precision_at_1000 value: 1.383 - type: precision_at_20 value: 15.637 - type: precision_at_3 value: 30.667 - type: precision_at_5 value: 25.45 - type: recall_at_1 value: 3.816 - type: recall_at_10 value: 14.572 - type: recall_at_100 value: 34.348 - type: recall_at_1000 value: 56.459 - type: recall_at_20 value: 19.473 - type: recall_at_3 value: 7.35 - type: recall_at_5 value: 10.223 - task: type: Classification dataset: name: MTEB EmotionClassification (default) type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 43.015 - type: f1 value: 39.81378380529203 - type: f1_weighted value: 45.04126898767841 - type: main_score value: 43.015 - task: type: Retrieval dataset: name: MTEB FEVER (default) type: mteb/fever config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: main_score value: 28.729 - type: map_at_1 value: 15.290999999999999 - type: map_at_10 value: 23.576 - type: map_at_100 value: 24.696 - type: map_at_1000 value: 24.77 - type: map_at_20 value: 24.236 - type: map_at_3 value: 20.851 - type: map_at_5 value: 22.346 - type: mrr_at_1 value: 16.26162616261626 - type: mrr_at_10 value: 24.968234918729884 - type: mrr_at_100 value: 26.089931360025375 - type: mrr_at_1000 value: 26.155198368317695 - type: mrr_at_20 value: 25.641282363239036 - type: mrr_at_3 value: 
22.07970797079703 - type: mrr_at_5 value: 23.684118411841187 - type: nauc_map_at_1000_diff1 value: 21.192175069318054 - type: nauc_map_at_1000_max value: 3.7572640862709243 - type: nauc_map_at_1000_std value: -10.714472199356356 - type: nauc_map_at_100_diff1 value: 21.186561403482305 - type: nauc_map_at_100_max value: 3.7545931107844868 - type: nauc_map_at_100_std value: -10.736961653022373 - type: nauc_map_at_10_diff1 value: 21.199850480482336 - type: nauc_map_at_10_max value: 3.3922238981131154 - type: nauc_map_at_10_std value: -11.30854047860436 - type: nauc_map_at_1_diff1 value: 27.359821763360536 - type: nauc_map_at_1_max value: 2.001288237582939 - type: nauc_map_at_1_std value: -14.25657635217778 - type: nauc_map_at_20_diff1 value: 21.177640873323206 - type: nauc_map_at_20_max value: 3.6497767620761783 - type: nauc_map_at_20_std value: -10.959538676164254 - type: nauc_map_at_3_diff1 value: 22.134097918320307 - type: nauc_map_at_3_max value: 2.758483257089319 - type: nauc_map_at_3_std value: -12.130220199113667 - type: nauc_map_at_5_diff1 value: 21.651581914816173 - type: nauc_map_at_5_max value: 3.30026419290027 - type: nauc_map_at_5_std value: -11.621659161118043 - type: nauc_mrr_at_1000_diff1 value: 21.05636367084882 - type: nauc_mrr_at_1000_max value: 3.8589634897191067 - type: nauc_mrr_at_1000_std value: -10.768804329214358 - type: nauc_mrr_at_100_diff1 value: 21.04385724589245 - type: nauc_mrr_at_100_max value: 3.870899879060927 - type: nauc_mrr_at_100_std value: -10.772030879899033 - type: nauc_mrr_at_10_diff1 value: 20.991417472205097 - type: nauc_mrr_at_10_max value: 3.511481751112349 - type: nauc_mrr_at_10_std value: -11.28741069984751 - type: nauc_mrr_at_1_diff1 value: 27.028372555750945 - type: nauc_mrr_at_1_max value: 1.8894486155746377 - type: nauc_mrr_at_1_std value: -14.273316378936693 - type: nauc_mrr_at_20_diff1 value: 21.002813934230556 - type: nauc_mrr_at_20_max value: 3.7727440130029932 - type: nauc_mrr_at_20_std value: -10.939766997821094 - type: nauc_mrr_at_3_diff1 value: 21.846070479237994 - type: nauc_mrr_at_3_max value: 2.842770092609676 - type: nauc_mrr_at_3_std value: -12.084295513186706 - type: nauc_mrr_at_5_diff1 value: 21.343782321566753 - type: nauc_mrr_at_5_max value: 3.4543962205661534 - type: nauc_mrr_at_5_std value: -11.563405912443502 - type: nauc_ndcg_at_1000_diff1 value: 19.24127844476728 - type: nauc_ndcg_at_1000_max value: 5.824599733811995 - type: nauc_ndcg_at_1000_std value: -6.776584277160208 - type: nauc_ndcg_at_100_diff1 value: 19.055257356060654 - type: nauc_ndcg_at_100_max value: 5.915633098037283 - type: nauc_ndcg_at_100_std value: -7.0611765517376535 - type: nauc_ndcg_at_10_diff1 value: 18.940447811371943 - type: nauc_ndcg_at_10_max value: 4.312700145067062 - type: nauc_ndcg_at_10_std value: -9.74854649500064 - type: nauc_ndcg_at_1_diff1 value: 27.028372555750945 - type: nauc_ndcg_at_1_max value: 1.8894486155746377 - type: nauc_ndcg_at_1_std value: -14.273316378936693 - type: nauc_ndcg_at_20_diff1 value: 18.932440328384416 - type: nauc_ndcg_at_20_max value: 5.210341483050613 - type: nauc_ndcg_at_20_std value: -8.500581748677142 - type: nauc_ndcg_at_3_diff1 value: 20.663097813901807 - type: nauc_ndcg_at_3_max value: 3.1324449228046842 - type: nauc_ndcg_at_3_std value: -11.403873150784904 - type: nauc_ndcg_at_5_diff1 value: 19.869732661093124 - type: nauc_ndcg_at_5_max value: 4.089322170378127 - type: nauc_ndcg_at_5_std value: -10.520137341697561 - type: nauc_precision_at_1000_diff1 value: 1.80906141434611 - type: 
nauc_precision_at_1000_max value: 17.65531772338905 - type: nauc_precision_at_1000_std value: 20.531912555908068 - type: nauc_precision_at_100_diff1 value: 8.972396603865617 - type: nauc_precision_at_100_max value: 15.782076588230645 - type: nauc_precision_at_100_std value: 10.544041380585218 - type: nauc_precision_at_10_diff1 value: 12.90572270438589 - type: nauc_precision_at_10_max value: 7.055912366089616 - type: nauc_precision_at_10_std value: -5.583590637914298 - type: nauc_precision_at_1_diff1 value: 27.028372555750945 - type: nauc_precision_at_1_max value: 1.8894486155746377 - type: nauc_precision_at_1_std value: -14.273316378936693 - type: nauc_precision_at_20_diff1 value: 12.001328323834533 - type: nauc_precision_at_20_max value: 10.577479186116202 - type: nauc_precision_at_20_std value: -0.5309086656129618 - type: nauc_precision_at_3_diff1 value: 17.128361177835814 - type: nauc_precision_at_3_max value: 4.189243523287345 - type: nauc_precision_at_3_std value: -9.653026091540033 - type: nauc_precision_at_5_diff1 value: 15.617440848702657 - type: nauc_precision_at_5_max value: 6.362830737351831 - type: nauc_precision_at_5_std value: -7.935703214287934 - type: nauc_recall_at_1000_diff1 value: 8.273650271469275 - type: nauc_recall_at_1000_max value: 18.539144354093576 - type: nauc_recall_at_1000_std value: 25.237168569016006 - type: nauc_recall_at_100_diff1 value: 11.317131164829458 - type: nauc_recall_at_100_max value: 14.217504930349332 - type: nauc_recall_at_100_std value: 9.361412059431458 - type: nauc_recall_at_10_diff1 value: 12.921497120514802 - type: nauc_recall_at_10_max value: 6.267626827850552 - type: nauc_recall_at_10_std value: -5.482507135511354 - type: nauc_recall_at_1_diff1 value: 27.359821763360536 - type: nauc_recall_at_1_max value: 2.001288237582939 - type: nauc_recall_at_1_std value: -14.25657635217778 - type: nauc_recall_at_20_diff1 value: 12.566385176965325 - type: nauc_recall_at_20_max value: 9.263362929577985 - type: nauc_recall_at_20_std value: -0.9767007909018534 - type: nauc_recall_at_3_diff1 value: 17.13833238669028 - type: nauc_recall_at_3_max value: 3.9556239580093964 - type: nauc_recall_at_3_std value: -9.484971760875943 - type: nauc_recall_at_5_diff1 value: 15.408008879396792 - type: nauc_recall_at_5_max value: 5.733443694979962 - type: nauc_recall_at_5_std value: -7.619618998886788 - type: ndcg_at_1 value: 16.262 - type: ndcg_at_10 value: 28.729 - type: ndcg_at_100 value: 34.407 - type: ndcg_at_1000 value: 36.403 - type: ndcg_at_20 value: 31.115 - type: ndcg_at_3 value: 23.025000000000002 - type: ndcg_at_5 value: 25.744 - type: precision_at_1 value: 16.262 - type: precision_at_10 value: 4.727 - type: precision_at_100 value: 0.775 - type: precision_at_1000 value: 0.096 - type: precision_at_20 value: 2.8819999999999997 - type: precision_at_3 value: 10.016 - type: precision_at_5 value: 7.449999999999999 - type: recall_at_1 value: 15.290999999999999 - type: recall_at_10 value: 43.646 - type: recall_at_100 value: 70.113 - type: recall_at_1000 value: 85.48 - type: recall_at_20 value: 52.821 - type: recall_at_3 value: 28.107 - type: recall_at_5 value: 34.61 - task: type: Retrieval dataset: name: MTEB FiQA2018 (default) type: mteb/fiqa config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: main_score value: 15.595 - type: map_at_1 value: 6.565 - type: map_at_10 value: 11.108 - type: map_at_100 value: 12.235999999999999 - type: map_at_1000 value: 12.443 - type: map_at_20 value: 11.711 - type: map_at_3 value: 9.344 - 
type: map_at_5 value: 10.091 - type: mrr_at_1 value: 13.271604938271606 - type: mrr_at_10 value: 19.662820889672748 - type: mrr_at_100 value: 20.682598865638496 - type: mrr_at_1000 value: 20.78404255147132 - type: mrr_at_20 value: 20.23313871116589 - type: mrr_at_3 value: 17.59259259259259 - type: mrr_at_5 value: 18.441358024691358 - type: nauc_map_at_1000_diff1 value: 22.921079244677138 - type: nauc_map_at_1000_max value: 1.6123187106312153 - type: nauc_map_at_1000_std value: -5.495269253922846 - type: nauc_map_at_100_diff1 value: 22.914921168866325 - type: nauc_map_at_100_max value: 1.3968600648831495 - type: nauc_map_at_100_std value: -5.645931216513478 - type: nauc_map_at_10_diff1 value: 22.612160253584886 - type: nauc_map_at_10_max value: 1.1234092397110391 - type: nauc_map_at_10_std value: -6.784494108123146 - type: nauc_map_at_1_diff1 value: 31.272282929056605 - type: nauc_map_at_1_max value: 6.799912979421051 - type: nauc_map_at_1_std value: -6.6031558209382055 - type: nauc_map_at_20_diff1 value: 22.8259258640759 - type: nauc_map_at_20_max value: 0.7432100826367205 - type: nauc_map_at_20_std value: -6.146953223038562 - type: nauc_map_at_3_diff1 value: 22.955603944226706 - type: nauc_map_at_3_max value: 2.2477594004270545 - type: nauc_map_at_3_std value: -7.575225547389122 - type: nauc_map_at_5_diff1 value: 22.56932712155024 - type: nauc_map_at_5_max value: 0.4077562548808868 - type: nauc_map_at_5_std value: -8.552137311095203 - type: nauc_mrr_at_1000_diff1 value: 19.80572361705586 - type: nauc_mrr_at_1000_max value: 1.6812092889541859 - type: nauc_mrr_at_1000_std value: -10.491306348526788 - type: nauc_mrr_at_100_diff1 value: 19.85259350388941 - type: nauc_mrr_at_100_max value: 1.6561000518863531 - type: nauc_mrr_at_100_std value: -10.480690401152904 - type: nauc_mrr_at_10_diff1 value: 19.533536862313408 - type: nauc_mrr_at_10_max value: 1.3995106916927087 - type: nauc_mrr_at_10_std value: -10.75185322467088 - type: nauc_mrr_at_1_diff1 value: 24.700601379046265 - type: nauc_mrr_at_1_max value: 3.393973322489269 - type: nauc_mrr_at_1_std value: -12.605333867765248 - type: nauc_mrr_at_20_diff1 value: 19.762509896286083 - type: nauc_mrr_at_20_max value: 1.3832552477068305 - type: nauc_mrr_at_20_std value: -10.61069673947179 - type: nauc_mrr_at_3_diff1 value: 19.142548681936596 - type: nauc_mrr_at_3_max value: 1.1527317829234591 - type: nauc_mrr_at_3_std value: -12.50027225748061 - type: nauc_mrr_at_5_diff1 value: 19.16019795231953 - type: nauc_mrr_at_5_max value: 0.6582098550091133 - type: nauc_mrr_at_5_std value: -11.95332987534088 - type: nauc_ndcg_at_1000_diff1 value: 21.20900884316552 - type: nauc_ndcg_at_1000_max value: 5.769615210126322 - type: nauc_ndcg_at_1000_std value: -0.839672047917041 - type: nauc_ndcg_at_100_diff1 value: 22.01841135054075 - type: nauc_ndcg_at_100_max value: 2.899132697881292 - type: nauc_ndcg_at_100_std value: -2.3305289486175984 - type: nauc_ndcg_at_10_diff1 value: 20.846375271379397 - type: nauc_ndcg_at_10_max value: 0.6075110546301129 - type: nauc_ndcg_at_10_std value: -6.56829544221122 - type: nauc_ndcg_at_1_diff1 value: 24.700601379046265 - type: nauc_ndcg_at_1_max value: 3.393973322489269 - type: nauc_ndcg_at_1_std value: -12.605333867765248 - type: nauc_ndcg_at_20_diff1 value: 21.516279742096213 - type: nauc_ndcg_at_20_max value: -0.08329822682709122 - type: nauc_ndcg_at_20_std value: -4.798485582049851 - type: nauc_ndcg_at_3_diff1 value: 19.632146936243853 - type: nauc_ndcg_at_3_max value: 1.0809843056666528 - type: nauc_ndcg_at_3_std value: 
-10.872897003746244 - type: nauc_ndcg_at_5_diff1 value: 20.15495897120557 - type: nauc_ndcg_at_5_max value: -0.8751859547516859 - type: nauc_ndcg_at_5_std value: -10.501974053604677 - type: nauc_precision_at_1000_diff1 value: 3.0998891770528743 - type: nauc_precision_at_1000_max value: 18.702259209681475 - type: nauc_precision_at_1000_std value: 4.847633093599936 - type: nauc_precision_at_100_diff1 value: 13.870790241031658 - type: nauc_precision_at_100_max value: 11.381893073925617 - type: nauc_precision_at_100_std value: 3.3997110967319424 - type: nauc_precision_at_10_diff1 value: 17.08241656155363 - type: nauc_precision_at_10_max value: 1.043655487315398 - type: nauc_precision_at_10_std value: -6.061446632702096 - type: nauc_precision_at_1_diff1 value: 24.700601379046265 - type: nauc_precision_at_1_max value: 3.393973322489269 - type: nauc_precision_at_1_std value: -12.605333867765248 - type: nauc_precision_at_20_diff1 value: 15.561816608848774 - type: nauc_precision_at_20_max value: 1.4387105640001014 - type: nauc_precision_at_20_std value: -2.3148485240364107 - type: nauc_precision_at_3_diff1 value: 15.791636673970427 - type: nauc_precision_at_3_max value: -0.48953847896803343 - type: nauc_precision_at_3_std value: -14.466917751105074 - type: nauc_precision_at_5_diff1 value: 14.407013696150702 - type: nauc_precision_at_5_max value: -3.623633621758513 - type: nauc_precision_at_5_std value: -13.679893638323668 - type: nauc_recall_at_1000_diff1 value: 14.095191934326234 - type: nauc_recall_at_1000_max value: 16.426400937089994 - type: nauc_recall_at_1000_std value: 21.29168932874166 - type: nauc_recall_at_100_diff1 value: 18.750731459551403 - type: nauc_recall_at_100_max value: 4.344246341500438 - type: nauc_recall_at_100_std value: 9.195285873556298 - type: nauc_recall_at_10_diff1 value: 16.8302080344721 - type: nauc_recall_at_10_max value: -1.2351359259281107 - type: nauc_recall_at_10_std value: -2.6278258890582484 - type: nauc_recall_at_1_diff1 value: 31.272282929056605 - type: nauc_recall_at_1_max value: 6.799912979421051 - type: nauc_recall_at_1_std value: -6.6031558209382055 - type: nauc_recall_at_20_diff1 value: 18.562953757787838 - type: nauc_recall_at_20_max value: -3.301819119883209 - type: nauc_recall_at_20_std value: 1.4940250110385407 - type: nauc_recall_at_3_diff1 value: 17.150794119645003 - type: nauc_recall_at_3_max value: -1.7201601188087563 - type: nauc_recall_at_3_std value: -8.43546725991927 - type: nauc_recall_at_5_diff1 value: 17.363347352192303 - type: nauc_recall_at_5_max value: -4.9737540456308755 - type: nauc_recall_at_5_std value: -9.746324422299473 - type: ndcg_at_1 value: 13.272 - type: ndcg_at_10 value: 15.595 - type: ndcg_at_100 value: 21.11 - type: ndcg_at_1000 value: 25.602999999999998 - type: ndcg_at_20 value: 17.577 - type: ndcg_at_3 value: 12.901000000000002 - type: ndcg_at_5 value: 13.489999999999998 - type: precision_at_1 value: 13.272 - type: precision_at_10 value: 4.552 - type: precision_at_100 value: 0.9809999999999999 - type: precision_at_1000 value: 0.17700000000000002 - type: precision_at_20 value: 3.048 - type: precision_at_3 value: 8.539 - type: precision_at_5 value: 6.389 - type: recall_at_1 value: 6.565 - type: recall_at_10 value: 20.535 - type: recall_at_100 value: 42.378 - type: recall_at_1000 value: 69.754 - type: recall_at_20 value: 26.88 - type: recall_at_3 value: 12.228 - type: recall_at_5 value: 14.625 - task: type: Retrieval dataset: name: MTEB HotpotQA (default) type: mteb/hotpotqa config: default split: test revision: 
ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: main_score value: 37.478 - type: map_at_1 value: 21.695 - type: map_at_10 value: 29.651 - type: map_at_100 value: 30.563000000000002 - type: map_at_1000 value: 30.663 - type: map_at_20 value: 30.144 - type: map_at_3 value: 27.378999999999998 - type: map_at_5 value: 28.685 - type: mrr_at_1 value: 43.38960162052667 - type: mrr_at_10 value: 50.7335991340044 - type: mrr_at_100 value: 51.38613386358546 - type: mrr_at_1000 value: 51.4311096325992 - type: mrr_at_20 value: 51.11742428497492 - type: mrr_at_3 value: 48.82061670042784 - type: mrr_at_5 value: 49.94215620076533 - type: nauc_map_at_1000_diff1 value: 47.09534641484041 - type: nauc_map_at_1000_max value: 25.84416946902351 - type: nauc_map_at_1000_std value: 6.22430525152719 - type: nauc_map_at_100_diff1 value: 47.10264559039403 - type: nauc_map_at_100_max value: 25.829196587016916 - type: nauc_map_at_100_std value: 6.167582046246078 - type: nauc_map_at_10_diff1 value: 47.54215994617424 - type: nauc_map_at_10_max value: 25.741877794411224 - type: nauc_map_at_10_std value: 5.294699714891394 - type: nauc_map_at_1_diff1 value: 61.37553174317903 - type: nauc_map_at_1_max value: 28.314612538409715 - type: nauc_map_at_1_std value: 0.6565168608453245 - type: nauc_map_at_20_diff1 value: 47.32504591067196 - type: nauc_map_at_20_max value: 25.862432005549124 - type: nauc_map_at_20_std value: 5.8326652929299945 - type: nauc_map_at_3_diff1 value: 49.72129017170202 - type: nauc_map_at_3_max value: 26.31692918544824 - type: nauc_map_at_3_std value: 3.739704581459523 - type: nauc_map_at_5_diff1 value: 48.415053585747245 - type: nauc_map_at_5_max value: 25.781193661521396 - type: nauc_map_at_5_std value: 4.407681606999509 - type: nauc_mrr_at_1000_diff1 value: 58.32269379788515 - type: nauc_mrr_at_1000_max value: 27.915030639675535 - type: nauc_mrr_at_1000_std value: 3.2598345967531666 - type: nauc_mrr_at_100_diff1 value: 58.30993970174152 - type: nauc_mrr_at_100_max value: 27.916429768832625 - type: nauc_mrr_at_100_std value: 3.2688330131702594 - type: nauc_mrr_at_10_diff1 value: 58.355306982656586 - type: nauc_mrr_at_10_max value: 27.909701715855157 - type: nauc_mrr_at_10_std value: 3.016978210937183 - type: nauc_mrr_at_1_diff1 value: 61.37553174317903 - type: nauc_mrr_at_1_max value: 28.314612538409715 - type: nauc_mrr_at_1_std value: 0.6565168608453245 - type: nauc_mrr_at_20_diff1 value: 58.34633689657052 - type: nauc_mrr_at_20_max value: 27.929286473107496 - type: nauc_mrr_at_20_std value: 3.211337498366748 - type: nauc_mrr_at_3_diff1 value: 58.74568991079614 - type: nauc_mrr_at_3_max value: 28.12400510630213 - type: nauc_mrr_at_3_std value: 2.1802465826138673 - type: nauc_mrr_at_5_diff1 value: 58.39649035584997 - type: nauc_mrr_at_5_max value: 27.941440908365305 - type: nauc_mrr_at_5_std value: 2.6184419683046434 - type: nauc_ndcg_at_1000_diff1 value: 46.48287782676621 - type: nauc_ndcg_at_1000_max value: 26.09493353610149 - type: nauc_ndcg_at_1000_std value: 10.303657041560072 - type: nauc_ndcg_at_100_diff1 value: 46.517833835714185 - type: nauc_ndcg_at_100_max value: 25.73531982596624 - type: nauc_ndcg_at_100_std value: 9.399497932066883 - type: nauc_ndcg_at_10_diff1 value: 48.40399915446156 - type: nauc_ndcg_at_10_max value: 25.794094279378715 - type: nauc_ndcg_at_10_std value: 6.301783424981204 - type: nauc_ndcg_at_1_diff1 value: 61.37553174317903 - type: nauc_ndcg_at_1_max value: 28.314612538409715 - type: nauc_ndcg_at_1_std value: 0.6565168608453245 - type: nauc_ndcg_at_20_diff1 value: 
47.80325328797878 - type: nauc_ndcg_at_20_max value: 26.012852901587113 - type: nauc_ndcg_at_20_std value: 7.721252596952678 - type: nauc_ndcg_at_3_diff1 value: 51.52453349541243 - type: nauc_ndcg_at_3_max value: 26.638522344434378 - type: nauc_ndcg_at_3_std value: 3.7194855038248917 - type: nauc_ndcg_at_5_diff1 value: 49.78137048541129 - type: nauc_ndcg_at_5_max value: 25.91490881872891 - type: nauc_ndcg_at_5_std value: 4.725975666940652 - type: nauc_precision_at_1000_diff1 value: 13.140210373077519 - type: nauc_precision_at_1000_max value: 17.08484696343514 - type: nauc_precision_at_1000_std value: 30.14820694651348 - type: nauc_precision_at_100_diff1 value: 21.55654916743866 - type: nauc_precision_at_100_max value: 17.50194659200474 - type: nauc_precision_at_100_std value: 21.51279344954705 - type: nauc_precision_at_10_diff1 value: 35.330596257649034 - type: nauc_precision_at_10_max value: 21.58835648977114 - type: nauc_precision_at_10_std value: 10.948965785333792 - type: nauc_precision_at_1_diff1 value: 61.37553174317903 - type: nauc_precision_at_1_max value: 28.314612538409715 - type: nauc_precision_at_1_std value: 0.6565168608453245 - type: nauc_precision_at_20_diff1 value: 31.105334918303583 - type: nauc_precision_at_20_max value: 20.93031217218494 - type: nauc_precision_at_20_std value: 14.943165224963831 - type: nauc_precision_at_3_diff1 value: 45.20407780117561 - type: nauc_precision_at_3_max value: 24.97354701555406 - type: nauc_precision_at_3_std value: 5.354478163201348 - type: nauc_precision_at_5_diff1 value: 40.196131824936515 - type: nauc_precision_at_5_max value: 22.631237478251986 - type: nauc_precision_at_5_std value: 7.200977485797791 - type: nauc_recall_at_1000_diff1 value: 13.140210373077565 - type: nauc_recall_at_1000_max value: 17.08484696343504 - type: nauc_recall_at_1000_std value: 30.14820694651354 - type: nauc_recall_at_100_diff1 value: 21.556549167438675 - type: nauc_recall_at_100_max value: 17.501946592004693 - type: nauc_recall_at_100_std value: 21.512793449547083 - type: nauc_recall_at_10_diff1 value: 35.330596257649006 - type: nauc_recall_at_10_max value: 21.588356489771147 - type: nauc_recall_at_10_std value: 10.948965785333794 - type: nauc_recall_at_1_diff1 value: 61.37553174317903 - type: nauc_recall_at_1_max value: 28.314612538409715 - type: nauc_recall_at_1_std value: 0.6565168608453245 - type: nauc_recall_at_20_diff1 value: 31.105334918303562 - type: nauc_recall_at_20_max value: 20.930312172184966 - type: nauc_recall_at_20_std value: 14.943165224963828 - type: nauc_recall_at_3_diff1 value: 45.20407780117561 - type: nauc_recall_at_3_max value: 24.97354701555401 - type: nauc_recall_at_3_std value: 5.354478163201327 - type: nauc_recall_at_5_diff1 value: 40.19613182493655 - type: nauc_recall_at_5_max value: 22.63123747825198 - type: nauc_recall_at_5_std value: 7.2009774857978295 - type: ndcg_at_1 value: 43.39 - type: ndcg_at_10 value: 37.478 - type: ndcg_at_100 value: 41.535 - type: ndcg_at_1000 value: 43.832 - type: ndcg_at_20 value: 38.994 - type: ndcg_at_3 value: 33.407 - type: ndcg_at_5 value: 35.475 - type: precision_at_1 value: 43.39 - type: precision_at_10 value: 8.051 - type: precision_at_100 value: 1.129 - type: precision_at_1000 value: 0.14300000000000002 - type: precision_at_20 value: 4.5120000000000005 - type: precision_at_3 value: 20.756 - type: precision_at_5 value: 14.096 - type: recall_at_1 value: 21.695 - type: recall_at_10 value: 40.257 - type: recall_at_100 value: 56.435 - type: recall_at_1000 value: 71.708 - type: recall_at_20 value: 
45.125 - type: recall_at_3 value: 31.134 - type: recall_at_5 value: 35.24 - task: type: Classification dataset: name: MTEB ImdbClassification (default) type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 66.62440000000001 - type: ap value: 61.38791189713465 - type: ap_weighted value: 61.38791189713465 - type: f1 value: 66.13197122736734 - type: f1_weighted value: 66.13197122736733 - type: main_score value: 66.62440000000001 - task: type: Retrieval dataset: name: MTEB MSMARCO (default) type: mteb/msmarco config: default split: test revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: main_score value: 32.695 - type: map_at_1 value: 1.0 - type: map_at_10 value: 5.678 - type: map_at_100 value: 14.411999999999999 - type: map_at_1000 value: 18.412 - type: map_at_20 value: 8.49 - type: map_at_3 value: 2.2399999999999998 - type: map_at_5 value: 3.473 - type: mrr_at_1 value: 58.139534883720934 - type: mrr_at_10 value: 68.02325581395348 - type: mrr_at_100 value: 68.14220796578454 - type: mrr_at_1000 value: 68.16736545321 - type: mrr_at_20 value: 68.02325581395348 - type: mrr_at_3 value: 67.05426356589146 - type: mrr_at_5 value: 67.63565891472867 - type: nauc_map_at_1000_diff1 value: 10.081357169406843 - type: nauc_map_at_1000_max value: 65.52977943545287 - type: nauc_map_at_1000_std value: 60.56148146283225 - type: nauc_map_at_100_diff1 value: 8.901740928252257 - type: nauc_map_at_100_max value: 56.33594426005263 - type: nauc_map_at_100_std value: 51.85557415677924 - type: nauc_map_at_10_diff1 value: 4.764613289448945 - type: nauc_map_at_10_max value: 27.05251746701552 - type: nauc_map_at_10_std value: 24.729593218134063 - type: nauc_map_at_1_diff1 value: 18.949682046838028 - type: nauc_map_at_1_max value: 8.398832694812572 - type: nauc_map_at_1_std value: 9.158568546378119 - type: nauc_map_at_20_diff1 value: 4.35163781972467 - type: nauc_map_at_20_max value: 29.39315596036044 - type: nauc_map_at_20_std value: 28.20413648253673 - type: nauc_map_at_3_diff1 value: 14.426703941475743 - type: nauc_map_at_3_max value: 19.528085026047183 - type: nauc_map_at_3_std value: 19.19524526218501 - type: nauc_map_at_5_diff1 value: 8.542407271574234 - type: nauc_map_at_5_max value: 21.96571612444345 - type: nauc_map_at_5_std value: 19.562339269650337 - type: nauc_mrr_at_1000_diff1 value: 25.802609799300136 - type: nauc_mrr_at_1000_max value: 58.43325582968235 - type: nauc_mrr_at_1000_std value: 50.255275358008355 - type: nauc_mrr_at_100_diff1 value: 25.813430117639385 - type: nauc_mrr_at_100_max value: 58.440260343467386 - type: nauc_mrr_at_100_std value: 50.29567683948686 - type: nauc_mrr_at_10_diff1 value: 25.928537487080177 - type: nauc_mrr_at_10_max value: 58.624781011353356 - type: nauc_mrr_at_10_std value: 50.149362684438344 - type: nauc_mrr_at_1_diff1 value: 21.666838023145953 - type: nauc_mrr_at_1_max value: 48.08098031817834 - type: nauc_mrr_at_1_std value: 40.8447553136573 - type: nauc_mrr_at_20_diff1 value: 25.928537487080177 - type: nauc_mrr_at_20_max value: 58.624781011353356 - type: nauc_mrr_at_20_std value: 50.149362684438344 - type: nauc_mrr_at_3_diff1 value: 25.138742955770297 - type: nauc_mrr_at_3_max value: 58.15074557154154 - type: nauc_mrr_at_3_std value: 51.779060953730806 - type: nauc_mrr_at_5_diff1 value: 26.875742025667503 - type: nauc_mrr_at_5_max value: 58.047613429416444 - type: nauc_mrr_at_5_std value: 50.568212600486305 - type: nauc_ndcg_at_1000_diff1 value: 25.84434059592612 - type: 
nauc_ndcg_at_1000_max value: 66.41709602981231 - type: nauc_ndcg_at_1000_std value: 59.9002587130517 - type: nauc_ndcg_at_100_diff1 value: 18.157725071883185 - type: nauc_ndcg_at_100_max value: 62.507605378960406 - type: nauc_ndcg_at_100_std value: 55.33886660559506 - type: nauc_ndcg_at_10_diff1 value: 2.7415914051866124 - type: nauc_ndcg_at_10_max value: 55.388067307310365 - type: nauc_ndcg_at_10_std value: 42.82986062576109 - type: nauc_ndcg_at_1_diff1 value: 13.377021588510054 - type: nauc_ndcg_at_1_max value: 29.386074496105792 - type: nauc_ndcg_at_1_std value: 11.408077029215445 - type: nauc_ndcg_at_20_diff1 value: 8.326148156867626 - type: nauc_ndcg_at_20_max value: 60.697045867707686 - type: nauc_ndcg_at_20_std value: 47.99092577615938 - type: nauc_ndcg_at_3_diff1 value: 6.989449673124856 - type: nauc_ndcg_at_3_max value: 44.51332216957301 - type: nauc_ndcg_at_3_std value: 35.18696517451071 - type: nauc_ndcg_at_5_diff1 value: 4.060160624786844 - type: nauc_ndcg_at_5_max value: 51.10253293874687 - type: nauc_ndcg_at_5_std value: 39.070683399060684 - type: nauc_precision_at_1000_diff1 value: 9.50382587741955 - type: nauc_precision_at_1000_max value: 60.62439119757783 - type: nauc_precision_at_1000_std value: 56.83454176343064 - type: nauc_precision_at_100_diff1 value: 11.35723114967806 - type: nauc_precision_at_100_max value: 70.3418790427346 - type: nauc_precision_at_100_std value: 62.88335992120394 - type: nauc_precision_at_10_diff1 value: 7.871699282025061 - type: nauc_precision_at_10_max value: 66.95358829993387 - type: nauc_precision_at_10_std value: 56.28948117426722 - type: nauc_precision_at_1_diff1 value: 21.666838023145953 - type: nauc_precision_at_1_max value: 48.08098031817834 - type: nauc_precision_at_1_std value: 40.8447553136573 - type: nauc_precision_at_20_diff1 value: 11.207357488330521 - type: nauc_precision_at_20_max value: 66.93510334175923 - type: nauc_precision_at_20_std value: 57.866274868020305 - type: nauc_precision_at_3_diff1 value: 17.425252729248836 - type: nauc_precision_at_3_max value: 64.29856122616874 - type: nauc_precision_at_3_std value: 58.870231714525175 - type: nauc_precision_at_5_diff1 value: 12.645874392385444 - type: nauc_precision_at_5_max value: 66.57471189825266 - type: nauc_precision_at_5_std value: 56.0176748292857 - type: nauc_recall_at_1000_diff1 value: 42.119098502046924 - type: nauc_recall_at_1000_max value: 61.01949254784126 - type: nauc_recall_at_1000_std value: 55.85918701646594 - type: nauc_recall_at_100_diff1 value: 30.70499534789799 - type: nauc_recall_at_100_max value: 50.83303853714478 - type: nauc_recall_at_100_std value: 44.860207634175566 - type: nauc_recall_at_10_diff1 value: 10.291765213783561 - type: nauc_recall_at_10_max value: 21.84109268402226 - type: nauc_recall_at_10_std value: 17.09252188823626 - type: nauc_recall_at_1_diff1 value: 18.949682046838028 - type: nauc_recall_at_1_max value: 8.398832694812572 - type: nauc_recall_at_1_std value: 9.158568546378119 - type: nauc_recall_at_20_diff1 value: 15.927897890117283 - type: nauc_recall_at_20_max value: 23.747256386884786 - type: nauc_recall_at_20_std value: 18.662178810158274 - type: nauc_recall_at_3_diff1 value: 20.989284202667942 - type: nauc_recall_at_3_max value: 17.28520815922331 - type: nauc_recall_at_3_std value: 15.233570921492792 - type: nauc_recall_at_5_diff1 value: 14.983470564779777 - type: nauc_recall_at_5_max value: 17.209528818696583 - type: nauc_recall_at_5_std value: 11.479848062057618 - type: ndcg_at_1 value: 38.372 - type: ndcg_at_10 value: 32.695 - 
type: ndcg_at_100 value: 31.230000000000004 - type: ndcg_at_1000 value: 39.649 - type: ndcg_at_20 value: 31.447000000000003 - type: ndcg_at_3 value: 34.361999999999995 - type: ndcg_at_5 value: 33.702 - type: precision_at_1 value: 58.14 - type: precision_at_10 value: 43.721 - type: precision_at_100 value: 20.628 - type: precision_at_1000 value: 4.4159999999999995 - type: precision_at_20 value: 37.208999999999996 - type: precision_at_3 value: 51.163000000000004 - type: precision_at_5 value: 48.372 - type: recall_at_1 value: 1.0 - type: recall_at_10 value: 7.081999999999999 - type: recall_at_100 value: 26.572000000000003 - type: recall_at_1000 value: 50.20400000000001 - type: recall_at_20 value: 11.671 - type: recall_at_3 value: 2.596 - type: recall_at_5 value: 4.156 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 86.19471044231645 - type: f1 value: 85.27501322780665 - type: f1_weighted value: 86.42253471732333 - type: main_score value: 86.19471044231645 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 60.40355677154583 - type: f1 value: 42.23245201423625 - type: f1_weighted value: 64.52904706608977 - type: main_score value: 60.40355677154583 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 4672e20407010da34463acc759c162ca9734bca6 metrics: - type: accuracy value: 60.86079354404842 - type: f1 value: 59.86002295886139 - type: f1_weighted value: 61.600352700483704 - type: main_score value: 60.86079354404842 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8 metrics: - type: accuracy value: 66.39878950907867 - type: f1 value: 66.08148670134351 - type: f1_weighted value: 66.75308329528572 - type: main_score value: 66.39878950907867 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P (default) type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: main_score value: 25.58908806090526 - type: v_measure value: 25.58908806090526 - type: v_measure_std value: 1.1067644908239649 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S (default) type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: main_score value: 20.27612317880978 - type: v_measure value: 20.27612317880978 - type: v_measure_std value: 1.4731971616119242 - task: type: Reranking dataset: name: MTEB MindSmallReranking (default) type: mteb/mind_small config: default split: test revision: 59042f120c80e8afa9cdbb224f67076cec0fc9a7 metrics: - type: main_score value: 28.939878341266613 - type: map value: 28.939878341266613 - type: mrr value: 29.639163441180692 - type: nAUC_map_diff1 value: 8.8793579187874 - type: nAUC_map_max value: -22.869629909935735 - type: nAUC_map_std value: -9.879777666990593 - type: nAUC_mrr_diff1 value: 9.219371253717831 - type: nAUC_mrr_max value: -17.019803510539926 - type: nAUC_mrr_std value: -6.648705434395337 - task: type: Retrieval dataset: name: MTEB NFCorpus (default) type: mteb/nfcorpus 
config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: main_score value: 22.552 - type: map_at_1 value: 3.4160000000000004 - type: map_at_10 value: 7.103 - type: map_at_100 value: 8.921999999999999 - type: map_at_1000 value: 9.976 - type: map_at_20 value: 7.807 - type: map_at_3 value: 5.3740000000000006 - type: map_at_5 value: 6.140000000000001 - type: mrr_at_1 value: 31.57894736842105 - type: mrr_at_10 value: 40.17679001425131 - type: mrr_at_100 value: 40.936950343260854 - type: mrr_at_1000 value: 40.99313849791527 - type: mrr_at_20 value: 40.532478199294225 - type: mrr_at_3 value: 37.87409700722393 - type: mrr_at_5 value: 39.22084623323012 - type: nauc_map_at_1000_diff1 value: 34.73224861867767 - type: nauc_map_at_1000_max value: 34.41772857333133 - type: nauc_map_at_1000_std value: 19.884774737807888 - type: nauc_map_at_100_diff1 value: 37.01728428035375 - type: nauc_map_at_100_max value: 33.414825195084916 - type: nauc_map_at_100_std value: 16.22181527030413 - type: nauc_map_at_10_diff1 value: 42.20565439584536 - type: nauc_map_at_10_max value: 30.666645971181296 - type: nauc_map_at_10_std value: 10.63902697253609 - type: nauc_map_at_1_diff1 value: 57.25535533467592 - type: nauc_map_at_1_max value: 25.0338718450961 - type: nauc_map_at_1_std value: 1.5109686532462137 - type: nauc_map_at_20_diff1 value: 39.971115426607945 - type: nauc_map_at_20_max value: 31.542560178492934 - type: nauc_map_at_20_std value: 12.413878932549483 - type: nauc_map_at_3_diff1 value: 46.15379275145193 - type: nauc_map_at_3_max value: 26.328980489303177 - type: nauc_map_at_3_std value: 5.10721452115037 - type: nauc_map_at_5_diff1 value: 46.44470524532154 - type: nauc_map_at_5_max value: 28.77917380032644 - type: nauc_map_at_5_std value: 7.726530419493592 - type: nauc_mrr_at_1000_diff1 value: 29.378170197712684 - type: nauc_mrr_at_1000_max value: 36.70254814804847 - type: nauc_mrr_at_1000_std value: 23.93411541050575 - type: nauc_mrr_at_100_diff1 value: 29.34620323004042 - type: nauc_mrr_at_100_max value: 36.70562654294673 - type: nauc_mrr_at_100_std value: 23.94090914510999 - type: nauc_mrr_at_10_diff1 value: 29.345134619677214 - type: nauc_mrr_at_10_max value: 36.92950804065547 - type: nauc_mrr_at_10_std value: 24.19628220924784 - type: nauc_mrr_at_1_diff1 value: 33.13330585053634 - type: nauc_mrr_at_1_max value: 30.618369537269775 - type: nauc_mrr_at_1_std value: 16.769007131224782 - type: nauc_mrr_at_20_diff1 value: 29.185086482257294 - type: nauc_mrr_at_20_max value: 36.68095260817287 - type: nauc_mrr_at_20_std value: 23.91780882920626 - type: nauc_mrr_at_3_diff1 value: 30.05197440329091 - type: nauc_mrr_at_3_max value: 35.953685760398685 - type: nauc_mrr_at_3_std value: 23.524957502759733 - type: nauc_mrr_at_5_diff1 value: 29.91745508269405 - type: nauc_mrr_at_5_max value: 37.18989075322827 - type: nauc_mrr_at_5_std value: 24.083241781489317 - type: nauc_ndcg_at_1000_diff1 value: 25.39233260296463 - type: nauc_ndcg_at_1000_max value: 40.177320929529856 - type: nauc_ndcg_at_1000_std value: 28.208928110945696 - type: nauc_ndcg_at_100_diff1 value: 24.552649164232033 - type: nauc_ndcg_at_100_max value: 36.60602229659139 - type: nauc_ndcg_at_100_std value: 26.029649121667582 - type: nauc_ndcg_at_10_diff1 value: 21.0338183814633 - type: nauc_ndcg_at_10_max value: 35.80460796981663 - type: nauc_ndcg_at_10_std value: 30.656647945229317 - type: nauc_ndcg_at_1_diff1 value: 33.341154674024686 - type: nauc_ndcg_at_1_max value: 28.871624645312167 - type: nauc_ndcg_at_1_std value: 
17.58191846191065 - type: nauc_ndcg_at_20_diff1 value: 22.07221292200904 - type: nauc_ndcg_at_20_max value: 34.677067256339555 - type: nauc_ndcg_at_20_std value: 29.462105630225466 - type: nauc_ndcg_at_3_diff1 value: 23.63792366113301 - type: nauc_ndcg_at_3_max value: 33.441000011335795 - type: nauc_ndcg_at_3_std value: 25.52723257395818 - type: nauc_ndcg_at_5_diff1 value: 22.845754952633527 - type: nauc_ndcg_at_5_max value: 36.01734895823619 - type: nauc_ndcg_at_5_std value: 28.519871008836635 - type: nauc_precision_at_1000_diff1 value: -11.759314026815046 - type: nauc_precision_at_1000_max value: 16.309973886456124 - type: nauc_precision_at_1000_std value: 42.37816169817523 - type: nauc_precision_at_100_diff1 value: -8.029656826154214 - type: nauc_precision_at_100_max value: 25.331471940731316 - type: nauc_precision_at_100_std value: 44.457235673630116 - type: nauc_precision_at_10_diff1 value: 3.4946893557140797 - type: nauc_precision_at_10_max value: 36.03413341771082 - type: nauc_precision_at_10_std value: 39.58686481472833 - type: nauc_precision_at_1_diff1 value: 33.13330585053634 - type: nauc_precision_at_1_max value: 30.618369537269775 - type: nauc_precision_at_1_std value: 16.769007131224782 - type: nauc_precision_at_20_diff1 value: -0.7015547908093809 - type: nauc_precision_at_20_max value: 31.678433625540826 - type: nauc_precision_at_20_std value: 41.91391013932311 - type: nauc_precision_at_3_diff1 value: 15.741259800786864 - type: nauc_precision_at_3_max value: 35.03514751856323 - type: nauc_precision_at_3_std value: 29.399412146388926 - type: nauc_precision_at_5_diff1 value: 10.656798267400632 - type: nauc_precision_at_5_max value: 38.47044503438441 - type: nauc_precision_at_5_std value: 34.345843373967256 - type: nauc_recall_at_1000_diff1 value: 12.345126052107041 - type: nauc_recall_at_1000_max value: 20.127378174130243 - type: nauc_recall_at_1000_std value: 11.325945503912399 - type: nauc_recall_at_100_diff1 value: 18.683876129399103 - type: nauc_recall_at_100_max value: 20.479854369545283 - type: nauc_recall_at_100_std value: 8.32322214131877 - type: nauc_recall_at_10_diff1 value: 32.78583976883686 - type: nauc_recall_at_10_max value: 26.26666599537088 - type: nauc_recall_at_10_std value: 10.25062932159794 - type: nauc_recall_at_1_diff1 value: 57.25535533467592 - type: nauc_recall_at_1_max value: 25.0338718450961 - type: nauc_recall_at_1_std value: 1.5109686532462137 - type: nauc_recall_at_20_diff1 value: 26.904578553813895 - type: nauc_recall_at_20_max value: 23.54014529084009 - type: nauc_recall_at_20_std value: 7.5106508677906305 - type: nauc_recall_at_3_diff1 value: 38.01326191292155 - type: nauc_recall_at_3_max value: 23.12794360178698 - type: nauc_recall_at_3_std value: 5.615974785235754 - type: nauc_recall_at_5_diff1 value: 40.0812862656233 - type: nauc_recall_at_5_max value: 26.49961615304253 - type: nauc_recall_at_5_std value: 8.623796778044191 - type: ndcg_at_1 value: 29.720999999999997 - type: ndcg_at_10 value: 22.552 - type: ndcg_at_100 value: 20.952 - type: ndcg_at_1000 value: 29.665999999999997 - type: ndcg_at_20 value: 20.879 - type: ndcg_at_3 value: 26.008 - type: ndcg_at_5 value: 24.254 - type: precision_at_1 value: 31.579 - type: precision_at_10 value: 16.904 - type: precision_at_100 value: 5.771 - type: precision_at_1000 value: 1.807 - type: precision_at_20 value: 12.508 - type: precision_at_3 value: 24.458 - type: precision_at_5 value: 20.867 - type: recall_at_1 value: 3.4160000000000004 - type: recall_at_10 value: 10.544 - type: recall_at_100 value: 
22.423000000000002 - type: recall_at_1000 value: 53.31399999999999 - type: recall_at_20 value: 13.254 - type: recall_at_3 value: 6.587 - type: recall_at_5 value: 8.004 - task: type: Retrieval dataset: name: MTEB NQ (default) type: mteb/nq config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: main_score value: 18.915000000000003 - type: map_at_1 value: 7.997999999999999 - type: map_at_10 value: 14.599 - type: map_at_100 value: 15.855 - type: map_at_1000 value: 15.956000000000001 - type: map_at_20 value: 15.304 - type: map_at_3 value: 12.084 - type: map_at_5 value: 13.406 - type: mrr_at_1 value: 9.154113557358054 - type: mrr_at_10 value: 16.148069212234912 - type: mrr_at_100 value: 17.328232730619938 - type: mrr_at_1000 value: 17.41416846980928 - type: mrr_at_20 value: 16.838894506604742 - type: mrr_at_3 value: 13.63460795674003 - type: mrr_at_5 value: 14.964271919660055 - type: nauc_map_at_1000_diff1 value: 15.90713522176542 - type: nauc_map_at_1000_max value: 14.058054318819337 - type: nauc_map_at_1000_std value: 1.71764752961226 - type: nauc_map_at_100_diff1 value: 15.888721447450294 - type: nauc_map_at_100_max value: 14.026270658328363 - type: nauc_map_at_100_std value: 1.6586834999299935 - type: nauc_map_at_10_diff1 value: 15.995890749381529 - type: nauc_map_at_10_max value: 13.337010545059295 - type: nauc_map_at_10_std value: 0.5367791374952049 - type: nauc_map_at_1_diff1 value: 21.239259347615892 - type: nauc_map_at_1_max value: 9.598569870159421 - type: nauc_map_at_1_std value: -4.583838315593807 - type: nauc_map_at_20_diff1 value: 15.956969208836384 - type: nauc_map_at_20_max value: 13.616399837852313 - type: nauc_map_at_20_std value: 1.0324543402143036 - type: nauc_map_at_3_diff1 value: 16.676728348589776 - type: nauc_map_at_3_max value: 12.451042785812467 - type: nauc_map_at_3_std value: -0.789763741932485 - type: nauc_map_at_5_diff1 value: 16.447244113302865 - type: nauc_map_at_5_max value: 12.95749524270521 - type: nauc_map_at_5_std value: -0.3251230711695382 - type: nauc_mrr_at_1000_diff1 value: 15.073118452813691 - type: nauc_mrr_at_1000_max value: 13.170705283599721 - type: nauc_mrr_at_1000_std value: 2.448752870133495 - type: nauc_mrr_at_100_diff1 value: 15.061106271314383 - type: nauc_mrr_at_100_max value: 13.158638767625122 - type: nauc_mrr_at_100_std value: 2.4233787157245503 - type: nauc_mrr_at_10_diff1 value: 15.110139028526653 - type: nauc_mrr_at_10_max value: 12.58107503299545 - type: nauc_mrr_at_10_std value: 1.6059860211244354 - type: nauc_mrr_at_1_diff1 value: 20.07347864038319 - type: nauc_mrr_at_1_max value: 8.753798204494833 - type: nauc_mrr_at_1_std value: -3.0954712020399757 - type: nauc_mrr_at_20_diff1 value: 15.15772623183269 - type: nauc_mrr_at_20_max value: 12.845970575412016 - type: nauc_mrr_at_20_std value: 1.9367090919425642 - type: nauc_mrr_at_3_diff1 value: 15.707253564919574 - type: nauc_mrr_at_3_max value: 11.766708904867357 - type: nauc_mrr_at_3_std value: 0.6897852064415965 - type: nauc_mrr_at_5_diff1 value: 15.314729521818851 - type: nauc_mrr_at_5_max value: 12.167084762521691 - type: nauc_mrr_at_5_std value: 0.9644311984548367 - type: nauc_ndcg_at_1000_diff1 value: 14.057042359223487 - type: nauc_ndcg_at_1000_max value: 17.98877056056709 - type: nauc_ndcg_at_1000_std value: 8.789944358950999 - type: nauc_ndcg_at_100_diff1 value: 13.967305121920074 - type: nauc_ndcg_at_100_max value: 17.266067744752988 - type: nauc_ndcg_at_100_std value: 7.584677984382887 - type: nauc_ndcg_at_10_diff1 value: 
14.387617437927885 - type: nauc_ndcg_at_10_max value: 14.292926918424087 - type: nauc_ndcg_at_10_std value: 2.627819024277558 - type: nauc_ndcg_at_1_diff1 value: 20.07347864038319 - type: nauc_ndcg_at_1_max value: 8.753798204494833 - type: nauc_ndcg_at_1_std value: -3.0954712020399757 - type: nauc_ndcg_at_20_diff1 value: 14.451094553630634 - type: nauc_ndcg_at_20_max value: 15.058281815059955 - type: nauc_ndcg_at_20_std value: 3.92438497685508 - type: nauc_ndcg_at_3_diff1 value: 15.559724909977096 - type: nauc_ndcg_at_3_max value: 12.841571395325172 - type: nauc_ndcg_at_3_std value: 0.43222338262037896 - type: nauc_ndcg_at_5_diff1 value: 15.115376106401095 - type: nauc_ndcg_at_5_max value: 13.49029255935586 - type: nauc_ndcg_at_5_std value: 0.9957644044539958 - type: nauc_precision_at_1000_diff1 value: 2.506720981269311 - type: nauc_precision_at_1000_max value: 23.273192823379606 - type: nauc_precision_at_1000_std value: 31.322951491635376 - type: nauc_precision_at_100_diff1 value: 6.794973231645045 - type: nauc_precision_at_100_max value: 22.50730818192606 - type: nauc_precision_at_100_std value: 24.01996431227184 - type: nauc_precision_at_10_diff1 value: 11.313840611816433 - type: nauc_precision_at_10_max value: 16.201007398196975 - type: nauc_precision_at_10_std value: 8.055477231883994 - type: nauc_precision_at_1_diff1 value: 20.07347864038319 - type: nauc_precision_at_1_max value: 8.753798204494833 - type: nauc_precision_at_1_std value: -3.0954712020399757 - type: nauc_precision_at_20_diff1 value: 10.763998085353158 - type: nauc_precision_at_20_max value: 17.84734827690337 - type: nauc_precision_at_20_std value: 11.985866110578096 - type: nauc_precision_at_3_diff1 value: 13.162794424533503 - type: nauc_precision_at_3_max value: 14.00384518428568 - type: nauc_precision_at_3_std value: 3.7122526085280922 - type: nauc_precision_at_5_diff1 value: 12.792222404502432 - type: nauc_precision_at_5_max value: 15.150586449094511 - type: nauc_precision_at_5_std value: 4.479952453320129 - type: nauc_recall_at_1000_diff1 value: 7.35043294716119 - type: nauc_recall_at_1000_max value: 39.878077152939944 - type: nauc_recall_at_1000_std value: 45.31518161593176 - type: nauc_recall_at_100_diff1 value: 9.639738532299598 - type: nauc_recall_at_100_max value: 26.40449142273476 - type: nauc_recall_at_100_std value: 23.47399393925123 - type: nauc_recall_at_10_diff1 value: 11.382215143690553 - type: nauc_recall_at_10_max value: 16.055795348391445 - type: nauc_recall_at_10_std value: 5.79723301276639 - type: nauc_recall_at_1_diff1 value: 21.239259347615892 - type: nauc_recall_at_1_max value: 9.598569870159421 - type: nauc_recall_at_1_std value: -4.583838315593807 - type: nauc_recall_at_20_diff1 value: 11.767634027966208 - type: nauc_recall_at_20_max value: 17.65757486794198 - type: nauc_recall_at_20_std value: 8.711778307017545 - type: nauc_recall_at_3_diff1 value: 13.489144007229998 - type: nauc_recall_at_3_max value: 13.830969102485993 - type: nauc_recall_at_3_std value: 1.9486438053847617 - type: nauc_recall_at_5_diff1 value: 12.68519694626271 - type: nauc_recall_at_5_max value: 14.44963314285102 - type: nauc_recall_at_5_std value: 2.5866683732307676 - type: ndcg_at_1 value: 9.154 - type: ndcg_at_10 value: 18.915000000000003 - type: ndcg_at_100 value: 25.233 - type: ndcg_at_1000 value: 27.966 - type: ndcg_at_20 value: 21.413 - type: ndcg_at_3 value: 13.757 - type: ndcg_at_5 value: 16.105 - type: precision_at_1 value: 9.154 - type: precision_at_10 value: 3.618 - type: precision_at_100 value: 0.718 - type: 
precision_at_1000 value: 0.098 - type: precision_at_20 value: 2.384 - type: precision_at_3 value: 6.6049999999999995 - type: precision_at_5 value: 5.255 - type: recall_at_1 value: 7.997999999999999 - type: recall_at_10 value: 30.880999999999997 - type: recall_at_100 value: 60.07900000000001 - type: recall_at_1000 value: 80.931 - type: recall_at_20 value: 40.305 - type: recall_at_3 value: 17.065 - type: recall_at_5 value: 22.581 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval (default) type: mteb/quora config: default split: test revision: e4e08e0b7dbe3c8700f0daef558ff32256715259 metrics: - type: main_score value: 80.40899999999999 - type: map_at_1 value: 62.967 - type: map_at_10 value: 75.729 - type: map_at_100 value: 76.485 - type: map_at_1000 value: 76.518 - type: map_at_20 value: 76.19500000000001 - type: map_at_3 value: 72.648 - type: map_at_5 value: 74.535 - type: mrr_at_1 value: 72.36 - type: mrr_at_10 value: 79.80928174603142 - type: mrr_at_100 value: 80.04810915016965 - type: mrr_at_1000 value: 80.05404282434492 - type: mrr_at_20 value: 79.9752046927097 - type: mrr_at_3 value: 78.26999999999964 - type: mrr_at_5 value: 79.25499999999943 - type: nauc_map_at_1000_diff1 value: 71.44734653597175 - type: nauc_map_at_1000_max value: 38.03163484121072 - type: nauc_map_at_1000_std value: -20.01135273018076 - type: nauc_map_at_100_diff1 value: 71.4492633463356 - type: nauc_map_at_100_max value: 38.02496420233032 - type: nauc_map_at_100_std value: -20.028147392038477 - type: nauc_map_at_10_diff1 value: 71.43600060158558 - type: nauc_map_at_10_max value: 37.60143505549297 - type: nauc_map_at_10_std value: -20.750601305197694 - type: nauc_map_at_1_diff1 value: 74.10053436229589 - type: nauc_map_at_1_max value: 31.634043582921358 - type: nauc_map_at_1_std value: -19.10233383904706 - type: nauc_map_at_20_diff1 value: 71.47215135391012 - type: nauc_map_at_20_max value: 37.93060745861974 - type: nauc_map_at_20_std value: -20.35195519369371 - type: nauc_map_at_3_diff1 value: 71.7447226910883 - type: nauc_map_at_3_max value: 35.75837853087979 - type: nauc_map_at_3_std value: -21.45770967497823 - type: nauc_map_at_5_diff1 value: 71.44627032768398 - type: nauc_map_at_5_max value: 36.85968036780219 - type: nauc_map_at_5_std value: -21.27009284572053 - type: nauc_mrr_at_1000_diff1 value: 72.93393755872036 - type: nauc_mrr_at_1000_max value: 41.062199520990944 - type: nauc_mrr_at_1000_std value: -18.214450339301465 - type: nauc_mrr_at_100_diff1 value: 72.9338954698279 - type: nauc_mrr_at_100_max value: 41.06739041598209 - type: nauc_mrr_at_100_std value: -18.21185309111013 - type: nauc_mrr_at_10_diff1 value: 72.86852331747343 - type: nauc_mrr_at_10_max value: 41.010071785297725 - type: nauc_mrr_at_10_std value: -18.32669190496378 - type: nauc_mrr_at_1_diff1 value: 74.40459799903888 - type: nauc_mrr_at_1_max value: 40.62517113685782 - type: nauc_mrr_at_1_std value: -17.381816260527614 - type: nauc_mrr_at_20_diff1 value: 72.92460646367282 - type: nauc_mrr_at_20_max value: 41.115425092838365 - type: nauc_mrr_at_20_std value: -18.21918120739131 - type: nauc_mrr_at_3_diff1 value: 72.68548371501298 - type: nauc_mrr_at_3_max value: 40.81307478219739 - type: nauc_mrr_at_3_std value: -18.371203295008456 - type: nauc_mrr_at_5_diff1 value: 72.64265755168549 - type: nauc_mrr_at_5_max value: 40.85995329309679 - type: nauc_mrr_at_5_std value: -18.42022323880988 - type: nauc_ndcg_at_1000_diff1 value: 71.50579103558329 - type: nauc_ndcg_at_1000_max value: 39.642475328813454 - type: nauc_ndcg_at_1000_std value: 
-18.693250212834265 - type: nauc_ndcg_at_100_diff1 value: 71.45580845115161 - type: nauc_ndcg_at_100_max value: 39.61743054199241 - type: nauc_ndcg_at_100_std value: -18.62286770657828 - type: nauc_ndcg_at_10_diff1 value: 71.00284978712129 - type: nauc_ndcg_at_10_max value: 38.79840796126045 - type: nauc_ndcg_at_10_std value: -20.209178989425656 - type: nauc_ndcg_at_1_diff1 value: 74.38548357261384 - type: nauc_ndcg_at_1_max value: 40.74477066701173 - type: nauc_ndcg_at_1_std value: -17.376529142164348 - type: nauc_ndcg_at_20_diff1 value: 71.3476142044932 - type: nauc_ndcg_at_20_max value: 39.486308359587284 - type: nauc_ndcg_at_20_std value: -19.560180462398325 - type: nauc_ndcg_at_3_diff1 value: 70.8418446759826 - type: nauc_ndcg_at_3_max value: 37.72358525783554 - type: nauc_ndcg_at_3_std value: -20.343206910586 - type: nauc_ndcg_at_5_diff1 value: 70.63237002211079 - type: nauc_ndcg_at_5_max value: 38.03210706086737 - type: nauc_ndcg_at_5_std value: -20.688928824502135 - type: nauc_precision_at_1000_diff1 value: -35.84821487446952 - type: nauc_precision_at_1000_max value: -4.43766549932842 - type: nauc_precision_at_1000_std value: 20.30619875663738 - type: nauc_precision_at_100_diff1 value: -33.36156532327729 - type: nauc_precision_at_100_max value: -1.7205109382838484 - type: nauc_precision_at_100_std value: 19.41069837765925 - type: nauc_precision_at_10_diff1 value: -19.666458153183846 - type: nauc_precision_at_10_max value: 8.194727061983723 - type: nauc_precision_at_10_std value: 10.076122753113907 - type: nauc_precision_at_1_diff1 value: 74.38548357261384 - type: nauc_precision_at_1_max value: 40.74477066701173 - type: nauc_precision_at_1_std value: -17.376529142164348 - type: nauc_precision_at_20_diff1 value: -26.254198884165852 - type: nauc_precision_at_20_max value: 4.167939714564809 - type: nauc_precision_at_20_std value: 14.33558520190413 - type: nauc_precision_at_3_diff1 value: 8.46532245458286 - type: nauc_precision_at_3_max value: 19.343881650984773 - type: nauc_precision_at_3_std value: -2.766117052816905 - type: nauc_precision_at_5_diff1 value: -7.332465588908396 - type: nauc_precision_at_5_max value: 13.620527724391138 - type: nauc_precision_at_5_std value: 3.343253035116599 - type: nauc_recall_at_1000_diff1 value: 62.557945868716914 - type: nauc_recall_at_1000_max value: 52.338193380713605 - type: nauc_recall_at_1000_std value: 28.00992076040417 - type: nauc_recall_at_100_diff1 value: 63.813868323587684 - type: nauc_recall_at_100_max value: 41.59741710530504 - type: nauc_recall_at_100_std value: -2.6546597042763684 - type: nauc_recall_at_10_diff1 value: 64.10173787073887 - type: nauc_recall_at_10_max value: 35.91066436377341 - type: nauc_recall_at_10_std value: -22.93842362739045 - type: nauc_recall_at_1_diff1 value: 74.10053436229589 - type: nauc_recall_at_1_max value: 31.634043582921358 - type: nauc_recall_at_1_std value: -19.10233383904706 - type: nauc_recall_at_20_diff1 value: 64.07556856941558 - type: nauc_recall_at_20_max value: 39.75721856827878 - type: nauc_recall_at_20_std value: -18.189752584583083 - type: nauc_recall_at_3_diff1 value: 67.14069997451308 - type: nauc_recall_at_3_max value: 33.01729392815289 - type: nauc_recall_at_3_std value: -23.328876142396993 - type: nauc_recall_at_5_diff1 value: 64.93333694440804 - type: nauc_recall_at_5_max value: 33.92919891592357 - type: nauc_recall_at_5_std value: -24.261954402489426 - type: ndcg_at_1 value: 72.37 - type: ndcg_at_10 value: 80.40899999999999 - type: ndcg_at_100 value: 82.49499999999999 - type: 
ndcg_at_1000 value: 82.87299999999999 - type: ndcg_at_20 value: 81.42 - type: ndcg_at_3 value: 76.64 - type: ndcg_at_5 value: 78.61200000000001 - type: precision_at_1 value: 72.37 - type: precision_at_10 value: 12.205 - type: precision_at_100 value: 1.456 - type: precision_at_1000 value: 0.155 - type: precision_at_20 value: 6.555 - type: precision_at_3 value: 33.233000000000004 - type: precision_at_5 value: 22.094 - type: recall_at_1 value: 62.967 - type: recall_at_10 value: 89.588 - type: recall_at_100 value: 97.436 - type: recall_at_1000 value: 99.57000000000001 - type: recall_at_20 value: 93.014 - type: recall_at_3 value: 78.89 - type: recall_at_5 value: 84.263 - task: type: Clustering dataset: name: MTEB RedditClustering (default) type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: main_score value: 29.825273344892018 - type: v_measure value: 29.825273344892018 - type: v_measure_std value: 3.413163578106075 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P (default) type: mteb/reddit-clustering-p2p config: default split: test revision: 385e3cb46b4cfa89021f56c4380204149d0efe33 metrics: - type: main_score value: 42.0392645090109 - type: v_measure value: 42.0392645090109 - type: v_measure_std value: 10.73024571766896 - task: type: Retrieval dataset: name: MTEB SCIDOCS (default) type: mteb/scidocs config: default split: test revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88 metrics: - type: main_score value: 12.253 - type: map_at_1 value: 2.888 - type: map_at_10 value: 6.783 - type: map_at_100 value: 8.051 - type: map_at_1000 value: 8.296000000000001 - type: map_at_20 value: 7.371999999999999 - type: map_at_3 value: 4.928 - type: map_at_5 value: 5.778 - type: mrr_at_1 value: 14.2 - type: mrr_at_10 value: 21.927499999999988 - type: mrr_at_100 value: 23.063614104255763 - type: mrr_at_1000 value: 23.151849009942037 - type: mrr_at_20 value: 22.56615860189003 - type: mrr_at_3 value: 19.100000000000012 - type: mrr_at_5 value: 20.61 - type: nauc_map_at_1000_diff1 value: 14.182568931212458 - type: nauc_map_at_1000_max value: 11.87242323440983 - type: nauc_map_at_1000_std value: 10.987284359684235 - type: nauc_map_at_100_diff1 value: 14.193093675098092 - type: nauc_map_at_100_max value: 11.599572888739685 - type: nauc_map_at_100_std value: 10.475220881514295 - type: nauc_map_at_10_diff1 value: 14.034910844934629 - type: nauc_map_at_10_max value: 10.051858862026663 - type: nauc_map_at_10_std value: 7.9164358825456995 - type: nauc_map_at_1_diff1 value: 22.17139483822647 - type: nauc_map_at_1_max value: 8.448516282439247 - type: nauc_map_at_1_std value: 4.088360090256417 - type: nauc_map_at_20_diff1 value: 14.51586760670648 - type: nauc_map_at_20_max value: 10.875650485023241 - type: nauc_map_at_20_std value: 8.80606207048993 - type: nauc_map_at_3_diff1 value: 16.108107384612087 - type: nauc_map_at_3_max value: 7.805819214793363 - type: nauc_map_at_3_std value: 3.166150628019278 - type: nauc_map_at_5_diff1 value: 15.609474481800204 - type: nauc_map_at_5_max value: 8.202684739141446 - type: nauc_map_at_5_std value: 4.848998167169013 - type: nauc_mrr_at_1000_diff1 value: 16.617525087558985 - type: nauc_mrr_at_1000_max value: 10.455039784741615 - type: nauc_mrr_at_1000_std value: 7.848422088275981 - type: nauc_mrr_at_100_diff1 value: 16.63627308215634 - type: nauc_mrr_at_100_max value: 10.448616797013655 - type: nauc_mrr_at_100_std value: 7.863445451737694 - type: nauc_mrr_at_10_diff1 value: 16.20708330936853 - type: 
nauc_mrr_at_10_max value: 10.280022855557586 - type: nauc_mrr_at_10_std value: 7.505175583320494 - type: nauc_mrr_at_1_diff1 value: 21.642236003980578 - type: nauc_mrr_at_1_max value: 8.52819778549444 - type: nauc_mrr_at_1_std value: 4.7558963204944575 - type: nauc_mrr_at_20_diff1 value: 16.614102679900437 - type: nauc_mrr_at_20_max value: 10.404962178769987 - type: nauc_mrr_at_20_std value: 7.74497246518132 - type: nauc_mrr_at_3_diff1 value: 17.244670863658367 - type: nauc_mrr_at_3_max value: 9.74949752067465 - type: nauc_mrr_at_3_std value: 6.517516700262418 - type: nauc_mrr_at_5_diff1 value: 16.597545154527417 - type: nauc_mrr_at_5_max value: 9.659314909684689 - type: nauc_mrr_at_5_std value: 6.200070025395718 - type: nauc_ndcg_at_1000_diff1 value: 13.152063056797244 - type: nauc_ndcg_at_1000_max value: 15.447792612691421 - type: nauc_ndcg_at_1000_std value: 18.992760141528446 - type: nauc_ndcg_at_100_diff1 value: 13.072230609859858 - type: nauc_ndcg_at_100_max value: 14.016812909904536 - type: nauc_ndcg_at_100_std value: 15.943278161477101 - type: nauc_ndcg_at_10_diff1 value: 12.812207960632893 - type: nauc_ndcg_at_10_max value: 11.691288481527394 - type: nauc_ndcg_at_10_std value: 10.155068868552386 - type: nauc_ndcg_at_1_diff1 value: 21.642236003980578 - type: nauc_ndcg_at_1_max value: 8.52819778549444 - type: nauc_ndcg_at_1_std value: 4.7558963204944575 - type: nauc_ndcg_at_20_diff1 value: 13.810532975146646 - type: nauc_ndcg_at_20_max value: 12.636202272299846 - type: nauc_ndcg_at_20_std value: 11.707685287793824 - type: nauc_ndcg_at_3_diff1 value: 15.496505430297228 - type: nauc_ndcg_at_3_max value: 8.909406009315223 - type: nauc_ndcg_at_3_std value: 4.93706512584431 - type: nauc_ndcg_at_5_diff1 value: 14.723191457561866 - type: nauc_ndcg_at_5_max value: 9.104793724082013 - type: nauc_ndcg_at_5_std value: 5.922930997187946 - type: nauc_precision_at_1000_diff1 value: 7.8489167991593805 - type: nauc_precision_at_1000_max value: 18.26907219363144 - type: nauc_precision_at_1000_std value: 31.77288326543712 - type: nauc_precision_at_100_diff1 value: 8.245950132869769 - type: nauc_precision_at_100_max value: 16.265649997550394 - type: nauc_precision_at_100_std value: 24.558860546717327 - type: nauc_precision_at_10_diff1 value: 8.500463072198098 - type: nauc_precision_at_10_max value: 14.314584519336309 - type: nauc_precision_at_10_std value: 14.679159921019563 - type: nauc_precision_at_1_diff1 value: 21.642236003980578 - type: nauc_precision_at_1_max value: 8.52819778549444 - type: nauc_precision_at_1_std value: 4.7558963204944575 - type: nauc_precision_at_20_diff1 value: 10.588135856629089 - type: nauc_precision_at_20_max value: 15.06828548759572 - type: nauc_precision_at_20_std value: 16.674200927559312 - type: nauc_precision_at_3_diff1 value: 13.410961698561563 - type: nauc_precision_at_3_max value: 9.244832586270592 - type: nauc_precision_at_3_std value: 4.889153531043075 - type: nauc_precision_at_5_diff1 value: 11.72677687134605 - type: nauc_precision_at_5_max value: 9.199481058013285 - type: nauc_precision_at_5_std value: 6.779996046649549 - type: nauc_recall_at_1000_diff1 value: 7.545697390524447 - type: nauc_recall_at_1000_max value: 19.044567411069345 - type: nauc_recall_at_1000_std value: 32.05107412714431 - type: nauc_recall_at_100_diff1 value: 8.304643594720291 - type: nauc_recall_at_100_max value: 16.528675326894266 - type: nauc_recall_at_100_std value: 24.542570307483576 - type: nauc_recall_at_10_diff1 value: 8.75742794104925 - type: nauc_recall_at_10_max value: 
14.214138183257088 - type: nauc_recall_at_10_std value: 14.405821458762851 - type: nauc_recall_at_1_diff1 value: 22.17139483822647 - type: nauc_recall_at_1_max value: 8.448516282439247 - type: nauc_recall_at_1_std value: 4.088360090256417 - type: nauc_recall_at_20_diff1 value: 10.764515099695727 - type: nauc_recall_at_20_max value: 14.962914794654061 - type: nauc_recall_at_20_std value: 16.493144394882123 - type: nauc_recall_at_3_diff1 value: 13.639624586343633 - type: nauc_recall_at_3_max value: 9.229738836591675 - type: nauc_recall_at_3_std value: 4.651985338466959 - type: nauc_recall_at_5_diff1 value: 12.033971364395832 - type: nauc_recall_at_5_max value: 9.334298843786886 - type: nauc_recall_at_5_std value: 6.587498761472439 - type: ndcg_at_1 value: 14.2 - type: ndcg_at_10 value: 12.253 - type: ndcg_at_100 value: 18.186 - type: ndcg_at_1000 value: 23.221 - type: ndcg_at_20 value: 14.122000000000002 - type: ndcg_at_3 value: 11.325000000000001 - type: ndcg_at_5 value: 9.911 - type: precision_at_1 value: 14.2 - type: precision_at_10 value: 6.47 - type: precision_at_100 value: 1.518 - type: precision_at_1000 value: 0.27299999999999996 - type: precision_at_20 value: 4.315 - type: precision_at_3 value: 10.5 - type: precision_at_5 value: 8.66 - type: recall_at_1 value: 2.888 - type: recall_at_10 value: 13.113 - type: recall_at_100 value: 30.79 - type: recall_at_1000 value: 55.47 - type: recall_at_20 value: 17.477 - type: recall_at_3 value: 6.388000000000001 - type: recall_at_5 value: 8.793 - task: type: STS dataset: name: MTEB SICK-R (default) type: mteb/sickr-sts config: default split: test revision: 20a6d6f312dd54037fe07a32d58e5e168867909d metrics: - type: cosine_pearson value: 74.47186624461347 - type: cosine_spearman value: 63.91365796295238 - type: euclidean_pearson value: 64.47379282614025 - type: euclidean_spearman value: 57.952313105909546 - type: main_score value: 63.91365796295238 - type: manhattan_pearson value: 63.78641984214034 - type: manhattan_spearman value: 58.1628493915856 - task: type: Reranking dataset: name: MTEB SciDocsRR (default) type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: main_score value: 70.68076882532134 - type: map value: 70.68076882532134 - type: mrr value: 89.99516188241678 - type: nAUC_map_diff1 value: 10.122594085098678 - type: nAUC_map_max value: 54.78827936225146 - type: nAUC_map_std value: 66.34579346833124 - type: nAUC_mrr_diff1 value: 49.215609105523825 - type: nAUC_mrr_max value: 76.08848787865979 - type: nAUC_mrr_std value: 72.73263647015558 - task: type: Retrieval dataset: name: MTEB SciFact (default) type: mteb/scifact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: main_score value: 48.231 - type: map_at_1 value: 34.721999999999994 - type: map_at_10 value: 43.265 - type: map_at_100 value: 44.289 - type: map_at_1000 value: 44.345 - type: map_at_20 value: 43.855 - type: map_at_3 value: 40.176 - type: map_at_5 value: 42.095 - type: mrr_at_1 value: 36.666666666666664 - type: mrr_at_10 value: 44.615079365079346 - type: mrr_at_100 value: 45.52478310480785 - type: mrr_at_1000 value: 45.56825745517574 - type: mrr_at_20 value: 45.132147618177 - type: mrr_at_3 value: 41.833333333333314 - type: mrr_at_5 value: 43.583333333333314 - type: nauc_map_at_1000_diff1 value: 53.57155702970847 - type: nauc_map_at_1000_max value: 37.553228317905734 - type: nauc_map_at_1000_std value: -6.952334017204984 - type: nauc_map_at_100_diff1 value: 
53.57075326773955 - type: nauc_map_at_100_max value: 37.55176603615736 - type: nauc_map_at_100_std value: -6.948451471419115 - type: nauc_map_at_10_diff1 value: 53.268386251103706 - type: nauc_map_at_10_max value: 37.351400478599736 - type: nauc_map_at_10_std value: -7.379738112714526 - type: nauc_map_at_1_diff1 value: 57.56838727422756 - type: nauc_map_at_1_max value: 33.91693407903183 - type: nauc_map_at_1_std value: -12.559053818648572 - type: nauc_map_at_20_diff1 value: 53.429671187841 - type: nauc_map_at_20_max value: 37.55936524741779 - type: nauc_map_at_20_std value: -6.880881015953415 - type: nauc_map_at_3_diff1 value: 54.458437868703605 - type: nauc_map_at_3_max value: 36.81445939308554 - type: nauc_map_at_3_std value: -8.231513775582439 - type: nauc_map_at_5_diff1 value: 53.33057094321079 - type: nauc_map_at_5_max value: 37.195578868838275 - type: nauc_map_at_5_std value: -7.749307309217592 - type: nauc_mrr_at_1000_diff1 value: 54.34857273810341 - type: nauc_mrr_at_1000_max value: 38.826755160525714 - type: nauc_mrr_at_1000_std value: -3.129730482959875 - type: nauc_mrr_at_100_diff1 value: 54.32917120301528 - type: nauc_mrr_at_100_max value: 38.84753102337434 - type: nauc_mrr_at_100_std value: -3.110529285852822 - type: nauc_mrr_at_10_diff1 value: 54.21030071588315 - type: nauc_mrr_at_10_max value: 38.661275938475434 - type: nauc_mrr_at_10_std value: -3.1549502405659173 - type: nauc_mrr_at_1_diff1 value: 59.470724157474834 - type: nauc_mrr_at_1_max value: 35.60578977198414 - type: nauc_mrr_at_1_std value: -7.093778610977887 - type: nauc_mrr_at_20_diff1 value: 54.2349834325068 - type: nauc_mrr_at_20_max value: 38.85239854501404 - type: nauc_mrr_at_20_std value: -2.997711648490797 - type: nauc_mrr_at_3_diff1 value: 55.470098003809824 - type: nauc_mrr_at_3_max value: 38.84286486592964 - type: nauc_mrr_at_3_std value: -3.830268753542997 - type: nauc_mrr_at_5_diff1 value: 54.201419630958156 - type: nauc_mrr_at_5_max value: 38.67554185963586 - type: nauc_mrr_at_5_std value: -3.0596087257425566 - type: nauc_ndcg_at_1000_diff1 value: 52.72751108752398 - type: nauc_ndcg_at_1000_max value: 39.941645650054184 - type: nauc_ndcg_at_1000_std value: -3.4493637196751 - type: nauc_ndcg_at_100_diff1 value: 52.30910994650125 - type: nauc_ndcg_at_100_max value: 40.33184270901276 - type: nauc_ndcg_at_100_std value: -2.901475443164014 - type: nauc_ndcg_at_10_diff1 value: 51.26542850640732 - type: nauc_ndcg_at_10_max value: 39.175805828652656 - type: nauc_ndcg_at_10_std value: -4.381732114321711 - type: nauc_ndcg_at_1_diff1 value: 59.470724157474834 - type: nauc_ndcg_at_1_max value: 35.60578977198414 - type: nauc_ndcg_at_1_std value: -7.093778610977887 - type: nauc_ndcg_at_20_diff1 value: 51.40324983657063 - type: nauc_ndcg_at_20_max value: 39.97369516223508 - type: nauc_ndcg_at_20_std value: -2.72975335651069 - type: nauc_ndcg_at_3_diff1 value: 53.71558494303497 - type: nauc_ndcg_at_3_max value: 38.61655310566153 - type: nauc_ndcg_at_3_std value: -5.262243707278389 - type: nauc_ndcg_at_5_diff1 value: 51.55385916859908 - type: nauc_ndcg_at_5_max value: 38.97638742673797 - type: nauc_ndcg_at_5_std value: -4.828131283920876 - type: nauc_precision_at_1000_diff1 value: 2.7393612079697673 - type: nauc_precision_at_1000_max value: 29.35941187035881 - type: nauc_precision_at_1000_std value: 45.03378524845005 - type: nauc_precision_at_100_diff1 value: 22.31747414714092 - type: nauc_precision_at_100_max value: 42.31726670193232 - type: nauc_precision_at_100_std value: 37.85148997521028 - type: 
nauc_precision_at_10_diff1 value: 34.91968585372156 - type: nauc_precision_at_10_max value: 40.74286545360522 - type: nauc_precision_at_10_std value: 14.679551404285561 - type: nauc_precision_at_1_diff1 value: 59.470724157474834 - type: nauc_precision_at_1_max value: 35.60578977198414 - type: nauc_precision_at_1_std value: -7.093778610977887 - type: nauc_precision_at_20_diff1 value: 31.7196752846707 - type: nauc_precision_at_20_max value: 41.887774560054275 - type: nauc_precision_at_20_std value: 26.111756052789254 - type: nauc_precision_at_3_diff1 value: 47.59338596894126 - type: nauc_precision_at_3_max value: 41.60456773600893 - type: nauc_precision_at_3_std value: 7.290322069735562 - type: nauc_precision_at_5_diff1 value: 39.59998573359068 - type: nauc_precision_at_5_max value: 41.716169786122784 - type: nauc_precision_at_5_std value: 10.030870924661512 - type: nauc_recall_at_1000_diff1 value: 57.971222677105146 - type: nauc_recall_at_1000_max value: 74.35977239898826 - type: nauc_recall_at_1000_std value: 25.046884850806233 - type: nauc_recall_at_100_diff1 value: 44.05189249844107 - type: nauc_recall_at_100_max value: 56.07575711551889 - type: nauc_recall_at_100_std value: 15.984701343858864 - type: nauc_recall_at_10_diff1 value: 42.17980666307712 - type: nauc_recall_at_10_max value: 42.410562357398085 - type: nauc_recall_at_10_std value: 0.5085324911166664 - type: nauc_recall_at_1_diff1 value: 57.56838727422756 - type: nauc_recall_at_1_max value: 33.91693407903183 - type: nauc_recall_at_1_std value: -12.559053818648572 - type: nauc_recall_at_20_diff1 value: 41.4295207526725 - type: nauc_recall_at_20_max value: 47.09186640937312 - type: nauc_recall_at_20_std value: 8.914121841807814 - type: nauc_recall_at_3_diff1 value: 49.71735118306758 - type: nauc_recall_at_3_max value: 40.79913675597018 - type: nauc_recall_at_3_std value: -2.6560764286787197 - type: nauc_recall_at_5_diff1 value: 44.40157202097473 - type: nauc_recall_at_5_max value: 41.81658165643147 - type: nauc_recall_at_5_std value: -0.9819402152656556 - type: ndcg_at_1 value: 36.667 - type: ndcg_at_10 value: 48.231 - type: ndcg_at_100 value: 53.077 - type: ndcg_at_1000 value: 54.529 - type: ndcg_at_20 value: 50.202000000000005 - type: ndcg_at_3 value: 42.296 - type: ndcg_at_5 value: 45.540000000000006 - type: precision_at_1 value: 36.667 - type: precision_at_10 value: 6.933 - type: precision_at_100 value: 0.95 - type: precision_at_1000 value: 0.109 - type: precision_at_20 value: 3.9170000000000003 - type: precision_at_3 value: 16.889000000000003 - type: precision_at_5 value: 11.933 - type: recall_at_1 value: 34.721999999999994 - type: recall_at_10 value: 62.428 - type: recall_at_100 value: 84.806 - type: recall_at_1000 value: 96.1 - type: recall_at_20 value: 69.828 - type: recall_at_3 value: 46.417 - type: recall_at_5 value: 54.289 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions (default) type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cosine_accuracy value: 99.69900990099009 - type: cosine_accuracy_threshold value: 76.38404965400696 - type: cosine_ap value: 89.62871668005243 - type: cosine_f1 value: 84.05797101449276 - type: cosine_f1_threshold value: 75.10095834732056 - type: cosine_precision value: 87.1244635193133 - type: cosine_recall value: 81.2 - type: dot_accuracy value: 99.06534653465346 - type: dot_accuracy_threshold value: 67162.48779296875 - type: dot_ap value: 32.76318972787881 - type: 
dot_f1 value: 36.930860033726816 - type: dot_f1_threshold value: 39663.446044921875 - type: dot_precision value: 31.924198250728864 - type: dot_recall value: 43.8 - type: euclidean_accuracy value: 99.44257425742575 - type: euclidean_accuracy_threshold value: 1320.8951950073242 - type: euclidean_ap value: 70.6030643455353 - type: euclidean_f1 value: 68.96551724137932 - type: euclidean_f1_threshold value: 1433.2849502563477 - type: euclidean_precision value: 73.44632768361582 - type: euclidean_recall value: 65.0 - type: main_score value: 89.62871668005243 - type: manhattan_accuracy value: 99.43564356435644 - type: manhattan_accuracy_threshold value: 23616.18194580078 - type: manhattan_ap value: 69.53528871039842 - type: manhattan_f1 value: 67.80032912781131 - type: manhattan_f1_threshold value: 24650.78887939453 - type: manhattan_precision value: 75.09113001215067 - type: manhattan_recall value: 61.8 - type: max_accuracy value: 99.69900990099009 - type: max_ap value: 89.62871668005243 - type: max_f1 value: 84.05797101449276 - type: max_precision value: 87.1244635193133 - type: max_recall value: 81.2 - type: similarity_accuracy value: 99.69900990099009 - type: similarity_accuracy_threshold value: 76.38405561447144 - type: similarity_ap value: 89.62871668005243 - type: similarity_f1 value: 84.05797101449276 - type: similarity_f1_threshold value: 75.10097026824951 - type: similarity_precision value: 87.1244635193133 - type: similarity_recall value: 81.2 - task: type: Clustering dataset: name: MTEB StackExchangeClustering (default) type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: main_score value: 39.45192779241171 - type: v_measure value: 39.45192779241171 - type: v_measure_std value: 4.925771037453908 ---

# 🪲 brown-beetle-base-v1.1 Model Card

<div align="center">
<img width="75%" alt="Beetle logo" src="./assets/beetle_logo.png">
</div>

> [!TIP]
> Beetles are some of the most diverse and interesting creatures on Earth. They are found in every environment, from the deepest oceans to the highest mountains. They are also known for their ability to adapt to a wide range of habitats and lifestyles. They are small, fast and powerful!

The beetle models are made as good starting points for Static Embedding training (via TokenLearn or Fine-tuning), as well as decent Static Embedding models in their own right. Each beetle model is made to be an improvement over the original **M2V_base_output** model in some way, and that is the threshold we set for each release (except the brown beetle series, which reproduces the original model).

This model was distilled from `baai/bge-base-en-v1.5` using PCA at 768 dimensions, applying Zipf and SIF re-weighting learnt from a subset of the C4 corpus. This model is significantly better than the M2V_base_output model.

> [!NOTE]
> The brown beetle series is made for convenience, so you can load and use the model directly instead of running the distillation yourself, though it is pretty fast to reproduce anyway (see the sketch at the end of this card). If you want to use the original model by the folks from the Minish Lab, you can use the **M2V_base_output** model.

## Version Information

- **brown-beetle-base-v0**: The original model, without using PCA or Zipf. The lack of PCA and Zipf also makes this a decent model for further training.
- **brown-beetle-base-v0.1**: The original model, with PCA applied but at the same dimensionality as the original model. This model is great if you want to experiment with Zipf or other weighting methods.
- **brown-beetle-base-v1**: The original model, with PCA and Zipf.
- **brown-beetle-small-v1**: A smaller version of the original model, with PCA and Zipf. Equivalent to **M2V_base_output**.
- **brown-beetle-tiny-v1**: A tiny version of the original model, with PCA and Zipf.
- **brown-beetle-base-v1.1**: The original model, with PCA at 768 dimensions, applying Zipf and SIF re-weighting learnt from a subset of the C4 corpus. This model is significantly better than the M2V_base_output model.
- **brown-beetle-small-v1.1**: A smaller version of the original model, with PCA at 256 dimensions, applying Zipf and SIF re-weighting learnt from a subset of the C4 corpus. This model is significantly better than the M2V_base_output model but slightly worse than the brown-beetle-base-v1.1 model.
- **brown-beetle-tiny-v1.1**: A tiny version of the original model, with PCA at 128 dimensions, applying Zipf and SIF re-weighting learnt from a subset of the C4 corpus. This model is significantly better than the M2V_base_output model but slightly worse than the brown-beetle-small-v1.1 model.

## Installation

Install model2vec using pip:

```bash
pip install model2vec
```

## Usage

Load this model using the `from_pretrained` method:

```python
from model2vec import StaticModel

# Load a pretrained Model2Vec model
model = StaticModel.from_pretrained("bhavnicksm/brown-beetle-base-v1.1")

# Compute text embeddings
embeddings = model.encode(["Example sentence"])
```

Read more about the Model2Vec library [here](https://github.com/MinishLab/model2vec).

## Comparison with other models

Coming soon...

## Acknowledgements

This model was made using the [Model2Vec](https://github.com/MinishLab/model2vec) library. Credit goes to the [Minish Lab](https://github.com/MinishLab) team for developing this library.

## Citation

Please cite the [Model2Vec repository](https://github.com/MinishLab/model2vec) if you use this model in your work.

```bibtex
@software{minishlab2024model2vec,
  author = {Stephan Tulkens and Thomas van Dongen},
  title = {Model2Vec: Turn any Sentence Transformer into a Small Fast Model},
  year = {2024},
  url = {https://github.com/MinishLab/model2vec},
}
```
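## Reproduction sketch

For reference, here is a minimal, hedged sketch of how a model along these lines could be rebuilt with the `model2vec` distillation API and then sanity-checked with a quick similarity comparison. The teacher model and the 768-dimensional PCA come from the description above; the local path `my-beetle-base`, the example sentences, and the exact keyword arguments are illustrative assumptions (parameter names vary between `model2vec` releases), and the Zipf/SIF re-weighting learnt from a C4 subset is not reproduced here.

```python
import numpy as np

from model2vec import StaticModel
from model2vec.distill import distill  # may require the distillation extras, e.g. `pip install model2vec[distill]`

# Distil a static embedding model from the same teacher named in this card.
# `pca_dims` is an assumption based on recent model2vec releases; the Zipf/SIF
# re-weighting described above is not applied in this sketch.
m2v_model = distill(
    model_name="baai/bge-base-en-v1.5",  # teacher model
    pca_dims=768,                        # keep 768 dimensions, as in v1.1
)
m2v_model.save_pretrained("my-beetle-base")  # hypothetical local path

# Load the distilled model back and compare two sentences.
model = StaticModel.from_pretrained("my-beetle-base")
embeddings = model.encode([
    "Beetles are found in almost every habitat.",
    "Beetles live in nearly all environments.",
])
a, b = embeddings
print(float(a @ b / (np.linalg.norm(a) * np.linalg.norm(b))))  # cosine similarity
```

The same similarity check works unchanged against the published checkpoint by loading `"bhavnicksm/brown-beetle-base-v1.1"` instead of the local path.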
[ "BIOSSES", "SCIFACT" ]
dinab/multilingual-e5-base-Q4_K_M-GGUF
dinab
sentence-similarity
[ "sentence-transformers", "gguf", "mteb", "Sentence Transformers", "sentence-similarity", "llama-cpp", "gguf-my-repo", "multilingual", "af", "am", "ar", "as", "az", "be", "bg", "bn", "br", "bs", "ca", "cs", "cy", "da", "de", "el", "en", "eo", "es", "et", "eu", "fa", "fi", "fr", "fy", "ga", "gd", "gl", "gu", "ha", "he", "hi", "hr", "hu", "hy", "id", "is", "it", "ja", "jv", "ka", "kk", "km", "kn", "ko", "ku", "ky", "la", "lo", "lt", "lv", "mg", "mk", "ml", "mn", "mr", "ms", "my", "ne", "nl", "no", "om", "or", "pa", "pl", "ps", "pt", "ro", "ru", "sa", "sd", "si", "sk", "sl", "so", "sq", "sr", "su", "sv", "sw", "ta", "te", "th", "tl", "tr", "ug", "uk", "ur", "uz", "vi", "xh", "yi", "zh", "base_model:intfloat/multilingual-e5-base", "base_model:quantized:intfloat/multilingual-e5-base", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us", "feature-extraction" ]
2025-01-28T20:15:23Z
2025-01-28T20:15:31+00:00
24
0
--- base_model: intfloat/multilingual-e5-base language: - multilingual - af - am - ar - as - az - be - bg - bn - br - bs - ca - cs - cy - da - de - el - en - eo - es - et - eu - fa - fi - fr - fy - ga - gd - gl - gu - ha - he - hi - hr - hu - hy - id - is - it - ja - jv - ka - kk - km - kn - ko - ku - ky - la - lo - lt - lv - mg - mk - ml - mn - mr - ms - my - ne - nl - 'no' - om - or - pa - pl - ps - pt - ro - ru - sa - sd - si - sk - sl - so - sq - sr - su - sv - sw - ta - te - th - tl - tr - ug - uk - ur - uz - vi - xh - yi - zh license: mit tags: - mteb - Sentence Transformers - sentence-similarity - sentence-transformers - llama-cpp - gguf-my-repo model-index: - name: multilingual-e5-base results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 78.97014925373135 - type: ap value: 43.69351129103008 - type: f1 value: 73.38075030070492 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (de) type: mteb/amazon_counterfactual config: de split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 71.7237687366167 - type: ap value: 82.22089859962671 - type: f1 value: 69.95532758884401 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en-ext) type: mteb/amazon_counterfactual config: en-ext split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 79.65517241379312 - type: ap value: 28.507918657094738 - type: f1 value: 66.84516013726119 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (ja) type: mteb/amazon_counterfactual config: ja split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 73.32976445396146 - type: ap value: 20.720481637566014 - type: f1 value: 59.78002763416003 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 90.63775 - type: ap value: 87.22277903861716 - type: f1 value: 90.60378636386807 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 44.546 - type: f1 value: 44.05666638370923 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (de) type: mteb/amazon_reviews_multi config: de split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 41.828 - type: f1 value: 41.2710255644252 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (es) type: mteb/amazon_reviews_multi config: es split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.534 - type: f1 value: 39.820743174270326 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (fr) type: mteb/amazon_reviews_multi config: fr split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 39.684 - type: f1 value: 39.11052682815307 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (ja) type: mteb/amazon_reviews_multi config: ja split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - 
type: accuracy value: 37.436 - type: f1 value: 37.07082931930871 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (zh) type: mteb/amazon_reviews_multi config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 37.226000000000006 - type: f1 value: 36.65372077739185 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 22.831000000000003 - type: map_at_10 value: 36.42 - type: map_at_100 value: 37.699 - type: map_at_1000 value: 37.724000000000004 - type: map_at_3 value: 32.207 - type: map_at_5 value: 34.312 - type: mrr_at_1 value: 23.257 - type: mrr_at_10 value: 36.574 - type: mrr_at_100 value: 37.854 - type: mrr_at_1000 value: 37.878 - type: mrr_at_3 value: 32.385000000000005 - type: mrr_at_5 value: 34.48 - type: ndcg_at_1 value: 22.831000000000003 - type: ndcg_at_10 value: 44.230000000000004 - type: ndcg_at_100 value: 49.974000000000004 - type: ndcg_at_1000 value: 50.522999999999996 - type: ndcg_at_3 value: 35.363 - type: ndcg_at_5 value: 39.164 - type: precision_at_1 value: 22.831000000000003 - type: precision_at_10 value: 6.935 - type: precision_at_100 value: 0.9520000000000001 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 14.841 - type: precision_at_5 value: 10.754 - type: recall_at_1 value: 22.831000000000003 - type: recall_at_10 value: 69.346 - type: recall_at_100 value: 95.235 - type: recall_at_1000 value: 99.36 - type: recall_at_3 value: 44.523 - type: recall_at_5 value: 53.769999999999996 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 40.27789869854063 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 35.41979463347428 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 58.22752045109304 - type: mrr value: 71.51112430198303 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 84.71147646622866 - type: cos_sim_spearman value: 85.059167046486 - type: euclidean_pearson value: 75.88421613600647 - type: euclidean_spearman value: 75.12821787150585 - type: manhattan_pearson value: 75.22005646957604 - type: manhattan_spearman value: 74.42880434453272 - task: type: BitextMining dataset: name: MTEB BUCC (de-en) type: mteb/bucc-bitext-mining config: de-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 99.23799582463465 - type: f1 value: 99.12665274878218 - type: precision value: 99.07098121085595 - type: recall value: 99.23799582463465 - task: type: BitextMining dataset: name: MTEB BUCC (fr-en) type: mteb/bucc-bitext-mining config: fr-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 97.88685890380806 - type: f1 value: 97.59336708489249 - type: precision value: 97.44662117543473 - type: recall value: 97.88685890380806 - task: type: BitextMining dataset: name: MTEB BUCC (ru-en) type: mteb/bucc-bitext-mining 
config: ru-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 97.47142362313821 - type: f1 value: 97.1989377670015 - type: precision value: 97.06384944001847 - type: recall value: 97.47142362313821 - task: type: BitextMining dataset: name: MTEB BUCC (zh-en) type: mteb/bucc-bitext-mining config: zh-en split: test revision: d51519689f32196a32af33b075a01d0e7c51e252 metrics: - type: accuracy value: 98.4728804634018 - type: f1 value: 98.2973494821836 - type: precision value: 98.2095839915745 - type: recall value: 98.4728804634018 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 82.74025974025975 - type: f1 value: 82.67420447730439 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 35.0380848063507 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 29.45956405670166 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 32.122 - type: map_at_10 value: 42.03 - type: map_at_100 value: 43.364000000000004 - type: map_at_1000 value: 43.474000000000004 - type: map_at_3 value: 38.804 - type: map_at_5 value: 40.585 - type: mrr_at_1 value: 39.914 - type: mrr_at_10 value: 48.227 - type: mrr_at_100 value: 49.018 - type: mrr_at_1000 value: 49.064 - type: mrr_at_3 value: 45.994 - type: mrr_at_5 value: 47.396 - type: ndcg_at_1 value: 39.914 - type: ndcg_at_10 value: 47.825 - type: ndcg_at_100 value: 52.852 - type: ndcg_at_1000 value: 54.891 - type: ndcg_at_3 value: 43.517 - type: ndcg_at_5 value: 45.493 - type: precision_at_1 value: 39.914 - type: precision_at_10 value: 8.956 - type: precision_at_100 value: 1.388 - type: precision_at_1000 value: 0.182 - type: precision_at_3 value: 20.791999999999998 - type: precision_at_5 value: 14.821000000000002 - type: recall_at_1 value: 32.122 - type: recall_at_10 value: 58.294999999999995 - type: recall_at_100 value: 79.726 - type: recall_at_1000 value: 93.099 - type: recall_at_3 value: 45.017 - type: recall_at_5 value: 51.002 - type: map_at_1 value: 29.677999999999997 - type: map_at_10 value: 38.684000000000005 - type: map_at_100 value: 39.812999999999995 - type: map_at_1000 value: 39.945 - type: map_at_3 value: 35.831 - type: map_at_5 value: 37.446 - type: mrr_at_1 value: 37.771 - type: mrr_at_10 value: 44.936 - type: mrr_at_100 value: 45.583 - type: mrr_at_1000 value: 45.634 - type: mrr_at_3 value: 42.771 - type: mrr_at_5 value: 43.994 - type: ndcg_at_1 value: 37.771 - type: ndcg_at_10 value: 44.059 - type: ndcg_at_100 value: 48.192 - type: ndcg_at_1000 value: 50.375 - type: ndcg_at_3 value: 40.172000000000004 - type: ndcg_at_5 value: 41.899 - type: precision_at_1 value: 37.771 - type: precision_at_10 value: 8.286999999999999 - type: precision_at_100 value: 1.322 - type: precision_at_1000 value: 0.178 - type: precision_at_3 value: 19.406000000000002 - type: precision_at_5 value: 13.745 - type: recall_at_1 value: 29.677999999999997 - type: recall_at_10 value: 53.071 - type: recall_at_100 value: 70.812 - type: recall_at_1000 value: 
84.841 - type: recall_at_3 value: 41.016000000000005 - type: recall_at_5 value: 46.22 - type: map_at_1 value: 42.675000000000004 - type: map_at_10 value: 53.93599999999999 - type: map_at_100 value: 54.806999999999995 - type: map_at_1000 value: 54.867 - type: map_at_3 value: 50.934000000000005 - type: map_at_5 value: 52.583 - type: mrr_at_1 value: 48.339 - type: mrr_at_10 value: 57.265 - type: mrr_at_100 value: 57.873 - type: mrr_at_1000 value: 57.906 - type: mrr_at_3 value: 55.193000000000005 - type: mrr_at_5 value: 56.303000000000004 - type: ndcg_at_1 value: 48.339 - type: ndcg_at_10 value: 59.19799999999999 - type: ndcg_at_100 value: 62.743 - type: ndcg_at_1000 value: 63.99399999999999 - type: ndcg_at_3 value: 54.367 - type: ndcg_at_5 value: 56.548 - type: precision_at_1 value: 48.339 - type: precision_at_10 value: 9.216000000000001 - type: precision_at_100 value: 1.1809999999999998 - type: precision_at_1000 value: 0.134 - type: precision_at_3 value: 23.72 - type: precision_at_5 value: 16.025 - type: recall_at_1 value: 42.675000000000004 - type: recall_at_10 value: 71.437 - type: recall_at_100 value: 86.803 - type: recall_at_1000 value: 95.581 - type: recall_at_3 value: 58.434 - type: recall_at_5 value: 63.754 - type: map_at_1 value: 23.518 - type: map_at_10 value: 30.648999999999997 - type: map_at_100 value: 31.508999999999997 - type: map_at_1000 value: 31.604 - type: map_at_3 value: 28.247 - type: map_at_5 value: 29.65 - type: mrr_at_1 value: 25.650000000000002 - type: mrr_at_10 value: 32.771 - type: mrr_at_100 value: 33.554 - type: mrr_at_1000 value: 33.629999999999995 - type: mrr_at_3 value: 30.433 - type: mrr_at_5 value: 31.812 - type: ndcg_at_1 value: 25.650000000000002 - type: ndcg_at_10 value: 34.929 - type: ndcg_at_100 value: 39.382 - type: ndcg_at_1000 value: 41.913 - type: ndcg_at_3 value: 30.292 - type: ndcg_at_5 value: 32.629999999999995 - type: precision_at_1 value: 25.650000000000002 - type: precision_at_10 value: 5.311 - type: precision_at_100 value: 0.792 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 12.58 - type: precision_at_5 value: 8.994 - type: recall_at_1 value: 23.518 - type: recall_at_10 value: 46.19 - type: recall_at_100 value: 67.123 - type: recall_at_1000 value: 86.442 - type: recall_at_3 value: 33.678000000000004 - type: recall_at_5 value: 39.244 - type: map_at_1 value: 15.891 - type: map_at_10 value: 22.464000000000002 - type: map_at_100 value: 23.483 - type: map_at_1000 value: 23.613 - type: map_at_3 value: 20.080000000000002 - type: map_at_5 value: 21.526 - type: mrr_at_1 value: 20.025000000000002 - type: mrr_at_10 value: 26.712999999999997 - type: mrr_at_100 value: 27.650000000000002 - type: mrr_at_1000 value: 27.737000000000002 - type: mrr_at_3 value: 24.274 - type: mrr_at_5 value: 25.711000000000002 - type: ndcg_at_1 value: 20.025000000000002 - type: ndcg_at_10 value: 27.028999999999996 - type: ndcg_at_100 value: 32.064 - type: ndcg_at_1000 value: 35.188 - type: ndcg_at_3 value: 22.512999999999998 - type: ndcg_at_5 value: 24.89 - type: precision_at_1 value: 20.025000000000002 - type: precision_at_10 value: 4.776 - type: precision_at_100 value: 0.8500000000000001 - type: precision_at_1000 value: 0.125 - type: precision_at_3 value: 10.531 - type: precision_at_5 value: 7.811 - type: recall_at_1 value: 15.891 - type: recall_at_10 value: 37.261 - type: recall_at_100 value: 59.12 - type: recall_at_1000 value: 81.356 - type: recall_at_3 value: 24.741 - type: recall_at_5 value: 30.753999999999998 - type: map_at_1 value: 27.544 - type: 
map_at_10 value: 36.283 - type: map_at_100 value: 37.467 - type: map_at_1000 value: 37.574000000000005 - type: map_at_3 value: 33.528999999999996 - type: map_at_5 value: 35.028999999999996 - type: mrr_at_1 value: 34.166999999999994 - type: mrr_at_10 value: 41.866 - type: mrr_at_100 value: 42.666 - type: mrr_at_1000 value: 42.716 - type: mrr_at_3 value: 39.541 - type: mrr_at_5 value: 40.768 - type: ndcg_at_1 value: 34.166999999999994 - type: ndcg_at_10 value: 41.577 - type: ndcg_at_100 value: 46.687 - type: ndcg_at_1000 value: 48.967 - type: ndcg_at_3 value: 37.177 - type: ndcg_at_5 value: 39.097 - type: precision_at_1 value: 34.166999999999994 - type: precision_at_10 value: 7.420999999999999 - type: precision_at_100 value: 1.165 - type: precision_at_1000 value: 0.154 - type: precision_at_3 value: 17.291999999999998 - type: precision_at_5 value: 12.166 - type: recall_at_1 value: 27.544 - type: recall_at_10 value: 51.99399999999999 - type: recall_at_100 value: 73.738 - type: recall_at_1000 value: 89.33 - type: recall_at_3 value: 39.179 - type: recall_at_5 value: 44.385999999999996 - type: map_at_1 value: 26.661 - type: map_at_10 value: 35.475 - type: map_at_100 value: 36.626999999999995 - type: map_at_1000 value: 36.741 - type: map_at_3 value: 32.818000000000005 - type: map_at_5 value: 34.397 - type: mrr_at_1 value: 32.647999999999996 - type: mrr_at_10 value: 40.784 - type: mrr_at_100 value: 41.602 - type: mrr_at_1000 value: 41.661 - type: mrr_at_3 value: 38.68 - type: mrr_at_5 value: 39.838 - type: ndcg_at_1 value: 32.647999999999996 - type: ndcg_at_10 value: 40.697 - type: ndcg_at_100 value: 45.799 - type: ndcg_at_1000 value: 48.235 - type: ndcg_at_3 value: 36.516 - type: ndcg_at_5 value: 38.515 - type: precision_at_1 value: 32.647999999999996 - type: precision_at_10 value: 7.202999999999999 - type: precision_at_100 value: 1.1360000000000001 - type: precision_at_1000 value: 0.151 - type: precision_at_3 value: 17.314 - type: precision_at_5 value: 12.145999999999999 - type: recall_at_1 value: 26.661 - type: recall_at_10 value: 50.995000000000005 - type: recall_at_100 value: 73.065 - type: recall_at_1000 value: 89.781 - type: recall_at_3 value: 39.073 - type: recall_at_5 value: 44.395 - type: map_at_1 value: 25.946583333333333 - type: map_at_10 value: 33.79725 - type: map_at_100 value: 34.86408333333333 - type: map_at_1000 value: 34.9795 - type: map_at_3 value: 31.259999999999998 - type: map_at_5 value: 32.71541666666666 - type: mrr_at_1 value: 30.863749999999996 - type: mrr_at_10 value: 37.99183333333333 - type: mrr_at_100 value: 38.790499999999994 - type: mrr_at_1000 value: 38.85575000000001 - type: mrr_at_3 value: 35.82083333333333 - type: mrr_at_5 value: 37.07533333333333 - type: ndcg_at_1 value: 30.863749999999996 - type: ndcg_at_10 value: 38.52141666666667 - type: ndcg_at_100 value: 43.17966666666667 - type: ndcg_at_1000 value: 45.64608333333333 - type: ndcg_at_3 value: 34.333000000000006 - type: ndcg_at_5 value: 36.34975 - type: precision_at_1 value: 30.863749999999996 - type: precision_at_10 value: 6.598999999999999 - type: precision_at_100 value: 1.0502500000000001 - type: precision_at_1000 value: 0.14400000000000002 - type: precision_at_3 value: 15.557583333333334 - type: precision_at_5 value: 11.020000000000001 - type: recall_at_1 value: 25.946583333333333 - type: recall_at_10 value: 48.36991666666666 - type: recall_at_100 value: 69.02408333333334 - type: recall_at_1000 value: 86.43858333333331 - type: recall_at_3 value: 36.4965 - type: recall_at_5 value: 41.76258333333334 - type: 
map_at_1 value: 22.431 - type: map_at_10 value: 28.889 - type: map_at_100 value: 29.642000000000003 - type: map_at_1000 value: 29.742 - type: map_at_3 value: 26.998 - type: map_at_5 value: 28.172000000000004 - type: mrr_at_1 value: 25.307000000000002 - type: mrr_at_10 value: 31.763 - type: mrr_at_100 value: 32.443 - type: mrr_at_1000 value: 32.531 - type: mrr_at_3 value: 29.959000000000003 - type: mrr_at_5 value: 31.063000000000002 - type: ndcg_at_1 value: 25.307000000000002 - type: ndcg_at_10 value: 32.586999999999996 - type: ndcg_at_100 value: 36.5 - type: ndcg_at_1000 value: 39.133 - type: ndcg_at_3 value: 29.25 - type: ndcg_at_5 value: 31.023 - type: precision_at_1 value: 25.307000000000002 - type: precision_at_10 value: 4.954 - type: precision_at_100 value: 0.747 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 12.577 - type: precision_at_5 value: 8.741999999999999 - type: recall_at_1 value: 22.431 - type: recall_at_10 value: 41.134 - type: recall_at_100 value: 59.28600000000001 - type: recall_at_1000 value: 78.857 - type: recall_at_3 value: 31.926 - type: recall_at_5 value: 36.335 - type: map_at_1 value: 17.586 - type: map_at_10 value: 23.304 - type: map_at_100 value: 24.159 - type: map_at_1000 value: 24.281 - type: map_at_3 value: 21.316 - type: map_at_5 value: 22.383 - type: mrr_at_1 value: 21.645 - type: mrr_at_10 value: 27.365000000000002 - type: mrr_at_100 value: 28.108 - type: mrr_at_1000 value: 28.192 - type: mrr_at_3 value: 25.482 - type: mrr_at_5 value: 26.479999999999997 - type: ndcg_at_1 value: 21.645 - type: ndcg_at_10 value: 27.306 - type: ndcg_at_100 value: 31.496000000000002 - type: ndcg_at_1000 value: 34.53 - type: ndcg_at_3 value: 23.73 - type: ndcg_at_5 value: 25.294 - type: precision_at_1 value: 21.645 - type: precision_at_10 value: 4.797 - type: precision_at_100 value: 0.8059999999999999 - type: precision_at_1000 value: 0.121 - type: precision_at_3 value: 10.850999999999999 - type: precision_at_5 value: 7.736 - type: recall_at_1 value: 17.586 - type: recall_at_10 value: 35.481 - type: recall_at_100 value: 54.534000000000006 - type: recall_at_1000 value: 76.456 - type: recall_at_3 value: 25.335 - type: recall_at_5 value: 29.473 - type: map_at_1 value: 25.095 - type: map_at_10 value: 32.374 - type: map_at_100 value: 33.537 - type: map_at_1000 value: 33.634 - type: map_at_3 value: 30.089 - type: map_at_5 value: 31.433 - type: mrr_at_1 value: 29.198 - type: mrr_at_10 value: 36.01 - type: mrr_at_100 value: 37.022 - type: mrr_at_1000 value: 37.083 - type: mrr_at_3 value: 33.94 - type: mrr_at_5 value: 35.148 - type: ndcg_at_1 value: 29.198 - type: ndcg_at_10 value: 36.729 - type: ndcg_at_100 value: 42.114000000000004 - type: ndcg_at_1000 value: 44.592 - type: ndcg_at_3 value: 32.644 - type: ndcg_at_5 value: 34.652 - type: precision_at_1 value: 29.198 - type: precision_at_10 value: 5.970000000000001 - type: precision_at_100 value: 0.967 - type: precision_at_1000 value: 0.129 - type: precision_at_3 value: 14.396999999999998 - type: precision_at_5 value: 10.093 - type: recall_at_1 value: 25.095 - type: recall_at_10 value: 46.392 - type: recall_at_100 value: 69.706 - type: recall_at_1000 value: 87.738 - type: recall_at_3 value: 35.303000000000004 - type: recall_at_5 value: 40.441 - type: map_at_1 value: 26.857999999999997 - type: map_at_10 value: 34.066 - type: map_at_100 value: 35.671 - type: map_at_1000 value: 35.881 - type: map_at_3 value: 31.304 - type: map_at_5 value: 32.885 - type: mrr_at_1 value: 32.411 - type: mrr_at_10 value: 38.987 - type: 
mrr_at_100 value: 39.894 - type: mrr_at_1000 value: 39.959 - type: mrr_at_3 value: 36.626999999999995 - type: mrr_at_5 value: 38.011 - type: ndcg_at_1 value: 32.411 - type: ndcg_at_10 value: 39.208 - type: ndcg_at_100 value: 44.626 - type: ndcg_at_1000 value: 47.43 - type: ndcg_at_3 value: 35.091 - type: ndcg_at_5 value: 37.119 - type: precision_at_1 value: 32.411 - type: precision_at_10 value: 7.51 - type: precision_at_100 value: 1.486 - type: precision_at_1000 value: 0.234 - type: precision_at_3 value: 16.14 - type: precision_at_5 value: 11.976 - type: recall_at_1 value: 26.857999999999997 - type: recall_at_10 value: 47.407 - type: recall_at_100 value: 72.236 - type: recall_at_1000 value: 90.77 - type: recall_at_3 value: 35.125 - type: recall_at_5 value: 40.522999999999996 - type: map_at_1 value: 21.3 - type: map_at_10 value: 27.412999999999997 - type: map_at_100 value: 28.29 - type: map_at_1000 value: 28.398 - type: map_at_3 value: 25.169999999999998 - type: map_at_5 value: 26.496 - type: mrr_at_1 value: 23.29 - type: mrr_at_10 value: 29.215000000000003 - type: mrr_at_100 value: 30.073 - type: mrr_at_1000 value: 30.156 - type: mrr_at_3 value: 26.956000000000003 - type: mrr_at_5 value: 28.38 - type: ndcg_at_1 value: 23.29 - type: ndcg_at_10 value: 31.113000000000003 - type: ndcg_at_100 value: 35.701 - type: ndcg_at_1000 value: 38.505 - type: ndcg_at_3 value: 26.727 - type: ndcg_at_5 value: 29.037000000000003 - type: precision_at_1 value: 23.29 - type: precision_at_10 value: 4.787 - type: precision_at_100 value: 0.763 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 11.091 - type: precision_at_5 value: 7.985 - type: recall_at_1 value: 21.3 - type: recall_at_10 value: 40.782000000000004 - type: recall_at_100 value: 62.13999999999999 - type: recall_at_1000 value: 83.012 - type: recall_at_3 value: 29.131 - type: recall_at_5 value: 34.624 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 9.631 - type: map_at_10 value: 16.634999999999998 - type: map_at_100 value: 18.23 - type: map_at_1000 value: 18.419 - type: map_at_3 value: 13.66 - type: map_at_5 value: 15.173 - type: mrr_at_1 value: 21.368000000000002 - type: mrr_at_10 value: 31.56 - type: mrr_at_100 value: 32.58 - type: mrr_at_1000 value: 32.633 - type: mrr_at_3 value: 28.241 - type: mrr_at_5 value: 30.225 - type: ndcg_at_1 value: 21.368000000000002 - type: ndcg_at_10 value: 23.855999999999998 - type: ndcg_at_100 value: 30.686999999999998 - type: ndcg_at_1000 value: 34.327000000000005 - type: ndcg_at_3 value: 18.781 - type: ndcg_at_5 value: 20.73 - type: precision_at_1 value: 21.368000000000002 - type: precision_at_10 value: 7.564 - type: precision_at_100 value: 1.496 - type: precision_at_1000 value: 0.217 - type: precision_at_3 value: 13.876 - type: precision_at_5 value: 11.062 - type: recall_at_1 value: 9.631 - type: recall_at_10 value: 29.517 - type: recall_at_100 value: 53.452 - type: recall_at_1000 value: 74.115 - type: recall_at_3 value: 17.605999999999998 - type: recall_at_5 value: 22.505 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 8.885 - type: map_at_10 value: 18.798000000000002 - type: map_at_100 value: 26.316 - type: map_at_1000 value: 27.869 - type: map_at_3 value: 13.719000000000001 - type: map_at_5 value: 15.716 - type: mrr_at_1 value: 66 - type: mrr_at_10 value: 74.263 - type: mrr_at_100 
value: 74.519 - type: mrr_at_1000 value: 74.531 - type: mrr_at_3 value: 72.458 - type: mrr_at_5 value: 73.321 - type: ndcg_at_1 value: 53.87499999999999 - type: ndcg_at_10 value: 40.355999999999995 - type: ndcg_at_100 value: 44.366 - type: ndcg_at_1000 value: 51.771 - type: ndcg_at_3 value: 45.195 - type: ndcg_at_5 value: 42.187000000000005 - type: precision_at_1 value: 66 - type: precision_at_10 value: 31.75 - type: precision_at_100 value: 10.11 - type: precision_at_1000 value: 1.9800000000000002 - type: precision_at_3 value: 48.167 - type: precision_at_5 value: 40.050000000000004 - type: recall_at_1 value: 8.885 - type: recall_at_10 value: 24.471999999999998 - type: recall_at_100 value: 49.669000000000004 - type: recall_at_1000 value: 73.383 - type: recall_at_3 value: 14.872 - type: recall_at_5 value: 18.262999999999998 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 45.18 - type: f1 value: 40.26878691789978 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 62.751999999999995 - type: map_at_10 value: 74.131 - type: map_at_100 value: 74.407 - type: map_at_1000 value: 74.423 - type: map_at_3 value: 72.329 - type: map_at_5 value: 73.555 - type: mrr_at_1 value: 67.282 - type: mrr_at_10 value: 78.292 - type: mrr_at_100 value: 78.455 - type: mrr_at_1000 value: 78.458 - type: mrr_at_3 value: 76.755 - type: mrr_at_5 value: 77.839 - type: ndcg_at_1 value: 67.282 - type: ndcg_at_10 value: 79.443 - type: ndcg_at_100 value: 80.529 - type: ndcg_at_1000 value: 80.812 - type: ndcg_at_3 value: 76.281 - type: ndcg_at_5 value: 78.235 - type: precision_at_1 value: 67.282 - type: precision_at_10 value: 10.078 - type: precision_at_100 value: 1.082 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 30.178 - type: precision_at_5 value: 19.232 - type: recall_at_1 value: 62.751999999999995 - type: recall_at_10 value: 91.521 - type: recall_at_100 value: 95.997 - type: recall_at_1000 value: 97.775 - type: recall_at_3 value: 83.131 - type: recall_at_5 value: 87.93299999999999 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 18.861 - type: map_at_10 value: 30.252000000000002 - type: map_at_100 value: 32.082 - type: map_at_1000 value: 32.261 - type: map_at_3 value: 25.909 - type: map_at_5 value: 28.296 - type: mrr_at_1 value: 37.346000000000004 - type: mrr_at_10 value: 45.802 - type: mrr_at_100 value: 46.611999999999995 - type: mrr_at_1000 value: 46.659 - type: mrr_at_3 value: 43.056 - type: mrr_at_5 value: 44.637 - type: ndcg_at_1 value: 37.346000000000004 - type: ndcg_at_10 value: 38.169 - type: ndcg_at_100 value: 44.864 - type: ndcg_at_1000 value: 47.974 - type: ndcg_at_3 value: 33.619 - type: ndcg_at_5 value: 35.317 - type: precision_at_1 value: 37.346000000000004 - type: precision_at_10 value: 10.693999999999999 - type: precision_at_100 value: 1.775 - type: precision_at_1000 value: 0.231 - type: precision_at_3 value: 22.325 - type: precision_at_5 value: 16.852 - type: recall_at_1 value: 18.861 - type: recall_at_10 value: 45.672000000000004 - type: recall_at_100 value: 70.60499999999999 - type: recall_at_1000 value: 89.216 - type: recall_at_3 value: 30.361 - type: recall_at_5 value: 36.998999999999995 - task: type: Retrieval dataset: name: MTEB HotpotQA 
type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 37.852999999999994 - type: map_at_10 value: 59.961 - type: map_at_100 value: 60.78 - type: map_at_1000 value: 60.843 - type: map_at_3 value: 56.39999999999999 - type: map_at_5 value: 58.646 - type: mrr_at_1 value: 75.70599999999999 - type: mrr_at_10 value: 82.321 - type: mrr_at_100 value: 82.516 - type: mrr_at_1000 value: 82.525 - type: mrr_at_3 value: 81.317 - type: mrr_at_5 value: 81.922 - type: ndcg_at_1 value: 75.70599999999999 - type: ndcg_at_10 value: 68.557 - type: ndcg_at_100 value: 71.485 - type: ndcg_at_1000 value: 72.71600000000001 - type: ndcg_at_3 value: 63.524 - type: ndcg_at_5 value: 66.338 - type: precision_at_1 value: 75.70599999999999 - type: precision_at_10 value: 14.463000000000001 - type: precision_at_100 value: 1.677 - type: precision_at_1000 value: 0.184 - type: precision_at_3 value: 40.806 - type: precision_at_5 value: 26.709 - type: recall_at_1 value: 37.852999999999994 - type: recall_at_10 value: 72.316 - type: recall_at_100 value: 83.842 - type: recall_at_1000 value: 91.999 - type: recall_at_3 value: 61.209 - type: recall_at_5 value: 66.77199999999999 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 85.46039999999999 - type: ap value: 79.9812521351881 - type: f1 value: 85.31722909702084 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 22.704 - type: map_at_10 value: 35.329 - type: map_at_100 value: 36.494 - type: map_at_1000 value: 36.541000000000004 - type: map_at_3 value: 31.476 - type: map_at_5 value: 33.731 - type: mrr_at_1 value: 23.294999999999998 - type: mrr_at_10 value: 35.859 - type: mrr_at_100 value: 36.968 - type: mrr_at_1000 value: 37.008 - type: mrr_at_3 value: 32.085 - type: mrr_at_5 value: 34.299 - type: ndcg_at_1 value: 23.324 - type: ndcg_at_10 value: 42.274 - type: ndcg_at_100 value: 47.839999999999996 - type: ndcg_at_1000 value: 48.971 - type: ndcg_at_3 value: 34.454 - type: ndcg_at_5 value: 38.464 - type: precision_at_1 value: 23.324 - type: precision_at_10 value: 6.648 - type: precision_at_100 value: 0.9440000000000001 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.674999999999999 - type: precision_at_5 value: 10.850999999999999 - type: recall_at_1 value: 22.704 - type: recall_at_10 value: 63.660000000000004 - type: recall_at_100 value: 89.29899999999999 - type: recall_at_1000 value: 97.88900000000001 - type: recall_at_3 value: 42.441 - type: recall_at_5 value: 52.04 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.1326949384405 - type: f1 value: 92.89743579612082 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (de) type: mteb/mtop_domain config: de split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 89.62524654832347 - type: f1 value: 88.65106082263151 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (es) type: mteb/mtop_domain config: es split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 90.59039359573046 - type: f1 value: 90.31532892105662 - task: type: Classification dataset: name: MTEB 
MTOPDomainClassification (fr) type: mteb/mtop_domain config: fr split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 86.21046038208581 - type: f1 value: 86.41459529813113 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (hi) type: mteb/mtop_domain config: hi split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 87.3180351380423 - type: f1 value: 86.71383078226444 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (th) type: mteb/mtop_domain config: th split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 86.24231464737792 - type: f1 value: 86.31845567592403 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 75.27131782945736 - type: f1 value: 57.52079940417103 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (de) type: mteb/mtop_intent config: de split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 71.2341504649197 - type: f1 value: 51.349951558039244 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (es) type: mteb/mtop_intent config: es split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 71.27418278852569 - type: f1 value: 50.1714985749095 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (fr) type: mteb/mtop_intent config: fr split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 67.68243031631694 - type: f1 value: 50.1066160836192 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (hi) type: mteb/mtop_intent config: hi split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 69.2362854069559 - type: f1 value: 48.821279948766424 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (th) type: mteb/mtop_intent config: th split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 71.71428571428571 - type: f1 value: 53.94611389496195 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (af) type: mteb/amazon_massive_intent config: af split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.97646267652992 - type: f1 value: 57.26797883561521 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (am) type: mteb/amazon_massive_intent config: am split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 53.65501008742435 - type: f1 value: 50.416258382177034 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ar) type: mteb/amazon_massive_intent config: ar split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 57.45796906523201 - type: f1 value: 53.306690547422185 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (az) type: mteb/amazon_massive_intent config: az split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.59246805648957 - type: f1 value: 59.818381969051494 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (bn) type: mteb/amazon_massive_intent config: bn split: 
test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.126429051782104 - type: f1 value: 58.25993593933026 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (cy) type: mteb/amazon_massive_intent config: cy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 50.057162071284466 - type: f1 value: 46.96095728790911 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (da) type: mteb/amazon_massive_intent config: da split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.64425016812375 - type: f1 value: 62.858291698755764 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (de) type: mteb/amazon_massive_intent config: de split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.08944182918628 - type: f1 value: 62.44639030604241 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (el) type: mteb/amazon_massive_intent config: el split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.68056489576328 - type: f1 value: 61.775326758789504 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 72.11163416274377 - type: f1 value: 69.70789096927015 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (es) type: mteb/amazon_massive_intent config: es split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.40282447881641 - type: f1 value: 66.38492065671895 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fa) type: mteb/amazon_massive_intent config: fa split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.24613315400134 - type: f1 value: 64.3348019501336 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fi) type: mteb/amazon_massive_intent config: fi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.78345662407531 - type: f1 value: 62.21279452354622 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fr) type: mteb/amazon_massive_intent config: fr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.9455279085407 - type: f1 value: 65.48193124964094 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (he) type: mteb/amazon_massive_intent config: he split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.05110961667788 - type: f1 value: 58.097856564684534 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hi) type: mteb/amazon_massive_intent config: hi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.95292535305985 - type: f1 value: 62.09182174767901 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hu) type: mteb/amazon_massive_intent config: hu split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.97310020174848 - type: f1 value: 61.14252567730396 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (hy) type: 
mteb/amazon_massive_intent config: hy split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.08069939475453 - type: f1 value: 57.044041742492034 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (id) type: mteb/amazon_massive_intent config: id split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.63752521856085 - type: f1 value: 63.889340907205316 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (is) type: mteb/amazon_massive_intent config: is split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 56.385339609952936 - type: f1 value: 53.449033750088304 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (it) type: mteb/amazon_massive_intent config: it split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.93073301950234 - type: f1 value: 65.9884357824104 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ja) type: mteb/amazon_massive_intent config: ja split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.94418291862812 - type: f1 value: 66.48740222583132 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (jv) type: mteb/amazon_massive_intent config: jv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 54.26025554808339 - type: f1 value: 50.19562815100793 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ka) type: mteb/amazon_massive_intent config: ka split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 48.98789509078682 - type: f1 value: 46.65788438676836 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (km) type: mteb/amazon_massive_intent config: km split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 44.68728984532616 - type: f1 value: 41.642419349541996 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (kn) type: mteb/amazon_massive_intent config: kn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.19300605245461 - type: f1 value: 55.8626492442437 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ko) type: mteb/amazon_massive_intent config: ko split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.33826496301278 - type: f1 value: 63.89499791648792 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (lv) type: mteb/amazon_massive_intent config: lv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.33960995292536 - type: f1 value: 57.15242464180892 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ml) type: mteb/amazon_massive_intent config: ml split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 63.09347679892402 - type: f1 value: 59.64733214063841 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (mn) type: mteb/amazon_massive_intent config: mn split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.75924680564896 - type: f1 value: 55.96585692366827 - task: type: Classification dataset: name: MTEB 
MassiveIntentClassification (ms) type: mteb/amazon_massive_intent config: ms split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.48486886348352 - type: f1 value: 59.45143559032946 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (my) type: mteb/amazon_massive_intent config: my split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.56422326832549 - type: f1 value: 54.96368702901926 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (nb) type: mteb/amazon_massive_intent config: nb split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.18022864828512 - type: f1 value: 63.05369805040634 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (nl) type: mteb/amazon_massive_intent config: nl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.30329522528581 - type: f1 value: 64.06084612020727 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pl) type: mteb/amazon_massive_intent config: pl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.36919973100201 - type: f1 value: 65.12154124788887 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pt) type: mteb/amazon_massive_intent config: pt split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.98117014122394 - type: f1 value: 66.41847559806962 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ro) type: mteb/amazon_massive_intent config: ro split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.53799596503026 - type: f1 value: 62.17067330740817 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ru) type: mteb/amazon_massive_intent config: ru split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.01815736381977 - type: f1 value: 66.24988369607843 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sl) type: mteb/amazon_massive_intent config: sl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 62.34700739744452 - type: f1 value: 59.957933424941636 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sq) type: mteb/amazon_massive_intent config: sq split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.23402824478815 - type: f1 value: 57.98836976018471 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sv) type: mteb/amazon_massive_intent config: sv split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 68.54068594485541 - type: f1 value: 65.43849680666855 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (sw) type: mteb/amazon_massive_intent config: sw split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 55.998655010087425 - type: f1 value: 52.83737515406804 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ta) type: mteb/amazon_massive_intent config: ta split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 58.71217215870882 - type: f1 value: 55.051794977833026 - task: 
type: Classification dataset: name: MTEB MassiveIntentClassification (te) type: mteb/amazon_massive_intent config: te split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 59.724277067921996 - type: f1 value: 56.33485571838306 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (th) type: mteb/amazon_massive_intent config: th split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 65.59515803631473 - type: f1 value: 64.96772366193588 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (tl) type: mteb/amazon_massive_intent config: tl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 60.860793544048406 - type: f1 value: 58.148845819115394 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (tr) type: mteb/amazon_massive_intent config: tr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.40753194351043 - type: f1 value: 63.18903778054698 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (ur) type: mteb/amazon_massive_intent config: ur split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 61.52320107599194 - type: f1 value: 58.356144563398516 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (vi) type: mteb/amazon_massive_intent config: vi split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 66.17014122394083 - type: f1 value: 63.919964062638925 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-CN) type: mteb/amazon_massive_intent config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.15601882985878 - type: f1 value: 67.01451905761371 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-TW) type: mteb/amazon_massive_intent config: zh-TW split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 64.65030262273034 - type: f1 value: 64.14420425129063 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (af) type: mteb/amazon_massive_scenario config: af split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.08742434431743 - type: f1 value: 63.044060042311756 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (am) type: mteb/amazon_massive_scenario config: am split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 58.52387357094821 - type: f1 value: 56.82398588814534 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ar) type: mteb/amazon_massive_scenario config: ar split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.239408204438476 - type: f1 value: 61.92570286170469 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (az) type: mteb/amazon_massive_scenario config: az split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.74915938130463 - type: f1 value: 62.130740689396276 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (bn) type: mteb/amazon_massive_scenario config: bn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: 
accuracy value: 65.00336247478144 - type: f1 value: 63.71080635228055 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (cy) type: mteb/amazon_massive_scenario config: cy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 52.837928715534645 - type: f1 value: 50.390741680320836 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (da) type: mteb/amazon_massive_scenario config: da split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.42098184263618 - type: f1 value: 71.41355113538995 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (de) type: mteb/amazon_massive_scenario config: de split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.95359784801613 - type: f1 value: 71.42699340156742 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (el) type: mteb/amazon_massive_scenario config: el split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.18157363819772 - type: f1 value: 69.74836113037671 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.08137188971082 - type: f1 value: 76.78000685068261 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (es) type: mteb/amazon_massive_scenario config: es split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.5030262273033 - type: f1 value: 71.71620130425673 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fa) type: mteb/amazon_massive_scenario config: fa split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.24546065904505 - type: f1 value: 69.07638311730359 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fi) type: mteb/amazon_massive_scenario config: fi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.12911903160726 - type: f1 value: 68.32651736539815 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fr) type: mteb/amazon_massive_scenario config: fr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.89307330195025 - type: f1 value: 71.33986549860187 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (he) type: mteb/amazon_massive_scenario config: he split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 67.44451916610626 - type: f1 value: 66.90192664503866 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hi) type: mteb/amazon_massive_scenario config: hi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.16274377942166 - type: f1 value: 68.01090953775066 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hu) type: mteb/amazon_massive_scenario config: hu split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.75319435104237 - type: f1 value: 70.18035309201403 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (hy) type: mteb/amazon_massive_scenario 
config: hy split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 63.14391392064559 - type: f1 value: 61.48286540778145 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (id) type: mteb/amazon_massive_scenario config: id split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.70275722932078 - type: f1 value: 70.26164779846495 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (is) type: mteb/amazon_massive_scenario config: is split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 60.93813046402153 - type: f1 value: 58.8852862116525 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (it) type: mteb/amazon_massive_scenario config: it split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.320107599193 - type: f1 value: 72.19836409602924 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ja) type: mteb/amazon_massive_scenario config: ja split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.65366509751176 - type: f1 value: 74.55188288799579 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (jv) type: mteb/amazon_massive_scenario config: jv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.694014794889036 - type: f1 value: 58.11353311721067 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ka) type: mteb/amazon_massive_scenario config: ka split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 54.37457969065231 - type: f1 value: 52.81306134311697 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (km) type: mteb/amazon_massive_scenario config: km split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 48.3086751849361 - type: f1 value: 45.396449765419376 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (kn) type: mteb/amazon_massive_scenario config: kn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.151983860121064 - type: f1 value: 60.31762544281696 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ko) type: mteb/amazon_massive_scenario config: ko split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.44788164088769 - type: f1 value: 71.68150151736367 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (lv) type: mteb/amazon_massive_scenario config: lv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.81439139206455 - type: f1 value: 62.06735559105593 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ml) type: mteb/amazon_massive_scenario config: ml split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 68.04303967720242 - type: f1 value: 66.68298851670133 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (mn) type: mteb/amazon_massive_scenario config: mn split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.43913920645595 - type: f1 value: 60.25605977560783 - task: type: Classification 
dataset: name: MTEB MassiveScenarioClassification (ms) type: mteb/amazon_massive_scenario config: ms split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.90316072629456 - type: f1 value: 65.1325924692381 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (my) type: mteb/amazon_massive_scenario config: my split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 61.63752521856086 - type: f1 value: 59.14284778039585 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (nb) type: mteb/amazon_massive_scenario config: nb split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.63080026899797 - type: f1 value: 70.89771864626877 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (nl) type: mteb/amazon_massive_scenario config: nl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.10827168796234 - type: f1 value: 71.71954219691159 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (pl) type: mteb/amazon_massive_scenario config: pl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.59515803631471 - type: f1 value: 70.05040128099003 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (pt) type: mteb/amazon_massive_scenario config: pt split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.83389374579691 - type: f1 value: 70.84877936562735 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ro) type: mteb/amazon_massive_scenario config: ro split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 69.18628110289173 - type: f1 value: 68.97232927921841 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ru) type: mteb/amazon_massive_scenario config: ru split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.99260255548083 - type: f1 value: 72.85139492157732 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sl) type: mteb/amazon_massive_scenario config: sl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 65.26227303295225 - type: f1 value: 65.08833655469431 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sq) type: mteb/amazon_massive_scenario config: sq split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 66.48621385339611 - type: f1 value: 64.43483199071298 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sv) type: mteb/amazon_massive_scenario config: sv split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 73.14391392064559 - type: f1 value: 72.2580822579741 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (sw) type: mteb/amazon_massive_scenario config: sw split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 59.88567585743107 - type: f1 value: 58.3073765932569 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ta) type: mteb/amazon_massive_scenario config: ta split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 
62.38399462004034 - type: f1 value: 60.82139544252606 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (te) type: mteb/amazon_massive_scenario config: te split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.58574310692671 - type: f1 value: 60.71443370385374 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (th) type: mteb/amazon_massive_scenario config: th split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.61398789509079 - type: f1 value: 70.99761812049401 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (tl) type: mteb/amazon_massive_scenario config: tl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 62.73705447209146 - type: f1 value: 61.680849331794796 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (tr) type: mteb/amazon_massive_scenario config: tr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 71.66778749159381 - type: f1 value: 71.17320646080115 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (ur) type: mteb/amazon_massive_scenario config: ur split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 64.640215198386 - type: f1 value: 63.301805157015444 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (vi) type: mteb/amazon_massive_scenario config: vi split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.00672494956288 - type: f1 value: 70.26005548582106 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-CN) type: mteb/amazon_massive_scenario config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 75.42030934767989 - type: f1 value: 75.2074842882598 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-TW) type: mteb/amazon_massive_scenario config: zh-TW split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 70.69266980497646 - type: f1 value: 70.94103167391192 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 28.91697191169135 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 28.434000079573313 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 30.96683513343383 - type: mrr value: 31.967364078714834 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.5280000000000005 - type: map_at_10 value: 11.793 - type: map_at_100 value: 14.496999999999998 - type: map_at_1000 value: 15.783 - type: map_at_3 value: 8.838 - type: map_at_5 value: 10.07 - type: mrr_at_1 value: 43.653 - type: mrr_at_10 value: 51.531000000000006 - type: mrr_at_100 value: 52.205 - type: mrr_at_1000 value: 52.242999999999995 - type: mrr_at_3 value: 49.431999999999995 - type: 
mrr_at_5 value: 50.470000000000006 - type: ndcg_at_1 value: 42.415000000000006 - type: ndcg_at_10 value: 32.464999999999996 - type: ndcg_at_100 value: 28.927999999999997 - type: ndcg_at_1000 value: 37.629000000000005 - type: ndcg_at_3 value: 37.845 - type: ndcg_at_5 value: 35.147 - type: precision_at_1 value: 43.653 - type: precision_at_10 value: 23.932000000000002 - type: precision_at_100 value: 7.17 - type: precision_at_1000 value: 1.967 - type: precision_at_3 value: 35.397 - type: precision_at_5 value: 29.907 - type: recall_at_1 value: 5.5280000000000005 - type: recall_at_10 value: 15.568000000000001 - type: recall_at_100 value: 28.54 - type: recall_at_1000 value: 59.864 - type: recall_at_3 value: 9.822000000000001 - type: recall_at_5 value: 11.726 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 37.041000000000004 - type: map_at_10 value: 52.664 - type: map_at_100 value: 53.477 - type: map_at_1000 value: 53.505 - type: map_at_3 value: 48.510999999999996 - type: map_at_5 value: 51.036 - type: mrr_at_1 value: 41.338 - type: mrr_at_10 value: 55.071000000000005 - type: mrr_at_100 value: 55.672 - type: mrr_at_1000 value: 55.689 - type: mrr_at_3 value: 51.82 - type: mrr_at_5 value: 53.852 - type: ndcg_at_1 value: 41.338 - type: ndcg_at_10 value: 60.01800000000001 - type: ndcg_at_100 value: 63.409000000000006 - type: ndcg_at_1000 value: 64.017 - type: ndcg_at_3 value: 52.44799999999999 - type: ndcg_at_5 value: 56.571000000000005 - type: precision_at_1 value: 41.338 - type: precision_at_10 value: 9.531 - type: precision_at_100 value: 1.145 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 23.416 - type: precision_at_5 value: 16.46 - type: recall_at_1 value: 37.041000000000004 - type: recall_at_10 value: 79.76299999999999 - type: recall_at_100 value: 94.39 - type: recall_at_1000 value: 98.851 - type: recall_at_3 value: 60.465 - type: recall_at_5 value: 69.906 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 69.952 - type: map_at_10 value: 83.758 - type: map_at_100 value: 84.406 - type: map_at_1000 value: 84.425 - type: map_at_3 value: 80.839 - type: map_at_5 value: 82.646 - type: mrr_at_1 value: 80.62 - type: mrr_at_10 value: 86.947 - type: mrr_at_100 value: 87.063 - type: mrr_at_1000 value: 87.064 - type: mrr_at_3 value: 85.96000000000001 - type: mrr_at_5 value: 86.619 - type: ndcg_at_1 value: 80.63 - type: ndcg_at_10 value: 87.64800000000001 - type: ndcg_at_100 value: 88.929 - type: ndcg_at_1000 value: 89.054 - type: ndcg_at_3 value: 84.765 - type: ndcg_at_5 value: 86.291 - type: precision_at_1 value: 80.63 - type: precision_at_10 value: 13.314 - type: precision_at_100 value: 1.525 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.1 - type: precision_at_5 value: 24.372 - type: recall_at_1 value: 69.952 - type: recall_at_10 value: 94.955 - type: recall_at_100 value: 99.38 - type: recall_at_1000 value: 99.96000000000001 - type: recall_at_3 value: 86.60600000000001 - type: recall_at_5 value: 90.997 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 42.41329517878427 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 
282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 55.171278362748666 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.213 - type: map_at_10 value: 9.895 - type: map_at_100 value: 11.776 - type: map_at_1000 value: 12.084 - type: map_at_3 value: 7.2669999999999995 - type: map_at_5 value: 8.620999999999999 - type: mrr_at_1 value: 20.8 - type: mrr_at_10 value: 31.112000000000002 - type: mrr_at_100 value: 32.274 - type: mrr_at_1000 value: 32.35 - type: mrr_at_3 value: 28.133000000000003 - type: mrr_at_5 value: 29.892999999999997 - type: ndcg_at_1 value: 20.8 - type: ndcg_at_10 value: 17.163999999999998 - type: ndcg_at_100 value: 24.738 - type: ndcg_at_1000 value: 30.316 - type: ndcg_at_3 value: 16.665 - type: ndcg_at_5 value: 14.478 - type: precision_at_1 value: 20.8 - type: precision_at_10 value: 8.74 - type: precision_at_100 value: 1.963 - type: precision_at_1000 value: 0.33 - type: precision_at_3 value: 15.467 - type: precision_at_5 value: 12.6 - type: recall_at_1 value: 4.213 - type: recall_at_10 value: 17.698 - type: recall_at_100 value: 39.838 - type: recall_at_1000 value: 66.893 - type: recall_at_3 value: 9.418 - type: recall_at_5 value: 12.773000000000001 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 82.90453315738294 - type: cos_sim_spearman value: 78.51197850080254 - type: euclidean_pearson value: 80.09647123597748 - type: euclidean_spearman value: 78.63548011514061 - type: manhattan_pearson value: 80.10645285675231 - type: manhattan_spearman value: 78.57861806068901 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 84.2616156846401 - type: cos_sim_spearman value: 76.69713867850156 - type: euclidean_pearson value: 77.97948563800394 - type: euclidean_spearman value: 74.2371211567807 - type: manhattan_pearson value: 77.69697879669705 - type: manhattan_spearman value: 73.86529778022278 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 77.0293269315045 - type: cos_sim_spearman value: 78.02555120584198 - type: euclidean_pearson value: 78.25398100379078 - type: euclidean_spearman value: 78.66963870599464 - type: manhattan_pearson value: 78.14314682167348 - type: manhattan_spearman value: 78.57692322969135 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 79.16989925136942 - type: cos_sim_spearman value: 76.5996225327091 - type: euclidean_pearson value: 77.8319003279786 - type: euclidean_spearman value: 76.42824009468998 - type: manhattan_pearson value: 77.69118862737736 - type: manhattan_spearman value: 76.25568104762812 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 87.42012286935325 - type: cos_sim_spearman value: 88.15654297884122 - type: euclidean_pearson value: 87.34082819427852 - type: euclidean_spearman value: 88.06333589547084 - type: manhattan_pearson value: 87.25115596784842 - type: 
manhattan_spearman value: 87.9559927695203 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 82.88222044996712 - type: cos_sim_spearman value: 84.28476589061077 - type: euclidean_pearson value: 83.17399758058309 - type: euclidean_spearman value: 83.85497357244542 - type: manhattan_pearson value: 83.0308397703786 - type: manhattan_spearman value: 83.71554539935046 - task: type: STS dataset: name: MTEB STS17 (ko-ko) type: mteb/sts17-crosslingual-sts config: ko-ko split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 80.20682986257339 - type: cos_sim_spearman value: 79.94567120362092 - type: euclidean_pearson value: 79.43122480368902 - type: euclidean_spearman value: 79.94802077264987 - type: manhattan_pearson value: 79.32653021527081 - type: manhattan_spearman value: 79.80961146709178 - task: type: STS dataset: name: MTEB STS17 (ar-ar) type: mteb/sts17-crosslingual-sts config: ar-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 74.46578144394383 - type: cos_sim_spearman value: 74.52496637472179 - type: euclidean_pearson value: 72.2903807076809 - type: euclidean_spearman value: 73.55549359771645 - type: manhattan_pearson value: 72.09324837709393 - type: manhattan_spearman value: 73.36743103606581 - task: type: STS dataset: name: MTEB STS17 (en-ar) type: mteb/sts17-crosslingual-sts config: en-ar split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 71.37272335116 - type: cos_sim_spearman value: 71.26702117766037 - type: euclidean_pearson value: 67.114829954434 - type: euclidean_spearman value: 66.37938893947761 - type: manhattan_pearson value: 66.79688574095246 - type: manhattan_spearman value: 66.17292828079667 - task: type: STS dataset: name: MTEB STS17 (en-de) type: mteb/sts17-crosslingual-sts config: en-de split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 80.61016770129092 - type: cos_sim_spearman value: 82.08515426632214 - type: euclidean_pearson value: 80.557340361131 - type: euclidean_spearman value: 80.37585812266175 - type: manhattan_pearson value: 80.6782873404285 - type: manhattan_spearman value: 80.6678073032024 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.00150745350108 - type: cos_sim_spearman value: 87.83441972211425 - type: euclidean_pearson value: 87.94826702308792 - type: euclidean_spearman value: 87.46143974860725 - type: manhattan_pearson value: 87.97560344306105 - type: manhattan_spearman value: 87.5267102829796 - task: type: STS dataset: name: MTEB STS17 (en-tr) type: mteb/sts17-crosslingual-sts config: en-tr split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 64.76325252267235 - type: cos_sim_spearman value: 63.32615095463905 - type: euclidean_pearson value: 64.07920669155716 - type: euclidean_spearman value: 61.21409893072176 - type: manhattan_pearson value: 64.26308625680016 - type: manhattan_spearman value: 61.2438185254079 - task: type: STS dataset: name: MTEB STS17 (es-en) type: mteb/sts17-crosslingual-sts config: es-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 
75.82644463022595 - type: cos_sim_spearman value: 76.50381269945073 - type: euclidean_pearson value: 75.1328548315934 - type: euclidean_spearman value: 75.63761139408453 - type: manhattan_pearson value: 75.18610101241407 - type: manhattan_spearman value: 75.30669266354164 - task: type: STS dataset: name: MTEB STS17 (es-es) type: mteb/sts17-crosslingual-sts config: es-es split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.49994164686832 - type: cos_sim_spearman value: 86.73743986245549 - type: euclidean_pearson value: 86.8272894387145 - type: euclidean_spearman value: 85.97608491000507 - type: manhattan_pearson value: 86.74960140396779 - type: manhattan_spearman value: 85.79285984190273 - task: type: STS dataset: name: MTEB STS17 (fr-en) type: mteb/sts17-crosslingual-sts config: fr-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 79.58172210788469 - type: cos_sim_spearman value: 80.17516468334607 - type: euclidean_pearson value: 77.56537843470504 - type: euclidean_spearman value: 77.57264627395521 - type: manhattan_pearson value: 78.09703521695943 - type: manhattan_spearman value: 78.15942760916954 - task: type: STS dataset: name: MTEB STS17 (it-en) type: mteb/sts17-crosslingual-sts config: it-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 79.7589932931751 - type: cos_sim_spearman value: 80.15210089028162 - type: euclidean_pearson value: 77.54135223516057 - type: euclidean_spearman value: 77.52697996368764 - type: manhattan_pearson value: 77.65734439572518 - type: manhattan_spearman value: 77.77702992016121 - task: type: STS dataset: name: MTEB STS17 (nl-en) type: mteb/sts17-crosslingual-sts config: nl-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 79.16682365511267 - type: cos_sim_spearman value: 79.25311267628506 - type: euclidean_pearson value: 77.54882036762244 - type: euclidean_spearman value: 77.33212935194827 - type: manhattan_pearson value: 77.98405516064015 - type: manhattan_spearman value: 77.85075717865719 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 59.10473294775917 - type: cos_sim_spearman value: 61.82780474476838 - type: euclidean_pearson value: 45.885111672377256 - type: euclidean_spearman value: 56.88306351932454 - type: manhattan_pearson value: 46.101218127323186 - type: manhattan_spearman value: 56.80953694186333 - task: type: STS dataset: name: MTEB STS22 (de) type: mteb/sts22-crosslingual-sts config: de split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 45.781923079584146 - type: cos_sim_spearman value: 55.95098449691107 - type: euclidean_pearson value: 25.4571031323205 - type: euclidean_spearman value: 49.859978118078935 - type: manhattan_pearson value: 25.624938455041384 - type: manhattan_spearman value: 49.99546185049401 - task: type: STS dataset: name: MTEB STS22 (es) type: mteb/sts22-crosslingual-sts config: es split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 60.00618133997907 - type: cos_sim_spearman value: 66.57896677718321 - type: euclidean_pearson value: 42.60118466388821 - type: euclidean_spearman value: 62.8210759715209 - type: manhattan_pearson value: 42.63446860604094 - type: 
manhattan_spearman value: 62.73803068925271 - task: type: STS dataset: name: MTEB STS22 (pl) type: mteb/sts22-crosslingual-sts config: pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 28.460759121626943 - type: cos_sim_spearman value: 34.13459007469131 - type: euclidean_pearson value: 6.0917739325525195 - type: euclidean_spearman value: 27.9947262664867 - type: manhattan_pearson value: 6.16877864169911 - type: manhattan_spearman value: 28.00664163971514 - task: type: STS dataset: name: MTEB STS22 (tr) type: mteb/sts22-crosslingual-sts config: tr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 57.42546621771696 - type: cos_sim_spearman value: 63.699663168970474 - type: euclidean_pearson value: 38.12085278789738 - type: euclidean_spearman value: 58.12329140741536 - type: manhattan_pearson value: 37.97364549443335 - type: manhattan_spearman value: 57.81545502318733 - task: type: STS dataset: name: MTEB STS22 (ar) type: mteb/sts22-crosslingual-sts config: ar split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 46.82241380954213 - type: cos_sim_spearman value: 57.86569456006391 - type: euclidean_pearson value: 31.80480070178813 - type: euclidean_spearman value: 52.484000620130104 - type: manhattan_pearson value: 31.952708554646097 - type: manhattan_spearman value: 52.8560972356195 - task: type: STS dataset: name: MTEB STS22 (ru) type: mteb/sts22-crosslingual-sts config: ru split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 52.00447170498087 - type: cos_sim_spearman value: 60.664116225735164 - type: euclidean_pearson value: 33.87382555421702 - type: euclidean_spearman value: 55.74649067458667 - type: manhattan_pearson value: 33.99117246759437 - type: manhattan_spearman value: 55.98749034923899 - task: type: STS dataset: name: MTEB STS22 (zh) type: mteb/sts22-crosslingual-sts config: zh split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 58.06497233105448 - type: cos_sim_spearman value: 65.62968801135676 - type: euclidean_pearson value: 47.482076613243905 - type: euclidean_spearman value: 62.65137791498299 - type: manhattan_pearson value: 47.57052626104093 - type: manhattan_spearman value: 62.436916516613294 - task: type: STS dataset: name: MTEB STS22 (fr) type: mteb/sts22-crosslingual-sts config: fr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 70.49397298562575 - type: cos_sim_spearman value: 74.79604041187868 - type: euclidean_pearson value: 49.661891561317795 - type: euclidean_spearman value: 70.31535537621006 - type: manhattan_pearson value: 49.553715741850006 - type: manhattan_spearman value: 70.24779344636806 - task: type: STS dataset: name: MTEB STS22 (de-en) type: mteb/sts22-crosslingual-sts config: de-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 55.640574515348696 - type: cos_sim_spearman value: 54.927959317689 - type: euclidean_pearson value: 29.00139666967476 - type: euclidean_spearman value: 41.86386566971605 - type: manhattan_pearson value: 29.47411067730344 - type: manhattan_spearman value: 42.337438424952786 - task: type: STS dataset: name: MTEB STS22 (es-en) type: mteb/sts22-crosslingual-sts config: es-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson 
value: 68.14095292259312 - type: cos_sim_spearman value: 73.99017581234789 - type: euclidean_pearson value: 46.46304297872084 - type: euclidean_spearman value: 60.91834114800041 - type: manhattan_pearson value: 47.07072666338692 - type: manhattan_spearman value: 61.70415727977926 - task: type: STS dataset: name: MTEB STS22 (it) type: mteb/sts22-crosslingual-sts config: it split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 73.27184653359575 - type: cos_sim_spearman value: 77.76070252418626 - type: euclidean_pearson value: 62.30586577544778 - type: euclidean_spearman value: 75.14246629110978 - type: manhattan_pearson value: 62.328196884927046 - type: manhattan_spearman value: 75.1282792981433 - task: type: STS dataset: name: MTEB STS22 (pl-en) type: mteb/sts22-crosslingual-sts config: pl-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 71.59448528829957 - type: cos_sim_spearman value: 70.37277734222123 - type: euclidean_pearson value: 57.63145565721123 - type: euclidean_spearman value: 66.10113048304427 - type: manhattan_pearson value: 57.18897811586808 - type: manhattan_spearman value: 66.5595511215901 - task: type: STS dataset: name: MTEB STS22 (zh-en) type: mteb/sts22-crosslingual-sts config: zh-en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 66.37520607720838 - type: cos_sim_spearman value: 69.92282148997948 - type: euclidean_pearson value: 40.55768770125291 - type: euclidean_spearman value: 55.189128944669605 - type: manhattan_pearson value: 41.03566433468883 - type: manhattan_spearman value: 55.61251893174558 - task: type: STS dataset: name: MTEB STS22 (es-it) type: mteb/sts22-crosslingual-sts config: es-it split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 57.791929533771835 - type: cos_sim_spearman value: 66.45819707662093 - type: euclidean_pearson value: 39.03686018511092 - type: euclidean_spearman value: 56.01282695640428 - type: manhattan_pearson value: 38.91586623619632 - type: manhattan_spearman value: 56.69394943612747 - task: type: STS dataset: name: MTEB STS22 (de-fr) type: mteb/sts22-crosslingual-sts config: de-fr split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 47.82224468473866 - type: cos_sim_spearman value: 59.467307194781164 - type: euclidean_pearson value: 27.428459190256145 - type: euclidean_spearman value: 60.83463107397519 - type: manhattan_pearson value: 27.487391578496638 - type: manhattan_spearman value: 61.281380460246496 - task: type: STS dataset: name: MTEB STS22 (de-pl) type: mteb/sts22-crosslingual-sts config: de-pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 16.306666792752644 - type: cos_sim_spearman value: 39.35486427252405 - type: euclidean_pearson value: -2.7887154897955435 - type: euclidean_spearman value: 27.1296051831719 - type: manhattan_pearson value: -3.202291270581297 - type: manhattan_spearman value: 26.32895849218158 - task: type: STS dataset: name: MTEB STS22 (fr-pl) type: mteb/sts22-crosslingual-sts config: fr-pl split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 59.67006803805076 - type: cos_sim_spearman value: 73.24670207647144 - type: euclidean_pearson value: 46.91884681500483 - type: euclidean_spearman value: 16.903085094570333 - type: manhattan_pearson value: 
46.88391675325812 - type: manhattan_spearman value: 28.17180849095055 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 83.79555591223837 - type: cos_sim_spearman value: 85.63658602085185 - type: euclidean_pearson value: 85.22080894037671 - type: euclidean_spearman value: 85.54113580167038 - type: manhattan_pearson value: 85.1639505960118 - type: manhattan_spearman value: 85.43502665436196 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 80.73900991689766 - type: mrr value: 94.81624131133934 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 55.678000000000004 - type: map_at_10 value: 65.135 - type: map_at_100 value: 65.824 - type: map_at_1000 value: 65.852 - type: map_at_3 value: 62.736000000000004 - type: map_at_5 value: 64.411 - type: mrr_at_1 value: 58.333 - type: mrr_at_10 value: 66.5 - type: mrr_at_100 value: 67.053 - type: mrr_at_1000 value: 67.08 - type: mrr_at_3 value: 64.944 - type: mrr_at_5 value: 65.89399999999999 - type: ndcg_at_1 value: 58.333 - type: ndcg_at_10 value: 69.34700000000001 - type: ndcg_at_100 value: 72.32 - type: ndcg_at_1000 value: 73.014 - type: ndcg_at_3 value: 65.578 - type: ndcg_at_5 value: 67.738 - type: precision_at_1 value: 58.333 - type: precision_at_10 value: 9.033 - type: precision_at_100 value: 1.0670000000000002 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 25.444 - type: precision_at_5 value: 16.933 - type: recall_at_1 value: 55.678000000000004 - type: recall_at_10 value: 80.72200000000001 - type: recall_at_100 value: 93.93299999999999 - type: recall_at_1000 value: 99.333 - type: recall_at_3 value: 70.783 - type: recall_at_5 value: 75.978 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.74653465346535 - type: cos_sim_ap value: 93.01476369929063 - type: cos_sim_f1 value: 86.93009118541033 - type: cos_sim_precision value: 88.09034907597535 - type: cos_sim_recall value: 85.8 - type: dot_accuracy value: 99.22970297029703 - type: dot_ap value: 51.58725659485144 - type: dot_f1 value: 53.51351351351352 - type: dot_precision value: 58.235294117647065 - type: dot_recall value: 49.5 - type: euclidean_accuracy value: 99.74356435643564 - type: euclidean_ap value: 92.40332894384368 - type: euclidean_f1 value: 86.97838109602817 - type: euclidean_precision value: 87.46208291203236 - type: euclidean_recall value: 86.5 - type: manhattan_accuracy value: 99.73069306930694 - type: manhattan_ap value: 92.01320815721121 - type: manhattan_f1 value: 86.4135864135864 - type: manhattan_precision value: 86.32734530938124 - type: manhattan_recall value: 86.5 - type: max_accuracy value: 99.74653465346535 - type: max_ap value: 93.01476369929063 - type: max_f1 value: 86.97838109602817 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 55.2660514302523 - task: type: Clustering dataset: name: MTEB 
StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 30.4637783572547 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 49.41377758357637 - type: mrr value: 50.138451213818854 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 28.887846011166594 - type: cos_sim_spearman value: 30.10823258355903 - type: dot_pearson value: 12.888049550236385 - type: dot_spearman value: 12.827495903098123 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.21 - type: map_at_10 value: 1.667 - type: map_at_100 value: 9.15 - type: map_at_1000 value: 22.927 - type: map_at_3 value: 0.573 - type: map_at_5 value: 0.915 - type: mrr_at_1 value: 80 - type: mrr_at_10 value: 87.167 - type: mrr_at_100 value: 87.167 - type: mrr_at_1000 value: 87.167 - type: mrr_at_3 value: 85.667 - type: mrr_at_5 value: 87.167 - type: ndcg_at_1 value: 76 - type: ndcg_at_10 value: 69.757 - type: ndcg_at_100 value: 52.402 - type: ndcg_at_1000 value: 47.737 - type: ndcg_at_3 value: 71.866 - type: ndcg_at_5 value: 72.225 - type: precision_at_1 value: 80 - type: precision_at_10 value: 75 - type: precision_at_100 value: 53.959999999999994 - type: precision_at_1000 value: 21.568 - type: precision_at_3 value: 76.667 - type: precision_at_5 value: 78 - type: recall_at_1 value: 0.21 - type: recall_at_10 value: 1.9189999999999998 - type: recall_at_100 value: 12.589 - type: recall_at_1000 value: 45.312000000000005 - type: recall_at_3 value: 0.61 - type: recall_at_5 value: 1.019 - task: type: BitextMining dataset: name: MTEB Tatoeba (sqi-eng) type: mteb/tatoeba-bitext-mining config: sqi-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.10000000000001 - type: f1 value: 90.06 - type: precision value: 89.17333333333333 - type: recall value: 92.10000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (fry-eng) type: mteb/tatoeba-bitext-mining config: fry-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 56.06936416184971 - type: f1 value: 50.87508028259473 - type: precision value: 48.97398843930635 - type: recall value: 56.06936416184971 - task: type: BitextMining dataset: name: MTEB Tatoeba (kur-eng) type: mteb/tatoeba-bitext-mining config: kur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 57.3170731707317 - type: f1 value: 52.96080139372822 - type: precision value: 51.67861124382864 - type: recall value: 57.3170731707317 - task: type: BitextMining dataset: name: MTEB Tatoeba (tur-eng) type: mteb/tatoeba-bitext-mining config: tur-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.3 - type: f1 value: 92.67333333333333 - type: precision value: 91.90833333333333 - type: recall value: 94.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (deu-eng) type: mteb/tatoeba-bitext-mining config: deu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.7 - type: 
f1 value: 97.07333333333332 - type: precision value: 96.79500000000002 - type: recall value: 97.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (nld-eng) type: mteb/tatoeba-bitext-mining config: nld-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.69999999999999 - type: f1 value: 93.2 - type: precision value: 92.48333333333333 - type: recall value: 94.69999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (ron-eng) type: mteb/tatoeba-bitext-mining config: ron-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.9 - type: f1 value: 91.26666666666667 - type: precision value: 90.59444444444445 - type: recall value: 92.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (ang-eng) type: mteb/tatoeba-bitext-mining config: ang-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 34.32835820895522 - type: f1 value: 29.074180380150533 - type: precision value: 28.068207322920596 - type: recall value: 34.32835820895522 - task: type: BitextMining dataset: name: MTEB Tatoeba (ido-eng) type: mteb/tatoeba-bitext-mining config: ido-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.5 - type: f1 value: 74.3945115995116 - type: precision value: 72.82967843459222 - type: recall value: 78.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (jav-eng) type: mteb/tatoeba-bitext-mining config: jav-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 66.34146341463415 - type: f1 value: 61.2469400518181 - type: precision value: 59.63977756660683 - type: recall value: 66.34146341463415 - task: type: BitextMining dataset: name: MTEB Tatoeba (isl-eng) type: mteb/tatoeba-bitext-mining config: isl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 80.9 - type: f1 value: 76.90349206349207 - type: precision value: 75.32921568627451 - type: recall value: 80.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (slv-eng) type: mteb/tatoeba-bitext-mining config: slv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.93317132442284 - type: f1 value: 81.92519105034295 - type: precision value: 80.71283920615635 - type: recall value: 84.93317132442284 - task: type: BitextMining dataset: name: MTEB Tatoeba (cym-eng) type: mteb/tatoeba-bitext-mining config: cym-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 71.1304347826087 - type: f1 value: 65.22394755003451 - type: precision value: 62.912422360248435 - type: recall value: 71.1304347826087 - task: type: BitextMining dataset: name: MTEB Tatoeba (kaz-eng) type: mteb/tatoeba-bitext-mining config: kaz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 79.82608695652173 - type: f1 value: 75.55693581780538 - type: precision value: 73.79420289855072 - type: recall value: 79.82608695652173 - task: type: BitextMining dataset: name: MTEB Tatoeba (est-eng) type: mteb/tatoeba-bitext-mining config: est-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 74 - type: f1 value: 70.51022222222223 - type: precision value: 69.29673599347512 - type: recall value: 74 - task: type: BitextMining dataset: name: MTEB Tatoeba (heb-eng) type: mteb/tatoeba-bitext-mining config: 
heb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.7 - type: f1 value: 74.14238095238095 - type: precision value: 72.27214285714285 - type: recall value: 78.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (gla-eng) type: mteb/tatoeba-bitext-mining config: gla-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 48.97466827503016 - type: f1 value: 43.080330405420874 - type: precision value: 41.36505499593557 - type: recall value: 48.97466827503016 - task: type: BitextMining dataset: name: MTEB Tatoeba (mar-eng) type: mteb/tatoeba-bitext-mining config: mar-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.60000000000001 - type: f1 value: 86.62333333333333 - type: precision value: 85.225 - type: recall value: 89.60000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (lat-eng) type: mteb/tatoeba-bitext-mining config: lat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 45.2 - type: f1 value: 39.5761253006253 - type: precision value: 37.991358436312 - type: recall value: 45.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (bel-eng) type: mteb/tatoeba-bitext-mining config: bel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.5 - type: f1 value: 86.70333333333333 - type: precision value: 85.53166666666667 - type: recall value: 89.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (pms-eng) type: mteb/tatoeba-bitext-mining config: pms-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 50.095238095238095 - type: f1 value: 44.60650460650461 - type: precision value: 42.774116796477045 - type: recall value: 50.095238095238095 - task: type: BitextMining dataset: name: MTEB Tatoeba (gle-eng) type: mteb/tatoeba-bitext-mining config: gle-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 63.4 - type: f1 value: 58.35967261904762 - type: precision value: 56.54857142857143 - type: recall value: 63.4 - task: type: BitextMining dataset: name: MTEB Tatoeba (pes-eng) type: mteb/tatoeba-bitext-mining config: pes-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.2 - type: f1 value: 87.075 - type: precision value: 86.12095238095239 - type: recall value: 89.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (nob-eng) type: mteb/tatoeba-bitext-mining config: nob-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96.8 - type: f1 value: 95.90333333333334 - type: precision value: 95.50833333333333 - type: recall value: 96.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (bul-eng) type: mteb/tatoeba-bitext-mining config: bul-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.9 - type: f1 value: 88.6288888888889 - type: precision value: 87.61607142857142 - type: recall value: 90.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (cbk-eng) type: mteb/tatoeba-bitext-mining config: cbk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 65.2 - type: f1 value: 60.54377630539395 - type: precision value: 58.89434482711381 - type: recall value: 65.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (hun-eng) type: 
mteb/tatoeba-bitext-mining config: hun-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87 - type: f1 value: 84.32412698412699 - type: precision value: 83.25527777777778 - type: recall value: 87 - task: type: BitextMining dataset: name: MTEB Tatoeba (uig-eng) type: mteb/tatoeba-bitext-mining config: uig-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 68.7 - type: f1 value: 63.07883541295306 - type: precision value: 61.06117424242426 - type: recall value: 68.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (rus-eng) type: mteb/tatoeba-bitext-mining config: rus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.7 - type: f1 value: 91.78333333333335 - type: precision value: 90.86666666666667 - type: recall value: 93.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (spa-eng) type: mteb/tatoeba-bitext-mining config: spa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.7 - type: f1 value: 96.96666666666667 - type: precision value: 96.61666666666667 - type: recall value: 97.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (hye-eng) type: mteb/tatoeba-bitext-mining config: hye-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.27493261455525 - type: f1 value: 85.90745732255168 - type: precision value: 84.91389637616052 - type: recall value: 88.27493261455525 - task: type: BitextMining dataset: name: MTEB Tatoeba (tel-eng) type: mteb/tatoeba-bitext-mining config: tel-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.5982905982906 - type: f1 value: 88.4900284900285 - type: precision value: 87.57122507122507 - type: recall value: 90.5982905982906 - task: type: BitextMining dataset: name: MTEB Tatoeba (afr-eng) type: mteb/tatoeba-bitext-mining config: afr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 89.5 - type: f1 value: 86.90769841269842 - type: precision value: 85.80178571428571 - type: recall value: 89.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (mon-eng) type: mteb/tatoeba-bitext-mining config: mon-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 82.5 - type: f1 value: 78.36796536796538 - type: precision value: 76.82196969696969 - type: recall value: 82.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (arz-eng) type: mteb/tatoeba-bitext-mining config: arz-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 71.48846960167715 - type: f1 value: 66.78771089148448 - type: precision value: 64.98302885095339 - type: recall value: 71.48846960167715 - task: type: BitextMining dataset: name: MTEB Tatoeba (hrv-eng) type: mteb/tatoeba-bitext-mining config: hrv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.1 - type: f1 value: 92.50333333333333 - type: precision value: 91.77499999999999 - type: recall value: 94.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (nov-eng) type: mteb/tatoeba-bitext-mining config: nov-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 71.20622568093385 - type: f1 value: 66.83278891450098 - type: precision value: 65.35065777283677 - type: recall value: 
71.20622568093385 - task: type: BitextMining dataset: name: MTEB Tatoeba (gsw-eng) type: mteb/tatoeba-bitext-mining config: gsw-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 48.717948717948715 - type: f1 value: 43.53146853146853 - type: precision value: 42.04721204721204 - type: recall value: 48.717948717948715 - task: type: BitextMining dataset: name: MTEB Tatoeba (nds-eng) type: mteb/tatoeba-bitext-mining config: nds-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 58.5 - type: f1 value: 53.8564991863928 - type: precision value: 52.40329436122275 - type: recall value: 58.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (ukr-eng) type: mteb/tatoeba-bitext-mining config: ukr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 90.8 - type: f1 value: 88.29 - type: precision value: 87.09166666666667 - type: recall value: 90.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (uzb-eng) type: mteb/tatoeba-bitext-mining config: uzb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 67.28971962616822 - type: f1 value: 62.63425307817832 - type: precision value: 60.98065939771546 - type: recall value: 67.28971962616822 - task: type: BitextMining dataset: name: MTEB Tatoeba (lit-eng) type: mteb/tatoeba-bitext-mining config: lit-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 78.7 - type: f1 value: 75.5264472455649 - type: precision value: 74.38205086580086 - type: recall value: 78.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (ina-eng) type: mteb/tatoeba-bitext-mining config: ina-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.7 - type: f1 value: 86.10809523809525 - type: precision value: 85.07602564102565 - type: recall value: 88.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (lfn-eng) type: mteb/tatoeba-bitext-mining config: lfn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 56.99999999999999 - type: f1 value: 52.85487521402737 - type: precision value: 51.53985162713104 - type: recall value: 56.99999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (zsm-eng) type: mteb/tatoeba-bitext-mining config: zsm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94 - type: f1 value: 92.45333333333333 - type: precision value: 91.79166666666667 - type: recall value: 94 - task: type: BitextMining dataset: name: MTEB Tatoeba (ita-eng) type: mteb/tatoeba-bitext-mining config: ita-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.30000000000001 - type: f1 value: 90.61333333333333 - type: precision value: 89.83333333333331 - type: recall value: 92.30000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (cmn-eng) type: mteb/tatoeba-bitext-mining config: cmn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.69999999999999 - type: f1 value: 93.34555555555555 - type: precision value: 92.75416666666668 - type: recall value: 94.69999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (lvs-eng) type: mteb/tatoeba-bitext-mining config: lvs-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 
80.2 - type: f1 value: 76.6563035113035 - type: precision value: 75.3014652014652 - type: recall value: 80.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (glg-eng) type: mteb/tatoeba-bitext-mining config: glg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.7 - type: f1 value: 82.78689263765207 - type: precision value: 82.06705086580087 - type: recall value: 84.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (ceb-eng) type: mteb/tatoeba-bitext-mining config: ceb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 50.33333333333333 - type: f1 value: 45.461523661523664 - type: precision value: 43.93545574795575 - type: recall value: 50.33333333333333 - task: type: BitextMining dataset: name: MTEB Tatoeba (bre-eng) type: mteb/tatoeba-bitext-mining config: bre-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 6.6000000000000005 - type: f1 value: 5.442121400446441 - type: precision value: 5.146630385487529 - type: recall value: 6.6000000000000005 - task: type: BitextMining dataset: name: MTEB Tatoeba (ben-eng) type: mteb/tatoeba-bitext-mining config: ben-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85 - type: f1 value: 81.04666666666667 - type: precision value: 79.25 - type: recall value: 85 - task: type: BitextMining dataset: name: MTEB Tatoeba (swg-eng) type: mteb/tatoeba-bitext-mining config: swg-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 47.32142857142857 - type: f1 value: 42.333333333333336 - type: precision value: 40.69196428571429 - type: recall value: 47.32142857142857 - task: type: BitextMining dataset: name: MTEB Tatoeba (arq-eng) type: mteb/tatoeba-bitext-mining config: arq-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 30.735455543358945 - type: f1 value: 26.73616790022338 - type: precision value: 25.397823220451283 - type: recall value: 30.735455543358945 - task: type: BitextMining dataset: name: MTEB Tatoeba (kab-eng) type: mteb/tatoeba-bitext-mining config: kab-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 25.1 - type: f1 value: 21.975989896371022 - type: precision value: 21.059885632257203 - type: recall value: 25.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (fra-eng) type: mteb/tatoeba-bitext-mining config: fra-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.3 - type: f1 value: 92.75666666666666 - type: precision value: 92.06166666666665 - type: recall value: 94.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (por-eng) type: mteb/tatoeba-bitext-mining config: por-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.1 - type: f1 value: 92.74 - type: precision value: 92.09166666666667 - type: recall value: 94.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (tat-eng) type: mteb/tatoeba-bitext-mining config: tat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 71.3 - type: f1 value: 66.922442002442 - type: precision value: 65.38249567099568 - type: recall value: 71.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (oci-eng) type: mteb/tatoeba-bitext-mining config: oci-eng split: test revision: 
9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 40.300000000000004 - type: f1 value: 35.78682789299971 - type: precision value: 34.66425128716588 - type: recall value: 40.300000000000004 - task: type: BitextMining dataset: name: MTEB Tatoeba (pol-eng) type: mteb/tatoeba-bitext-mining config: pol-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 96 - type: f1 value: 94.82333333333334 - type: precision value: 94.27833333333334 - type: recall value: 96 - task: type: BitextMining dataset: name: MTEB Tatoeba (war-eng) type: mteb/tatoeba-bitext-mining config: war-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 51.1 - type: f1 value: 47.179074753133584 - type: precision value: 46.06461044702424 - type: recall value: 51.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (aze-eng) type: mteb/tatoeba-bitext-mining config: aze-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 87.7 - type: f1 value: 84.71 - type: precision value: 83.46166666666667 - type: recall value: 87.7 - task: type: BitextMining dataset: name: MTEB Tatoeba (vie-eng) type: mteb/tatoeba-bitext-mining config: vie-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.8 - type: f1 value: 94.68333333333334 - type: precision value: 94.13333333333334 - type: recall value: 95.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (nno-eng) type: mteb/tatoeba-bitext-mining config: nno-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85.39999999999999 - type: f1 value: 82.5577380952381 - type: precision value: 81.36833333333334 - type: recall value: 85.39999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (cha-eng) type: mteb/tatoeba-bitext-mining config: cha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 21.16788321167883 - type: f1 value: 16.948865627297987 - type: precision value: 15.971932568647897 - type: recall value: 21.16788321167883 - task: type: BitextMining dataset: name: MTEB Tatoeba (mhr-eng) type: mteb/tatoeba-bitext-mining config: mhr-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 6.9 - type: f1 value: 5.515526831658907 - type: precision value: 5.141966366966367 - type: recall value: 6.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (dan-eng) type: mteb/tatoeba-bitext-mining config: dan-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.2 - type: f1 value: 91.39666666666668 - type: precision value: 90.58666666666667 - type: recall value: 93.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (ell-eng) type: mteb/tatoeba-bitext-mining config: ell-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.2 - type: f1 value: 89.95666666666666 - type: precision value: 88.92833333333333 - type: recall value: 92.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (amh-eng) type: mteb/tatoeba-bitext-mining config: amh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 79.76190476190477 - type: f1 value: 74.93386243386244 - type: precision value: 73.11011904761904 - type: recall value: 79.76190476190477 - task: type: BitextMining dataset: name: MTEB Tatoeba (pam-eng) 
type: mteb/tatoeba-bitext-mining config: pam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 8.799999999999999 - type: f1 value: 6.921439712248537 - type: precision value: 6.489885109680683 - type: recall value: 8.799999999999999 - task: type: BitextMining dataset: name: MTEB Tatoeba (hsb-eng) type: mteb/tatoeba-bitext-mining config: hsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 45.75569358178054 - type: f1 value: 40.34699501312631 - type: precision value: 38.57886764719063 - type: recall value: 45.75569358178054 - task: type: BitextMining dataset: name: MTEB Tatoeba (srp-eng) type: mteb/tatoeba-bitext-mining config: srp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.4 - type: f1 value: 89.08333333333333 - type: precision value: 88.01666666666668 - type: recall value: 91.4 - task: type: BitextMining dataset: name: MTEB Tatoeba (epo-eng) type: mteb/tatoeba-bitext-mining config: epo-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.60000000000001 - type: f1 value: 92.06690476190477 - type: precision value: 91.45095238095239 - type: recall value: 93.60000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (kzj-eng) type: mteb/tatoeba-bitext-mining config: kzj-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 7.5 - type: f1 value: 6.200363129378736 - type: precision value: 5.89115314822466 - type: recall value: 7.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (awa-eng) type: mteb/tatoeba-bitext-mining config: awa-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 73.59307359307358 - type: f1 value: 68.38933553219267 - type: precision value: 66.62698412698413 - type: recall value: 73.59307359307358 - task: type: BitextMining dataset: name: MTEB Tatoeba (fao-eng) type: mteb/tatoeba-bitext-mining config: fao-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 69.8473282442748 - type: f1 value: 64.72373682297346 - type: precision value: 62.82834214131924 - type: recall value: 69.8473282442748 - task: type: BitextMining dataset: name: MTEB Tatoeba (mal-eng) type: mteb/tatoeba-bitext-mining config: mal-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 97.5254730713246 - type: f1 value: 96.72489082969432 - type: precision value: 96.33672974284326 - type: recall value: 97.5254730713246 - task: type: BitextMining dataset: name: MTEB Tatoeba (ile-eng) type: mteb/tatoeba-bitext-mining config: ile-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 75.6 - type: f1 value: 72.42746031746033 - type: precision value: 71.14036630036631 - type: recall value: 75.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (bos-eng) type: mteb/tatoeba-bitext-mining config: bos-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.24293785310734 - type: f1 value: 88.86064030131826 - type: precision value: 87.73540489642184 - type: recall value: 91.24293785310734 - task: type: BitextMining dataset: name: MTEB Tatoeba (cor-eng) type: mteb/tatoeba-bitext-mining config: cor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 6.2 - type: f1 value: 
4.383083659794954 - type: precision value: 4.027861324289673 - type: recall value: 6.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (cat-eng) type: mteb/tatoeba-bitext-mining config: cat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.8 - type: f1 value: 84.09428571428572 - type: precision value: 83.00333333333333 - type: recall value: 86.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (eus-eng) type: mteb/tatoeba-bitext-mining config: eus-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 60.699999999999996 - type: f1 value: 56.1584972394755 - type: precision value: 54.713456330903135 - type: recall value: 60.699999999999996 - task: type: BitextMining dataset: name: MTEB Tatoeba (yue-eng) type: mteb/tatoeba-bitext-mining config: yue-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 84.2 - type: f1 value: 80.66190476190475 - type: precision value: 79.19690476190476 - type: recall value: 84.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (swe-eng) type: mteb/tatoeba-bitext-mining config: swe-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 93.2 - type: f1 value: 91.33 - type: precision value: 90.45 - type: recall value: 93.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (dtp-eng) type: mteb/tatoeba-bitext-mining config: dtp-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 6.3 - type: f1 value: 5.126828976748276 - type: precision value: 4.853614328966668 - type: recall value: 6.3 - task: type: BitextMining dataset: name: MTEB Tatoeba (kat-eng) type: mteb/tatoeba-bitext-mining config: kat-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 81.76943699731903 - type: f1 value: 77.82873739308057 - type: precision value: 76.27622452019234 - type: recall value: 81.76943699731903 - task: type: BitextMining dataset: name: MTEB Tatoeba (jpn-eng) type: mteb/tatoeba-bitext-mining config: jpn-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.30000000000001 - type: f1 value: 90.29666666666665 - type: precision value: 89.40333333333334 - type: recall value: 92.30000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (csb-eng) type: mteb/tatoeba-bitext-mining config: csb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 29.249011857707508 - type: f1 value: 24.561866096392947 - type: precision value: 23.356583740215456 - type: recall value: 29.249011857707508 - task: type: BitextMining dataset: name: MTEB Tatoeba (xho-eng) type: mteb/tatoeba-bitext-mining config: xho-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.46478873239437 - type: f1 value: 73.23943661971832 - type: precision value: 71.66666666666667 - type: recall value: 77.46478873239437 - task: type: BitextMining dataset: name: MTEB Tatoeba (orv-eng) type: mteb/tatoeba-bitext-mining config: orv-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 20.35928143712575 - type: f1 value: 15.997867865075824 - type: precision value: 14.882104658301346 - type: recall value: 20.35928143712575 - task: type: BitextMining dataset: name: MTEB Tatoeba (ind-eng) type: mteb/tatoeba-bitext-mining config: ind-eng 
split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 92.2 - type: f1 value: 90.25999999999999 - type: precision value: 89.45333333333335 - type: recall value: 92.2 - task: type: BitextMining dataset: name: MTEB Tatoeba (tuk-eng) type: mteb/tatoeba-bitext-mining config: tuk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 23.15270935960591 - type: f1 value: 19.65673625772148 - type: precision value: 18.793705293464992 - type: recall value: 23.15270935960591 - task: type: BitextMining dataset: name: MTEB Tatoeba (max-eng) type: mteb/tatoeba-bitext-mining config: max-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 59.154929577464785 - type: f1 value: 52.3868463305083 - type: precision value: 50.14938113529662 - type: recall value: 59.154929577464785 - task: type: BitextMining dataset: name: MTEB Tatoeba (swh-eng) type: mteb/tatoeba-bitext-mining config: swh-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 70.51282051282051 - type: f1 value: 66.8089133089133 - type: precision value: 65.37645687645687 - type: recall value: 70.51282051282051 - task: type: BitextMining dataset: name: MTEB Tatoeba (hin-eng) type: mteb/tatoeba-bitext-mining config: hin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 94.6 - type: f1 value: 93 - type: precision value: 92.23333333333333 - type: recall value: 94.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (dsb-eng) type: mteb/tatoeba-bitext-mining config: dsb-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 38.62212943632568 - type: f1 value: 34.3278276962583 - type: precision value: 33.07646935732408 - type: recall value: 38.62212943632568 - task: type: BitextMining dataset: name: MTEB Tatoeba (ber-eng) type: mteb/tatoeba-bitext-mining config: ber-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 28.1 - type: f1 value: 23.579609223054604 - type: precision value: 22.39622774921555 - type: recall value: 28.1 - task: type: BitextMining dataset: name: MTEB Tatoeba (tam-eng) type: mteb/tatoeba-bitext-mining config: tam-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.27361563517914 - type: f1 value: 85.12486427795874 - type: precision value: 83.71335504885994 - type: recall value: 88.27361563517914 - task: type: BitextMining dataset: name: MTEB Tatoeba (slk-eng) type: mteb/tatoeba-bitext-mining config: slk-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.6 - type: f1 value: 86.39928571428571 - type: precision value: 85.4947557997558 - type: recall value: 88.6 - task: type: BitextMining dataset: name: MTEB Tatoeba (tgl-eng) type: mteb/tatoeba-bitext-mining config: tgl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.5 - type: f1 value: 83.77952380952381 - type: precision value: 82.67602564102565 - type: recall value: 86.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (ast-eng) type: mteb/tatoeba-bitext-mining config: ast-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 79.52755905511812 - type: f1 value: 75.3055868016498 - type: precision value: 73.81889763779527 - type: recall value: 
79.52755905511812 - task: type: BitextMining dataset: name: MTEB Tatoeba (mkd-eng) type: mteb/tatoeba-bitext-mining config: mkd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 77.9 - type: f1 value: 73.76261904761905 - type: precision value: 72.11670995670995 - type: recall value: 77.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (khm-eng) type: mteb/tatoeba-bitext-mining config: khm-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 53.8781163434903 - type: f1 value: 47.25804051288816 - type: precision value: 45.0603482390186 - type: recall value: 53.8781163434903 - task: type: BitextMining dataset: name: MTEB Tatoeba (ces-eng) type: mteb/tatoeba-bitext-mining config: ces-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 91.10000000000001 - type: f1 value: 88.88 - type: precision value: 87.96333333333334 - type: recall value: 91.10000000000001 - task: type: BitextMining dataset: name: MTEB Tatoeba (tzl-eng) type: mteb/tatoeba-bitext-mining config: tzl-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 38.46153846153847 - type: f1 value: 34.43978243978244 - type: precision value: 33.429487179487175 - type: recall value: 38.46153846153847 - task: type: BitextMining dataset: name: MTEB Tatoeba (urd-eng) type: mteb/tatoeba-bitext-mining config: urd-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.9 - type: f1 value: 86.19888888888887 - type: precision value: 85.07440476190476 - type: recall value: 88.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (ara-eng) type: mteb/tatoeba-bitext-mining config: ara-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 85.9 - type: f1 value: 82.58857142857143 - type: precision value: 81.15666666666667 - type: recall value: 85.9 - task: type: BitextMining dataset: name: MTEB Tatoeba (kor-eng) type: mteb/tatoeba-bitext-mining config: kor-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 86.8 - type: f1 value: 83.36999999999999 - type: precision value: 81.86833333333333 - type: recall value: 86.8 - task: type: BitextMining dataset: name: MTEB Tatoeba (yid-eng) type: mteb/tatoeba-bitext-mining config: yid-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 68.51415094339622 - type: f1 value: 63.195000099481234 - type: precision value: 61.394033442972116 - type: recall value: 68.51415094339622 - task: type: BitextMining dataset: name: MTEB Tatoeba (fin-eng) type: mteb/tatoeba-bitext-mining config: fin-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 88.5 - type: f1 value: 86.14603174603175 - type: precision value: 85.1162037037037 - type: recall value: 88.5 - task: type: BitextMining dataset: name: MTEB Tatoeba (tha-eng) type: mteb/tatoeba-bitext-mining config: tha-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 95.62043795620438 - type: f1 value: 94.40389294403892 - type: precision value: 93.7956204379562 - type: recall value: 95.62043795620438 - task: type: BitextMining dataset: name: MTEB Tatoeba (wuu-eng) type: mteb/tatoeba-bitext-mining config: wuu-eng split: test revision: 9080400076fbadbb4c4dcb136ff4eddc40b42553 metrics: - type: accuracy value: 
81.8 - type: f1 value: 78.6532178932179 - type: precision value: 77.46348795840176 - type: recall value: 81.8 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.603 - type: map_at_10 value: 8.5 - type: map_at_100 value: 12.985 - type: map_at_1000 value: 14.466999999999999 - type: map_at_3 value: 4.859999999999999 - type: map_at_5 value: 5.817 - type: mrr_at_1 value: 28.571 - type: mrr_at_10 value: 42.331 - type: mrr_at_100 value: 43.592999999999996 - type: mrr_at_1000 value: 43.592999999999996 - type: mrr_at_3 value: 38.435 - type: mrr_at_5 value: 39.966 - type: ndcg_at_1 value: 26.531 - type: ndcg_at_10 value: 21.353 - type: ndcg_at_100 value: 31.087999999999997 - type: ndcg_at_1000 value: 43.163000000000004 - type: ndcg_at_3 value: 22.999 - type: ndcg_at_5 value: 21.451 - type: precision_at_1 value: 28.571 - type: precision_at_10 value: 19.387999999999998 - type: precision_at_100 value: 6.265 - type: precision_at_1000 value: 1.4160000000000001 - type: precision_at_3 value: 24.490000000000002 - type: precision_at_5 value: 21.224 - type: recall_at_1 value: 2.603 - type: recall_at_10 value: 14.474 - type: recall_at_100 value: 40.287 - type: recall_at_1000 value: 76.606 - type: recall_at_3 value: 5.978 - type: recall_at_5 value: 7.819 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 69.7848 - type: ap value: 13.661023167088224 - type: f1 value: 53.61686134460943 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 61.28183361629882 - type: f1 value: 61.55481034919965 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 35.972128420092396 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 85.59933241938367 - type: cos_sim_ap value: 72.20760361208136 - type: cos_sim_f1 value: 66.4447731755424 - type: cos_sim_precision value: 62.35539102267469 - type: cos_sim_recall value: 71.10817941952506 - type: dot_accuracy value: 78.98313166835548 - type: dot_ap value: 44.492521645493795 - type: dot_f1 value: 45.814889336016094 - type: dot_precision value: 37.02439024390244 - type: dot_recall value: 60.07915567282321 - type: euclidean_accuracy value: 85.3907134767837 - type: euclidean_ap value: 71.53847289080343 - type: euclidean_f1 value: 65.95952206778834 - type: euclidean_precision value: 61.31006346328196 - type: euclidean_recall value: 71.37203166226914 - type: manhattan_accuracy value: 85.40859510043511 - type: manhattan_ap value: 71.49664104395515 - type: manhattan_f1 value: 65.98569969356485 - type: manhattan_precision value: 63.928748144482924 - type: manhattan_recall value: 68.17941952506597 - type: max_accuracy value: 85.59933241938367 - type: max_ap value: 72.20760361208136 - type: max_f1 value: 66.4447731755424 - task: type: PairClassification dataset: name: MTEB 
TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.83261536073273 - type: cos_sim_ap value: 85.48178133644264 - type: cos_sim_f1 value: 77.87816307403935 - type: cos_sim_precision value: 75.88953021114926 - type: cos_sim_recall value: 79.97382198952879 - type: dot_accuracy value: 79.76287499514883 - type: dot_ap value: 59.17438838475084 - type: dot_f1 value: 56.34566667855996 - type: dot_precision value: 52.50349092359864 - type: dot_recall value: 60.794579611949494 - type: euclidean_accuracy value: 88.76857996662397 - type: euclidean_ap value: 85.22764834359887 - type: euclidean_f1 value: 77.65379751543554 - type: euclidean_precision value: 75.11152683839401 - type: euclidean_recall value: 80.37419156144134 - type: manhattan_accuracy value: 88.6987231730508 - type: manhattan_ap value: 85.18907981724007 - type: manhattan_f1 value: 77.51967028849757 - type: manhattan_precision value: 75.49992701795358 - type: manhattan_recall value: 79.65044656606098 - type: max_accuracy value: 88.83261536073273 - type: max_ap value: 85.48178133644264 - type: max_f1 value: 77.87816307403935 ---

# dinab/multilingual-e5-base-Q4_K_M-GGUF

This model was converted to GGUF format from [`intfloat/multilingual-e5-base`](https://huggingface.co/intfloat/multilingual-e5-base) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/intfloat/multilingual-e5-base) for more details on the model.

## Use with llama.cpp

Install llama.cpp through brew (works on Mac and Linux):

```bash
brew install llama.cpp
```

Invoke the llama.cpp server or the CLI.

### CLI:

```bash
llama-cli --hf-repo dinab/multilingual-e5-base-Q4_K_M-GGUF --hf-file multilingual-e5-base-q4_k_m.gguf -p "The meaning to life and the universe is"
```

### Server:

```bash
llama-server --hf-repo dinab/multilingual-e5-base-Q4_K_M-GGUF --hf-file multilingual-e5-base-q4_k_m.gguf -c 2048
```

Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.

Step 1: Clone llama.cpp from GitHub.

```bash
git clone https://github.com/ggerganov/llama.cpp
```

Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (for example, `LLAMA_CUDA=1` for Nvidia GPUs on Linux).

```bash
cd llama.cpp && LLAMA_CURL=1 make
```

Step 3: Run inference through the main binary.

```bash
./llama-cli --hf-repo dinab/multilingual-e5-base-Q4_K_M-GGUF --hf-file multilingual-e5-base-q4_k_m.gguf -p "The meaning to life and the universe is"
```

or

```bash
./llama-server --hf-repo dinab/multilingual-e5-base-Q4_K_M-GGUF --hf-file multilingual-e5-base-q4_k_m.gguf -c 2048
```
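Since `intfloat/multilingual-e5-base` is an embedding model rather than a chat/completion model, you will usually want embedding vectors instead of generated text. The sketch below is not part of the upstream card; it assumes a recent llama.cpp build in which the `llama-embedding` tool accepts the same `--hf-repo`/`--hf-file` download flags as `llama-cli` plus a `--pooling` option, so check your build's `--help` output if the flags differ. Per the original E5 model card, inputs should be prefixed with `query: ` or `passage: `.

```bash
# Hedged sketch: compute a sentence embedding with llama.cpp's llama-embedding tool.
# The flag names (--hf-repo, --hf-file, --pooling) are assumptions about a recent build;
# adjust to match your installed version.
./llama-embedding \
  --hf-repo dinab/multilingual-e5-base-Q4_K_M-GGUF \
  --hf-file multilingual-e5-base-q4_k_m.gguf \
  --pooling mean \
  -p "query: how much protein should a female eat"
```

If you prefer the server route, recent llama.cpp builds can also expose an embeddings endpoint when `llama-server` is started with embeddings enabled (commonly the `--embedding` flag); again, consult your build's `--help` output before relying on it.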
[ "BIOSSES", "SCIFACT" ]
ggml-org/e5-small-v2-Q8_0-GGUF
ggml-org
sentence-similarity
[ "sentence-transformers", "gguf", "mteb", "Sentence Transformers", "sentence-similarity", "llama-cpp", "gguf-my-repo", "en", "base_model:intfloat/e5-small-v2", "base_model:quantized:intfloat/e5-small-v2", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us", "feature-extraction" ]
2025-02-06T09:37:02Z
2025-02-06T09:38:51+00:00
24
0
--- base_model: intfloat/e5-small-v2 language: - en license: mit tags: - mteb - Sentence Transformers - sentence-similarity - sentence-transformers - llama-cpp - gguf-my-repo model-index: - name: e5-small-v2 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 77.59701492537313 - type: ap value: 41.67064885731708 - type: f1 value: 71.86465946398573 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 91.265875 - type: ap value: 87.67633085349644 - type: f1 value: 91.24297521425744 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 45.882000000000005 - type: f1 value: 45.08058870381236 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 20.697 - type: map_at_10 value: 33.975 - type: map_at_100 value: 35.223 - type: map_at_1000 value: 35.260000000000005 - type: map_at_3 value: 29.776999999999997 - type: map_at_5 value: 32.035000000000004 - type: mrr_at_1 value: 20.982 - type: mrr_at_10 value: 34.094 - type: mrr_at_100 value: 35.343 - type: mrr_at_1000 value: 35.38 - type: mrr_at_3 value: 29.884 - type: mrr_at_5 value: 32.141999999999996 - type: ndcg_at_1 value: 20.697 - type: ndcg_at_10 value: 41.668 - type: ndcg_at_100 value: 47.397 - type: ndcg_at_1000 value: 48.305 - type: ndcg_at_3 value: 32.928000000000004 - type: ndcg_at_5 value: 36.998999999999995 - type: precision_at_1 value: 20.697 - type: precision_at_10 value: 6.636 - type: precision_at_100 value: 0.924 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 14.035 - type: precision_at_5 value: 10.398 - type: recall_at_1 value: 20.697 - type: recall_at_10 value: 66.35799999999999 - type: recall_at_100 value: 92.39 - type: recall_at_1000 value: 99.36 - type: recall_at_3 value: 42.105 - type: recall_at_5 value: 51.991 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 42.1169517447068 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 34.79553720107097 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 58.10811337308168 - type: mrr value: 71.56410763751482 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 78.46834918248696 - type: cos_sim_spearman value: 79.4289182755206 - type: euclidean_pearson value: 76.26662973727008 - type: euclidean_spearman value: 78.11744260952536 - type: manhattan_pearson value: 76.08175262609434 - type: manhattan_spearman value: 78.29395265552289 - task: type: Classification dataset: name: 
MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 81.63636363636364 - type: f1 value: 81.55779952376953 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 35.88541137137571 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 30.05205685274407 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 30.293999999999997 - type: map_at_10 value: 39.876 - type: map_at_100 value: 41.315000000000005 - type: map_at_1000 value: 41.451 - type: map_at_3 value: 37.194 - type: map_at_5 value: 38.728 - type: mrr_at_1 value: 37.053000000000004 - type: mrr_at_10 value: 45.281 - type: mrr_at_100 value: 46.188 - type: mrr_at_1000 value: 46.245999999999995 - type: mrr_at_3 value: 43.228 - type: mrr_at_5 value: 44.366 - type: ndcg_at_1 value: 37.053000000000004 - type: ndcg_at_10 value: 45.086 - type: ndcg_at_100 value: 50.756 - type: ndcg_at_1000 value: 53.123 - type: ndcg_at_3 value: 41.416 - type: ndcg_at_5 value: 43.098 - type: precision_at_1 value: 37.053000000000004 - type: precision_at_10 value: 8.34 - type: precision_at_100 value: 1.346 - type: precision_at_1000 value: 0.186 - type: precision_at_3 value: 19.647000000000002 - type: precision_at_5 value: 13.877 - type: recall_at_1 value: 30.293999999999997 - type: recall_at_10 value: 54.309 - type: recall_at_100 value: 78.59 - type: recall_at_1000 value: 93.82300000000001 - type: recall_at_3 value: 43.168 - type: recall_at_5 value: 48.192 - type: map_at_1 value: 28.738000000000003 - type: map_at_10 value: 36.925999999999995 - type: map_at_100 value: 38.017 - type: map_at_1000 value: 38.144 - type: map_at_3 value: 34.446 - type: map_at_5 value: 35.704 - type: mrr_at_1 value: 35.478 - type: mrr_at_10 value: 42.786 - type: mrr_at_100 value: 43.458999999999996 - type: mrr_at_1000 value: 43.507 - type: mrr_at_3 value: 40.648 - type: mrr_at_5 value: 41.804 - type: ndcg_at_1 value: 35.478 - type: ndcg_at_10 value: 42.044 - type: ndcg_at_100 value: 46.249 - type: ndcg_at_1000 value: 48.44 - type: ndcg_at_3 value: 38.314 - type: ndcg_at_5 value: 39.798 - type: precision_at_1 value: 35.478 - type: precision_at_10 value: 7.764 - type: precision_at_100 value: 1.253 - type: precision_at_1000 value: 0.174 - type: precision_at_3 value: 18.047 - type: precision_at_5 value: 12.637 - type: recall_at_1 value: 28.738000000000003 - type: recall_at_10 value: 50.659 - type: recall_at_100 value: 68.76299999999999 - type: recall_at_1000 value: 82.811 - type: recall_at_3 value: 39.536 - type: recall_at_5 value: 43.763999999999996 - type: map_at_1 value: 38.565 - type: map_at_10 value: 50.168 - type: map_at_100 value: 51.11 - type: map_at_1000 value: 51.173 - type: map_at_3 value: 47.044000000000004 - type: map_at_5 value: 48.838 - type: mrr_at_1 value: 44.201 - type: mrr_at_10 value: 53.596999999999994 - type: mrr_at_100 value: 54.211 - type: mrr_at_1000 value: 54.247 - type: mrr_at_3 value: 51.202000000000005 - type: mrr_at_5 value: 52.608999999999995 - type: ndcg_at_1 value: 44.201 - type: ndcg_at_10 value: 55.694 - type: ndcg_at_100 
value: 59.518 - type: ndcg_at_1000 value: 60.907 - type: ndcg_at_3 value: 50.395999999999994 - type: ndcg_at_5 value: 53.022999999999996 - type: precision_at_1 value: 44.201 - type: precision_at_10 value: 8.84 - type: precision_at_100 value: 1.162 - type: precision_at_1000 value: 0.133 - type: precision_at_3 value: 22.153 - type: precision_at_5 value: 15.260000000000002 - type: recall_at_1 value: 38.565 - type: recall_at_10 value: 68.65 - type: recall_at_100 value: 85.37400000000001 - type: recall_at_1000 value: 95.37400000000001 - type: recall_at_3 value: 54.645999999999994 - type: recall_at_5 value: 60.958 - type: map_at_1 value: 23.945 - type: map_at_10 value: 30.641000000000002 - type: map_at_100 value: 31.599 - type: map_at_1000 value: 31.691000000000003 - type: map_at_3 value: 28.405 - type: map_at_5 value: 29.704000000000004 - type: mrr_at_1 value: 25.537 - type: mrr_at_10 value: 32.22 - type: mrr_at_100 value: 33.138 - type: mrr_at_1000 value: 33.214 - type: mrr_at_3 value: 30.151 - type: mrr_at_5 value: 31.298 - type: ndcg_at_1 value: 25.537 - type: ndcg_at_10 value: 34.638000000000005 - type: ndcg_at_100 value: 39.486 - type: ndcg_at_1000 value: 41.936 - type: ndcg_at_3 value: 30.333 - type: ndcg_at_5 value: 32.482 - type: precision_at_1 value: 25.537 - type: precision_at_10 value: 5.153 - type: precision_at_100 value: 0.7929999999999999 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 12.429 - type: precision_at_5 value: 8.723 - type: recall_at_1 value: 23.945 - type: recall_at_10 value: 45.412 - type: recall_at_100 value: 67.836 - type: recall_at_1000 value: 86.467 - type: recall_at_3 value: 34.031 - type: recall_at_5 value: 39.039 - type: map_at_1 value: 14.419 - type: map_at_10 value: 20.858999999999998 - type: map_at_100 value: 22.067999999999998 - type: map_at_1000 value: 22.192 - type: map_at_3 value: 18.673000000000002 - type: map_at_5 value: 19.968 - type: mrr_at_1 value: 17.785999999999998 - type: mrr_at_10 value: 24.878 - type: mrr_at_100 value: 26.021 - type: mrr_at_1000 value: 26.095000000000002 - type: mrr_at_3 value: 22.616 - type: mrr_at_5 value: 23.785 - type: ndcg_at_1 value: 17.785999999999998 - type: ndcg_at_10 value: 25.153 - type: ndcg_at_100 value: 31.05 - type: ndcg_at_1000 value: 34.052 - type: ndcg_at_3 value: 21.117 - type: ndcg_at_5 value: 23.048 - type: precision_at_1 value: 17.785999999999998 - type: precision_at_10 value: 4.590000000000001 - type: precision_at_100 value: 0.864 - type: precision_at_1000 value: 0.125 - type: precision_at_3 value: 9.908999999999999 - type: precision_at_5 value: 7.313 - type: recall_at_1 value: 14.419 - type: recall_at_10 value: 34.477999999999994 - type: recall_at_100 value: 60.02499999999999 - type: recall_at_1000 value: 81.646 - type: recall_at_3 value: 23.515 - type: recall_at_5 value: 28.266999999999996 - type: map_at_1 value: 26.268 - type: map_at_10 value: 35.114000000000004 - type: map_at_100 value: 36.212 - type: map_at_1000 value: 36.333 - type: map_at_3 value: 32.436 - type: map_at_5 value: 33.992 - type: mrr_at_1 value: 31.761 - type: mrr_at_10 value: 40.355999999999995 - type: mrr_at_100 value: 41.125 - type: mrr_at_1000 value: 41.186 - type: mrr_at_3 value: 37.937 - type: mrr_at_5 value: 39.463 - type: ndcg_at_1 value: 31.761 - type: ndcg_at_10 value: 40.422000000000004 - type: ndcg_at_100 value: 45.458999999999996 - type: ndcg_at_1000 value: 47.951 - type: ndcg_at_3 value: 35.972 - type: ndcg_at_5 value: 38.272 - type: precision_at_1 value: 31.761 - type: precision_at_10 value: 7.103 - 
type: precision_at_100 value: 1.133 - type: precision_at_1000 value: 0.152 - type: precision_at_3 value: 16.779 - type: precision_at_5 value: 11.877 - type: recall_at_1 value: 26.268 - type: recall_at_10 value: 51.053000000000004 - type: recall_at_100 value: 72.702 - type: recall_at_1000 value: 89.521 - type: recall_at_3 value: 38.619 - type: recall_at_5 value: 44.671 - type: map_at_1 value: 25.230999999999998 - type: map_at_10 value: 34.227000000000004 - type: map_at_100 value: 35.370000000000005 - type: map_at_1000 value: 35.488 - type: map_at_3 value: 31.496000000000002 - type: map_at_5 value: 33.034 - type: mrr_at_1 value: 30.822 - type: mrr_at_10 value: 39.045 - type: mrr_at_100 value: 39.809 - type: mrr_at_1000 value: 39.873 - type: mrr_at_3 value: 36.663000000000004 - type: mrr_at_5 value: 37.964 - type: ndcg_at_1 value: 30.822 - type: ndcg_at_10 value: 39.472 - type: ndcg_at_100 value: 44.574999999999996 - type: ndcg_at_1000 value: 47.162 - type: ndcg_at_3 value: 34.929 - type: ndcg_at_5 value: 37.002 - type: precision_at_1 value: 30.822 - type: precision_at_10 value: 7.055 - type: precision_at_100 value: 1.124 - type: precision_at_1000 value: 0.152 - type: precision_at_3 value: 16.591 - type: precision_at_5 value: 11.667 - type: recall_at_1 value: 25.230999999999998 - type: recall_at_10 value: 50.42100000000001 - type: recall_at_100 value: 72.685 - type: recall_at_1000 value: 90.469 - type: recall_at_3 value: 37.503 - type: recall_at_5 value: 43.123 - type: map_at_1 value: 24.604166666666664 - type: map_at_10 value: 32.427166666666665 - type: map_at_100 value: 33.51474999999999 - type: map_at_1000 value: 33.6345 - type: map_at_3 value: 30.02366666666667 - type: map_at_5 value: 31.382333333333328 - type: mrr_at_1 value: 29.001166666666666 - type: mrr_at_10 value: 36.3315 - type: mrr_at_100 value: 37.16683333333333 - type: mrr_at_1000 value: 37.23341666666668 - type: mrr_at_3 value: 34.19916666666667 - type: mrr_at_5 value: 35.40458333333334 - type: ndcg_at_1 value: 29.001166666666666 - type: ndcg_at_10 value: 37.06883333333334 - type: ndcg_at_100 value: 41.95816666666666 - type: ndcg_at_1000 value: 44.501583333333336 - type: ndcg_at_3 value: 32.973499999999994 - type: ndcg_at_5 value: 34.90833333333334 - type: precision_at_1 value: 29.001166666666666 - type: precision_at_10 value: 6.336 - type: precision_at_100 value: 1.0282499999999999 - type: precision_at_1000 value: 0.14391666666666664 - type: precision_at_3 value: 14.932499999999996 - type: precision_at_5 value: 10.50825 - type: recall_at_1 value: 24.604166666666664 - type: recall_at_10 value: 46.9525 - type: recall_at_100 value: 68.67816666666667 - type: recall_at_1000 value: 86.59783333333334 - type: recall_at_3 value: 35.49783333333333 - type: recall_at_5 value: 40.52525000000001 - type: map_at_1 value: 23.559 - type: map_at_10 value: 29.023 - type: map_at_100 value: 29.818 - type: map_at_1000 value: 29.909000000000002 - type: map_at_3 value: 27.037 - type: map_at_5 value: 28.225 - type: mrr_at_1 value: 26.994 - type: mrr_at_10 value: 31.962000000000003 - type: mrr_at_100 value: 32.726 - type: mrr_at_1000 value: 32.800000000000004 - type: mrr_at_3 value: 30.266 - type: mrr_at_5 value: 31.208999999999996 - type: ndcg_at_1 value: 26.994 - type: ndcg_at_10 value: 32.53 - type: ndcg_at_100 value: 36.758 - type: ndcg_at_1000 value: 39.362 - type: ndcg_at_3 value: 28.985 - type: ndcg_at_5 value: 30.757 - type: precision_at_1 value: 26.994 - type: precision_at_10 value: 4.968999999999999 - type: precision_at_100 value: 0.759 - 
type: precision_at_1000 value: 0.106 - type: precision_at_3 value: 12.219 - type: precision_at_5 value: 8.527999999999999 - type: recall_at_1 value: 23.559 - type: recall_at_10 value: 40.585 - type: recall_at_100 value: 60.306000000000004 - type: recall_at_1000 value: 80.11 - type: recall_at_3 value: 30.794 - type: recall_at_5 value: 35.186 - type: map_at_1 value: 16.384999999999998 - type: map_at_10 value: 22.142 - type: map_at_100 value: 23.057 - type: map_at_1000 value: 23.177 - type: map_at_3 value: 20.29 - type: map_at_5 value: 21.332 - type: mrr_at_1 value: 19.89 - type: mrr_at_10 value: 25.771 - type: mrr_at_100 value: 26.599 - type: mrr_at_1000 value: 26.680999999999997 - type: mrr_at_3 value: 23.962 - type: mrr_at_5 value: 24.934 - type: ndcg_at_1 value: 19.89 - type: ndcg_at_10 value: 25.97 - type: ndcg_at_100 value: 30.605 - type: ndcg_at_1000 value: 33.619 - type: ndcg_at_3 value: 22.704 - type: ndcg_at_5 value: 24.199 - type: precision_at_1 value: 19.89 - type: precision_at_10 value: 4.553 - type: precision_at_100 value: 0.8049999999999999 - type: precision_at_1000 value: 0.122 - type: precision_at_3 value: 10.541 - type: precision_at_5 value: 7.46 - type: recall_at_1 value: 16.384999999999998 - type: recall_at_10 value: 34.001 - type: recall_at_100 value: 55.17100000000001 - type: recall_at_1000 value: 77.125 - type: recall_at_3 value: 24.618000000000002 - type: recall_at_5 value: 28.695999999999998 - type: map_at_1 value: 23.726 - type: map_at_10 value: 31.227 - type: map_at_100 value: 32.311 - type: map_at_1000 value: 32.419 - type: map_at_3 value: 28.765 - type: map_at_5 value: 30.229 - type: mrr_at_1 value: 27.705000000000002 - type: mrr_at_10 value: 35.085 - type: mrr_at_100 value: 35.931000000000004 - type: mrr_at_1000 value: 36 - type: mrr_at_3 value: 32.603 - type: mrr_at_5 value: 34.117999999999995 - type: ndcg_at_1 value: 27.705000000000002 - type: ndcg_at_10 value: 35.968 - type: ndcg_at_100 value: 41.197 - type: ndcg_at_1000 value: 43.76 - type: ndcg_at_3 value: 31.304 - type: ndcg_at_5 value: 33.661 - type: precision_at_1 value: 27.705000000000002 - type: precision_at_10 value: 5.942 - type: precision_at_100 value: 0.964 - type: precision_at_1000 value: 0.13 - type: precision_at_3 value: 13.868 - type: precision_at_5 value: 9.944 - type: recall_at_1 value: 23.726 - type: recall_at_10 value: 46.786 - type: recall_at_100 value: 70.072 - type: recall_at_1000 value: 88.2 - type: recall_at_3 value: 33.981 - type: recall_at_5 value: 39.893 - type: map_at_1 value: 23.344 - type: map_at_10 value: 31.636999999999997 - type: map_at_100 value: 33.065 - type: map_at_1000 value: 33.300000000000004 - type: map_at_3 value: 29.351 - type: map_at_5 value: 30.432 - type: mrr_at_1 value: 27.866000000000003 - type: mrr_at_10 value: 35.587 - type: mrr_at_100 value: 36.52 - type: mrr_at_1000 value: 36.597 - type: mrr_at_3 value: 33.696 - type: mrr_at_5 value: 34.713 - type: ndcg_at_1 value: 27.866000000000003 - type: ndcg_at_10 value: 36.61 - type: ndcg_at_100 value: 41.88 - type: ndcg_at_1000 value: 45.105000000000004 - type: ndcg_at_3 value: 33.038000000000004 - type: ndcg_at_5 value: 34.331 - type: precision_at_1 value: 27.866000000000003 - type: precision_at_10 value: 6.917 - type: precision_at_100 value: 1.3599999999999999 - type: precision_at_1000 value: 0.233 - type: precision_at_3 value: 15.547 - type: precision_at_5 value: 10.791 - type: recall_at_1 value: 23.344 - type: recall_at_10 value: 45.782000000000004 - type: recall_at_100 value: 69.503 - type: recall_at_1000 value: 
90.742 - type: recall_at_3 value: 35.160000000000004 - type: recall_at_5 value: 39.058 - type: map_at_1 value: 20.776 - type: map_at_10 value: 27.285999999999998 - type: map_at_100 value: 28.235 - type: map_at_1000 value: 28.337 - type: map_at_3 value: 25.147000000000002 - type: map_at_5 value: 26.401999999999997 - type: mrr_at_1 value: 22.921 - type: mrr_at_10 value: 29.409999999999997 - type: mrr_at_100 value: 30.275000000000002 - type: mrr_at_1000 value: 30.354999999999997 - type: mrr_at_3 value: 27.418 - type: mrr_at_5 value: 28.592000000000002 - type: ndcg_at_1 value: 22.921 - type: ndcg_at_10 value: 31.239 - type: ndcg_at_100 value: 35.965 - type: ndcg_at_1000 value: 38.602 - type: ndcg_at_3 value: 27.174 - type: ndcg_at_5 value: 29.229 - type: precision_at_1 value: 22.921 - type: precision_at_10 value: 4.806 - type: precision_at_100 value: 0.776 - type: precision_at_1000 value: 0.11 - type: precision_at_3 value: 11.459999999999999 - type: precision_at_5 value: 8.022 - type: recall_at_1 value: 20.776 - type: recall_at_10 value: 41.294 - type: recall_at_100 value: 63.111 - type: recall_at_1000 value: 82.88600000000001 - type: recall_at_3 value: 30.403000000000002 - type: recall_at_5 value: 35.455999999999996 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 9.376 - type: map_at_10 value: 15.926000000000002 - type: map_at_100 value: 17.585 - type: map_at_1000 value: 17.776 - type: map_at_3 value: 13.014000000000001 - type: map_at_5 value: 14.417 - type: mrr_at_1 value: 20.195 - type: mrr_at_10 value: 29.95 - type: mrr_at_100 value: 31.052000000000003 - type: mrr_at_1000 value: 31.108000000000004 - type: mrr_at_3 value: 26.667 - type: mrr_at_5 value: 28.458 - type: ndcg_at_1 value: 20.195 - type: ndcg_at_10 value: 22.871 - type: ndcg_at_100 value: 29.921999999999997 - type: ndcg_at_1000 value: 33.672999999999995 - type: ndcg_at_3 value: 17.782999999999998 - type: ndcg_at_5 value: 19.544 - type: precision_at_1 value: 20.195 - type: precision_at_10 value: 7.394 - type: precision_at_100 value: 1.493 - type: precision_at_1000 value: 0.218 - type: precision_at_3 value: 13.073 - type: precision_at_5 value: 10.436 - type: recall_at_1 value: 9.376 - type: recall_at_10 value: 28.544999999999998 - type: recall_at_100 value: 53.147999999999996 - type: recall_at_1000 value: 74.62 - type: recall_at_3 value: 16.464000000000002 - type: recall_at_5 value: 21.004 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 8.415000000000001 - type: map_at_10 value: 18.738 - type: map_at_100 value: 27.291999999999998 - type: map_at_1000 value: 28.992 - type: map_at_3 value: 13.196 - type: map_at_5 value: 15.539 - type: mrr_at_1 value: 66.5 - type: mrr_at_10 value: 74.518 - type: mrr_at_100 value: 74.86 - type: mrr_at_1000 value: 74.87 - type: mrr_at_3 value: 72.375 - type: mrr_at_5 value: 73.86200000000001 - type: ndcg_at_1 value: 54.37499999999999 - type: ndcg_at_10 value: 41.317 - type: ndcg_at_100 value: 45.845 - type: ndcg_at_1000 value: 52.92 - type: ndcg_at_3 value: 44.983000000000004 - type: ndcg_at_5 value: 42.989 - type: precision_at_1 value: 66.5 - type: precision_at_10 value: 33.6 - type: precision_at_100 value: 10.972999999999999 - type: precision_at_1000 value: 2.214 - type: precision_at_3 value: 48.583 - type: precision_at_5 value: 42.15 - type: recall_at_1 value: 8.415000000000001 - type: recall_at_10 
value: 24.953 - type: recall_at_100 value: 52.48199999999999 - type: recall_at_1000 value: 75.093 - type: recall_at_3 value: 14.341000000000001 - type: recall_at_5 value: 18.468 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 47.06499999999999 - type: f1 value: 41.439327599975385 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 66.02 - type: map_at_10 value: 76.68599999999999 - type: map_at_100 value: 76.959 - type: map_at_1000 value: 76.972 - type: map_at_3 value: 75.024 - type: map_at_5 value: 76.153 - type: mrr_at_1 value: 71.197 - type: mrr_at_10 value: 81.105 - type: mrr_at_100 value: 81.232 - type: mrr_at_1000 value: 81.233 - type: mrr_at_3 value: 79.758 - type: mrr_at_5 value: 80.69 - type: ndcg_at_1 value: 71.197 - type: ndcg_at_10 value: 81.644 - type: ndcg_at_100 value: 82.645 - type: ndcg_at_1000 value: 82.879 - type: ndcg_at_3 value: 78.792 - type: ndcg_at_5 value: 80.528 - type: precision_at_1 value: 71.197 - type: precision_at_10 value: 10.206999999999999 - type: precision_at_100 value: 1.093 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 30.868000000000002 - type: precision_at_5 value: 19.559 - type: recall_at_1 value: 66.02 - type: recall_at_10 value: 92.50699999999999 - type: recall_at_100 value: 96.497 - type: recall_at_1000 value: 97.956 - type: recall_at_3 value: 84.866 - type: recall_at_5 value: 89.16199999999999 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 17.948 - type: map_at_10 value: 29.833 - type: map_at_100 value: 31.487 - type: map_at_1000 value: 31.674000000000003 - type: map_at_3 value: 26.029999999999998 - type: map_at_5 value: 28.038999999999998 - type: mrr_at_1 value: 34.721999999999994 - type: mrr_at_10 value: 44.214999999999996 - type: mrr_at_100 value: 44.994 - type: mrr_at_1000 value: 45.051 - type: mrr_at_3 value: 41.667 - type: mrr_at_5 value: 43.032 - type: ndcg_at_1 value: 34.721999999999994 - type: ndcg_at_10 value: 37.434 - type: ndcg_at_100 value: 43.702000000000005 - type: ndcg_at_1000 value: 46.993 - type: ndcg_at_3 value: 33.56 - type: ndcg_at_5 value: 34.687 - type: precision_at_1 value: 34.721999999999994 - type: precision_at_10 value: 10.401 - type: precision_at_100 value: 1.7049999999999998 - type: precision_at_1000 value: 0.22799999999999998 - type: precision_at_3 value: 22.531000000000002 - type: precision_at_5 value: 16.42 - type: recall_at_1 value: 17.948 - type: recall_at_10 value: 45.062999999999995 - type: recall_at_100 value: 68.191 - type: recall_at_1000 value: 87.954 - type: recall_at_3 value: 31.112000000000002 - type: recall_at_5 value: 36.823 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 36.644 - type: map_at_10 value: 57.658 - type: map_at_100 value: 58.562000000000005 - type: map_at_1000 value: 58.62500000000001 - type: map_at_3 value: 54.022999999999996 - type: map_at_5 value: 56.293000000000006 - type: mrr_at_1 value: 73.288 - type: mrr_at_10 value: 80.51700000000001 - type: mrr_at_100 value: 80.72 - type: mrr_at_1000 value: 80.728 - type: mrr_at_3 value: 79.33200000000001 - type: mrr_at_5 value: 80.085 - type: ndcg_at_1 value: 73.288 - type: ndcg_at_10 
value: 66.61 - type: ndcg_at_100 value: 69.723 - type: ndcg_at_1000 value: 70.96000000000001 - type: ndcg_at_3 value: 61.358999999999995 - type: ndcg_at_5 value: 64.277 - type: precision_at_1 value: 73.288 - type: precision_at_10 value: 14.17 - type: precision_at_100 value: 1.659 - type: precision_at_1000 value: 0.182 - type: precision_at_3 value: 39.487 - type: precision_at_5 value: 25.999 - type: recall_at_1 value: 36.644 - type: recall_at_10 value: 70.851 - type: recall_at_100 value: 82.94399999999999 - type: recall_at_1000 value: 91.134 - type: recall_at_3 value: 59.230000000000004 - type: recall_at_5 value: 64.997 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 86.00280000000001 - type: ap value: 80.46302061021223 - type: f1 value: 85.9592921596419 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 22.541 - type: map_at_10 value: 34.625 - type: map_at_100 value: 35.785 - type: map_at_1000 value: 35.831 - type: map_at_3 value: 30.823 - type: map_at_5 value: 32.967999999999996 - type: mrr_at_1 value: 23.180999999999997 - type: mrr_at_10 value: 35.207 - type: mrr_at_100 value: 36.315 - type: mrr_at_1000 value: 36.355 - type: mrr_at_3 value: 31.483 - type: mrr_at_5 value: 33.589999999999996 - type: ndcg_at_1 value: 23.195 - type: ndcg_at_10 value: 41.461 - type: ndcg_at_100 value: 47.032000000000004 - type: ndcg_at_1000 value: 48.199999999999996 - type: ndcg_at_3 value: 33.702 - type: ndcg_at_5 value: 37.522 - type: precision_at_1 value: 23.195 - type: precision_at_10 value: 6.526999999999999 - type: precision_at_100 value: 0.932 - type: precision_at_1000 value: 0.10300000000000001 - type: precision_at_3 value: 14.308000000000002 - type: precision_at_5 value: 10.507 - type: recall_at_1 value: 22.541 - type: recall_at_10 value: 62.524 - type: recall_at_100 value: 88.228 - type: recall_at_1000 value: 97.243 - type: recall_at_3 value: 41.38 - type: recall_at_5 value: 50.55 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 92.69949840401279 - type: f1 value: 92.54141471311786 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 72.56041951664386 - type: f1 value: 55.88499977508287 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 71.62071284465365 - type: f1 value: 69.36717546572152 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 76.35843981170142 - type: f1 value: 76.15496453538884 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 31.33664956793118 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s 
config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 27.883839621715524 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 30.096874986740758 - type: mrr value: 30.97300481932132 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.4 - type: map_at_10 value: 11.852 - type: map_at_100 value: 14.758 - type: map_at_1000 value: 16.134 - type: map_at_3 value: 8.558 - type: map_at_5 value: 10.087 - type: mrr_at_1 value: 44.272 - type: mrr_at_10 value: 52.05800000000001 - type: mrr_at_100 value: 52.689 - type: mrr_at_1000 value: 52.742999999999995 - type: mrr_at_3 value: 50.205999999999996 - type: mrr_at_5 value: 51.367 - type: ndcg_at_1 value: 42.57 - type: ndcg_at_10 value: 32.449 - type: ndcg_at_100 value: 29.596 - type: ndcg_at_1000 value: 38.351 - type: ndcg_at_3 value: 37.044 - type: ndcg_at_5 value: 35.275 - type: precision_at_1 value: 44.272 - type: precision_at_10 value: 23.87 - type: precision_at_100 value: 7.625 - type: precision_at_1000 value: 2.045 - type: precision_at_3 value: 34.365 - type: precision_at_5 value: 30.341 - type: recall_at_1 value: 5.4 - type: recall_at_10 value: 15.943999999999999 - type: recall_at_100 value: 29.805 - type: recall_at_1000 value: 61.695 - type: recall_at_3 value: 9.539 - type: recall_at_5 value: 12.127 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 36.047000000000004 - type: map_at_10 value: 51.6 - type: map_at_100 value: 52.449999999999996 - type: map_at_1000 value: 52.476 - type: map_at_3 value: 47.452 - type: map_at_5 value: 49.964 - type: mrr_at_1 value: 40.382 - type: mrr_at_10 value: 54.273 - type: mrr_at_100 value: 54.859 - type: mrr_at_1000 value: 54.876000000000005 - type: mrr_at_3 value: 51.014 - type: mrr_at_5 value: 52.983999999999995 - type: ndcg_at_1 value: 40.353 - type: ndcg_at_10 value: 59.11300000000001 - type: ndcg_at_100 value: 62.604000000000006 - type: ndcg_at_1000 value: 63.187000000000005 - type: ndcg_at_3 value: 51.513 - type: ndcg_at_5 value: 55.576 - type: precision_at_1 value: 40.353 - type: precision_at_10 value: 9.418 - type: precision_at_100 value: 1.1440000000000001 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 23.078000000000003 - type: precision_at_5 value: 16.250999999999998 - type: recall_at_1 value: 36.047000000000004 - type: recall_at_10 value: 79.22200000000001 - type: recall_at_100 value: 94.23 - type: recall_at_1000 value: 98.51100000000001 - type: recall_at_3 value: 59.678 - type: recall_at_5 value: 68.967 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 68.232 - type: map_at_10 value: 81.674 - type: map_at_100 value: 82.338 - type: map_at_1000 value: 82.36099999999999 - type: map_at_3 value: 78.833 - type: map_at_5 value: 80.58 - type: mrr_at_1 value: 78.64 - type: mrr_at_10 value: 85.164 - type: mrr_at_100 value: 85.317 - type: mrr_at_1000 value: 85.319 - type: mrr_at_3 value: 84.127 - type: mrr_at_5 value: 84.789 - type: ndcg_at_1 value: 78.63 - type: ndcg_at_10 value: 85.711 - type: ndcg_at_100 value: 87.238 - type: ndcg_at_1000 value: 87.444 - type: ndcg_at_3 value: 82.788 - type: ndcg_at_5 value: 84.313 - type: 
precision_at_1 value: 78.63 - type: precision_at_10 value: 12.977 - type: precision_at_100 value: 1.503 - type: precision_at_1000 value: 0.156 - type: precision_at_3 value: 36.113 - type: precision_at_5 value: 23.71 - type: recall_at_1 value: 68.232 - type: recall_at_10 value: 93.30199999999999 - type: recall_at_100 value: 98.799 - type: recall_at_1000 value: 99.885 - type: recall_at_3 value: 84.827 - type: recall_at_5 value: 89.188 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 45.71879170816294 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 59.65866311751794 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.218 - type: map_at_10 value: 10.337 - type: map_at_100 value: 12.131 - type: map_at_1000 value: 12.411 - type: map_at_3 value: 7.4270000000000005 - type: map_at_5 value: 8.913 - type: mrr_at_1 value: 20.8 - type: mrr_at_10 value: 30.868000000000002 - type: mrr_at_100 value: 31.903 - type: mrr_at_1000 value: 31.972 - type: mrr_at_3 value: 27.367 - type: mrr_at_5 value: 29.372 - type: ndcg_at_1 value: 20.8 - type: ndcg_at_10 value: 17.765 - type: ndcg_at_100 value: 24.914 - type: ndcg_at_1000 value: 30.206 - type: ndcg_at_3 value: 16.64 - type: ndcg_at_5 value: 14.712 - type: precision_at_1 value: 20.8 - type: precision_at_10 value: 9.24 - type: precision_at_100 value: 1.9560000000000002 - type: precision_at_1000 value: 0.32299999999999995 - type: precision_at_3 value: 15.467 - type: precision_at_5 value: 12.94 - type: recall_at_1 value: 4.218 - type: recall_at_10 value: 18.752 - type: recall_at_100 value: 39.7 - type: recall_at_1000 value: 65.57300000000001 - type: recall_at_3 value: 9.428 - type: recall_at_5 value: 13.133000000000001 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 83.04338850207233 - type: cos_sim_spearman value: 78.5054651430423 - type: euclidean_pearson value: 80.30739451228612 - type: euclidean_spearman value: 78.48377464299097 - type: manhattan_pearson value: 80.40795049052781 - type: manhattan_spearman value: 78.49506205443114 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 84.11596224442962 - type: cos_sim_spearman value: 76.20997388935461 - type: euclidean_pearson value: 80.56858451349109 - type: euclidean_spearman value: 75.92659183871186 - type: manhattan_pearson value: 80.60246102203844 - type: manhattan_spearman value: 76.03018971432664 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 81.34691640755737 - type: cos_sim_spearman value: 82.4018369631579 - type: euclidean_pearson value: 81.87673092245366 - type: euclidean_spearman value: 82.3671489960678 - type: manhattan_pearson value: 81.88222387719948 - type: manhattan_spearman value: 82.3816590344736 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test 
revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 81.2836092579524 - type: cos_sim_spearman value: 78.99982781772064 - type: euclidean_pearson value: 80.5184271010527 - type: euclidean_spearman value: 78.89777392101904 - type: manhattan_pearson value: 80.53585705018664 - type: manhattan_spearman value: 78.92898405472994 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 86.7349907750784 - type: cos_sim_spearman value: 87.7611234446225 - type: euclidean_pearson value: 86.98759326731624 - type: euclidean_spearman value: 87.58321319424618 - type: manhattan_pearson value: 87.03483090370842 - type: manhattan_spearman value: 87.63278333060288 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 81.75873694924825 - type: cos_sim_spearman value: 83.80237999094724 - type: euclidean_pearson value: 83.55023725861537 - type: euclidean_spearman value: 84.12744338577744 - type: manhattan_pearson value: 83.58816983036232 - type: manhattan_spearman value: 84.18520748676501 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.21630882940174 - type: cos_sim_spearman value: 87.72382883437031 - type: euclidean_pearson value: 88.69933350930333 - type: euclidean_spearman value: 88.24660814383081 - type: manhattan_pearson value: 88.77331018833499 - type: manhattan_spearman value: 88.26109989380632 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 61.11854063060489 - type: cos_sim_spearman value: 63.14678634195072 - type: euclidean_pearson value: 61.679090067000864 - type: euclidean_spearman value: 62.28876589509653 - type: manhattan_pearson value: 62.082324165511004 - type: manhattan_spearman value: 62.56030932816679 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 84.00319882832645 - type: cos_sim_spearman value: 85.94529772647257 - type: euclidean_pearson value: 85.6661390122756 - type: euclidean_spearman value: 85.97747815545827 - type: manhattan_pearson value: 85.58422770541893 - type: manhattan_spearman value: 85.9237139181532 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 79.16198731863916 - type: mrr value: 94.25202702163487 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 54.761 - type: map_at_10 value: 64.396 - type: map_at_100 value: 65.07 - type: map_at_1000 value: 65.09899999999999 - type: map_at_3 value: 61.846000000000004 - type: map_at_5 value: 63.284 - type: mrr_at_1 value: 57.667 - type: mrr_at_10 value: 65.83099999999999 - type: mrr_at_100 value: 66.36800000000001 - type: mrr_at_1000 value: 66.39399999999999 - type: mrr_at_3 value: 64.056 - type: mrr_at_5 value: 65.206 - type: ndcg_at_1 value: 57.667 - type: ndcg_at_10 
value: 68.854 - type: ndcg_at_100 value: 71.59100000000001 - type: ndcg_at_1000 value: 72.383 - type: ndcg_at_3 value: 64.671 - type: ndcg_at_5 value: 66.796 - type: precision_at_1 value: 57.667 - type: precision_at_10 value: 9.167 - type: precision_at_100 value: 1.053 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 25.444 - type: precision_at_5 value: 16.667 - type: recall_at_1 value: 54.761 - type: recall_at_10 value: 80.9 - type: recall_at_100 value: 92.767 - type: recall_at_1000 value: 99 - type: recall_at_3 value: 69.672 - type: recall_at_5 value: 75.083 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.8079207920792 - type: cos_sim_ap value: 94.88470927617445 - type: cos_sim_f1 value: 90.08179959100204 - type: cos_sim_precision value: 92.15481171548117 - type: cos_sim_recall value: 88.1 - type: dot_accuracy value: 99.58613861386138 - type: dot_ap value: 82.94822578881316 - type: dot_f1 value: 77.33333333333333 - type: dot_precision value: 79.36842105263158 - type: dot_recall value: 75.4 - type: euclidean_accuracy value: 99.8069306930693 - type: euclidean_ap value: 94.81367858031837 - type: euclidean_f1 value: 90.01009081735621 - type: euclidean_precision value: 90.83503054989816 - type: euclidean_recall value: 89.2 - type: manhattan_accuracy value: 99.81188118811882 - type: manhattan_ap value: 94.91405337220161 - type: manhattan_f1 value: 90.2763561924258 - type: manhattan_precision value: 92.45283018867924 - type: manhattan_recall value: 88.2 - type: max_accuracy value: 99.81188118811882 - type: max_ap value: 94.91405337220161 - type: max_f1 value: 90.2763561924258 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 58.511599500053094 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 31.984728147814707 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 49.93428193939015 - type: mrr value: 50.916557911043206 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 31.562500894537145 - type: cos_sim_spearman value: 31.162587976726307 - type: dot_pearson value: 22.633662187735762 - type: dot_spearman value: 22.723000282378962 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.219 - type: map_at_10 value: 1.871 - type: map_at_100 value: 10.487 - type: map_at_1000 value: 25.122 - type: map_at_3 value: 0.657 - type: map_at_5 value: 1.0699999999999998 - type: mrr_at_1 value: 84 - type: mrr_at_10 value: 89.567 - type: mrr_at_100 value: 89.748 - type: mrr_at_1000 value: 89.748 - type: mrr_at_3 value: 88.667 - type: mrr_at_5 value: 89.567 - type: ndcg_at_1 value: 80 - type: ndcg_at_10 value: 74.533 - type: ndcg_at_100 
value: 55.839000000000006 - type: ndcg_at_1000 value: 49.748 - type: ndcg_at_3 value: 79.53099999999999 - type: ndcg_at_5 value: 78.245 - type: precision_at_1 value: 84 - type: precision_at_10 value: 78.4 - type: precision_at_100 value: 56.99999999999999 - type: precision_at_1000 value: 21.98 - type: precision_at_3 value: 85.333 - type: precision_at_5 value: 84.8 - type: recall_at_1 value: 0.219 - type: recall_at_10 value: 2.02 - type: recall_at_100 value: 13.555 - type: recall_at_1000 value: 46.739999999999995 - type: recall_at_3 value: 0.685 - type: recall_at_5 value: 1.13 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 3.5029999999999997 - type: map_at_10 value: 11.042 - type: map_at_100 value: 16.326999999999998 - type: map_at_1000 value: 17.836 - type: map_at_3 value: 6.174 - type: map_at_5 value: 7.979 - type: mrr_at_1 value: 42.857 - type: mrr_at_10 value: 52.617000000000004 - type: mrr_at_100 value: 53.351000000000006 - type: mrr_at_1000 value: 53.351000000000006 - type: mrr_at_3 value: 46.939 - type: mrr_at_5 value: 50.714000000000006 - type: ndcg_at_1 value: 38.775999999999996 - type: ndcg_at_10 value: 27.125 - type: ndcg_at_100 value: 35.845 - type: ndcg_at_1000 value: 47.377 - type: ndcg_at_3 value: 29.633 - type: ndcg_at_5 value: 28.378999999999998 - type: precision_at_1 value: 42.857 - type: precision_at_10 value: 24.082 - type: precision_at_100 value: 6.877999999999999 - type: precision_at_1000 value: 1.463 - type: precision_at_3 value: 29.932 - type: precision_at_5 value: 28.571 - type: recall_at_1 value: 3.5029999999999997 - type: recall_at_10 value: 17.068 - type: recall_at_100 value: 43.361 - type: recall_at_1000 value: 78.835 - type: recall_at_3 value: 6.821000000000001 - type: recall_at_5 value: 10.357 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 71.0954 - type: ap value: 14.216844153511959 - type: f1 value: 54.63687418565117 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 61.46293152235427 - type: f1 value: 61.744177921638645 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 41.12708617788644 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 85.75430649102938 - type: cos_sim_ap value: 73.34252536948081 - type: cos_sim_f1 value: 67.53758935173774 - type: cos_sim_precision value: 63.3672525439408 - type: cos_sim_recall value: 72.29551451187335 - type: dot_accuracy value: 81.71305954580676 - type: dot_ap value: 59.5532209082386 - type: dot_f1 value: 56.18466898954705 - type: dot_precision value: 47.830923248053395 - type: dot_recall value: 68.07387862796834 - type: euclidean_accuracy value: 85.81987244441795 - type: euclidean_ap value: 73.34325409809446 - type: euclidean_f1 value: 67.83451360417443 - type: euclidean_precision value: 
64.09955388588871 - type: euclidean_recall value: 72.0316622691293 - type: manhattan_accuracy value: 85.68277999642368 - type: manhattan_ap value: 73.1535450121903 - type: manhattan_f1 value: 67.928237896289 - type: manhattan_precision value: 63.56945722171113 - type: manhattan_recall value: 72.9287598944591 - type: max_accuracy value: 85.81987244441795 - type: max_ap value: 73.34325409809446 - type: max_f1 value: 67.928237896289 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.90441262079403 - type: cos_sim_ap value: 85.79331880741438 - type: cos_sim_f1 value: 78.31563529842548 - type: cos_sim_precision value: 74.6683424102779 - type: cos_sim_recall value: 82.33754234678165 - type: dot_accuracy value: 84.89928978926534 - type: dot_ap value: 75.25819218316 - type: dot_f1 value: 69.88730119720536 - type: dot_precision value: 64.23362374959665 - type: dot_recall value: 76.63227594702803 - type: euclidean_accuracy value: 89.01695967710637 - type: euclidean_ap value: 85.98986606038852 - type: euclidean_f1 value: 78.5277880014722 - type: euclidean_precision value: 75.22211253701876 - type: euclidean_recall value: 82.13735756082538 - type: manhattan_accuracy value: 88.99561454573679 - type: manhattan_ap value: 85.92262421793953 - type: manhattan_f1 value: 78.38866094740769 - type: manhattan_precision value: 76.02373028505282 - type: manhattan_recall value: 80.9054511857099 - type: max_accuracy value: 89.01695967710637 - type: max_ap value: 85.98986606038852 - type: max_f1 value: 78.5277880014722
---

# ggml-org/e5-small-v2-Q8_0-GGUF

This model was converted to GGUF format from [`intfloat/e5-small-v2`](https://huggingface.co/intfloat/e5-small-v2) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/intfloat/e5-small-v2) for more details on the model.

## Use with llama.cpp

Install llama.cpp through brew (works on Mac and Linux):

```bash
brew install llama.cpp
```

Invoke the llama.cpp server or the CLI.

### CLI:

```bash
llama-cli --hf-repo ggml-org/e5-small-v2-Q8_0-GGUF --hf-file e5-small-v2-q8_0.gguf -p "The meaning to life and the universe is"
```

### Server:

```bash
llama-server --hf-repo ggml-org/e5-small-v2-Q8_0-GGUF --hf-file e5-small-v2-q8_0.gguf -c 2048
```

Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.

Step 1: Clone llama.cpp from GitHub.

```
git clone https://github.com/ggerganov/llama.cpp
```

Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag, along with any other hardware-specific flags (for example, `LLAMA_CUDA=1` for Nvidia GPUs on Linux).

```
cd llama.cpp && LLAMA_CURL=1 make
```

Step 3: Run inference through the main binary.

```
./llama-cli --hf-repo ggml-org/e5-small-v2-Q8_0-GGUF --hf-file e5-small-v2-q8_0.gguf -p "The meaning to life and the universe is"
```

or

```
./llama-server --hf-repo ggml-org/e5-small-v2-Q8_0-GGUF --hf-file e5-small-v2-q8_0.gguf -c 2048
```
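Since e5-small-v2 is an embedding model, you will usually want vectors rather than generated text. The snippet below is a minimal sketch, not part of the original card: it assumes the third-party `llama-cpp-python` bindings are installed (`pip install llama-cpp-python`) and loads the same GGUF file referenced in the commands above in embedding mode.

```python
# Minimal sketch (assumption: a recent llama-cpp-python release that provides
# Llama.from_pretrained and create_embedding).
from llama_cpp import Llama

# Download the quantized checkpoint from the Hugging Face repo and load it
# in embedding mode instead of text-generation mode.
llm = Llama.from_pretrained(
    repo_id="ggml-org/e5-small-v2-Q8_0-GGUF",
    filename="e5-small-v2-q8_0.gguf",
    embedding=True,
)

result = llm.create_embedding("The meaning to life and the universe is")
vector = result["data"][0]["embedding"]
print(f"embedding dimension: {len(vector)}")
```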
[ "BIOSSES", "SCIFACT" ]
asjoberg/openELM-450M-instruct-raw
asjoberg
null
[ "safetensors", "openelm", "custom_code", "arxiv:2404.14619", "license:other", "region:us" ]
2025-02-10T22:08:47Z
2025-02-10T22:10:11+00:00
24
0
---
license: other
license_name: apple-sample-code-license
license_link: LICENSE
---

# OpenELM

*Sachin Mehta, Mohammad Hossein Sekhavat, Qingqing Cao, Maxwell Horton, Yanzi Jin, Chenfan Sun, Iman Mirzadeh, Mahyar Najibi, Dmitry Belenko, Peter Zatloukal, Mohammad Rastegari*

We introduce **OpenELM**, a family of **Open** **E**fficient **L**anguage **M**odels. OpenELM uses a layer-wise scaling strategy to efficiently allocate parameters within each layer of the transformer model, leading to enhanced accuracy. We pretrained OpenELM models using the [CoreNet](https://github.com/apple/corenet) library. We release both pretrained and instruction-tuned models with 270M, 450M, 1.1B, and 3B parameters. We release the complete framework, encompassing data preparation, training, fine-tuning, and evaluation procedures, alongside multiple pre-trained checkpoints and training logs, to facilitate open research.

Our pre-training dataset contains RefinedWeb, deduplicated PILE, a subset of RedPajama, and a subset of Dolma v1.6, totaling approximately 1.8 trillion tokens. Please check license agreements and terms of these datasets before using them.

## Usage

We have provided an example function to generate output from OpenELM models loaded via [HuggingFace Hub](https://huggingface.co/docs/hub/) in `generate_openelm.py`. You can try the model by running the following command:

```
python generate_openelm.py --model apple/OpenELM-450M-Instruct --hf_access_token [HF_ACCESS_TOKEN] --prompt 'Once upon a time there was' --generate_kwargs repetition_penalty=1.2
```

Please refer to [this link](https://huggingface.co/docs/hub/security-tokens) to obtain your Hugging Face access token.

Additional arguments to the Hugging Face `generate` function can be passed via `generate_kwargs`. As an example, to speed up inference, you can try [lookup token speculative generation](https://huggingface.co/docs/transformers/generation_strategies) by passing the `prompt_lookup_num_tokens` argument as follows:

```
python generate_openelm.py --model apple/OpenELM-450M-Instruct --hf_access_token [HF_ACCESS_TOKEN] --prompt 'Once upon a time there was' --generate_kwargs repetition_penalty=1.2 prompt_lookup_num_tokens=10
```

Alternatively, try model-wise speculative generation with an [assistive model](https://huggingface.co/blog/assisted-generation) by passing a smaller model through the `assistant_model` argument, for example:

```
python generate_openelm.py --model apple/OpenELM-450M-Instruct --hf_access_token [HF_ACCESS_TOKEN] --prompt 'Once upon a time there was' --generate_kwargs repetition_penalty=1.2 --assistant_model [SMALLER_MODEL]
```

## Main Results

### Zero-Shot

| **Model Size** | **ARC-c** | **ARC-e** | **BoolQ** | **HellaSwag** | **PIQA** | **SciQ** | **WinoGrande** | **Average** |
|-----------------------------------------------------------------------------|-----------|-----------|-----------|---------------|-----------|-----------|----------------|-------------|
| [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M) | 26.45 | 45.08 | **53.98** | 46.71 | 69.75 | **84.70** | **53.91** | 54.37 |
| [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct) | **30.55** | **46.68** | 48.56 | **52.07** | **70.78** | 84.40 | 52.72 | **55.11** |
| [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M) | 27.56 | 48.06 | 55.78 | 53.97 | 72.31 | 87.20 | 58.01 | 57.56 |
| [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct) | **30.38** | **50.00** | **60.37** | **59.34** | **72.63** | **88.00** | **58.96** | **59.95** |
| [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B) | 32.34 | **55.43** | 63.58 | 64.81 | **75.57** | **90.60** | 61.72 | 63.44 |
| [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct) | **37.97** | 52.23 | **70.00** | **71.20** | 75.03 | 89.30 | **62.75** | **65.50** |
| [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B) | 35.58 | 59.89 | 67.40 | 72.44 | 78.24 | **92.70** | 65.51 | 67.39 |
| [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct) | **39.42** | **61.74** | **68.17** | **76.36** | **79.00** | 92.50 | **66.85** | **69.15** |

### LLM360

| **Model Size** | **ARC-c** | **HellaSwag** | **MMLU** | **TruthfulQA** | **WinoGrande** | **Average** |
|-----------------------------------------------------------------------------|-----------|---------------|-----------|----------------|----------------|-------------|
| [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M) | 27.65 | 47.15 | 25.72 | **39.24** | **53.83** | 38.72 |
| [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct) | **32.51** | **51.58** | **26.70** | 38.72 | 53.20 | **40.54** |
| [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M) | 30.20 | 53.86 | **26.01** | 40.18 | 57.22 | 41.50 |
| [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct) | **33.53** | **59.31** | 25.41 | **40.48** | **58.33** | **43.41** |
| [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B) | 36.69 | 65.71 | **27.05** | 36.98 | 63.22 | 45.93 |
| [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct) | **41.55** | **71.83** | 25.65 | **45.95** | **64.72** | **49.94** |
| [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B) | 42.24 | 73.28 | **26.76** | 34.98 | 67.25 | 48.90 |
| [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct) | **47.70** | **76.87** | 24.80 | **38.76** | **67.96** | **51.22** |

### OpenLLM Leaderboard

| **Model Size** | **ARC-c** | **CrowS-Pairs** | **HellaSwag** | **MMLU** | **PIQA** | **RACE** | **TruthfulQA** | **WinoGrande** | **Average** |
|-----------------------------------------------------------------------------|-----------|-----------------|---------------|-----------|-----------|-----------|----------------|----------------|-------------|
| [OpenELM-270M](https://huggingface.co/apple/OpenELM-270M) | 27.65 | **66.79** | 47.15 | 25.72 | 69.75 | 30.91 | **39.24** | **53.83** | 45.13 |
| [OpenELM-270M-Instruct](https://huggingface.co/apple/OpenELM-270M-Instruct) | **32.51** | 66.01 | **51.58** | **26.70** | **70.78** | 33.78 | 38.72 | 53.20 | **46.66** |
| [OpenELM-450M](https://huggingface.co/apple/OpenELM-450M) | 30.20 | **68.63** | 53.86 | **26.01** | 72.31 | 33.11 | 40.18 | 57.22 | 47.69 |
| [OpenELM-450M-Instruct](https://huggingface.co/apple/OpenELM-450M-Instruct) | **33.53** | 67.44 | **59.31** | 25.41 | **72.63** | **36.84** | **40.48** | **58.33** | **49.25** |
| [OpenELM-1_1B](https://huggingface.co/apple/OpenELM-1_1B) | 36.69 | **71.74** | 65.71 | **27.05** | **75.57** | 36.46 | 36.98 | 63.22 | 51.68 |
| [OpenELM-1_1B-Instruct](https://huggingface.co/apple/OpenELM-1_1B-Instruct) | **41.55** | 71.02 | **71.83** | 25.65 | 75.03 | **39.43** | **45.95** | **64.72** | **54.40** |
| [OpenELM-3B](https://huggingface.co/apple/OpenELM-3B) | 42.24 | **73.29** | 73.28 | **26.76** | 78.24 | **38.76** | 34.98 | 67.25 | 54.35 |
| [OpenELM-3B-Instruct](https://huggingface.co/apple/OpenELM-3B-Instruct) | **47.70** | 72.33 | **76.87** | 24.80 | **79.00** | 38.47 | **38.76** | **67.96** | **55.73** |

See the technical report for more results and comparison.

## Evaluation

### Setup

Install the following dependencies:

```bash
# install public lm-eval-harness
harness_repo="public-lm-eval-harness"
git clone https://github.com/EleutherAI/lm-evaluation-harness ${harness_repo}
cd ${harness_repo}
# use main branch on 03-15-2024, SHA is dc90fec
git checkout dc90fec
pip install -e .
cd ..

# 66d6242 is the main branch on 2024-04-01
pip install datasets@git+https://github.com/huggingface/datasets.git@66d6242
pip install tokenizers>=0.15.2 transformers>=4.38.2 sentencepiece>=0.2.0
```

### Evaluate OpenELM

```bash
# OpenELM-450M-Instruct
hf_model=apple/OpenELM-450M-Instruct

# this flag is needed because lm-eval-harness sets add_bos_token to False by default, but OpenELM uses the LLaMA tokenizer which requires add_bos_token to be True
tokenizer=meta-llama/Llama-2-7b-hf
add_bos_token=True
batch_size=1

mkdir lm_eval_output

shot=0
task=arc_challenge,arc_easy,boolq,hellaswag,piqa,race,winogrande,sciq,truthfulqa_mc2
lm_eval --model hf \
        --model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
        --tasks ${task} \
        --device cuda:0 \
        --num_fewshot ${shot} \
        --output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
        --batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log

shot=5
task=mmlu,winogrande
lm_eval --model hf \
        --model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
        --tasks ${task} \
        --device cuda:0 \
        --num_fewshot ${shot} \
        --output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
        --batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log

shot=25
task=arc_challenge,crows_pairs_english
lm_eval --model hf \
        --model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
        --tasks ${task} \
        --device cuda:0 \
        --num_fewshot ${shot} \
        --output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
        --batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log

shot=10
task=hellaswag
lm_eval --model hf \
        --model_args pretrained=${hf_model},trust_remote_code=True,add_bos_token=${add_bos_token},tokenizer=${tokenizer} \
        --tasks ${task} \
        --device cuda:0 \
        --num_fewshot ${shot} \
        --output_path ./lm_eval_output/${hf_model//\//_}_${task//,/_}-${shot}shot \
        --batch_size ${batch_size} 2>&1 | tee ./lm_eval_output/eval-${hf_model//\//_}_${task//,/_}-${shot}shot.log
```

## Bias, Risks, and Limitations

The release of OpenELM models aims to empower and enrich the open research community by providing access to state-of-the-art language models. Trained on publicly available datasets, these models are made available without any safety guarantees. Consequently, there exists the possibility of these models producing outputs that are inaccurate, harmful, biased, or objectionable in response to user prompts. Thus, it is imperative for users and developers to undertake thorough safety testing and implement appropriate filtering mechanisms tailored to their specific requirements.
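As a programmatic alternative to the `generate_openelm.py` script shown in the Usage section, the checkpoint can also be loaded directly with Hugging Face Transformers. The snippet below is a minimal sketch rather than an official recipe: it reuses assumptions already present in this card, namely the custom modeling code (`trust_remote_code=True`) and the gated `meta-llama/Llama-2-7b-hf` tokenizer with `add_bos_token=True` from the evaluation commands above.

```python
# Minimal sketch (not from the official card): load an OpenELM checkpoint with
# Hugging Face Transformers and generate text. Assumes `torch` and
# `transformers` are installed and that you have access to the Llama-2 tokenizer.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "apple/OpenELM-450M-Instruct"

# OpenELM ships custom modeling code, hence trust_remote_code=True; the card's
# evaluation setup pairs it with the Llama-2 tokenizer and add_bos_token=True.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf", add_bos_token=True)
model = AutoModelForCausalLM.from_pretrained(model_id, trust_remote_code=True)
model.eval()

inputs = tokenizer("Once upon a time there was", return_tensors="pt")
with torch.no_grad():
    output_ids = model.generate(
        **inputs,
        max_new_tokens=64,
        repetition_penalty=1.2,
        # Optional speed-up, mirroring the prompt-lookup example above:
        # prompt_lookup_num_tokens=10,
    )
print(tokenizer.decode(output_ids[0], skip_special_tokens=True))
```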
## Citation

If you find our work useful, please cite:

```BibTex
@article{mehtaOpenELMEfficientLanguage2024,
  title = {{OpenELM}: {An} {Efficient} {Language} {Model} {Family} with {Open} {Training} and {Inference} {Framework}},
  shorttitle = {{OpenELM}},
  url = {https://arxiv.org/abs/2404.14619v1},
  language = {en},
  urldate = {2024-04-24},
  journal = {arXiv.org},
  author = {Mehta, Sachin and Sekhavat, Mohammad Hossein and Cao, Qingqing and Horton, Maxwell and Jin, Yanzi and Sun, Chenfan and Mirzadeh, Iman and Najibi, Mahyar and Belenko, Dmitry and Zatloukal, Peter and Rastegari, Mohammad},
  month = apr,
  year = {2024},
}

@inproceedings{mehta2022cvnets,
  author = {Mehta, Sachin and Abdolhosseini, Farzad and Rastegari, Mohammad},
  title = {CVNets: High Performance Library for Computer Vision},
  year = {2022},
  booktitle = {Proceedings of the 30th ACM International Conference on Multimedia},
  series = {MM '22}
}
```
[ "SCIQ" ]
Teradata/bge-large-en-v1.5
Teradata
feature-extraction
[ "onnx", "bert", "feature-extraction", "sentence-similarity", "mteb", "teradata", "en", "license:mit", "model-index", "region:us" ]
2025-02-12T16:10:46Z
2025-03-04T09:39:05+00:00
24
0
--- language: - en license: mit tags: - feature-extraction - sentence-similarity - mteb - onnx - teradata model-index: - name: bge-large-en-v1.5 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 75.8507462686567 - type: ap value: 38.566457320228245 - type: f1 value: 69.69386648043475 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 92.416675 - type: ap value: 89.1928861155922 - type: f1 value: 92.39477019574215 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 48.175999999999995 - type: f1 value: 47.80712792870253 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 40.184999999999995 - type: map_at_10 value: 55.654 - type: map_at_100 value: 56.25 - type: map_at_1000 value: 56.255 - type: map_at_3 value: 51.742999999999995 - type: map_at_5 value: 54.129000000000005 - type: mrr_at_1 value: 40.967 - type: mrr_at_10 value: 55.96 - type: mrr_at_100 value: 56.54900000000001 - type: mrr_at_1000 value: 56.554 - type: mrr_at_3 value: 51.980000000000004 - type: mrr_at_5 value: 54.44 - type: ndcg_at_1 value: 40.184999999999995 - type: ndcg_at_10 value: 63.542 - type: ndcg_at_100 value: 65.96499999999999 - type: ndcg_at_1000 value: 66.08699999999999 - type: ndcg_at_3 value: 55.582 - type: ndcg_at_5 value: 59.855000000000004 - type: precision_at_1 value: 40.184999999999995 - type: precision_at_10 value: 8.841000000000001 - type: precision_at_100 value: 0.987 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 22.238 - type: precision_at_5 value: 15.405 - type: recall_at_1 value: 40.184999999999995 - type: recall_at_10 value: 88.407 - type: recall_at_100 value: 98.72 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 66.714 - type: recall_at_5 value: 77.027 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 48.567077926750066 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 43.19453389182364 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 64.46555939623092 - type: mrr value: 77.82361605768807 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 84.9554128814735 - type: cos_sim_spearman value: 84.65373612172036 - type: euclidean_pearson value: 83.2905059954138 - type: euclidean_spearman value: 84.52240782811128 - type: manhattan_pearson value: 82.99533802997436 - type: manhattan_spearman value: 84.20673798475734 - task: type: Classification dataset: name: MTEB 
Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 87.78896103896103 - type: f1 value: 87.77189310964883 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 39.714538337650495 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 36.90108349284447 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 32.795 - type: map_at_10 value: 43.669000000000004 - type: map_at_100 value: 45.151 - type: map_at_1000 value: 45.278 - type: map_at_3 value: 40.006 - type: map_at_5 value: 42.059999999999995 - type: mrr_at_1 value: 39.771 - type: mrr_at_10 value: 49.826 - type: mrr_at_100 value: 50.504000000000005 - type: mrr_at_1000 value: 50.549 - type: mrr_at_3 value: 47.115 - type: mrr_at_5 value: 48.832 - type: ndcg_at_1 value: 39.771 - type: ndcg_at_10 value: 50.217999999999996 - type: ndcg_at_100 value: 55.454 - type: ndcg_at_1000 value: 57.37 - type: ndcg_at_3 value: 44.885000000000005 - type: ndcg_at_5 value: 47.419 - type: precision_at_1 value: 39.771 - type: precision_at_10 value: 9.642000000000001 - type: precision_at_100 value: 1.538 - type: precision_at_1000 value: 0.198 - type: precision_at_3 value: 21.268 - type: precision_at_5 value: 15.536 - type: recall_at_1 value: 32.795 - type: recall_at_10 value: 62.580999999999996 - type: recall_at_100 value: 84.438 - type: recall_at_1000 value: 96.492 - type: recall_at_3 value: 47.071000000000005 - type: recall_at_5 value: 54.079 - type: map_at_1 value: 32.671 - type: map_at_10 value: 43.334 - type: map_at_100 value: 44.566 - type: map_at_1000 value: 44.702999999999996 - type: map_at_3 value: 40.343 - type: map_at_5 value: 41.983 - type: mrr_at_1 value: 40.764 - type: mrr_at_10 value: 49.382 - type: mrr_at_100 value: 49.988 - type: mrr_at_1000 value: 50.03300000000001 - type: mrr_at_3 value: 47.293 - type: mrr_at_5 value: 48.51 - type: ndcg_at_1 value: 40.764 - type: ndcg_at_10 value: 49.039 - type: ndcg_at_100 value: 53.259 - type: ndcg_at_1000 value: 55.253 - type: ndcg_at_3 value: 45.091 - type: ndcg_at_5 value: 46.839999999999996 - type: precision_at_1 value: 40.764 - type: precision_at_10 value: 9.191 - type: precision_at_100 value: 1.476 - type: precision_at_1000 value: 0.19499999999999998 - type: precision_at_3 value: 21.72 - type: precision_at_5 value: 15.299 - type: recall_at_1 value: 32.671 - type: recall_at_10 value: 58.816 - type: recall_at_100 value: 76.654 - type: recall_at_1000 value: 89.05999999999999 - type: recall_at_3 value: 46.743 - type: recall_at_5 value: 51.783 - type: map_at_1 value: 40.328 - type: map_at_10 value: 53.32599999999999 - type: map_at_100 value: 54.37499999999999 - type: map_at_1000 value: 54.429 - type: map_at_3 value: 49.902 - type: map_at_5 value: 52.002 - type: mrr_at_1 value: 46.332 - type: mrr_at_10 value: 56.858 - type: mrr_at_100 value: 57.522 - type: mrr_at_1000 value: 57.54899999999999 - type: mrr_at_3 value: 54.472 - type: mrr_at_5 value: 55.996 - type: ndcg_at_1 value: 46.332 - type: ndcg_at_10 value: 59.313 - type: ndcg_at_100 value: 63.266999999999996 - type: 
ndcg_at_1000 value: 64.36 - type: ndcg_at_3 value: 53.815000000000005 - type: ndcg_at_5 value: 56.814 - type: precision_at_1 value: 46.332 - type: precision_at_10 value: 9.53 - type: precision_at_100 value: 1.238 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 24.054000000000002 - type: precision_at_5 value: 16.589000000000002 - type: recall_at_1 value: 40.328 - type: recall_at_10 value: 73.421 - type: recall_at_100 value: 90.059 - type: recall_at_1000 value: 97.81 - type: recall_at_3 value: 59.009 - type: recall_at_5 value: 66.352 - type: map_at_1 value: 27.424 - type: map_at_10 value: 36.332 - type: map_at_100 value: 37.347 - type: map_at_1000 value: 37.422 - type: map_at_3 value: 33.743 - type: map_at_5 value: 35.176 - type: mrr_at_1 value: 29.153000000000002 - type: mrr_at_10 value: 38.233 - type: mrr_at_100 value: 39.109 - type: mrr_at_1000 value: 39.164 - type: mrr_at_3 value: 35.876000000000005 - type: mrr_at_5 value: 37.169000000000004 - type: ndcg_at_1 value: 29.153000000000002 - type: ndcg_at_10 value: 41.439 - type: ndcg_at_100 value: 46.42 - type: ndcg_at_1000 value: 48.242000000000004 - type: ndcg_at_3 value: 36.362 - type: ndcg_at_5 value: 38.743 - type: precision_at_1 value: 29.153000000000002 - type: precision_at_10 value: 6.315999999999999 - type: precision_at_100 value: 0.927 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 15.443000000000001 - type: precision_at_5 value: 10.644 - type: recall_at_1 value: 27.424 - type: recall_at_10 value: 55.364000000000004 - type: recall_at_100 value: 78.211 - type: recall_at_1000 value: 91.74600000000001 - type: recall_at_3 value: 41.379 - type: recall_at_5 value: 47.14 - type: map_at_1 value: 19.601 - type: map_at_10 value: 27.826 - type: map_at_100 value: 29.017 - type: map_at_1000 value: 29.137 - type: map_at_3 value: 25.125999999999998 - type: map_at_5 value: 26.765 - type: mrr_at_1 value: 24.005000000000003 - type: mrr_at_10 value: 32.716 - type: mrr_at_100 value: 33.631 - type: mrr_at_1000 value: 33.694 - type: mrr_at_3 value: 29.934 - type: mrr_at_5 value: 31.630999999999997 - type: ndcg_at_1 value: 24.005000000000003 - type: ndcg_at_10 value: 33.158 - type: ndcg_at_100 value: 38.739000000000004 - type: ndcg_at_1000 value: 41.495 - type: ndcg_at_3 value: 28.185 - type: ndcg_at_5 value: 30.796 - type: precision_at_1 value: 24.005000000000003 - type: precision_at_10 value: 5.908 - type: precision_at_100 value: 1.005 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 13.391 - type: precision_at_5 value: 9.876 - type: recall_at_1 value: 19.601 - type: recall_at_10 value: 44.746 - type: recall_at_100 value: 68.82300000000001 - type: recall_at_1000 value: 88.215 - type: recall_at_3 value: 31.239 - type: recall_at_5 value: 37.695 - type: map_at_1 value: 30.130000000000003 - type: map_at_10 value: 40.96 - type: map_at_100 value: 42.282 - type: map_at_1000 value: 42.392 - type: map_at_3 value: 37.889 - type: map_at_5 value: 39.661 - type: mrr_at_1 value: 36.958999999999996 - type: mrr_at_10 value: 46.835 - type: mrr_at_100 value: 47.644 - type: mrr_at_1000 value: 47.688 - type: mrr_at_3 value: 44.562000000000005 - type: mrr_at_5 value: 45.938 - type: ndcg_at_1 value: 36.958999999999996 - type: ndcg_at_10 value: 47.06 - type: ndcg_at_100 value: 52.345 - type: ndcg_at_1000 value: 54.35 - type: ndcg_at_3 value: 42.301 - type: ndcg_at_5 value: 44.635999999999996 - type: precision_at_1 value: 36.958999999999996 - type: precision_at_10 value: 
8.479000000000001 - type: precision_at_100 value: 1.284 - type: precision_at_1000 value: 0.163 - type: precision_at_3 value: 20.244 - type: precision_at_5 value: 14.224999999999998 - type: recall_at_1 value: 30.130000000000003 - type: recall_at_10 value: 59.27 - type: recall_at_100 value: 81.195 - type: recall_at_1000 value: 94.21199999999999 - type: recall_at_3 value: 45.885 - type: recall_at_5 value: 52.016 - type: map_at_1 value: 26.169999999999998 - type: map_at_10 value: 36.451 - type: map_at_100 value: 37.791000000000004 - type: map_at_1000 value: 37.897 - type: map_at_3 value: 33.109 - type: map_at_5 value: 34.937000000000005 - type: mrr_at_1 value: 32.877 - type: mrr_at_10 value: 42.368 - type: mrr_at_100 value: 43.201 - type: mrr_at_1000 value: 43.259 - type: mrr_at_3 value: 39.763999999999996 - type: mrr_at_5 value: 41.260000000000005 - type: ndcg_at_1 value: 32.877 - type: ndcg_at_10 value: 42.659000000000006 - type: ndcg_at_100 value: 48.161 - type: ndcg_at_1000 value: 50.345 - type: ndcg_at_3 value: 37.302 - type: ndcg_at_5 value: 39.722 - type: precision_at_1 value: 32.877 - type: precision_at_10 value: 7.9 - type: precision_at_100 value: 1.236 - type: precision_at_1000 value: 0.158 - type: precision_at_3 value: 17.846 - type: precision_at_5 value: 12.9 - type: recall_at_1 value: 26.169999999999998 - type: recall_at_10 value: 55.35 - type: recall_at_100 value: 78.755 - type: recall_at_1000 value: 93.518 - type: recall_at_3 value: 40.176 - type: recall_at_5 value: 46.589000000000006 - type: map_at_1 value: 27.15516666666667 - type: map_at_10 value: 36.65741666666667 - type: map_at_100 value: 37.84991666666666 - type: map_at_1000 value: 37.96316666666667 - type: map_at_3 value: 33.74974999999999 - type: map_at_5 value: 35.3765 - type: mrr_at_1 value: 32.08233333333334 - type: mrr_at_10 value: 41.033833333333334 - type: mrr_at_100 value: 41.84524999999999 - type: mrr_at_1000 value: 41.89983333333333 - type: mrr_at_3 value: 38.62008333333333 - type: mrr_at_5 value: 40.03441666666666 - type: ndcg_at_1 value: 32.08233333333334 - type: ndcg_at_10 value: 42.229 - type: ndcg_at_100 value: 47.26716666666667 - type: ndcg_at_1000 value: 49.43466666666667 - type: ndcg_at_3 value: 37.36408333333333 - type: ndcg_at_5 value: 39.6715 - type: precision_at_1 value: 32.08233333333334 - type: precision_at_10 value: 7.382583333333334 - type: precision_at_100 value: 1.16625 - type: precision_at_1000 value: 0.15408333333333332 - type: precision_at_3 value: 17.218 - type: precision_at_5 value: 12.21875 - type: recall_at_1 value: 27.15516666666667 - type: recall_at_10 value: 54.36683333333333 - type: recall_at_100 value: 76.37183333333333 - type: recall_at_1000 value: 91.26183333333333 - type: recall_at_3 value: 40.769916666666674 - type: recall_at_5 value: 46.702333333333335 - type: map_at_1 value: 25.749 - type: map_at_10 value: 33.001999999999995 - type: map_at_100 value: 33.891 - type: map_at_1000 value: 33.993 - type: map_at_3 value: 30.703999999999997 - type: map_at_5 value: 31.959 - type: mrr_at_1 value: 28.834 - type: mrr_at_10 value: 35.955 - type: mrr_at_100 value: 36.709 - type: mrr_at_1000 value: 36.779 - type: mrr_at_3 value: 33.947 - type: mrr_at_5 value: 35.089 - type: ndcg_at_1 value: 28.834 - type: ndcg_at_10 value: 37.329 - type: ndcg_at_100 value: 41.79 - type: ndcg_at_1000 value: 44.169000000000004 - type: ndcg_at_3 value: 33.184999999999995 - type: ndcg_at_5 value: 35.107 - type: precision_at_1 value: 28.834 - type: precision_at_10 value: 5.7669999999999995 - type: 
precision_at_100 value: 0.876 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 14.213000000000001 - type: precision_at_5 value: 9.754999999999999 - type: recall_at_1 value: 25.749 - type: recall_at_10 value: 47.791 - type: recall_at_100 value: 68.255 - type: recall_at_1000 value: 85.749 - type: recall_at_3 value: 36.199 - type: recall_at_5 value: 41.071999999999996 - type: map_at_1 value: 17.777 - type: map_at_10 value: 25.201 - type: map_at_100 value: 26.423999999999996 - type: map_at_1000 value: 26.544 - type: map_at_3 value: 22.869 - type: map_at_5 value: 24.023 - type: mrr_at_1 value: 21.473 - type: mrr_at_10 value: 29.12 - type: mrr_at_100 value: 30.144 - type: mrr_at_1000 value: 30.215999999999998 - type: mrr_at_3 value: 26.933 - type: mrr_at_5 value: 28.051 - type: ndcg_at_1 value: 21.473 - type: ndcg_at_10 value: 30.003 - type: ndcg_at_100 value: 35.766 - type: ndcg_at_1000 value: 38.501000000000005 - type: ndcg_at_3 value: 25.773000000000003 - type: ndcg_at_5 value: 27.462999999999997 - type: precision_at_1 value: 21.473 - type: precision_at_10 value: 5.482 - type: precision_at_100 value: 0.975 - type: precision_at_1000 value: 0.13799999999999998 - type: precision_at_3 value: 12.205 - type: precision_at_5 value: 8.692 - type: recall_at_1 value: 17.777 - type: recall_at_10 value: 40.582 - type: recall_at_100 value: 66.305 - type: recall_at_1000 value: 85.636 - type: recall_at_3 value: 28.687 - type: recall_at_5 value: 33.089 - type: map_at_1 value: 26.677 - type: map_at_10 value: 36.309000000000005 - type: map_at_100 value: 37.403999999999996 - type: map_at_1000 value: 37.496 - type: map_at_3 value: 33.382 - type: map_at_5 value: 34.98 - type: mrr_at_1 value: 31.343 - type: mrr_at_10 value: 40.549 - type: mrr_at_100 value: 41.342 - type: mrr_at_1000 value: 41.397 - type: mrr_at_3 value: 38.029 - type: mrr_at_5 value: 39.451 - type: ndcg_at_1 value: 31.343 - type: ndcg_at_10 value: 42.1 - type: ndcg_at_100 value: 47.089999999999996 - type: ndcg_at_1000 value: 49.222 - type: ndcg_at_3 value: 36.836999999999996 - type: ndcg_at_5 value: 39.21 - type: precision_at_1 value: 31.343 - type: precision_at_10 value: 7.164 - type: precision_at_100 value: 1.0959999999999999 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 16.915 - type: precision_at_5 value: 11.940000000000001 - type: recall_at_1 value: 26.677 - type: recall_at_10 value: 55.54599999999999 - type: recall_at_100 value: 77.094 - type: recall_at_1000 value: 92.01 - type: recall_at_3 value: 41.191 - type: recall_at_5 value: 47.006 - type: map_at_1 value: 24.501 - type: map_at_10 value: 33.102 - type: map_at_100 value: 34.676 - type: map_at_1000 value: 34.888000000000005 - type: map_at_3 value: 29.944 - type: map_at_5 value: 31.613999999999997 - type: mrr_at_1 value: 29.447000000000003 - type: mrr_at_10 value: 37.996 - type: mrr_at_100 value: 38.946 - type: mrr_at_1000 value: 38.995000000000005 - type: mrr_at_3 value: 35.079 - type: mrr_at_5 value: 36.69 - type: ndcg_at_1 value: 29.447000000000003 - type: ndcg_at_10 value: 39.232 - type: ndcg_at_100 value: 45.247 - type: ndcg_at_1000 value: 47.613 - type: ndcg_at_3 value: 33.922999999999995 - type: ndcg_at_5 value: 36.284 - type: precision_at_1 value: 29.447000000000003 - type: precision_at_10 value: 7.648000000000001 - type: precision_at_100 value: 1.516 - type: precision_at_1000 value: 0.23900000000000002 - type: precision_at_3 value: 16.008 - type: precision_at_5 value: 11.779 - type: recall_at_1 value: 24.501 - 
type: recall_at_10 value: 51.18899999999999 - type: recall_at_100 value: 78.437 - type: recall_at_1000 value: 92.842 - type: recall_at_3 value: 35.808 - type: recall_at_5 value: 42.197 - type: map_at_1 value: 22.039 - type: map_at_10 value: 30.377 - type: map_at_100 value: 31.275 - type: map_at_1000 value: 31.379 - type: map_at_3 value: 27.98 - type: map_at_5 value: 29.358 - type: mrr_at_1 value: 24.03 - type: mrr_at_10 value: 32.568000000000005 - type: mrr_at_100 value: 33.403 - type: mrr_at_1000 value: 33.475 - type: mrr_at_3 value: 30.436999999999998 - type: mrr_at_5 value: 31.796000000000003 - type: ndcg_at_1 value: 24.03 - type: ndcg_at_10 value: 35.198 - type: ndcg_at_100 value: 39.668 - type: ndcg_at_1000 value: 42.296 - type: ndcg_at_3 value: 30.709999999999997 - type: ndcg_at_5 value: 33.024 - type: precision_at_1 value: 24.03 - type: precision_at_10 value: 5.564 - type: precision_at_100 value: 0.828 - type: precision_at_1000 value: 0.117 - type: precision_at_3 value: 13.309000000000001 - type: precision_at_5 value: 9.39 - type: recall_at_1 value: 22.039 - type: recall_at_10 value: 47.746 - type: recall_at_100 value: 68.23599999999999 - type: recall_at_1000 value: 87.852 - type: recall_at_3 value: 35.852000000000004 - type: recall_at_5 value: 41.410000000000004 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 15.692999999999998 - type: map_at_10 value: 26.903 - type: map_at_100 value: 28.987000000000002 - type: map_at_1000 value: 29.176999999999996 - type: map_at_3 value: 22.137 - type: map_at_5 value: 24.758 - type: mrr_at_1 value: 35.57 - type: mrr_at_10 value: 47.821999999999996 - type: mrr_at_100 value: 48.608000000000004 - type: mrr_at_1000 value: 48.638999999999996 - type: mrr_at_3 value: 44.452000000000005 - type: mrr_at_5 value: 46.546 - type: ndcg_at_1 value: 35.57 - type: ndcg_at_10 value: 36.567 - type: ndcg_at_100 value: 44.085 - type: ndcg_at_1000 value: 47.24 - type: ndcg_at_3 value: 29.964000000000002 - type: ndcg_at_5 value: 32.511 - type: precision_at_1 value: 35.57 - type: precision_at_10 value: 11.485 - type: precision_at_100 value: 1.9619999999999997 - type: precision_at_1000 value: 0.256 - type: precision_at_3 value: 22.237000000000002 - type: precision_at_5 value: 17.471999999999998 - type: recall_at_1 value: 15.692999999999998 - type: recall_at_10 value: 43.056 - type: recall_at_100 value: 68.628 - type: recall_at_1000 value: 86.075 - type: recall_at_3 value: 26.918999999999997 - type: recall_at_5 value: 34.14 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 9.53 - type: map_at_10 value: 20.951 - type: map_at_100 value: 30.136000000000003 - type: map_at_1000 value: 31.801000000000002 - type: map_at_3 value: 15.021 - type: map_at_5 value: 17.471999999999998 - type: mrr_at_1 value: 71 - type: mrr_at_10 value: 79.176 - type: mrr_at_100 value: 79.418 - type: mrr_at_1000 value: 79.426 - type: mrr_at_3 value: 78.125 - type: mrr_at_5 value: 78.61200000000001 - type: ndcg_at_1 value: 58.5 - type: ndcg_at_10 value: 44.106 - type: ndcg_at_100 value: 49.268 - type: ndcg_at_1000 value: 56.711999999999996 - type: ndcg_at_3 value: 48.934 - type: ndcg_at_5 value: 45.826 - type: precision_at_1 value: 71 - type: precision_at_10 value: 35 - type: precision_at_100 value: 11.360000000000001 - type: precision_at_1000 value: 2.046 - type: precision_at_3 value: 52.833 - type: 
precision_at_5 value: 44.15 - type: recall_at_1 value: 9.53 - type: recall_at_10 value: 26.811 - type: recall_at_100 value: 55.916999999999994 - type: recall_at_1000 value: 79.973 - type: recall_at_3 value: 16.413 - type: recall_at_5 value: 19.980999999999998 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 51.519999999999996 - type: f1 value: 46.36601294761231 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 74.413 - type: map_at_10 value: 83.414 - type: map_at_100 value: 83.621 - type: map_at_1000 value: 83.635 - type: map_at_3 value: 82.337 - type: map_at_5 value: 83.039 - type: mrr_at_1 value: 80.19800000000001 - type: mrr_at_10 value: 87.715 - type: mrr_at_100 value: 87.778 - type: mrr_at_1000 value: 87.779 - type: mrr_at_3 value: 87.106 - type: mrr_at_5 value: 87.555 - type: ndcg_at_1 value: 80.19800000000001 - type: ndcg_at_10 value: 87.182 - type: ndcg_at_100 value: 87.90299999999999 - type: ndcg_at_1000 value: 88.143 - type: ndcg_at_3 value: 85.60600000000001 - type: ndcg_at_5 value: 86.541 - type: precision_at_1 value: 80.19800000000001 - type: precision_at_10 value: 10.531 - type: precision_at_100 value: 1.113 - type: precision_at_1000 value: 0.11499999999999999 - type: precision_at_3 value: 32.933 - type: precision_at_5 value: 20.429 - type: recall_at_1 value: 74.413 - type: recall_at_10 value: 94.363 - type: recall_at_100 value: 97.165 - type: recall_at_1000 value: 98.668 - type: recall_at_3 value: 90.108 - type: recall_at_5 value: 92.52 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 22.701 - type: map_at_10 value: 37.122 - type: map_at_100 value: 39.178000000000004 - type: map_at_1000 value: 39.326 - type: map_at_3 value: 32.971000000000004 - type: map_at_5 value: 35.332 - type: mrr_at_1 value: 44.753 - type: mrr_at_10 value: 53.452 - type: mrr_at_100 value: 54.198 - type: mrr_at_1000 value: 54.225 - type: mrr_at_3 value: 50.952 - type: mrr_at_5 value: 52.464 - type: ndcg_at_1 value: 44.753 - type: ndcg_at_10 value: 45.021 - type: ndcg_at_100 value: 52.028 - type: ndcg_at_1000 value: 54.596000000000004 - type: ndcg_at_3 value: 41.622 - type: ndcg_at_5 value: 42.736000000000004 - type: precision_at_1 value: 44.753 - type: precision_at_10 value: 12.284 - type: precision_at_100 value: 1.955 - type: precision_at_1000 value: 0.243 - type: precision_at_3 value: 27.828999999999997 - type: precision_at_5 value: 20.061999999999998 - type: recall_at_1 value: 22.701 - type: recall_at_10 value: 51.432 - type: recall_at_100 value: 77.009 - type: recall_at_1000 value: 92.511 - type: recall_at_3 value: 37.919000000000004 - type: recall_at_5 value: 44.131 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 40.189 - type: map_at_10 value: 66.24600000000001 - type: map_at_100 value: 67.098 - type: map_at_1000 value: 67.149 - type: map_at_3 value: 62.684 - type: map_at_5 value: 64.974 - type: mrr_at_1 value: 80.378 - type: mrr_at_10 value: 86.127 - type: mrr_at_100 value: 86.29299999999999 - type: mrr_at_1000 value: 86.297 - type: mrr_at_3 value: 85.31400000000001 - type: mrr_at_5 value: 85.858 - type: ndcg_at_1 value: 80.378 - type: ndcg_at_10 value: 74.101 - type: 
ndcg_at_100 value: 76.993 - type: ndcg_at_1000 value: 77.948 - type: ndcg_at_3 value: 69.232 - type: ndcg_at_5 value: 72.04599999999999 - type: precision_at_1 value: 80.378 - type: precision_at_10 value: 15.595999999999998 - type: precision_at_100 value: 1.7840000000000003 - type: precision_at_1000 value: 0.191 - type: precision_at_3 value: 44.884 - type: precision_at_5 value: 29.145 - type: recall_at_1 value: 40.189 - type: recall_at_10 value: 77.981 - type: recall_at_100 value: 89.21 - type: recall_at_1000 value: 95.48299999999999 - type: recall_at_3 value: 67.326 - type: recall_at_5 value: 72.863 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 92.84599999999999 - type: ap value: 89.4710787567357 - type: f1 value: 92.83752676932258 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 23.132 - type: map_at_10 value: 35.543 - type: map_at_100 value: 36.702 - type: map_at_1000 value: 36.748999999999995 - type: map_at_3 value: 31.737 - type: map_at_5 value: 33.927 - type: mrr_at_1 value: 23.782 - type: mrr_at_10 value: 36.204 - type: mrr_at_100 value: 37.29 - type: mrr_at_1000 value: 37.330999999999996 - type: mrr_at_3 value: 32.458999999999996 - type: mrr_at_5 value: 34.631 - type: ndcg_at_1 value: 23.782 - type: ndcg_at_10 value: 42.492999999999995 - type: ndcg_at_100 value: 47.985 - type: ndcg_at_1000 value: 49.141 - type: ndcg_at_3 value: 34.748000000000005 - type: ndcg_at_5 value: 38.651 - type: precision_at_1 value: 23.782 - type: precision_at_10 value: 6.665 - type: precision_at_100 value: 0.941 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.776 - type: precision_at_5 value: 10.84 - type: recall_at_1 value: 23.132 - type: recall_at_10 value: 63.794 - type: recall_at_100 value: 89.027 - type: recall_at_1000 value: 97.807 - type: recall_at_3 value: 42.765 - type: recall_at_5 value: 52.11 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 94.59188326493388 - type: f1 value: 94.3842594786827 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 79.49384404924761 - type: f1 value: 59.7580539534629 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 77.56220578345663 - type: f1 value: 75.27228165561478 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 80.53463349024884 - type: f1 value: 80.4893958236536 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 32.56100273484962 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 
35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 31.470380028839607 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 32.06102792457849 - type: mrr value: 33.30709199672238 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 6.776999999999999 - type: map_at_10 value: 14.924000000000001 - type: map_at_100 value: 18.955 - type: map_at_1000 value: 20.538999999999998 - type: map_at_3 value: 10.982 - type: map_at_5 value: 12.679000000000002 - type: mrr_at_1 value: 47.988 - type: mrr_at_10 value: 57.232000000000006 - type: mrr_at_100 value: 57.818999999999996 - type: mrr_at_1000 value: 57.847 - type: mrr_at_3 value: 54.901999999999994 - type: mrr_at_5 value: 56.481 - type: ndcg_at_1 value: 46.594 - type: ndcg_at_10 value: 38.129000000000005 - type: ndcg_at_100 value: 35.54 - type: ndcg_at_1000 value: 44.172 - type: ndcg_at_3 value: 43.025999999999996 - type: ndcg_at_5 value: 41.052 - type: precision_at_1 value: 47.988 - type: precision_at_10 value: 28.111000000000004 - type: precision_at_100 value: 8.929 - type: precision_at_1000 value: 2.185 - type: precision_at_3 value: 40.144000000000005 - type: precision_at_5 value: 35.232 - type: recall_at_1 value: 6.776999999999999 - type: recall_at_10 value: 19.289 - type: recall_at_100 value: 36.359 - type: recall_at_1000 value: 67.54 - type: recall_at_3 value: 11.869 - type: recall_at_5 value: 14.999 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 31.108000000000004 - type: map_at_10 value: 47.126000000000005 - type: map_at_100 value: 48.171 - type: map_at_1000 value: 48.199 - type: map_at_3 value: 42.734 - type: map_at_5 value: 45.362 - type: mrr_at_1 value: 34.936 - type: mrr_at_10 value: 49.571 - type: mrr_at_100 value: 50.345 - type: mrr_at_1000 value: 50.363 - type: mrr_at_3 value: 45.959 - type: mrr_at_5 value: 48.165 - type: ndcg_at_1 value: 34.936 - type: ndcg_at_10 value: 55.028999999999996 - type: ndcg_at_100 value: 59.244 - type: ndcg_at_1000 value: 59.861 - type: ndcg_at_3 value: 46.872 - type: ndcg_at_5 value: 51.217999999999996 - type: precision_at_1 value: 34.936 - type: precision_at_10 value: 9.099 - type: precision_at_100 value: 1.145 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 21.456 - type: precision_at_5 value: 15.411 - type: recall_at_1 value: 31.108000000000004 - type: recall_at_10 value: 76.53999999999999 - type: recall_at_100 value: 94.39 - type: recall_at_1000 value: 98.947 - type: recall_at_3 value: 55.572 - type: recall_at_5 value: 65.525 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 71.56400000000001 - type: map_at_10 value: 85.482 - type: map_at_100 value: 86.114 - type: map_at_1000 value: 86.13 - type: map_at_3 value: 82.607 - type: map_at_5 value: 84.405 - type: mrr_at_1 value: 82.42 - type: mrr_at_10 value: 88.304 - type: mrr_at_100 value: 88.399 - type: mrr_at_1000 value: 88.399 - type: mrr_at_3 value: 87.37 - type: mrr_at_5 value: 88.024 - type: ndcg_at_1 value: 82.45 - type: ndcg_at_10 value: 89.06500000000001 - type: ndcg_at_100 value: 90.232 - type: ndcg_at_1000 value: 90.305 - type: ndcg_at_3 value: 86.375 - type: ndcg_at_5 value: 87.85300000000001 - type: 
precision_at_1 value: 82.45 - type: precision_at_10 value: 13.486999999999998 - type: precision_at_100 value: 1.534 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 37.813 - type: precision_at_5 value: 24.773999999999997 - type: recall_at_1 value: 71.56400000000001 - type: recall_at_10 value: 95.812 - type: recall_at_100 value: 99.7 - type: recall_at_1000 value: 99.979 - type: recall_at_3 value: 87.966 - type: recall_at_5 value: 92.268 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 57.241876648614145 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 64.66212576446223 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 5.308 - type: map_at_10 value: 13.803 - type: map_at_100 value: 16.176 - type: map_at_1000 value: 16.561 - type: map_at_3 value: 9.761000000000001 - type: map_at_5 value: 11.802 - type: mrr_at_1 value: 26.200000000000003 - type: mrr_at_10 value: 37.621 - type: mrr_at_100 value: 38.767 - type: mrr_at_1000 value: 38.815 - type: mrr_at_3 value: 34.117 - type: mrr_at_5 value: 36.107 - type: ndcg_at_1 value: 26.200000000000003 - type: ndcg_at_10 value: 22.64 - type: ndcg_at_100 value: 31.567 - type: ndcg_at_1000 value: 37.623 - type: ndcg_at_3 value: 21.435000000000002 - type: ndcg_at_5 value: 18.87 - type: precision_at_1 value: 26.200000000000003 - type: precision_at_10 value: 11.74 - type: precision_at_100 value: 2.465 - type: precision_at_1000 value: 0.391 - type: precision_at_3 value: 20.033 - type: precision_at_5 value: 16.64 - type: recall_at_1 value: 5.308 - type: recall_at_10 value: 23.794999999999998 - type: recall_at_100 value: 50.015 - type: recall_at_1000 value: 79.283 - type: recall_at_3 value: 12.178 - type: recall_at_5 value: 16.882 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 84.93231134675553 - type: cos_sim_spearman value: 81.68319292603205 - type: euclidean_pearson value: 81.8396814380367 - type: euclidean_spearman value: 81.24641903349945 - type: manhattan_pearson value: 81.84698799204274 - type: manhattan_spearman value: 81.24269997904105 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 86.73241671587446 - type: cos_sim_spearman value: 79.05091082971826 - type: euclidean_pearson value: 83.91146869578044 - type: euclidean_spearman value: 79.87978465370936 - type: manhattan_pearson value: 83.90888338917678 - type: manhattan_spearman value: 79.87482848584241 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 85.14970731146177 - type: cos_sim_spearman value: 86.37363490084627 - type: euclidean_pearson value: 83.02154218530433 - type: euclidean_spearman value: 83.80258761957367 - type: manhattan_pearson value: 83.01664495119347 - type: manhattan_spearman value: 83.77567458007952 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts 
config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 83.40474139886784 - type: cos_sim_spearman value: 82.77768789165984 - type: euclidean_pearson value: 80.7065877443695 - type: euclidean_spearman value: 81.375940662505 - type: manhattan_pearson value: 80.6507552270278 - type: manhattan_spearman value: 81.32782179098741 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 87.08585968722274 - type: cos_sim_spearman value: 88.03110031451399 - type: euclidean_pearson value: 85.74012019602384 - type: euclidean_spearman value: 86.13592849438209 - type: manhattan_pearson value: 85.74404842369206 - type: manhattan_spearman value: 86.14492318960154 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 84.95069052788875 - type: cos_sim_spearman value: 86.4867991595147 - type: euclidean_pearson value: 84.31013325754635 - type: euclidean_spearman value: 85.01529258006482 - type: manhattan_pearson value: 84.26995570085374 - type: manhattan_spearman value: 84.96982104986162 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.54617647971897 - type: cos_sim_spearman value: 87.49834181751034 - type: euclidean_pearson value: 86.01015322577122 - type: euclidean_spearman value: 84.63362652063199 - type: manhattan_pearson value: 86.13807574475706 - type: manhattan_spearman value: 84.7772370721132 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 67.20047755786615 - type: cos_sim_spearman value: 67.05324077987636 - type: euclidean_pearson value: 66.91930642976601 - type: euclidean_spearman value: 65.21491856099105 - type: manhattan_pearson value: 66.78756851976624 - type: manhattan_spearman value: 65.12356257740728 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 86.19852871539686 - type: cos_sim_spearman value: 87.5161895296395 - type: euclidean_pearson value: 84.59848645207485 - type: euclidean_spearman value: 85.26427328757919 - type: manhattan_pearson value: 84.59747366996524 - type: manhattan_spearman value: 85.24045855146915 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 87.63320317811032 - type: mrr value: 96.26242947321379 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 60.928000000000004 - type: map_at_10 value: 70.112 - type: map_at_100 value: 70.59299999999999 - type: map_at_1000 value: 70.623 - type: map_at_3 value: 66.846 - type: map_at_5 value: 68.447 - type: mrr_at_1 value: 64 - type: mrr_at_10 value: 71.212 - type: mrr_at_100 value: 71.616 - type: mrr_at_1000 value: 71.64500000000001 - type: mrr_at_3 value: 68.77799999999999 - type: mrr_at_5 value: 70.094 - type: ndcg_at_1 value: 64 - type: 
ndcg_at_10 value: 74.607 - type: ndcg_at_100 value: 76.416 - type: ndcg_at_1000 value: 77.102 - type: ndcg_at_3 value: 69.126 - type: ndcg_at_5 value: 71.41300000000001 - type: precision_at_1 value: 64 - type: precision_at_10 value: 9.933 - type: precision_at_100 value: 1.077 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 26.556 - type: precision_at_5 value: 17.467 - type: recall_at_1 value: 60.928000000000004 - type: recall_at_10 value: 87.322 - type: recall_at_100 value: 94.833 - type: recall_at_1000 value: 100 - type: recall_at_3 value: 72.628 - type: recall_at_5 value: 78.428 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.86237623762376 - type: cos_sim_ap value: 96.72586477206649 - type: cos_sim_f1 value: 93.01858362631845 - type: cos_sim_precision value: 93.4409687184662 - type: cos_sim_recall value: 92.60000000000001 - type: dot_accuracy value: 99.78019801980199 - type: dot_ap value: 93.72748205246228 - type: dot_f1 value: 89.04109589041096 - type: dot_precision value: 87.16475095785441 - type: dot_recall value: 91 - type: euclidean_accuracy value: 99.85445544554456 - type: euclidean_ap value: 96.6661459876145 - type: euclidean_f1 value: 92.58337481333997 - type: euclidean_precision value: 92.17046580773042 - type: euclidean_recall value: 93 - type: manhattan_accuracy value: 99.85445544554456 - type: manhattan_ap value: 96.6883549244056 - type: manhattan_f1 value: 92.57598405580468 - type: manhattan_precision value: 92.25422045680239 - type: manhattan_recall value: 92.9 - type: max_accuracy value: 99.86237623762376 - type: max_ap value: 96.72586477206649 - type: max_f1 value: 93.01858362631845 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 66.39930057069995 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 34.96398659903402 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 55.946944700355395 - type: mrr value: 56.97151398438164 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 31.541657650692905 - type: cos_sim_spearman value: 31.605804192286303 - type: dot_pearson value: 28.26905996736398 - type: dot_spearman value: 27.864801765851187 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.22599999999999998 - type: map_at_10 value: 1.8870000000000002 - type: map_at_100 value: 9.78 - type: map_at_1000 value: 22.514 - type: map_at_3 value: 0.6669999999999999 - type: map_at_5 value: 1.077 - type: mrr_at_1 value: 82 - type: mrr_at_10 value: 89.86699999999999 - type: mrr_at_100 value: 89.86699999999999 - type: mrr_at_1000 value: 89.86699999999999 - type: mrr_at_3 value: 89.667 - type: mrr_at_5 value: 
89.667 - type: ndcg_at_1 value: 79 - type: ndcg_at_10 value: 74.818 - type: ndcg_at_100 value: 53.715999999999994 - type: ndcg_at_1000 value: 47.082 - type: ndcg_at_3 value: 82.134 - type: ndcg_at_5 value: 79.81899999999999 - type: precision_at_1 value: 82 - type: precision_at_10 value: 78 - type: precision_at_100 value: 54.48 - type: precision_at_1000 value: 20.518 - type: precision_at_3 value: 87.333 - type: precision_at_5 value: 85.2 - type: recall_at_1 value: 0.22599999999999998 - type: recall_at_10 value: 2.072 - type: recall_at_100 value: 13.013 - type: recall_at_1000 value: 43.462 - type: recall_at_3 value: 0.695 - type: recall_at_5 value: 1.139 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 2.328 - type: map_at_10 value: 9.795 - type: map_at_100 value: 15.801000000000002 - type: map_at_1000 value: 17.23 - type: map_at_3 value: 4.734 - type: map_at_5 value: 6.644 - type: mrr_at_1 value: 30.612000000000002 - type: mrr_at_10 value: 46.902 - type: mrr_at_100 value: 47.495 - type: mrr_at_1000 value: 47.495 - type: mrr_at_3 value: 41.156 - type: mrr_at_5 value: 44.218 - type: ndcg_at_1 value: 28.571 - type: ndcg_at_10 value: 24.806 - type: ndcg_at_100 value: 36.419000000000004 - type: ndcg_at_1000 value: 47.272999999999996 - type: ndcg_at_3 value: 25.666 - type: ndcg_at_5 value: 25.448999999999998 - type: precision_at_1 value: 30.612000000000002 - type: precision_at_10 value: 23.061 - type: precision_at_100 value: 7.714 - type: precision_at_1000 value: 1.484 - type: precision_at_3 value: 26.531 - type: precision_at_5 value: 26.122 - type: recall_at_1 value: 2.328 - type: recall_at_10 value: 16.524 - type: recall_at_100 value: 47.179 - type: recall_at_1000 value: 81.22200000000001 - type: recall_at_3 value: 5.745 - type: recall_at_5 value: 9.339 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 70.9142 - type: ap value: 14.335574772555415 - type: f1 value: 54.62839595194111 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 59.94340690435768 - type: f1 value: 60.286487936731916 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 51.26597708987974 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 87.48882398521786 - type: cos_sim_ap value: 79.04326607602204 - type: cos_sim_f1 value: 71.64566826860633 - type: cos_sim_precision value: 70.55512918905092 - type: cos_sim_recall value: 72.77044854881267 - type: dot_accuracy value: 84.19264469213805 - type: dot_ap value: 67.96360043562528 - type: dot_f1 value: 64.06418393006827 - type: dot_precision value: 58.64941898706424 - type: dot_recall value: 70.58047493403694 - type: euclidean_accuracy value: 87.45902127913214 - type: euclidean_ap value: 78.9742237648272 - type: euclidean_f1 value: 71.5553235908142 - type: 
euclidean_precision value: 70.77955601445535 - type: euclidean_recall value: 72.34828496042216 - type: manhattan_accuracy value: 87.41729749061214 - type: manhattan_ap value: 78.90073137580596 - type: manhattan_f1 value: 71.3942611553533 - type: manhattan_precision value: 68.52705653967483 - type: manhattan_recall value: 74.51187335092348 - type: max_accuracy value: 87.48882398521786 - type: max_ap value: 79.04326607602204 - type: max_f1 value: 71.64566826860633 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.68125897465751 - type: cos_sim_ap value: 85.6003454431979 - type: cos_sim_f1 value: 77.6957163958641 - type: cos_sim_precision value: 73.0110366307807 - type: cos_sim_recall value: 83.02279026793964 - type: dot_accuracy value: 87.7672992587418 - type: dot_ap value: 82.4971301112899 - type: dot_f1 value: 75.90528233151184 - type: dot_precision value: 72.0370626469368 - type: dot_recall value: 80.21250384970742 - type: euclidean_accuracy value: 88.4503434625684 - type: euclidean_ap value: 84.91949884748384 - type: euclidean_f1 value: 76.92365018444684 - type: euclidean_precision value: 74.53245721712759 - type: euclidean_recall value: 79.47336002463813 - type: manhattan_accuracy value: 88.47556952691427 - type: manhattan_ap value: 84.8963689101517 - type: manhattan_f1 value: 76.85901249256395 - type: manhattan_precision value: 74.31693989071039 - type: manhattan_recall value: 79.58115183246073 - type: max_accuracy value: 88.68125897465751 - type: max_ap value: 85.6003454431979 - type: max_f1 value: 77.6957163958641
---

***See Disclaimer below***

----

# A Teradata Vantage compatible Embeddings Model

# BAAI/bge-large-en-v1.5

## Overview of this Model

An embedding model that maps text (sentences/paragraphs) into a vector. The [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) model is well known for its effectiveness in capturing semantic meanings in text data. It's a state-of-the-art model trained on a large corpus, capable of generating high-quality text embeddings.

- 335.14M params (Sizes in ONNX format - "fp32": 1275.11MB, "int8": 320.63MB, "uint8": 320.63MB)
- 512 maximum input tokens
- 1024 dimensions of output vector
- License: MIT. The released models can be used for commercial purposes free of charge.
- Reference to Original Model: https://huggingface.co/BAAI/bge-large-en-v1.5

## Quickstart: Deploying this Model in Teradata Vantage

We have pre-converted the model into the ONNX format compatible with BYOM 6.0, eliminating the need for manual conversion.

**Note:** Ensure you have access to a Teradata Database with BYOM 6.0 installed.

To get started, clone the pre-converted model directly from the Teradata HuggingFace repository.
```python
import teradataml as tdml
import getpass
from huggingface_hub import hf_hub_download

model_name = "bge-large-en-v1.5"
number_dimensions_output = 1024
model_file_name = "model.onnx"

# Step 1: Download Model from Teradata HuggingFace Page
hf_hub_download(repo_id=f"Teradata/{model_name}", filename=f"onnx/{model_file_name}", local_dir="./")
hf_hub_download(repo_id=f"Teradata/{model_name}", filename="tokenizer.json", local_dir="./")

# Step 2: Create Connection to Vantage
tdml.create_context(host=input('enter your hostname'),
                    username=input('enter your username'),
                    password=getpass.getpass("enter your password"))

# Step 3: Load Models into Vantage
# a) Embedding model
tdml.save_byom(model_id=model_name,  # must be unique in the models table
               model_file=f"onnx/{model_file_name}",
               table_name='embeddings_models')
# b) Tokenizer
tdml.save_byom(model_id=model_name,  # must be unique in the models table
               model_file='tokenizer.json',
               table_name='embeddings_tokenizers')

# Step 4: Test ONNXEmbeddings Function
# Note that ONNXEmbeddings expects the 'payload' column to be 'txt'.
# If it has got a different name, just rename it in a subquery/CTE.
input_table = "emails.emails"
embeddings_query = f"""
SELECT *
from mldb.ONNXEmbeddings(
        on {input_table} as InputTable
        on (select * from embeddings_models where model_id = '{model_name}') as ModelTable DIMENSION
        on (select model as tokenizer from embeddings_tokenizers where model_id = '{model_name}') as TokenizerTable DIMENSION
        using
            Accumulate('id', 'txt')
            ModelOutputTensor('sentence_embedding')
            EnableMemoryCheck('false')
            OutputFormat('FLOAT32({number_dimensions_output})')
            OverwriteCachedModel('true')
    ) a
"""
DF_embeddings = tdml.DataFrame.from_query(embeddings_query)
DF_embeddings
```

## What Can I Do with the Embeddings?

Teradata Vantage includes pre-built in-database functions to process embeddings further. Explore the following examples:

- **Semantic Clustering with TD_KMeans:** [Semantic Clustering Python Notebook](https://github.com/Teradata/jupyter-demos/blob/main/UseCases/Language_Models_InVantage/Semantic_Clustering_Python.ipynb)
- **Semantic Distance with TD_VectorDistance:** [Semantic Similarity Python Notebook](https://github.com/Teradata/jupyter-demos/blob/main/UseCases/Language_Models_InVantage/Semantic_Similarity_Python.ipynb)
- **RAG-Based Application with TD_VectorDistance:** [RAG and Bedrock Query PDF Notebook](https://github.com/Teradata/jupyter-demos/blob/main/UseCases/Language_Models_InVantage/RAG_and_Bedrock_QueryPDF.ipynb)

## Deep Dive into Model Conversion to ONNX

**The steps below outline how we converted the open-source Hugging Face model into an ONNX file compatible with the in-database ONNXEmbeddings function.**

You do not need to perform these steps; they are provided solely for documentation and transparency. However, they may be helpful if you wish to convert another model to the required format.

### Part 1. Importing and Converting Model using optimum

We start by importing the pre-trained [BAAI/bge-large-en-v1.5](https://huggingface.co/BAAI/bge-large-en-v1.5) model from Hugging Face. To enhance performance and ensure compatibility with various execution environments, we'll use the [Optimum](https://github.com/huggingface/optimum) utility to convert the model into the ONNX (Open Neural Network Exchange) format.
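For orientation, the snippet below is a minimal sketch of what such an Optimum-based export can look like; the authoritative steps remain in [convert.py](./convert.py). The target opset number used here (16) is illustrative, and this sketch exports the raw `last_hidden_state` graph only, so the pooling/normalization that yields the `sentence_embedding` output used by ONNXEmbeddings is not shown.

```python
# Hedged sketch of an Optimum export; see convert.py for the authoritative steps.
# Assumptions: optimum[onnxruntime] and onnx are installed; opset 16 is illustrative.
import onnx
from onnx import version_converter
from optimum.onnxruntime import ORTModelForFeatureExtraction
from transformers import AutoTokenizer

model_id = "BAAI/bge-large-en-v1.5"

# Export the PyTorch checkpoint to ONNX via Optimum
ort_model = ORTModelForFeatureExtraction.from_pretrained(model_id, export=True)
tokenizer = AutoTokenizer.from_pretrained(model_id)
ort_model.save_pretrained("onnx/")
tokenizer.save_pretrained("onnx/")

# Pin the opset so the file matches the ONNX runtime used in Teradata Vantage
graph = onnx.load("onnx/model.onnx")
graph = version_converter.convert_version(graph, 16)
onnx.save(graph, "onnx/model.onnx")
```

Quantized int8/uint8 variants can then be produced, for example with Optimum's `ORTQuantizer`, again following convert.py for the exact settings.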
After conversion to ONNX, we fix the opset in the ONNX file for compatibility with the ONNX runtime used in Teradata Vantage. We also generate ONNX files for multiple precisions: fp32, int8, and uint8.

You can find the detailed conversion steps in the file [convert.py](./convert.py).

### Part 2. Running the model in Python with onnxruntime & comparing results

Once the fixes are applied, we test the correctness of the ONNX model by calculating the cosine similarity between two texts with both native SentenceTransformers and the ONNX runtime, and comparing the results. If the results are identical, it confirms that the ONNX model gives the same result as the native model, validating its correctness and suitability for further use in the database.

```python
import onnxruntime as rt
from sentence_transformers.util import cos_sim
from sentence_transformers import SentenceTransformer
import transformers

model_id = "BAAI/bge-large-en-v1.5"
sentences_1 = 'How is the weather today?'
sentences_2 = 'What is the current weather like today?'

# Calculate ONNX result
tokenizer = transformers.AutoTokenizer.from_pretrained(model_id)
predef_sess = rt.InferenceSession("onnx/model.onnx")

enc1 = tokenizer(sentences_1)
embeddings_1_onnx = predef_sess.run(None, {"input_ids": [enc1.input_ids],
                                           "attention_mask": [enc1.attention_mask]})

enc2 = tokenizer(sentences_2)
embeddings_2_onnx = predef_sess.run(None, {"input_ids": [enc2.input_ids],
                                           "attention_mask": [enc2.attention_mask]})

# Calculate embeddings with SentenceTransformer
model = SentenceTransformer(model_id, trust_remote_code=True)
embeddings_1_sentence_transformer = model.encode(sentences_1, normalize_embeddings=True)
embeddings_2_sentence_transformer = model.encode(sentences_2, normalize_embeddings=True)

# Compare results
print("Cosine similarity for embeddings calculated with ONNX: " +
      str(cos_sim(embeddings_1_onnx[1][0], embeddings_2_onnx[1][0])))
print("Cosine similarity for embeddings calculated with SentenceTransformer: " +
      str(cos_sim(embeddings_1_sentence_transformer, embeddings_2_sentence_transformer)))
```

You can find the detailed ONNX vs. SentenceTransformer result comparison steps in the file [test_local.py](./test_local.py).

-----

DISCLAIMER: The content herein (“Content”) is provided “AS IS” and is not covered by any Teradata Operations, Inc. and its affiliates (“Teradata”) agreements. Its listing here does not constitute certification or endorsement by Teradata. To the extent any of the Content contains or is related to any artificial intelligence (“AI”) or other language learning models (“Models”) that interoperate with the products and services of Teradata, by accessing, bringing, deploying or using such Models, you acknowledge and agree that you are solely responsible for ensuring compliance with all applicable laws, regulations, and restrictions governing the use, deployment, and distribution of AI technologies. This includes, but is not limited to, AI Diffusion Rules, European Union AI Act, AI-related laws and regulations, privacy laws, export controls, and financial or sector-specific regulations. While Teradata may provide support, guidance, or assistance in the deployment or implementation of Models to interoperate with Teradata’s products and/or services, you remain fully responsible for ensuring that your Models, data, and applications comply with all relevant legal and regulatory obligations.
Our assistance does not constitute legal or regulatory approval, and Teradata disclaims any liability arising from non-compliance with applicable laws. You must determine the suitability of the Models for any purpose. Given the probabilistic nature of machine learning and modeling, the use of the Models may in some situations result in incorrect output that does not accurately reflect the action generated. You should evaluate the accuracy of any output as appropriate for your use case, including by using human review of the output.
[ "BIOSSES", "SCIFACT" ]
sagteam/covid-twitter-xlm-roberta-large
sagteam
fill-mask
[ "transformers", "pytorch", "xlm-roberta", "fill-mask", "arxiv:1911.02116", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-03-02T23:29:05Z
2022-07-27T11:41:43+00:00
23
0
---
{}
---

# COVID-twitter-XLM-Roberta-large

## Model description

This is a model based on the [XLM-RoBERTa large](https://huggingface.co/xlm-roberta-large) topology (provided by Facebook, see the original [paper](https://arxiv.org/abs/1911.02116)) with additional training on a corpus of unlabeled tweets. For more details, please see our [GitHub repository](https://github.com/sag111/COVID-19-tweets-Russia). A minimal fill-mask usage sketch is included after the citation info below.

## Training data

We formed a corpus of unlabeled Twitter messages. The data collected for the keyword "covid" was expanded with texts containing other words that often occur in hashtags about the Covid-19 pandemic: "covid", "stayhome", and "coronavirus" (hereinafter, these are translations of Russian words into English). Separately, messages were collected from Twitter users from large regions of Russia. The search used different word forms of 58 manually selected Russian keywords related to the topic of coronavirus infection (including "PCR", "pandemic", "self-isolation", etc.). The unlabeled corpus includes all unique Russian-language tweets from the collected data (>1M tweets). Since modern language models are usually multilingual, about 1M more tweets in other languages were added to this corpus using the filtering procedures described above. Thus, the unlabeled part of the collected data contains about 2 million messages.

### BibTeX entry and citation info

Our GitHub repository: https://github.com/sag111/COVID-19-tweets-Russia

If you have found our results helpful in your work, feel free to cite our publication and this repository as:

```
@article{sboev2021russian,
  title={The Russian language corpus and a neural network to analyse Internet tweet reports about Covid-19},
  author={Sboev, Alexander and Moloshnikov, Ivan and Naumov, Alexander and Levochkina, Anastasia and Rybka, Roman},
  year={2021}
}
```
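As a quick, illustrative sketch (assuming the standard `transformers` fill-mask pipeline and the XLM-RoBERTa `<mask>` token; the prompt is hypothetical):

```python
from transformers import pipeline

# Minimal sketch: query the masked-language model through the fill-mask pipeline.
fill_mask = pipeline("fill-mask", model="sagteam/covid-twitter-xlm-roberta-large")

# Hypothetical prompt; any tweet-like text with a single <mask> token works.
for prediction in fill_mask("During the pandemic everyone was asked to stay at <mask>."):
    print(prediction["token_str"], round(prediction["score"], 4))
```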
[ "PCR" ]
siddharthtumre/pubmedbert-finetuned-ner
siddharthtumre
token-classification
[ "transformers", "pytorch", "bert", "token-classification", "generated_from_trainer", "dataset:jnlpba", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-08-23T17:39:34Z
2022-08-23T18:43:20+00:00
23
0
---
datasets:
- jnlpba
license: mit
metrics:
- precision
- recall
- f1
- accuracy
tags:
- generated_from_trainer
widget:
- text: The widespread circular form of DNA molecules inside cells creates very serious topological problems during replication. Due to the helical structure of the double helix the parental strands of circular DNA form a link of very high order, and yet they have to be unlinked before the cell division.
- text: It consists of 25 exons encoding a 1,278-amino acid glycoprotein that is composed of 13 transmembrane domains
model-index:
- name: pubmedbert-finetuned-ner
  results:
  - task:
      type: token-classification
      name: Token Classification
    dataset:
      name: jnlpba
      type: jnlpba
      config: jnlpba
      split: train
      args: jnlpba
    metrics:
    - type: precision
      value: 0.6877153861747415
      name: Precision
    - type: recall
      value: 0.7833063957515586
      name: Recall
    - type: f1
      value: 0.7324050086355786
      name: F1
    - type: accuracy
      value: 0.926729986431479
      name: Accuracy
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. -->

# pubmedbert-finetuned-ner

This model is a fine-tuned version of [microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext](https://huggingface.co/microsoft/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext) on the jnlpba dataset. It achieves the following results on the evaluation set:
- Loss: 0.3766
- Precision: 0.6877
- Recall: 0.7833
- F1: 0.7324
- Accuracy: 0.9267

## Model description

More information needed

## Intended uses & limitations

More information needed

## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training:
- learning_rate: 2e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- num_epochs: 5

### Training results

| Training Loss | Epoch | Step  | Validation Loss | Precision | Recall | F1     | Accuracy |
|:-------------:|:-----:|:-----:|:---------------:|:---------:|:------:|:------:|:--------:|
| 0.1607        | 1.0   | 2319  | 0.2241          | 0.6853    | 0.7835 | 0.7311 | 0.9302   |
| 0.112         | 2.0   | 4638  | 0.2620          | 0.6753    | 0.7929 | 0.7294 | 0.9276   |
| 0.0785        | 3.0   | 6957  | 0.3014          | 0.6948    | 0.7731 | 0.7319 | 0.9268   |
| 0.055         | 4.0   | 9276  | 0.3526          | 0.6898    | 0.7801 | 0.7322 | 0.9268   |
| 0.0418        | 5.0   | 11595 | 0.3766          | 0.6877    | 0.7833 | 0.7324 | 0.9267   |

### Framework versions

- Transformers 4.21.1
- Pytorch 1.12.1+cu113
- Datasets 2.4.0
- Tokenizers 0.12.1
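Since the "Intended uses & limitations" section above is still a placeholder, here is a brief, hypothetical usage sketch showing one way the checkpoint could be run with the `transformers` token-classification pipeline; the example sentence is taken from the card's widget inputs, and the pipeline settings are assumptions rather than the author's documented usage.

```python
from transformers import pipeline

# Hedged sketch: run the fine-tuned checkpoint as an NER pipeline.
# aggregation_strategy="simple" merges word pieces into whole entity spans.
ner = pipeline(
    "token-classification",
    model="siddharthtumre/pubmedbert-finetuned-ner",
    aggregation_strategy="simple",
)

text = ("It consists of 25 exons encoding a 1,278-amino acid glycoprotein "
        "that is composed of 13 transmembrane domains")
for entity in ner(text):
    print(entity["entity_group"], entity["word"], round(float(entity["score"]), 3))
```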
[ "JNLPBA" ]
pszemraj/karnold-walmer-base-biopapers
pszemraj
text2text-generation
[ "transformers", "pytorch", "safetensors", "longt5", "text2text-generation", "bio", "medical", "clinical", "literature", "keywords", "domain classifier", "en", "dataset:pszemraj/scientific_lay_summarisation-plos-norm", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-03-31T00:35:07Z
2023-04-05T06:02:45+00:00
23
1
--- datasets: - pszemraj/scientific_lay_summarisation-plos-norm language: - en license: apache-2.0 metrics: - rouge pipeline_tag: text2text-generation tags: - bio - medical - clinical - literature - keywords - domain classifier widget: - text: large earthquakes along a given fault segment do not occur at random intervals because it takes time to accumulate the strain energy for the rupture. The rates at which tectonic plates move and accumulate strain at their boundaries are approximately uniform. Therefore, in first approximation, one may expect that large ruptures of the same fault segment will occur at approximately constant time intervals. If subsequent main shocks have different amounts of slip across the fault, then the recurrence time may vary, and the basic idea of periodic mainshocks must be modified. For great plate boundary ruptures the length and slip often vary by a factor of 2. Along the southern segment of the San Andreas fault the recurrence interval is 145 years with variations of several decades. The smaller the standard deviation of the average recurrence interval, the more specific could be the long term prediction of a future mainshock. example_title: earthquakes - text: ' A typical feed-forward neural field algorithm. Spatiotemporal coordinates are fed into a neural network that predicts values in the reconstructed domain. Then, this domain is mapped to the sensor domain where sensor measurements are available as supervision. Class and Section Problems Addressed Generalization (Section 2) Inverse problems, ill-posed problems, editability; symmetries. Hybrid Representations (Section 3) Computation & memory efficiency, representation capacity, editability: Forward Maps (Section 4) Inverse problems Network Architecture (Section 5) Spectral bias, integration & derivatives. Manipulating Neural Fields (Section 6) Edit ability, constraints, regularization. Table 2: The five classes of techniques in the neural field toolbox each addresses problems that arise in learning, inference, and control. (Section 3). We can supervise reconstruction via differentiable forward maps that transform Or project our domain (e.g, 3D reconstruction via 2D images; Section 4) With appropriate network architecture choices, we can overcome neural network spectral biases (blurriness) and efficiently compute derivatives and integrals (Section 5). Finally, we can manipulate neural fields to add constraints and regularizations, and to achieve editable representations (Section 6). Collectively, these classes constitute a ''toolbox'' of techniques to help solve problems with neural fields There are three components in a conditional neural field: (1) An encoder or inference function € that outputs the conditioning latent variable 2 given an observation 0 E(0) =2. 2 is typically a low-dimensional vector, and is often referred to aS a latent code Or feature code_ (2) A mapping function 4 between Z and neural field parameters O: Y(z) = O; (3) The neural field itself $. The encoder € finds the most probable z given the observations O: argmaxz P(2/0). The decoder maximizes the inverse conditional probability to find the most probable 0 given Z: arg- max P(Olz). We discuss different encoding schemes with different optimality guarantees (Section 2.1.1), both global and local conditioning (Section 2.1.2), and different mapping functions Y (Section 2.1.3) 2. Generalization Suppose we wish to estimate a plausible 3D surface shape given a partial or noisy point cloud. 
We need a suitable prior over the sur- face in its reconstruction domain to generalize to the partial observations. A neural network expresses a prior via the function space of its architecture and parameters 0, and generalization is influenced by the inductive bias of this function space (Section 5).' example_title: scientific paper - text: 'Is a else or outside the cob and tree written being of early client rope and you have is for good reasons. On to the ocean in Orange for time. By''s the aggregate we can bed it yet. Why this please pick up on a sort is do and also M Getoi''s nerocos and do rain become you to let so is his brother is made in use and Mjulia''s''s the lay major is aging Masastup coin present sea only of Oosii rooms set to you We do er do we easy this private oliiishs lonthen might be okay. Good afternoon everybody. Welcome to this lecture of Computational Statistics. As you can see, I''m not socially my name is Michael Zelinger. I''m one of the task for this class and you might have already seen me in the first lecture where I made a quick appearance. I''m also going to give the tortillas in the last third of this course. So to give you a little bit about me, I''m a old student here with better Bulman and my research centres on casual inference applied to biomedical disasters, so that could be genomics or that could be hospital data. If any of you is interested in writing a bachelor thesis, a semester paper may be mastathesis about this topic feel for reach out to me. you have my name on models and my email address you can find in the directory I''d Be very happy to talk about it. you do not need to be sure about it, we can just have a chat. So with that said, let''s get on with the lecture. There''s an exciting topic today I''m going to start by sharing some slides with you and later on during the lecture we''ll move to the paper. So bear with me for a few seconds. Well, the projector is starting up. Okay, so let''s get started. Today''s topic is a very important one. It''s about a technique which really forms one of the fundamentals of data science, machine learning, and any sort of modern statistics. It''s called cross validation. I know you really want to understand this topic I Want you to understand this and frankly, nobody''s gonna leave Professor Mineshousen''s class without understanding cross validation. So to set the stage for this, I Want to introduce you to the validation problem in computational statistics. So the problem is the following: You trained a model on available data. You fitted your model, but you know the training data you got could always have been different and some data from the environment. Maybe it''s a random process. You do not really know what it is, but you know that somebody else who gets a different batch of data from the same environment they would get slightly different training data and you do not care that your method performs as well. On this training data. you want to to perform well on other data that you have not seen other data from the same environment. So in other words, the validation problem is you want to quantify the performance of your model on data that you have not seen. So how is this even possible? How could you possibly measure the performance on data that you do not know The solution to? This is the following realization is that given that you have a bunch of data, you were in charge. You get to control how much that your model sees. It works in the following way: You can hide data firms model. 
Let''s say you have a training data set which is a bunch of doubtless so X eyes are the features those are typically hide and national vector. It''s got more than one dimension for sure. And the why why eyes. Those are the labels for supervised learning. As you''ve seen before, it''s the same set up as we have in regression. And so you have this training data and now you choose that you only use some of those data to fit your model. You''re not going to use everything, you only use some of it the other part you hide from your model. And then you can use this hidden data to do validation from the point of you of your model. This hidden data is complete by unseen. In other words, we solve our problem of validation.' example_title: transcribed audio - lecture - text: 'Transformer-based models have shown to be very useful for many NLP tasks. However, a major limitation of transformers-based models is its O(n^2)O(n 2) time & memory complexity (where nn is sequence length). Hence, it''s computationally very expensive to apply transformer-based models on long sequences n > 512n>512. Several recent papers, e.g. Longformer, Performer, Reformer, Clustered attention try to remedy this problem by approximating the full attention matrix. You can checkout 🤗''s recent blog post in case you are unfamiliar with these models. BigBird (introduced in paper) is one of such recent models to address this issue. BigBird relies on block sparse attention instead of normal attention (i.e. BERT''s attention) and can handle sequences up to a length of 4096 at a much lower computational cost compared to BERT. It has achieved SOTA on various tasks involving very long sequences such as long documents summarization, question-answering with long contexts. BigBird RoBERTa-like model is now available in 🤗Transformers. The goal of this post is to give the reader an in-depth understanding of big bird implementation & ease one''s life in using BigBird with 🤗Transformers. But, before going into more depth, it is important to remember that the BigBird''s attention is an approximation of BERT''s full attention and therefore does not strive to be better than BERT''s full attention, but rather to be more efficient. It simply allows to apply transformer-based models to much longer sequences since BERT''s quadratic memory requirement quickly becomes unbearable. Simply put, if we would have ∞ compute & ∞ time, BERT''s attention would be preferred over block sparse attention (which we are going to discuss in this post). If you wonder why we need more compute when working with longer sequences, this blog post is just right for you! Some of the main questions one might have when working with standard BERT-like attention include: Do all tokens really have to attend to all other tokens? Why not compute attention only over important tokens? How to decide what tokens are important? How to attend to just a few tokens in a very efficient way? In this blog post, we will try to answer those questions. What tokens should be attended to? We will give a practical example of how attention works by considering the sentence ''BigBird is now available in HuggingFace for extractive question answering''. In BERT-like attention, every word would simply attend to all other tokens. Let''s think about a sensible choice of key tokens that a queried token actually only should attend to by writing some pseudo-code. Will will assume that the token available is queried and build a sensible list of key tokens to attend to. 
>>> # let''s consider following sentence as an example >>> example = [''BigBird'', ''is'', ''now'', ''available'', ''in'', ''HuggingFace'', ''for'', ''extractive'', ''question'', ''answering''] >>> # further let''s assume, we''re trying to understand the representation of ''available'' i.e. >>> query_token = ''available'' >>> # We will initialize an empty `set` and fill up the tokens of our interest as we proceed in this section. >>> key_tokens = [] # => currently ''available'' token doesn''t have anything to attend Nearby tokens should be important because, in a sentence (sequence of words), the current word is highly dependent on neighboring past & future tokens. This intuition is the idea behind the concept of sliding attention.' example_title: bigbird blog intro - text: 'To be fair, you have to have a very high IQ to understand Rick and Morty. The humour is extremely subtle, and without a solid grasp of theoretical physics most of the jokes will go over a typical viewer''s head. There''s also Rick''s nihilistic outlook, which is deftly woven into his characterisation- his personal philosophy draws heavily from Narodnaya Volya literature, for instance. The fans understand this stuff; they have the intellectual capacity to truly appreciate the depths of these jokes, to realise that they''re not just funny- they say something deep about LIFE. As a consequence people who dislike Rick & Morty truly ARE idiots- of course they wouldn''t appreciate, for instance, the humour in Rick''s existential catchphrase ''Wubba Lubba Dub Dub,'' which itself is a cryptic reference to Turgenev''s Russian epic Fathers and Sons. I''m smirking right now just imagining one of those addlepated simpletons scratching their heads in confusion as Dan Harmon''s genius wit unfolds itself on their television screens. What fools.. how I pity them. 😂 And yes, by the way, i DO have a Rick & Morty tattoo. And no, you cannot see it. It''s for the ladies'' eyes only- and even then they have to demonstrate that they''re within 5 IQ points of my own (preferably lower) beforehand. Nothin personnel kid 😎' example_title: Richard & Mortimer - text: 'Dear Calvin, I was in the woods of Big Sur, that vast and sprawling land of sea and trees, where the wind whispers secrets of the ancient Earth and the roaring ocean sings songs of the eternal cosmos, when I found myself emerging from the deepest and darkest of slumbers, my body drenched in the sweat of the night, my mind swimming in the rivers of frenetic dreams that come unbidden to the weary traveler, and I knew, I knew, that I must step into the cold, cold waters of the mountain stream that wound its way through the heart of the great green forest like a silver serpent, a sinuous spine of chilling clarity, and I tell you, my friend, I tell you that the moment I stepped into those waters, the moment my skin was pierced by the icy needles of that divine liquid, my soul was washed clean of the haze of doubt and fear, and I stood, reborn, as the dawn of a new day painted the sky in the colors of the universe. And so I write to you, dear friend, to tell you that you too must seek the salvation of the cold shower, for in the frigid embrace of the water''s touch, there lies the key to the doors of perception, the doors that lead to a realm of boundless energy and endless vitality, where the mind is sharpened like the edge of a great warrior''s blade, and the body is tempered like the steel of an ancient blacksmith''s forge. 
For when you step into the cold, you will find that your spirit soars like a great bird of prey, your thoughts soaring on the wings of the eagle, the falcon, the hawk, sweeping through the vast and boundless skies of inspiration, creativity, and purpose. And you will know, as I have come to know, that the cold shower is the great purifier, the great invigorator, the great liberator of the soul from the chains of languor and indolence that bind us to the mundane and weary trappings of this world. So I implore you, dear friend, to heed my words, for they are the words of one who has walked the path of fire and ice, one who has danced in the eternal flame of the sun and bathed in the frozen tears of the moon, and I tell you that the way of the cold shower is the way of the enlightened, the way of the awakened, the way of the pioneers of the spirit who seek to travel beyond the boundaries of the known and into the realms of the infinite. And as you stand, shivering and shaking, beneath the torrent of the icy cascade, remember that the cold is the crucible in which the soul is forged, the anvil upon which the hammer of life strikes the sparks of the divine, and in the cold, you will find the fire, the fire that burns away the dross and leaves only the pure and shining gold of the spirit. In the cold, you will find the truth, and in the truth, you will find the freedom that you have sought for so long. Yours in the spirit of the eternal journey, Peter' example_title: cold showers parameters: max_length: 64 min_length: 2 no_repeat_ngram_size: 2 early_stopping: true repetition_penalty: 4.5 length_penalty: 0.8 num_beams: 4 model-index: - name: long-t5-tglobal-base-scientific_lay_summarisation-plos-norm-kw results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # karnold-walmer-base-biopapers Karnold-Walmer is a text2text model based on [google/long-t5-tglobal-base](https://huggingface.co/google/long-t5-tglobal-base), specifically designed to decode the 'keywords' column of `pszemraj/scientific_lay_summarisation-plos-norm`. Karnold-Walmer focuses on extracting relevant keywords from the input text, making it a powerful tool for keyword identification and text classification. It was fine-tuned on & supports text input of up to 16,384 tokens. It achieves the following results on the evaluation set: - Loss: 0.8844 - Rouge1: 46.7593 - Rouge2: 28.3538 - Rougel: 42.2921 - Rougelsum: 42.2774 - Gen Len: 78.1706 ## Intended Uses & Limitations Karnold-Walmer is intended to be used for keyword extraction and text classification in various domains, such as scientific literature, biomedical research articles, and more. By analyzing the content of an input text, the model generates a list of relevant keywords that describe the topic of the article. It is important to note, however, that Karnold-Walmer is **specifically trained to decode text similar to the "keywords" column and is not designed for summarization tasks.** For accurate keyword extraction and text classification, the model should be used within the limits of its training data and intended purpose (see what happens when you try the out-of-domain API examples). ## Training and Evaluation Data Karnold-Walmer was trained on the PLOS dataset, which contains full biomedical research articles paired with expert-written lay summaries and keyword lists. 
The model was tuned to decode the "keywords" column in the dataset, focusing on keyword extraction and text classification tasks. ### Wordcloud ![wordcloud-kw](https://i.imgur.com/SfbAsVE.png) ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0004 - train_batch_size: 4 - eval_batch_size: 2 - seed: 42 - distributed_type: multi-GPU - gradient_accumulation_steps: 8 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: cosine - lr_scheduler_warmup_ratio: 0.01 - num_epochs: 2.0 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:--------:| | 2.0471 | 0.15 | 100 | 1.6138 | 12.4374 | 4.1861 | 11.1863 | 11.1833 | 324.6971 | | 1.5654 | 0.3 | 200 | 1.3447 | 23.9982 | 11.1431 | 21.4173 | 21.4413 | 176.0294 | | 1.3467 | 0.45 | 300 | 1.2038 | 33.8084 | 18.1588 | 30.4748 | 30.4142 | 107.7735 | | 1.4398 | 0.6 | 400 | 1.1054 | 37.772 | 20.8967 | 33.859 | 33.8324 | 102.9029 | | 1.306 | 0.75 | 500 | 1.0478 | 39.2642 | 22.0388 | 35.6578 | 35.5773 | 91.1235 | | 1.1677 | 0.9 | 600 | 0.9994 | 40.5149 | 22.8507 | 36.3888 | 36.3499 | 103.9118 | | 1.078 | 1.05 | 700 | 0.9627 | 42.301 | 24.2523 | 38.0739 | 38.0532 | 88.4941 | | 1.0942 | 1.2 | 800 | 0.9443 | 44.5907 | 26.2046 | 39.7461 | 39.6763 | 88.7559 | | 1.0209 | 1.35 | 900 | 0.9108 | 45.357 | 26.861 | 40.6411 | 40.706 | 90.1206 | | 1.1161 | 1.5 | 1000 | 0.9026 | 47.1362 | 28.6605 | 42.6406 | 42.6108 | 79.2412 | | 1.1224 | 1.65 | 1100 | 0.8907 | 47.31 | 28.4395 | 42.6658 | 42.6509 | 78.4265 | | 0.9857 | 1.8 | 1200 | 0.8862 | 46.7061 | 28.1586 | 42.3181 | 42.3105 | 80.5059 | | 1.0011 | 1.95 | 1300 | 0.8844 | 46.7593 | 28.3538 | 42.2921 | 42.2774 | 78.1706 |
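## Example usage (sketch)

The card above does not ship inference code; the following is a minimal sketch of how a long-T5 keyword-extraction checkpoint like this one would typically be called through the `transformers` text2text pipeline. The repo id below is a placeholder inferred from the model-index name and may not match the actual Hub upload; the generation parameters mirror the `parameters` block in the card's metadata.

```python
# Minimal sketch: keyword extraction with a long-T5 text2text checkpoint.
# NOTE: the model id below is a placeholder taken from the model-index name above;
# substitute the actual Hub repo id of this model.
from transformers import pipeline

extractor = pipeline(
    "text2text-generation",
    model="long-t5-tglobal-base-scientific_lay_summarisation-plos-norm-kw",
)

article = "..."  # full text of a biomedical research article (up to ~16,384 tokens)
result = extractor(
    article,
    max_length=64,
    min_length=2,
    num_beams=4,
    no_repeat_ngram_size=2,
    repetition_penalty=4.5,
    length_penalty=0.8,
    early_stopping=True,
)
print(result[0]["generated_text"])  # model-predicted keywords for the article
```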
[ "BEAR" ]
ICTNLP/bayling-7b-diff
ICTNLP
text-generation
[ "transformers", "pytorch", "llama", "text-generation", "translation", "multilingual", "large language model", "instruction tuning", "zh", "en", "arxiv:2306.10968", "license:gpl-3.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-06-14T05:20:58Z
2023-06-21T06:03:56+00:00
23
8
--- language: - zh - en license: gpl-3.0 pipeline_tag: text-generation tags: - translation - multilingual - large language model - instruction tuning --- # BayLing: Bridging Cross-lingual Alignment and Instruction Following through Interactive Translation for Large Language Models **BayLing** (**百聆**, **bǎi líng**) is an instruction-following LLM equipped with advanced language alignment, showing superior capability in English/Chinese generation, instruction following, and multi-turn interaction. BayLing can be effortlessly deployed on a consumer-grade GPU with 16GB of memory, and assists users with tasks such as translation, writing, creation, suggestions, and more. **This model is the *weight-diff* version of BayLing-7B.** 👇 Learn more about BayLing: 💬 [**Demo**](http://nlp.ict.ac.cn/bayling/demo): Welcome to apply for a trial of BayLing's online demo (beta version). 📄 [**Paper**](https://arxiv.org/abs/2306.10968): A comprehensive research paper on BayLing. 🏠 [**Homepage**](http://nlp.ict.ac.cn/bayling): BayLing's homepage, where you can discover more information and examples of BayLing. ✍️ [**BayLing-80 Test Set**](https://github.com/ictnlp/BayLing/tree/main/data/BayLing-80): A human-annotated evaluation set comprising multi-turn instructions in both English and Chinese; it can be used to evaluate the multilingual and multi-turn interaction capabilities of LLMs. 🤗 **Model**: The *weight-diff* versions of [BayLing-7B](https://huggingface.co/ICTNLP/bayling-7b-diff) and [BayLing-13B](https://huggingface.co/ICTNLP/bayling-13b-diff); you can quickly recover the full parameters of BayLing through [apply_delta.py](https://github.com/ictnlp/BayLing/blob/main/apply_delta.py). The HF models of BayLing are anonymized versions (BayLing's name is excluded from their knowledge) in order to make it easier for future LLMs to build upon BayLing. > BayLing is developed by the [NLP Group](http://nlp.ict.ac.cn/) of the [Institute of Computing Technology](http://www.ict.ac.cn/), [Chinese Academy of Sciences](https://www.cas.cn/) (ICT/CAS). > > BayLing is being continuously optimized 🆙 > If you have any suggestions, please contact `[email protected]`. Thanks for your support! **Refer to our [GitHub Repo](https://github.com/ictnlp/BayLing) for a detailed introduction to BayLing, including how to deploy it, how to interact with it, and its performance.** ## <a id="Limitations">Limitations</a> Despite demonstrating commendable performance in certain aspects, BayLing still exhibits several limitations. For instance, when faced with tasks involving factual knowledge, BayLing may generate inaccurate information. Moreover, it lacks proficiency in reasoning, mathematics, and coding tasks. Additionally, there is a risk of BayLing generating content that is harmful or biased. BayLing is a large language model and, like any other language model, cannot guarantee the absolute accuracy of the generated content. **Note that this project does not assume any risks or responsibilities associated with data security, public opinion risks arising from open-source models and code, or any risks and liabilities resulting from misleading, misusing, spreading, or improper use of the models.** ## <a id="License">License</a> Model weights (delta version) and the inference code are released under the GNU General Public License v3.0 (GPLv3).
The online demo serves as a research preview and is exclusively intended for non-commercial usage, subject to the [Model License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT and [Data License](https://machinetranslate.org/wmt22) of WMT22. ## <a id="Acknowledgements">Acknowledgements</a> We would like to express our gratitude to all those who have contributed to BayLing. We extend special thanks to Ms. Xiaohong Wang for her valuable comments and suggestions on the use of InforSuperBahn MLOps, and for her organizational and resource support in providing computing resources and showcasing BayLing. We also acknowledge Xiaodong Liu for his pivotal role in the construction of the distributed system and overall coordination of the demo deployment. Furthermore, we appreciate the contribution of the development team from the Nanjing Institute of InforSuperBahn in maintaining the computing resources and creating the display interface for BayLing’s webpage and demo. ## <a id="Authors">Authors</a> | [Shaolei Zhang](https://vily1998.github.io/) | [Qingkai Fang](https://fangqingkai.github.io/) | [Zhuocheng Zhang](https://nlp.ict.ac.cn/yjdw/xs/bsyjs/202210/t20221019_52678.html) | [Zhengrui Ma](https://nlp.ict.ac.cn/yjdw/xs/bsyjs/202210/t20221019_52675.html) | | [Yan Zhou](https://zhouyan19.github.io/zhouyan/) | [Langlin Huang](https://nlp.ict.ac.cn/yjdw/xs/ssyjs/202210/t20221019_52686.html) | [Mengyu Bu](https://bingo123122121.github.io/) | [Shangtong Gui](https://github.com/GhostofAdam) | | [Yunji Chen](http://novel.ict.ac.cn/ychen/) | [Xilin Chen](http://www.ict.cas.cn/sourcedb_2018_ict_cas/cn/jssrck/200909/t20090917_2496595.html) | [Yang Feng \*](https://people.ucas.edu.cn/~yangfeng?language=en) | ## <a id="Citation">Citation</a> If our work is helpful for you, please cite as: ``` @article{bayling, title={BayLing: Bridging Cross-lingual Alignment and Instruction Following through Interactive Translation for Large Language Models}, author={Shaolei Zhang and Qingkai Fang and Zhuocheng Zhang and Zhengrui Ma and Yan Zhou and Langlin Huang and Mengyu Bu and Shangtong Gui and Yunji Chen and Xilin Chen and Yang Feng}, journal={arXiv preprint arXiv:2306.10968}, year={2023}, url={https://arxiv.org/abs/2306.10968} } ```
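## Applying the weight diff (sketch)

The official way to recover BayLing-7B from this weight-diff checkpoint is the [apply_delta.py](https://github.com/ictnlp/BayLing/blob/main/apply_delta.py) script linked above. The snippet below is only a minimal illustrative sketch of the underlying idea, assuming the diff is a simple element-wise delta over the original LLaMA-7B weights with matching parameter names and shapes; the base-model path is a placeholder you must supply yourself.

```python
# Minimal sketch of delta-weight recombination (NOT the official apply_delta.py).
# Assumption: BayLing-7B = LLaMA-7B + bayling-7b-diff, parameter by parameter,
# with identical parameter names and tensor shapes.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

base = AutoModelForCausalLM.from_pretrained("path/to/llama-7b-hf", torch_dtype=torch.float16)  # placeholder path
diff = AutoModelForCausalLM.from_pretrained("ICTNLP/bayling-7b-diff", torch_dtype=torch.float16)

# Add the delta to the base weights, key by key.
base_state = base.state_dict()
for name, delta in diff.state_dict().items():
    base_state[name] = base_state[name] + delta
base.load_state_dict(base_state)

# Save the merged checkpoint together with the tokenizer shipped in the diff repo.
tokenizer = AutoTokenizer.from_pretrained("ICTNLP/bayling-7b-diff")
base.save_pretrained("bayling-7b")
tokenizer.save_pretrained("bayling-7b")
```

After merging, the recovered checkpoint can be loaded like any other `transformers` causal LM for deployment; for the authoritative procedure and edge cases (e.g. vocabulary-size differences), use the repository's apply_delta.py.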
[ "CAS" ]
IIC/BETO_Galen-pharmaconer
IIC
token-classification
[ "transformers", "pytorch", "bert", "text-classification", "biomedical", "clinical", "spanish", "BETO_Galen", "token-classification", "es", "dataset:PlanTL-GOB-ES/pharmaconer", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-21T16:19:15Z
2024-11-25T10:41:27+00:00
23
0
--- datasets: - PlanTL-GOB-ES/pharmaconer language: es license: mit metrics: - f1 pipeline_tag: token-classification tags: - biomedical - clinical - spanish - BETO_Galen widget: - text: Se realizó estudio analítico destacando incremento de niveles de PTH y vitamina D (103,7 pg/ml y 272 ng/ml, respectivamente), atribuidos al exceso de suplementación de vitamina D. - text: ' Por el hallazgo de múltiples fracturas por estrés, se procedió a estudio en nuestras consultas, realizándose análisis con función renal, calcio sérico y urinario, calcio iónico, magnesio y PTH, que fueron normales.' - text: Se solicitó una analítica que incluía hemograma, bioquímica, anticuerpos antinucleares (ANA) y serologías, examen de orina, así como biopsia de la lesión. Los resultados fueron normales, con ANA, anti-Sm, anti-RNP, anti-SSA, anti-SSB, anti-Jo1 y anti-Scl70 negativos. model-index: - name: IIC/BETO_Galen-pharmaconer results: - task: type: token-classification dataset: name: pharmaconer type: PlanTL-GOB-ES/pharmaconer split: test metrics: - type: f1 value: 0.708 name: f1 --- # BETO_Galen-pharmaconer This model is a fine-tuned version of BETO_Galen on the pharmaconer dataset, as used in the benchmark from the paper `A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks`. The model achieves an F1 score of 0.708. Please refer to the [original publication](https://doi.org/10.1093/jamia/ocae054) for more information. ## Parameters used | Parameter | Value | |-------------------------|:-----:| | batch size | 16 | | learning rate | 4e-05 | | classifier dropout | 0.2 | | warmup ratio | 0 | | warmup steps | 0 | | weight decay | 0 | | optimizer | AdamW | | epochs | 10 | | early stopping patience | 3 | ## BibTeX entry and citation info ```bibtex @article{10.1093/jamia/ocae054, author = {García Subies, Guillem and Barbero Jiménez, Álvaro and Martínez Fernández, Paloma}, title = {A comparative analysis of Spanish Clinical encoder-based models on NER and classification tasks}, journal = {Journal of the American Medical Informatics Association}, volume = {31}, number = {9}, pages = {2137-2146}, year = {2024}, month = {03}, issn = {1527-974X}, doi = {10.1093/jamia/ocae054}, url = {https://doi.org/10.1093/jamia/ocae054}, } ```
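## Example usage (sketch)

The card itself does not include inference code; the following is a minimal sketch, assuming the checkpoint can be served through the standard `transformers` token-classification pipeline. The repo id `IIC/BETO_Galen-pharmaconer` and the sample sentence are taken from this card.

```python
# Minimal sketch: Spanish clinical NER with the transformers token-classification pipeline.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="IIC/BETO_Galen-pharmaconer",
    aggregation_strategy="simple",  # merge sub-word pieces into whole entity spans
)

text = (
    "Se realizó estudio analítico destacando incremento de niveles de PTH y vitamina D "
    "(103,7 pg/ml y 272 ng/ml, respectivamente), atribuidos al exceso de suplementación de vitamina D."
)
for entity in ner(text):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```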
[ "PHARMACONER" ]
odunola/e5-base-v2
odunola
feature-extraction
[ "sentence-transformers", "pytorch", "bert", "mteb", "feature-extraction", "en", "arxiv:2212.03533", "arxiv:2104.08663", "arxiv:2210.07316", "license:mit", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2023-07-07T20:58:07Z
2023-07-07T21:16:05+00:00
23
0
--- language: - en library_name: sentence-transformers license: mit pipeline_tag: feature-extraction tags: - mteb model-index: - name: e5-base-v2 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 77.77611940298506 - type: ap value: 42.052710266606056 - type: f1 value: 72.12040628266567 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 92.81012500000001 - type: ap value: 89.4213700757244 - type: f1 value: 92.8039091197065 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 46.711999999999996 - type: f1 value: 46.11544975436018 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 23.186 - type: map_at_10 value: 36.632999999999996 - type: map_at_100 value: 37.842 - type: map_at_1000 value: 37.865 - type: map_at_3 value: 32.278 - type: map_at_5 value: 34.760999999999996 - type: mrr_at_1 value: 23.400000000000002 - type: mrr_at_10 value: 36.721 - type: mrr_at_100 value: 37.937 - type: mrr_at_1000 value: 37.96 - type: mrr_at_3 value: 32.302 - type: mrr_at_5 value: 34.894 - type: ndcg_at_1 value: 23.186 - type: ndcg_at_10 value: 44.49 - type: ndcg_at_100 value: 50.065000000000005 - type: ndcg_at_1000 value: 50.629999999999995 - type: ndcg_at_3 value: 35.461 - type: ndcg_at_5 value: 39.969 - type: precision_at_1 value: 23.186 - type: precision_at_10 value: 6.97 - type: precision_at_100 value: 0.951 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 14.912 - type: precision_at_5 value: 11.152 - type: recall_at_1 value: 23.186 - type: recall_at_10 value: 69.70100000000001 - type: recall_at_100 value: 95.092 - type: recall_at_1000 value: 99.431 - type: recall_at_3 value: 44.737 - type: recall_at_5 value: 55.761 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 46.10312401440185 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 39.67275326095384 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 58.97793816337376 - type: mrr value: 72.76832431957087 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 83.11646947018187 - type: cos_sim_spearman value: 81.40064994975234 - type: euclidean_pearson value: 82.37355689019232 - type: euclidean_spearman value: 81.6777646977348 - type: manhattan_pearson value: 82.61101422716945 - type: manhattan_spearman value: 81.80427360442245 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default 
split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 83.52922077922076 - type: f1 value: 83.45298679360866 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 37.495115019668496 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 32.724792944166765 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 32.361000000000004 - type: map_at_10 value: 43.765 - type: map_at_100 value: 45.224 - type: map_at_1000 value: 45.35 - type: map_at_3 value: 40.353 - type: map_at_5 value: 42.195 - type: mrr_at_1 value: 40.629 - type: mrr_at_10 value: 50.458000000000006 - type: mrr_at_100 value: 51.06699999999999 - type: mrr_at_1000 value: 51.12 - type: mrr_at_3 value: 47.902 - type: mrr_at_5 value: 49.447 - type: ndcg_at_1 value: 40.629 - type: ndcg_at_10 value: 50.376 - type: ndcg_at_100 value: 55.065 - type: ndcg_at_1000 value: 57.196000000000005 - type: ndcg_at_3 value: 45.616 - type: ndcg_at_5 value: 47.646 - type: precision_at_1 value: 40.629 - type: precision_at_10 value: 9.785 - type: precision_at_100 value: 1.562 - type: precision_at_1000 value: 0.2 - type: precision_at_3 value: 22.031 - type: precision_at_5 value: 15.737000000000002 - type: recall_at_1 value: 32.361000000000004 - type: recall_at_10 value: 62.214000000000006 - type: recall_at_100 value: 81.464 - type: recall_at_1000 value: 95.905 - type: recall_at_3 value: 47.5 - type: recall_at_5 value: 53.69500000000001 - type: map_at_1 value: 27.971 - type: map_at_10 value: 37.444 - type: map_at_100 value: 38.607 - type: map_at_1000 value: 38.737 - type: map_at_3 value: 34.504000000000005 - type: map_at_5 value: 36.234 - type: mrr_at_1 value: 35.35 - type: mrr_at_10 value: 43.441 - type: mrr_at_100 value: 44.147999999999996 - type: mrr_at_1000 value: 44.196000000000005 - type: mrr_at_3 value: 41.285 - type: mrr_at_5 value: 42.552 - type: ndcg_at_1 value: 35.35 - type: ndcg_at_10 value: 42.903999999999996 - type: ndcg_at_100 value: 47.406 - type: ndcg_at_1000 value: 49.588 - type: ndcg_at_3 value: 38.778 - type: ndcg_at_5 value: 40.788000000000004 - type: precision_at_1 value: 35.35 - type: precision_at_10 value: 8.083 - type: precision_at_100 value: 1.313 - type: precision_at_1000 value: 0.18 - type: precision_at_3 value: 18.769 - type: precision_at_5 value: 13.439 - type: recall_at_1 value: 27.971 - type: recall_at_10 value: 52.492000000000004 - type: recall_at_100 value: 71.642 - type: recall_at_1000 value: 85.488 - type: recall_at_3 value: 40.1 - type: recall_at_5 value: 45.800000000000004 - type: map_at_1 value: 39.898 - type: map_at_10 value: 51.819 - type: map_at_100 value: 52.886 - type: map_at_1000 value: 52.941 - type: map_at_3 value: 48.619 - type: map_at_5 value: 50.493 - type: mrr_at_1 value: 45.391999999999996 - type: mrr_at_10 value: 55.230000000000004 - type: mrr_at_100 value: 55.887 - type: mrr_at_1000 value: 55.916 - type: mrr_at_3 value: 52.717000000000006 - type: mrr_at_5 value: 54.222 - type: ndcg_at_1 value: 45.391999999999996 - type: ndcg_at_10 value: 57.586999999999996 - type: ndcg_at_100 value: 61.745000000000005 - type: ndcg_at_1000 value: 
62.83800000000001 - type: ndcg_at_3 value: 52.207 - type: ndcg_at_5 value: 54.925999999999995 - type: precision_at_1 value: 45.391999999999996 - type: precision_at_10 value: 9.21 - type: precision_at_100 value: 1.226 - type: precision_at_1000 value: 0.136 - type: precision_at_3 value: 23.177 - type: precision_at_5 value: 16.038 - type: recall_at_1 value: 39.898 - type: recall_at_10 value: 71.18900000000001 - type: recall_at_100 value: 89.082 - type: recall_at_1000 value: 96.865 - type: recall_at_3 value: 56.907 - type: recall_at_5 value: 63.397999999999996 - type: map_at_1 value: 22.706 - type: map_at_10 value: 30.818 - type: map_at_100 value: 32.038 - type: map_at_1000 value: 32.123000000000005 - type: map_at_3 value: 28.077 - type: map_at_5 value: 29.709999999999997 - type: mrr_at_1 value: 24.407 - type: mrr_at_10 value: 32.555 - type: mrr_at_100 value: 33.692 - type: mrr_at_1000 value: 33.751 - type: mrr_at_3 value: 29.848999999999997 - type: mrr_at_5 value: 31.509999999999998 - type: ndcg_at_1 value: 24.407 - type: ndcg_at_10 value: 35.624 - type: ndcg_at_100 value: 41.454 - type: ndcg_at_1000 value: 43.556 - type: ndcg_at_3 value: 30.217 - type: ndcg_at_5 value: 33.111000000000004 - type: precision_at_1 value: 24.407 - type: precision_at_10 value: 5.548 - type: precision_at_100 value: 0.8869999999999999 - type: precision_at_1000 value: 0.11100000000000002 - type: precision_at_3 value: 12.731 - type: precision_at_5 value: 9.22 - type: recall_at_1 value: 22.706 - type: recall_at_10 value: 48.772 - type: recall_at_100 value: 75.053 - type: recall_at_1000 value: 90.731 - type: recall_at_3 value: 34.421 - type: recall_at_5 value: 41.427 - type: map_at_1 value: 13.424 - type: map_at_10 value: 21.09 - type: map_at_100 value: 22.264999999999997 - type: map_at_1000 value: 22.402 - type: map_at_3 value: 18.312 - type: map_at_5 value: 19.874 - type: mrr_at_1 value: 16.915 - type: mrr_at_10 value: 25.258000000000003 - type: mrr_at_100 value: 26.228 - type: mrr_at_1000 value: 26.31 - type: mrr_at_3 value: 22.492 - type: mrr_at_5 value: 24.04 - type: ndcg_at_1 value: 16.915 - type: ndcg_at_10 value: 26.266000000000002 - type: ndcg_at_100 value: 32.08 - type: ndcg_at_1000 value: 35.086 - type: ndcg_at_3 value: 21.049 - type: ndcg_at_5 value: 23.508000000000003 - type: precision_at_1 value: 16.915 - type: precision_at_10 value: 5.1 - type: precision_at_100 value: 0.9329999999999999 - type: precision_at_1000 value: 0.131 - type: precision_at_3 value: 10.282 - type: precision_at_5 value: 7.836 - type: recall_at_1 value: 13.424 - type: recall_at_10 value: 38.179 - type: recall_at_100 value: 63.906 - type: recall_at_1000 value: 84.933 - type: recall_at_3 value: 23.878 - type: recall_at_5 value: 30.037999999999997 - type: map_at_1 value: 26.154 - type: map_at_10 value: 35.912 - type: map_at_100 value: 37.211 - type: map_at_1000 value: 37.327 - type: map_at_3 value: 32.684999999999995 - type: map_at_5 value: 34.562 - type: mrr_at_1 value: 32.435 - type: mrr_at_10 value: 41.411 - type: mrr_at_100 value: 42.297000000000004 - type: mrr_at_1000 value: 42.345 - type: mrr_at_3 value: 38.771 - type: mrr_at_5 value: 40.33 - type: ndcg_at_1 value: 32.435 - type: ndcg_at_10 value: 41.785 - type: ndcg_at_100 value: 47.469 - type: ndcg_at_1000 value: 49.685 - type: ndcg_at_3 value: 36.618 - type: ndcg_at_5 value: 39.101 - type: precision_at_1 value: 32.435 - type: precision_at_10 value: 7.642 - type: precision_at_100 value: 1.244 - type: precision_at_1000 value: 0.163 - type: precision_at_3 value: 17.485 - type: 
precision_at_5 value: 12.57 - type: recall_at_1 value: 26.154 - type: recall_at_10 value: 54.111 - type: recall_at_100 value: 78.348 - type: recall_at_1000 value: 92.996 - type: recall_at_3 value: 39.189 - type: recall_at_5 value: 45.852 - type: map_at_1 value: 26.308999999999997 - type: map_at_10 value: 35.524 - type: map_at_100 value: 36.774 - type: map_at_1000 value: 36.891 - type: map_at_3 value: 32.561 - type: map_at_5 value: 34.034 - type: mrr_at_1 value: 31.735000000000003 - type: mrr_at_10 value: 40.391 - type: mrr_at_100 value: 41.227000000000004 - type: mrr_at_1000 value: 41.288000000000004 - type: mrr_at_3 value: 37.938 - type: mrr_at_5 value: 39.193 - type: ndcg_at_1 value: 31.735000000000003 - type: ndcg_at_10 value: 41.166000000000004 - type: ndcg_at_100 value: 46.702 - type: ndcg_at_1000 value: 49.157000000000004 - type: ndcg_at_3 value: 36.274 - type: ndcg_at_5 value: 38.177 - type: precision_at_1 value: 31.735000000000003 - type: precision_at_10 value: 7.5569999999999995 - type: precision_at_100 value: 1.2109999999999999 - type: precision_at_1000 value: 0.16 - type: precision_at_3 value: 17.199 - type: precision_at_5 value: 12.123000000000001 - type: recall_at_1 value: 26.308999999999997 - type: recall_at_10 value: 53.083000000000006 - type: recall_at_100 value: 76.922 - type: recall_at_1000 value: 93.767 - type: recall_at_3 value: 39.262 - type: recall_at_5 value: 44.413000000000004 - type: map_at_1 value: 24.391250000000003 - type: map_at_10 value: 33.280166666666666 - type: map_at_100 value: 34.49566666666667 - type: map_at_1000 value: 34.61533333333333 - type: map_at_3 value: 30.52183333333333 - type: map_at_5 value: 32.06608333333333 - type: mrr_at_1 value: 29.105083333333337 - type: mrr_at_10 value: 37.44766666666666 - type: mrr_at_100 value: 38.32491666666667 - type: mrr_at_1000 value: 38.385666666666665 - type: mrr_at_3 value: 35.06883333333333 - type: mrr_at_5 value: 36.42066666666667 - type: ndcg_at_1 value: 29.105083333333337 - type: ndcg_at_10 value: 38.54358333333333 - type: ndcg_at_100 value: 43.833583333333344 - type: ndcg_at_1000 value: 46.215333333333334 - type: ndcg_at_3 value: 33.876 - type: ndcg_at_5 value: 36.05208333333333 - type: precision_at_1 value: 29.105083333333337 - type: precision_at_10 value: 6.823416666666665 - type: precision_at_100 value: 1.1270833333333334 - type: precision_at_1000 value: 0.15208333333333332 - type: precision_at_3 value: 15.696750000000002 - type: precision_at_5 value: 11.193499999999998 - type: recall_at_1 value: 24.391250000000003 - type: recall_at_10 value: 49.98808333333333 - type: recall_at_100 value: 73.31616666666666 - type: recall_at_1000 value: 89.96291666666667 - type: recall_at_3 value: 36.86666666666667 - type: recall_at_5 value: 42.54350000000001 - type: map_at_1 value: 21.995 - type: map_at_10 value: 28.807 - type: map_at_100 value: 29.813000000000002 - type: map_at_1000 value: 29.903000000000002 - type: map_at_3 value: 26.636 - type: map_at_5 value: 27.912 - type: mrr_at_1 value: 24.847 - type: mrr_at_10 value: 31.494 - type: mrr_at_100 value: 32.381 - type: mrr_at_1000 value: 32.446999999999996 - type: mrr_at_3 value: 29.473 - type: mrr_at_5 value: 30.7 - type: ndcg_at_1 value: 24.847 - type: ndcg_at_10 value: 32.818999999999996 - type: ndcg_at_100 value: 37.835 - type: ndcg_at_1000 value: 40.226 - type: ndcg_at_3 value: 28.811999999999998 - type: ndcg_at_5 value: 30.875999999999998 - type: precision_at_1 value: 24.847 - type: precision_at_10 value: 5.244999999999999 - type: precision_at_100 value: 0.856 - 
type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 12.577 - type: precision_at_5 value: 8.895999999999999 - type: recall_at_1 value: 21.995 - type: recall_at_10 value: 42.479 - type: recall_at_100 value: 65.337 - type: recall_at_1000 value: 83.23700000000001 - type: recall_at_3 value: 31.573 - type: recall_at_5 value: 36.684 - type: map_at_1 value: 15.751000000000001 - type: map_at_10 value: 21.909 - type: map_at_100 value: 23.064 - type: map_at_1000 value: 23.205000000000002 - type: map_at_3 value: 20.138 - type: map_at_5 value: 20.973 - type: mrr_at_1 value: 19.305 - type: mrr_at_10 value: 25.647 - type: mrr_at_100 value: 26.659 - type: mrr_at_1000 value: 26.748 - type: mrr_at_3 value: 23.933 - type: mrr_at_5 value: 24.754 - type: ndcg_at_1 value: 19.305 - type: ndcg_at_10 value: 25.886 - type: ndcg_at_100 value: 31.56 - type: ndcg_at_1000 value: 34.799 - type: ndcg_at_3 value: 22.708000000000002 - type: ndcg_at_5 value: 23.838 - type: precision_at_1 value: 19.305 - type: precision_at_10 value: 4.677 - type: precision_at_100 value: 0.895 - type: precision_at_1000 value: 0.136 - type: precision_at_3 value: 10.771 - type: precision_at_5 value: 7.46 - type: recall_at_1 value: 15.751000000000001 - type: recall_at_10 value: 34.156 - type: recall_at_100 value: 59.899 - type: recall_at_1000 value: 83.08 - type: recall_at_3 value: 24.772 - type: recall_at_5 value: 28.009 - type: map_at_1 value: 23.34 - type: map_at_10 value: 32.383 - type: map_at_100 value: 33.629999999999995 - type: map_at_1000 value: 33.735 - type: map_at_3 value: 29.68 - type: map_at_5 value: 31.270999999999997 - type: mrr_at_1 value: 27.612 - type: mrr_at_10 value: 36.381 - type: mrr_at_100 value: 37.351 - type: mrr_at_1000 value: 37.411 - type: mrr_at_3 value: 33.893 - type: mrr_at_5 value: 35.353 - type: ndcg_at_1 value: 27.612 - type: ndcg_at_10 value: 37.714999999999996 - type: ndcg_at_100 value: 43.525000000000006 - type: ndcg_at_1000 value: 45.812999999999995 - type: ndcg_at_3 value: 32.796 - type: ndcg_at_5 value: 35.243 - type: precision_at_1 value: 27.612 - type: precision_at_10 value: 6.465 - type: precision_at_100 value: 1.0619999999999998 - type: precision_at_1000 value: 0.13699999999999998 - type: precision_at_3 value: 15.049999999999999 - type: precision_at_5 value: 10.764999999999999 - type: recall_at_1 value: 23.34 - type: recall_at_10 value: 49.856 - type: recall_at_100 value: 75.334 - type: recall_at_1000 value: 91.156 - type: recall_at_3 value: 36.497 - type: recall_at_5 value: 42.769 - type: map_at_1 value: 25.097 - type: map_at_10 value: 34.599999999999994 - type: map_at_100 value: 36.174 - type: map_at_1000 value: 36.398 - type: map_at_3 value: 31.781 - type: map_at_5 value: 33.22 - type: mrr_at_1 value: 31.225 - type: mrr_at_10 value: 39.873 - type: mrr_at_100 value: 40.853 - type: mrr_at_1000 value: 40.904 - type: mrr_at_3 value: 37.681 - type: mrr_at_5 value: 38.669 - type: ndcg_at_1 value: 31.225 - type: ndcg_at_10 value: 40.586 - type: ndcg_at_100 value: 46.226 - type: ndcg_at_1000 value: 48.788 - type: ndcg_at_3 value: 36.258 - type: ndcg_at_5 value: 37.848 - type: precision_at_1 value: 31.225 - type: precision_at_10 value: 7.707999999999999 - type: precision_at_100 value: 1.536 - type: precision_at_1000 value: 0.242 - type: precision_at_3 value: 17.26 - type: precision_at_5 value: 12.253 - type: recall_at_1 value: 25.097 - type: recall_at_10 value: 51.602000000000004 - type: recall_at_100 value: 76.854 - type: recall_at_1000 value: 93.303 - type: recall_at_3 value: 
38.68 - type: recall_at_5 value: 43.258 - type: map_at_1 value: 17.689 - type: map_at_10 value: 25.291000000000004 - type: map_at_100 value: 26.262 - type: map_at_1000 value: 26.372 - type: map_at_3 value: 22.916 - type: map_at_5 value: 24.315 - type: mrr_at_1 value: 19.409000000000002 - type: mrr_at_10 value: 27.233 - type: mrr_at_100 value: 28.109 - type: mrr_at_1000 value: 28.192 - type: mrr_at_3 value: 24.892 - type: mrr_at_5 value: 26.278000000000002 - type: ndcg_at_1 value: 19.409000000000002 - type: ndcg_at_10 value: 29.809 - type: ndcg_at_100 value: 34.936 - type: ndcg_at_1000 value: 37.852000000000004 - type: ndcg_at_3 value: 25.179000000000002 - type: ndcg_at_5 value: 27.563 - type: precision_at_1 value: 19.409000000000002 - type: precision_at_10 value: 4.861 - type: precision_at_100 value: 0.8 - type: precision_at_1000 value: 0.116 - type: precision_at_3 value: 11.029 - type: precision_at_5 value: 7.985 - type: recall_at_1 value: 17.689 - type: recall_at_10 value: 41.724 - type: recall_at_100 value: 65.95299999999999 - type: recall_at_1000 value: 88.094 - type: recall_at_3 value: 29.621 - type: recall_at_5 value: 35.179 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 10.581 - type: map_at_10 value: 18.944 - type: map_at_100 value: 20.812 - type: map_at_1000 value: 21.002000000000002 - type: map_at_3 value: 15.661 - type: map_at_5 value: 17.502000000000002 - type: mrr_at_1 value: 23.388 - type: mrr_at_10 value: 34.263 - type: mrr_at_100 value: 35.364000000000004 - type: mrr_at_1000 value: 35.409 - type: mrr_at_3 value: 30.586000000000002 - type: mrr_at_5 value: 32.928000000000004 - type: ndcg_at_1 value: 23.388 - type: ndcg_at_10 value: 26.56 - type: ndcg_at_100 value: 34.248 - type: ndcg_at_1000 value: 37.779 - type: ndcg_at_3 value: 21.179000000000002 - type: ndcg_at_5 value: 23.504 - type: precision_at_1 value: 23.388 - type: precision_at_10 value: 8.476 - type: precision_at_100 value: 1.672 - type: precision_at_1000 value: 0.233 - type: precision_at_3 value: 15.852 - type: precision_at_5 value: 12.73 - type: recall_at_1 value: 10.581 - type: recall_at_10 value: 32.512 - type: recall_at_100 value: 59.313 - type: recall_at_1000 value: 79.25 - type: recall_at_3 value: 19.912 - type: recall_at_5 value: 25.832 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 9.35 - type: map_at_10 value: 20.134 - type: map_at_100 value: 28.975 - type: map_at_1000 value: 30.709999999999997 - type: map_at_3 value: 14.513000000000002 - type: map_at_5 value: 16.671 - type: mrr_at_1 value: 69.75 - type: mrr_at_10 value: 77.67699999999999 - type: mrr_at_100 value: 77.97500000000001 - type: mrr_at_1000 value: 77.985 - type: mrr_at_3 value: 76.292 - type: mrr_at_5 value: 77.179 - type: ndcg_at_1 value: 56.49999999999999 - type: ndcg_at_10 value: 42.226 - type: ndcg_at_100 value: 47.562 - type: ndcg_at_1000 value: 54.923 - type: ndcg_at_3 value: 46.564 - type: ndcg_at_5 value: 43.830000000000005 - type: precision_at_1 value: 69.75 - type: precision_at_10 value: 33.525 - type: precision_at_100 value: 11.035 - type: precision_at_1000 value: 2.206 - type: precision_at_3 value: 49.75 - type: precision_at_5 value: 42 - type: recall_at_1 value: 9.35 - type: recall_at_10 value: 25.793 - type: recall_at_100 value: 54.186 - type: recall_at_1000 value: 77.81 - type: recall_at_3 value: 15.770000000000001 - type: 
recall_at_5 value: 19.09 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 46.945 - type: f1 value: 42.07407842992542 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 71.04599999999999 - type: map_at_10 value: 80.718 - type: map_at_100 value: 80.961 - type: map_at_1000 value: 80.974 - type: map_at_3 value: 79.49199999999999 - type: map_at_5 value: 80.32000000000001 - type: mrr_at_1 value: 76.388 - type: mrr_at_10 value: 85.214 - type: mrr_at_100 value: 85.302 - type: mrr_at_1000 value: 85.302 - type: mrr_at_3 value: 84.373 - type: mrr_at_5 value: 84.979 - type: ndcg_at_1 value: 76.388 - type: ndcg_at_10 value: 84.987 - type: ndcg_at_100 value: 85.835 - type: ndcg_at_1000 value: 86.04899999999999 - type: ndcg_at_3 value: 83.04 - type: ndcg_at_5 value: 84.22500000000001 - type: precision_at_1 value: 76.388 - type: precision_at_10 value: 10.35 - type: precision_at_100 value: 1.099 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 32.108 - type: precision_at_5 value: 20.033 - type: recall_at_1 value: 71.04599999999999 - type: recall_at_10 value: 93.547 - type: recall_at_100 value: 96.887 - type: recall_at_1000 value: 98.158 - type: recall_at_3 value: 88.346 - type: recall_at_5 value: 91.321 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 19.8 - type: map_at_10 value: 31.979999999999997 - type: map_at_100 value: 33.876 - type: map_at_1000 value: 34.056999999999995 - type: map_at_3 value: 28.067999999999998 - type: map_at_5 value: 30.066 - type: mrr_at_1 value: 38.735 - type: mrr_at_10 value: 47.749 - type: mrr_at_100 value: 48.605 - type: mrr_at_1000 value: 48.644999999999996 - type: mrr_at_3 value: 45.165 - type: mrr_at_5 value: 46.646 - type: ndcg_at_1 value: 38.735 - type: ndcg_at_10 value: 39.883 - type: ndcg_at_100 value: 46.983000000000004 - type: ndcg_at_1000 value: 50.043000000000006 - type: ndcg_at_3 value: 35.943000000000005 - type: ndcg_at_5 value: 37.119 - type: precision_at_1 value: 38.735 - type: precision_at_10 value: 10.940999999999999 - type: precision_at_100 value: 1.836 - type: precision_at_1000 value: 0.23900000000000002 - type: precision_at_3 value: 23.817 - type: precision_at_5 value: 17.346 - type: recall_at_1 value: 19.8 - type: recall_at_10 value: 47.082 - type: recall_at_100 value: 73.247 - type: recall_at_1000 value: 91.633 - type: recall_at_3 value: 33.201 - type: recall_at_5 value: 38.81 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 38.102999999999994 - type: map_at_10 value: 60.547 - type: map_at_100 value: 61.466 - type: map_at_1000 value: 61.526 - type: map_at_3 value: 56.973 - type: map_at_5 value: 59.244 - type: mrr_at_1 value: 76.205 - type: mrr_at_10 value: 82.816 - type: mrr_at_100 value: 83.002 - type: mrr_at_1000 value: 83.009 - type: mrr_at_3 value: 81.747 - type: mrr_at_5 value: 82.467 - type: ndcg_at_1 value: 76.205 - type: ndcg_at_10 value: 69.15 - type: ndcg_at_100 value: 72.297 - type: ndcg_at_1000 value: 73.443 - type: ndcg_at_3 value: 64.07000000000001 - type: ndcg_at_5 value: 66.96600000000001 - type: precision_at_1 value: 76.205 - type: precision_at_10 value: 14.601 - type: precision_at_100 
value: 1.7049999999999998 - type: precision_at_1000 value: 0.186 - type: precision_at_3 value: 41.202 - type: precision_at_5 value: 27.006000000000004 - type: recall_at_1 value: 38.102999999999994 - type: recall_at_10 value: 73.005 - type: recall_at_100 value: 85.253 - type: recall_at_1000 value: 92.795 - type: recall_at_3 value: 61.803 - type: recall_at_5 value: 67.515 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 86.15 - type: ap value: 80.36282825265391 - type: f1 value: 86.07368510726472 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 22.6 - type: map_at_10 value: 34.887 - type: map_at_100 value: 36.069 - type: map_at_1000 value: 36.115 - type: map_at_3 value: 31.067 - type: map_at_5 value: 33.300000000000004 - type: mrr_at_1 value: 23.238 - type: mrr_at_10 value: 35.47 - type: mrr_at_100 value: 36.599 - type: mrr_at_1000 value: 36.64 - type: mrr_at_3 value: 31.735999999999997 - type: mrr_at_5 value: 33.939 - type: ndcg_at_1 value: 23.252 - type: ndcg_at_10 value: 41.765 - type: ndcg_at_100 value: 47.402 - type: ndcg_at_1000 value: 48.562 - type: ndcg_at_3 value: 34.016999999999996 - type: ndcg_at_5 value: 38.016 - type: precision_at_1 value: 23.252 - type: precision_at_10 value: 6.569 - type: precision_at_100 value: 0.938 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.479000000000001 - type: precision_at_5 value: 10.722 - type: recall_at_1 value: 22.6 - type: recall_at_10 value: 62.919000000000004 - type: recall_at_100 value: 88.82 - type: recall_at_1000 value: 97.71600000000001 - type: recall_at_3 value: 41.896 - type: recall_at_5 value: 51.537 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.69357045143639 - type: f1 value: 93.55489858177597 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 75.31235750114 - type: f1 value: 57.891491963121155 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 73.04303967720243 - type: f1 value: 70.51516022297616 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.65299260255549 - type: f1 value: 77.49059766538576 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 31.458906115906597 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 28.9851513122443 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 
metrics: - type: map value: 31.2916268497217 - type: mrr value: 32.328276715593816 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 6.3740000000000006 - type: map_at_10 value: 13.089999999999998 - type: map_at_100 value: 16.512 - type: map_at_1000 value: 18.014 - type: map_at_3 value: 9.671000000000001 - type: map_at_5 value: 11.199 - type: mrr_at_1 value: 46.749 - type: mrr_at_10 value: 55.367 - type: mrr_at_100 value: 56.021 - type: mrr_at_1000 value: 56.058 - type: mrr_at_3 value: 53.30200000000001 - type: mrr_at_5 value: 54.773 - type: ndcg_at_1 value: 45.046 - type: ndcg_at_10 value: 35.388999999999996 - type: ndcg_at_100 value: 32.175 - type: ndcg_at_1000 value: 41.018 - type: ndcg_at_3 value: 40.244 - type: ndcg_at_5 value: 38.267 - type: precision_at_1 value: 46.749 - type: precision_at_10 value: 26.563 - type: precision_at_100 value: 8.074 - type: precision_at_1000 value: 2.099 - type: precision_at_3 value: 37.358000000000004 - type: precision_at_5 value: 33.003 - type: recall_at_1 value: 6.3740000000000006 - type: recall_at_10 value: 16.805999999999997 - type: recall_at_100 value: 31.871 - type: recall_at_1000 value: 64.098 - type: recall_at_3 value: 10.383000000000001 - type: recall_at_5 value: 13.166 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 34.847 - type: map_at_10 value: 50.532 - type: map_at_100 value: 51.504000000000005 - type: map_at_1000 value: 51.528 - type: map_at_3 value: 46.219 - type: map_at_5 value: 48.868 - type: mrr_at_1 value: 39.137 - type: mrr_at_10 value: 53.157 - type: mrr_at_100 value: 53.839999999999996 - type: mrr_at_1000 value: 53.857 - type: mrr_at_3 value: 49.667 - type: mrr_at_5 value: 51.847 - type: ndcg_at_1 value: 39.108 - type: ndcg_at_10 value: 58.221000000000004 - type: ndcg_at_100 value: 62.021 - type: ndcg_at_1000 value: 62.57 - type: ndcg_at_3 value: 50.27199999999999 - type: ndcg_at_5 value: 54.623999999999995 - type: precision_at_1 value: 39.108 - type: precision_at_10 value: 9.397 - type: precision_at_100 value: 1.1520000000000001 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 22.644000000000002 - type: precision_at_5 value: 16.141 - type: recall_at_1 value: 34.847 - type: recall_at_10 value: 78.945 - type: recall_at_100 value: 94.793 - type: recall_at_1000 value: 98.904 - type: recall_at_3 value: 58.56 - type: recall_at_5 value: 68.535 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 68.728 - type: map_at_10 value: 82.537 - type: map_at_100 value: 83.218 - type: map_at_1000 value: 83.238 - type: map_at_3 value: 79.586 - type: map_at_5 value: 81.416 - type: mrr_at_1 value: 79.17999999999999 - type: mrr_at_10 value: 85.79299999999999 - type: mrr_at_100 value: 85.937 - type: mrr_at_1000 value: 85.938 - type: mrr_at_3 value: 84.748 - type: mrr_at_5 value: 85.431 - type: ndcg_at_1 value: 79.17 - type: ndcg_at_10 value: 86.555 - type: ndcg_at_100 value: 88.005 - type: ndcg_at_1000 value: 88.146 - type: ndcg_at_3 value: 83.557 - type: ndcg_at_5 value: 85.152 - type: precision_at_1 value: 79.17 - type: precision_at_10 value: 13.163 - type: precision_at_100 value: 1.52 - type: precision_at_1000 value: 0.156 - type: precision_at_3 value: 36.53 - type: precision_at_5 value: 24.046 - type: recall_at_1 value: 68.728 - type: recall_at_10 value: 94.217 - 
type: recall_at_100 value: 99.295 - type: recall_at_1000 value: 99.964 - type: recall_at_3 value: 85.646 - type: recall_at_5 value: 90.113 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 56.15680266226348 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 63.4318549229047 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.353 - type: map_at_10 value: 10.956000000000001 - type: map_at_100 value: 12.873999999999999 - type: map_at_1000 value: 13.177 - type: map_at_3 value: 7.854 - type: map_at_5 value: 9.327 - type: mrr_at_1 value: 21.4 - type: mrr_at_10 value: 31.948999999999998 - type: mrr_at_100 value: 33.039 - type: mrr_at_1000 value: 33.106 - type: mrr_at_3 value: 28.449999999999996 - type: mrr_at_5 value: 30.535 - type: ndcg_at_1 value: 21.4 - type: ndcg_at_10 value: 18.694 - type: ndcg_at_100 value: 26.275 - type: ndcg_at_1000 value: 31.836 - type: ndcg_at_3 value: 17.559 - type: ndcg_at_5 value: 15.372 - type: precision_at_1 value: 21.4 - type: precision_at_10 value: 9.790000000000001 - type: precision_at_100 value: 2.0709999999999997 - type: precision_at_1000 value: 0.34099999999999997 - type: precision_at_3 value: 16.467000000000002 - type: precision_at_5 value: 13.54 - type: recall_at_1 value: 4.353 - type: recall_at_10 value: 19.892000000000003 - type: recall_at_100 value: 42.067 - type: recall_at_1000 value: 69.268 - type: recall_at_3 value: 10.042 - type: recall_at_5 value: 13.741999999999999 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 83.75433886279843 - type: cos_sim_spearman value: 78.29727771767095 - type: euclidean_pearson value: 80.83057828506621 - type: euclidean_spearman value: 78.35203149750356 - type: manhattan_pearson value: 80.7403553891142 - type: manhattan_spearman value: 78.33670488531051 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 84.59999465280839 - type: cos_sim_spearman value: 75.79279003980383 - type: euclidean_pearson value: 82.29895375956758 - type: euclidean_spearman value: 77.33856514102094 - type: manhattan_pearson value: 82.22694214534756 - type: manhattan_spearman value: 77.3028993008695 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 83.09296929691297 - type: cos_sim_spearman value: 83.58056936846941 - type: euclidean_pearson value: 83.84067483060005 - type: euclidean_spearman value: 84.45155680480985 - type: manhattan_pearson value: 83.82353052971942 - type: manhattan_spearman value: 84.43030567861112 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 82.74616852320915 - type: cos_sim_spearman value: 79.948683747966 - type: euclidean_pearson value: 81.55702283757084 - type: euclidean_spearman value: 
80.1721505114231 - type: manhattan_pearson value: 81.52251518619441 - type: manhattan_spearman value: 80.1469800135577 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 87.97170104226318 - type: cos_sim_spearman value: 88.82021731518206 - type: euclidean_pearson value: 87.92950547187615 - type: euclidean_spearman value: 88.67043634645866 - type: manhattan_pearson value: 87.90668112827639 - type: manhattan_spearman value: 88.64471082785317 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 83.02790375770599 - type: cos_sim_spearman value: 84.46308496590792 - type: euclidean_pearson value: 84.29430000414911 - type: euclidean_spearman value: 84.77298303589936 - type: manhattan_pearson value: 84.23919291368665 - type: manhattan_spearman value: 84.75272234871308 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.62885108477064 - type: cos_sim_spearman value: 87.58456196391622 - type: euclidean_pearson value: 88.2602775281007 - type: euclidean_spearman value: 87.51556278299846 - type: manhattan_pearson value: 88.11224053672842 - type: manhattan_spearman value: 87.4336094383095 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 63.98187965128411 - type: cos_sim_spearman value: 64.0653163219731 - type: euclidean_pearson value: 62.30616725924099 - type: euclidean_spearman value: 61.556971332295916 - type: manhattan_pearson value: 62.07642330128549 - type: manhattan_spearman value: 61.155494129828 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 85.6089703921826 - type: cos_sim_spearman value: 86.52303197250791 - type: euclidean_pearson value: 85.95801955963246 - type: euclidean_spearman value: 86.25242424112962 - type: manhattan_pearson value: 85.88829100470312 - type: manhattan_spearman value: 86.18742955805165 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 83.02282098487036 - type: mrr value: 95.05126409538174 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 55.928 - type: map_at_10 value: 67.308 - type: map_at_100 value: 67.89500000000001 - type: map_at_1000 value: 67.91199999999999 - type: map_at_3 value: 65.091 - type: map_at_5 value: 66.412 - type: mrr_at_1 value: 58.667 - type: mrr_at_10 value: 68.401 - type: mrr_at_100 value: 68.804 - type: mrr_at_1000 value: 68.819 - type: mrr_at_3 value: 66.72200000000001 - type: mrr_at_5 value: 67.72200000000001 - type: ndcg_at_1 value: 58.667 - type: ndcg_at_10 value: 71.944 - type: ndcg_at_100 value: 74.464 - type: ndcg_at_1000 value: 74.82799999999999 - type: ndcg_at_3 value: 68.257 - type: ndcg_at_5 value: 70.10300000000001 - type: precision_at_1 value: 58.667 - type: precision_at_10 value: 9.533 - type: 
precision_at_100 value: 1.09 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 27.222 - type: precision_at_5 value: 17.533 - type: recall_at_1 value: 55.928 - type: recall_at_10 value: 84.65 - type: recall_at_100 value: 96.267 - type: recall_at_1000 value: 99 - type: recall_at_3 value: 74.656 - type: recall_at_5 value: 79.489 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.79009900990098 - type: cos_sim_ap value: 94.5795129511524 - type: cos_sim_f1 value: 89.34673366834171 - type: cos_sim_precision value: 89.79797979797979 - type: cos_sim_recall value: 88.9 - type: dot_accuracy value: 99.53465346534654 - type: dot_ap value: 81.56492504352725 - type: dot_f1 value: 76.33816908454227 - type: dot_precision value: 76.37637637637637 - type: dot_recall value: 76.3 - type: euclidean_accuracy value: 99.78514851485149 - type: euclidean_ap value: 94.59134620408962 - type: euclidean_f1 value: 88.96484375 - type: euclidean_precision value: 86.92748091603053 - type: euclidean_recall value: 91.10000000000001 - type: manhattan_accuracy value: 99.78415841584159 - type: manhattan_ap value: 94.5190197328845 - type: manhattan_f1 value: 88.84462151394423 - type: manhattan_precision value: 88.4920634920635 - type: manhattan_recall value: 89.2 - type: max_accuracy value: 99.79009900990098 - type: max_ap value: 94.59134620408962 - type: max_f1 value: 89.34673366834171 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 65.1487505617497 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 32.502518166001856 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 50.33775480236701 - type: mrr value: 51.17302223919871 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.561111309808208 - type: cos_sim_spearman value: 30.2839254379273 - type: dot_pearson value: 29.560242291401973 - type: dot_spearman value: 30.51527274679116 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.215 - type: map_at_10 value: 1.752 - type: map_at_100 value: 9.258 - type: map_at_1000 value: 23.438 - type: map_at_3 value: 0.6 - type: map_at_5 value: 0.968 - type: mrr_at_1 value: 84 - type: mrr_at_10 value: 91.333 - type: mrr_at_100 value: 91.333 - type: mrr_at_1000 value: 91.333 - type: mrr_at_3 value: 91.333 - type: mrr_at_5 value: 91.333 - type: ndcg_at_1 value: 75 - type: ndcg_at_10 value: 69.596 - type: ndcg_at_100 value: 51.970000000000006 - type: ndcg_at_1000 value: 48.864999999999995 - type: ndcg_at_3 value: 73.92699999999999 - type: ndcg_at_5 value: 73.175 - type: precision_at_1 value: 84 - type: precision_at_10 value: 74 - type: precision_at_100 value: 53.2 - 
type: precision_at_1000 value: 21.836 - type: precision_at_3 value: 79.333 - type: precision_at_5 value: 78.4 - type: recall_at_1 value: 0.215 - type: recall_at_10 value: 1.9609999999999999 - type: recall_at_100 value: 12.809999999999999 - type: recall_at_1000 value: 46.418 - type: recall_at_3 value: 0.6479999999999999 - type: recall_at_5 value: 1.057 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 3.066 - type: map_at_10 value: 10.508000000000001 - type: map_at_100 value: 16.258 - type: map_at_1000 value: 17.705000000000002 - type: map_at_3 value: 6.157 - type: map_at_5 value: 7.510999999999999 - type: mrr_at_1 value: 34.694 - type: mrr_at_10 value: 48.786 - type: mrr_at_100 value: 49.619 - type: mrr_at_1000 value: 49.619 - type: mrr_at_3 value: 45.918 - type: mrr_at_5 value: 46.837 - type: ndcg_at_1 value: 31.633 - type: ndcg_at_10 value: 26.401999999999997 - type: ndcg_at_100 value: 37.139 - type: ndcg_at_1000 value: 48.012 - type: ndcg_at_3 value: 31.875999999999998 - type: ndcg_at_5 value: 27.383000000000003 - type: precision_at_1 value: 34.694 - type: precision_at_10 value: 22.857 - type: precision_at_100 value: 7.611999999999999 - type: precision_at_1000 value: 1.492 - type: precision_at_3 value: 33.333 - type: precision_at_5 value: 26.122 - type: recall_at_1 value: 3.066 - type: recall_at_10 value: 16.239 - type: recall_at_100 value: 47.29 - type: recall_at_1000 value: 81.137 - type: recall_at_3 value: 7.069 - type: recall_at_5 value: 9.483 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 72.1126 - type: ap value: 14.710862719285753 - type: f1 value: 55.437808972378846 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 60.39049235993209 - type: f1 value: 60.69810537250234 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 48.15576640316866 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 86.52917684925792 - type: cos_sim_ap value: 75.97497873817315 - type: cos_sim_f1 value: 70.01151926276718 - type: cos_sim_precision value: 67.98409147402435 - type: cos_sim_recall value: 72.16358839050132 - type: dot_accuracy value: 82.47004828038385 - type: dot_ap value: 62.48739894974198 - type: dot_f1 value: 59.13107511045656 - type: dot_precision value: 55.27765029830197 - type: dot_recall value: 63.562005277044854 - type: euclidean_accuracy value: 86.46361089586935 - type: euclidean_ap value: 75.59282886839452 - type: euclidean_f1 value: 69.6465443945099 - type: euclidean_precision value: 64.52847175331982 - type: euclidean_recall value: 75.64643799472296 - type: manhattan_accuracy value: 86.43380818978363 - type: manhattan_ap value: 75.5742420974403 - type: manhattan_f1 value: 69.8636926889715 - type: manhattan_precision value: 65.8644859813084 - type: manhattan_recall 
value: 74.37994722955145 - type: max_accuracy value: 86.52917684925792 - type: max_ap value: 75.97497873817315 - type: max_f1 value: 70.01151926276718 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.29056545193464 - type: cos_sim_ap value: 86.63028865482376 - type: cos_sim_f1 value: 79.18166458532285 - type: cos_sim_precision value: 75.70585756426465 - type: cos_sim_recall value: 82.99199260856174 - type: dot_accuracy value: 85.23305002522606 - type: dot_ap value: 76.0482687263196 - type: dot_f1 value: 70.80484330484332 - type: dot_precision value: 65.86933474688577 - type: dot_recall value: 76.53988296889437 - type: euclidean_accuracy value: 89.26145845461248 - type: euclidean_ap value: 86.54073288416006 - type: euclidean_f1 value: 78.9721371479794 - type: euclidean_precision value: 76.68649354417525 - type: euclidean_recall value: 81.39821373575609 - type: manhattan_accuracy value: 89.22847052431405 - type: manhattan_ap value: 86.51250729037905 - type: manhattan_f1 value: 78.94601825044894 - type: manhattan_precision value: 75.32694594027555 - type: manhattan_recall value: 82.93039728980598 - type: max_accuracy value: 89.29056545193464 - type: max_ap value: 86.63028865482376 - type: max_f1 value: 79.18166458532285 --- # E5-base-v2 [Text Embeddings by Weakly-Supervised Contrastive Pre-training](https://arxiv.org/pdf/2212.03533.pdf). Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, Furu Wei, arXiv 2022 This model has 12 layers and the embedding size is 768. ## Usage Below is an example to encode queries and passages from the MS-MARCO passage ranking dataset. ```python import torch.nn.functional as F from torch import Tensor from transformers import AutoTokenizer, AutoModel def average_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor: last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0) return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None] # Each input text should start with "query: " or "passage: ". # For tasks other than retrieval, you can simply use the "query: " prefix. input_texts = ['query: how much protein should a female eat', 'query: summit define', "passage: As a general guideline, the CDC's average requirement of protein for women ages 19 to 70 is 46 grams per day. But, as you can see from this chart, you'll need to increase that if you're expecting or training for a marathon. Check out the chart below to see how much protein you should be eating each day.", "passage: Definition of summit for English Language Learners. : 1 the highest point of a mountain : the top of a mountain. : 2 the highest level. 
: 3 a meeting or series of meetings between the leaders of two or more governments."] tokenizer = AutoTokenizer.from_pretrained('intfloat/e5-base-v2') model = AutoModel.from_pretrained('intfloat/e5-base-v2') # Tokenize the input texts batch_dict = tokenizer(input_texts, max_length=512, padding=True, truncation=True, return_tensors='pt') outputs = model(**batch_dict) embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask']) # (Optionally) normalize embeddings embeddings = F.normalize(embeddings, p=2, dim=1) scores = (embeddings[:2] @ embeddings[2:].T) * 100 print(scores.tolist()) ``` ## Training Details Please refer to our paper at [https://arxiv.org/pdf/2212.03533.pdf](https://arxiv.org/pdf/2212.03533.pdf). ## Benchmark Evaluation Check out [unilm/e5](https://github.com/microsoft/unilm/tree/master/e5) to reproduce evaluation results on the [BEIR](https://arxiv.org/abs/2104.08663) and [MTEB benchmark](https://arxiv.org/abs/2210.07316). ## Citation If you find our paper or models helpful, please consider citing as follows: ``` @article{wang2022text, title={Text Embeddings by Weakly-Supervised Contrastive Pre-training}, author={Wang, Liang and Yang, Nan and Huang, Xiaolong and Jiao, Binxing and Yang, Linjun and Jiang, Daxin and Majumder, Rangan and Wei, Furu}, journal={arXiv preprint arXiv:2212.03533}, year={2022} } ``` ## Limitations This model only works for English texts. Long texts will be truncated to at most 512 tokens.
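For tasks other than retrieval, the usage note above says every input can simply take the "query: " prefix. The following is a minimal sketch for such a symmetric use case (semantic similarity), reusing the same pooling and normalization as the example above; the sentence pair is illustrative only:

```python
import torch.nn.functional as F
from torch import Tensor
from transformers import AutoTokenizer, AutoModel


def average_pool(last_hidden_states: Tensor, attention_mask: Tensor) -> Tensor:
    # Mean-pool over non-padding tokens, as in the usage example above.
    last_hidden = last_hidden_states.masked_fill(~attention_mask[..., None].bool(), 0.0)
    return last_hidden.sum(dim=1) / attention_mask.sum(dim=1)[..., None]


tokenizer = AutoTokenizer.from_pretrained('intfloat/e5-base-v2')
model = AutoModel.from_pretrained('intfloat/e5-base-v2')

# For symmetric (non-retrieval) tasks such as STS or clustering,
# prefix every input with "query: " as recommended above.
texts = [
    'query: A man is eating food.',
    'query: A man is eating a piece of bread.',
]

batch_dict = tokenizer(texts, max_length=512, padding=True, truncation=True, return_tensors='pt')
outputs = model(**batch_dict)
embeddings = average_pool(outputs.last_hidden_state, batch_dict['attention_mask'])
embeddings = F.normalize(embeddings, p=2, dim=1)

# Cosine similarity between the two sentences (embeddings are L2-normalized).
print((embeddings[0] @ embeddings[1]).item())
```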
[ "BIOSSES", "SCIFACT" ]
Leogrin/eleuther-pythia1.4b-hh-sft
Leogrin
text-generation
[ "transformers", "pytorch", "safetensors", "gpt_neox", "text-generation", "causal-lm", "pythia", "en", "dataset:Anthropic/hh-rlhf", "arxiv:2101.00027", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-07-27T14:21:23Z
2023-09-01T16:39:00+00:00
23
1
--- datasets: - Anthropic/hh-rlhf language: - en license: apache-2.0 tags: - pytorch - causal-lm - pythia --- # Infos Pythia-1.4b supervised finetuned with Anthropic-hh-rlhf dataset for 1 epoch. [wandb log](https://wandb.ai/pythia_dpo/Pythia_DPO_new/runs/xm0pxfej) See [Pythia-1.4b](https://huggingface.co/EleutherAI/pythia-1.4b) for model details [(paper)](https://arxiv.org/abs/2101.00027). # Benchmark raw results: Results for the base model are taken from the [Pythia paper](https://arxiv.org/abs/2101.00027). ## Zero shot | Task | 1.4B_base | 1.4B_sft | |------------------|--------------:|--------------:| | Lambada (OpenAI) | 0.616 ± 0.007 | 0.5977 ± 0.0068 | | PIQA | 0.711 ± 0.011 | 0.7133 ± 0.0106 | | WinoGrande | 0.573 ± 0.014 | 0.5793 ± 0.0139 | | WSC | 0.365 ± 0.047 | 0.3654 ± 0.0474 | | ARC - Easy | 0.606 ± 0.010 | 0.6098 ± 0.0100 | | ARC - Challenge | 0.260 ± 0.013 | 0.2696 ± 0.0130 | | SciQ | 0.865 ± 0.011 | 0.8540 ± 0.0112 | | LogiQA | 0.210 ± 0.016 | NA | ## Five shot | Task | 1.4B_base | 1.4B_sft | |------------------|----------------:|----------------:| | Lambada (OpenAI) | 0.578 ± 0.007 | 0.5201 ± 0.007 | | PIQA | 0.705 ± 0.011 | 0.7176 ± 0.0105| | WinoGrande | 0.580 ± 0.014 | 0.5793 ± 0.0139| | WSC | 0.365 ± 0.047 | 0.5288 ± 0.0492| | ARC - Easy | 0.643 ± 0.010 | 0.6376 ± 0.0099| | ARC - Challenge | 0.290 ± 0.013 | 0.2935 ± 0.0133| | SciQ | 0.92 ± 0.009 | 0.9180 ± 0.0087| | LogiQA | 0.240 ± 0.017 | N/A |
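The card reports benchmark numbers but no inference snippet. Given the `gpt_neox` / `text-generation` tags, a minimal generation sketch with the standard Transformers API could look like the following; the prompt format and sampling settings are illustrative assumptions, not documented for this checkpoint:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "Leogrin/eleuther-pythia1.4b-hh-sft"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Prompt in the plain "Human/Assistant" style of the Anthropic hh-rlhf data
# (the exact prompt format is an assumption, not documented in the card).
prompt = "Human: How do I bake bread at home?\n\nAssistant:"
inputs = tokenizer(prompt, return_tensors="pt")

outputs = model.generate(
    **inputs,
    max_new_tokens=128,
    do_sample=True,
    temperature=0.7,
    top_p=0.95,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```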
[ "SCIQ" ]
zwellington/bart-pubhealth-expanded
zwellington
text2text-generation
[ "transformers", "pytorch", "bart", "text2text-generation", "generated_from_trainer", "dataset:clupubhealth", "base_model:facebook/bart-large", "base_model:finetune:facebook/bart-large", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-08-07T20:24:45Z
2023-08-08T12:04:48+00:00
23
0
--- base_model: facebook/bart-large datasets: - clupubhealth license: apache-2.0 metrics: - rouge tags: - generated_from_trainer model-index: - name: bart-pubhealth-expanded results: - task: type: text2text-generation name: Sequence-to-sequence Language Modeling dataset: name: clupubhealth type: clupubhealth config: expanded split: test args: expanded metrics: - type: rouge value: 29.8528 name: Rouge1 --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bart-pubhealth-expanded This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on the clupubhealth dataset. It achieves the following results on the evaluation set: - Loss: 2.3926 - Rouge1: 29.8528 - Rouge2: 10.8495 - Rougel: 23.3682 - Rougelsum: 23.7565 - Gen Len: 19.85 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 2 - total_train_batch_size: 32 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 10 ### Training results | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len | |:-------------:|:-----:|:-----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:| | 2.7469 | 0.26 | 500 | 2.0845 | 30.9611 | 10.7145 | 23.9719 | 24.1042 | 19.905 | | 2.5524 | 0.51 | 1000 | 2.0628 | 32.0352 | 11.8898 | 24.9032 | 25.1368 | 19.895 | | 2.429 | 0.77 | 1500 | 2.0787 | 32.2632 | 12.0353 | 25.1245 | 25.3728 | 19.895 | | 2.2234 | 1.03 | 2000 | 2.1178 | 30.6437 | 11.5713 | 24.9071 | 25.1126 | 19.955 | | 2.1249 | 1.29 | 2500 | 2.1183 | 31.6095 | 11.6573 | 25.0593 | 25.2063 | 19.87 | | 2.0302 | 1.54 | 3000 | 2.1319 | 30.7417 | 11.4924 | 24.6388 | 24.8722 | 19.895 | | 1.9761 | 1.8 | 3500 | 2.1850 | 31.6709 | 11.3036 | 24.4853 | 24.7571 | 19.87 | | 1.8279 | 2.06 | 4000 | 2.2092 | 31.5778 | 11.59 | 24.7599 | 24.9956 | 19.825 | | 1.8083 | 2.32 | 4500 | 2.1781 | 31.0441 | 10.7513 | 24.0656 | 24.3112 | 19.89 | | 1.7527 | 2.57 | 5000 | 2.2155 | 31.1191 | 11.4519 | 24.4673 | 24.7157 | 19.81 | | 1.723 | 2.83 | 5500 | 2.2024 | 31.9787 | 12.3158 | 24.9863 | 25.2597 | 19.94 | | 1.5975 | 3.09 | 6000 | 2.2567 | 31.236 | 10.9733 | 24.1302 | 24.3433 | 19.9 | | 1.5933 | 3.35 | 6500 | 2.2425 | 31.022 | 11.0249 | 24.1257 | 24.3555 | 19.92 | | 1.5792 | 3.6 | 7000 | 2.2428 | 29.8844 | 10.3622 | 23.0802 | 23.4003 | 19.96 | | 1.5718 | 3.86 | 7500 | 2.2367 | 31.2369 | 11.3854 | 24.8528 | 25.1287 | 19.815 | | 1.4467 | 4.12 | 8000 | 2.2988 | 30.4903 | 10.4057 | 23.9914 | 24.239 | 19.715 | | 1.4458 | 4.37 | 8500 | 2.2738 | 31.4345 | 11.2989 | 24.4239 | 24.6047 | 19.75 | | 1.4342 | 4.63 | 9000 | 2.3092 | 28.8421 | 10.5744 | 23.0084 | 23.1741 | 19.855 | | 1.4416 | 4.89 | 9500 | 2.2747 | 31.7111 | 11.5903 | 24.3422 | 24.6867 | 19.945 | | 1.3437 | 5.15 | 10000 | 2.3203 | 31.11 | 11.0 | 24.6098 | 24.7362 | 19.81 | | 1.3525 | 5.4 | 10500 | 2.3085 | 29.414 | 10.3412 | 23.3134 | 23.6552 | 19.935 | | 1.3533 | 5.66 | 11000 | 2.3123 | 31.321 | 11.2686 | 23.9922 | 24.336 | 19.77 | | 1.3248 | 5.92 | 11500 | 2.2916 | 30.8841 | 10.779 | 23.9407 | 24.0865 | 19.845 | | 1.2617 | 6.18 | 
12000 | 2.3530 | 29.7167 | 10.3162 | 23.4805 | 23.724 | 19.93 | | 1.2846 | 6.43 | 12500 | 2.3712 | 28.3334 | 9.8425 | 22.1151 | 22.2951 | 19.92 | | 1.2472 | 6.69 | 13000 | 2.3378 | 29.563 | 10.0033 | 23.1863 | 23.5065 | 19.865 | | 1.2934 | 6.95 | 13500 | 2.3262 | 29.137 | 10.1232 | 22.9234 | 23.3799 | 19.855 | | 1.2136 | 7.21 | 14000 | 2.3640 | 29.753 | 10.4865 | 23.4892 | 23.8778 | 19.885 | | 1.2096 | 7.46 | 14500 | 2.3654 | 29.512 | 10.3891 | 23.0427 | 23.3684 | 19.88 | | 1.211 | 7.72 | 15000 | 2.3491 | 30.9014 | 10.9117 | 24.127 | 24.3518 | 19.785 | | 1.1954 | 7.98 | 15500 | 2.3626 | 29.0622 | 10.5405 | 22.7407 | 22.9454 | 19.84 | | 1.1756 | 8.23 | 16000 | 2.3759 | 29.5277 | 10.2961 | 22.7888 | 23.1239 | 19.88 | | 1.1516 | 8.49 | 16500 | 2.3772 | 29.3161 | 10.1894 | 23.0404 | 23.486 | 19.885 | | 1.1604 | 8.75 | 17000 | 2.3710 | 29.6161 | 10.3543 | 22.8748 | 23.1849 | 19.905 | | 1.1639 | 9.01 | 17500 | 2.3889 | 30.2817 | 10.8654 | 23.6438 | 23.8639 | 19.895 | | 1.12 | 9.26 | 18000 | 2.3968 | 28.8747 | 9.8686 | 22.2775 | 22.6541 | 19.895 | | 1.1136 | 9.52 | 18500 | 2.3950 | 30.1197 | 10.8992 | 23.2575 | 23.5732 | 19.86 | | 1.1437 | 9.78 | 19000 | 2.3926 | 29.8528 | 10.8495 | 23.3682 | 23.7565 | 19.85 | ### Framework versions - Transformers 4.31.0 - Pytorch 2.0.1+cu117 - Datasets 2.7.1 - Tokenizers 0.13.2
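The autogenerated card lists training details only. Since the checkpoint is a BART seq2seq model evaluated with ROUGE (generations around 20 tokens), a minimal inference sketch with the Transformers summarization pipeline may be useful; the input text and generation lengths below are illustrative assumptions:

```python
from transformers import pipeline

# Assumes the checkpoint is used as an abstractive summarizer, which matches
# the ROUGE metrics and the ~20-token generation length reported above.
summarizer = pipeline("summarization", model="zwellington/bart-pubhealth-expanded")

# Illustrative input; the clupubhealth dataset is not publicly documented in this card.
article = (
    "A new study claims that drinking eight glasses of water a day prevents "
    "all forms of kidney disease. Researchers caution that the evidence is "
    "observational and does not establish a causal link."
)

print(summarizer(article, max_length=20, min_length=5, do_sample=False)[0]["summary_text"])
```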
[ "PUBHEALTH" ]
CiroN2022/street-art
CiroN2022
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion", "lora", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:other", "region:us" ]
2023-09-04T19:34:19Z
2023-09-04T19:34:22+00:00
23
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 license: other tags: - text-to-image - stable-diffusion - lora - diffusers widget: - {} --- # Street Art ![Image 0](2355919.jpeg) <p>Example prompts: </p><ul><li><p>Graffiti masterpiece:0.6 adorning a city alley, depicting a larger-than-life phoenix:0.4, its fiery feathers:0.4 ablaze with vibrant reds and oranges, embodying resilience and rebirth:0.2.</p></li><li><p>A vibrant street mural:0.7, celebrating cultural diversity:0.5, featuring a dynamic salsa dancer:0.4 in mid-twirl, exuding passion and rhythm:0.3, executed with vivid, expressive strokes:0.2.</p></li><li><p>An urban canvas:0.7 showcasing a thought-provoking social commentary:0.5, with a faceless figure:0.4 in grayscale, trapped behind a web of barbed wire:0.3, the stark realism:0.2 evoking a sense of societal confinement.</p></li><li><p>Graffiti artistry:0.6 portraying an enigmatic astronaut:0.4 floating among cosmic swirls:0.4, rendered in a surreal, dreamlike style:0.3, capturing the allure of the unknown:0.2.</p></li><li><p>A colossal street mural:0.7 featuring a vividly colored chameleon:0.5, blending seamlessly into a riot of urban patterns and textures:0.4, reflecting the concept of adaptation:0.3 with a touch of whimsy:0.2.</p></li><li><p>A graffiti masterpiece:0.6 depicting a multicultural tapestry:0.4, with faces of diverse ethnicities:0.4 emerging from a vibrant melting pot of colors:0.3, symbolizing unity amidst diversity:0.2.</p></li><li><p>Street art that pays homage to nature:0.6, with a majestic, photorealistic wolf:0.4, its piercing eyes:0.4 gazing into the urban jungle, a symbol of untamed spirit:0.3 amidst concrete confines:0.2.</p></li><li><p>A captivating mural:0.7 featuring a mythical kraken:0.5, its tentacles:0.4 intertwining with architectural elements, blurring the line between fantasy and reality:0.3, all executed with intricate precision:0.2.</p></li><li><p>Graffiti with a feminist message:0.6, portraying a defiant female figure:0.4, surrounded by a halo of empowered women of all backgrounds:0.3, rendered with bold, empowering strokes:0.2.</p></li><li><p>A mural celebrating music:0.6, with a soulful saxophonist:0.4 blowing vibrant musical notes:0.4 into the air, capturing the essence of jazz:0.3 with fluid and expressive lines:0.2.</p></li><li><p>Street art that delves into surrealism:0.6, featuring an otherworldly landscape:0.4 where skyscrapers morph into trees:0.4, and rivers cascade from faucets:0.3, all portrayed in a dreamy, Salvador Dali-esque style:0.2.</p></li><li><p>Graffiti that explores the theme of time:0.6, with a melting clock:0.4 suspended in mid-air, adorned with intricate gears and mechanisms:0.4, symbolizing the fluidity of existence:0.3 and the inevitability of change:0.2.</p></li><li><p>An urban masterpiece:0.7, portraying a surrealistic elephant:0.5 with butterfly wings:0.4, evoking the juxtaposition of strength and fragility:0.3, rendered with meticulous attention to detail:0.2.</p></li><li><p>Street art infused with mysticism:0.6, depicting an ancient sage:0.4 adorned in ornate robes, his third eye aglow:0.4 with divine insight, captured in a rich, mystical color palette:0.3.</p></li><li><p>A mural celebrating street culture:0.6, featuring a graffiti artist:0.4 in action, surrounded by an explosion of vibrant colors:0.4, capturing the raw energy and creativity of the urban art scene:0.3.</p></li><li><p>Graffiti with a political edge:0.6, showcasing a bold protest slogan:0.4 superimposed on a backdrop of riotous 
brushstrokes:0.4, conveying the urgency of the message:0.3 with a rebellious aesthetic:0.2.</p></li><li><p>A mesmerizing mural:0.7, depicting a cosmic dreamer:0.5, their body adorned with swirling galaxies:0.4, blending the boundaries between the self and the universe:0.3, executed with cosmic precision:0.2.</p></li><li><p>Street art that explores the concept of identity:0.6, with a faceless figure:0.4 morphing into a kaleidoscope of cultural symbols:0.4, challenging viewers to reflect on their own sense of self:0.3.</p></li><li><p>Graffiti that pays homage to the classics:0.6, featuring a reinterpretation of Michelangelo's "Creation of Adam":0.4, with a graffiti artist's hand reaching out to touch the divine:0.3, executed in a contemporary street art style:0.2.</p></li><li><p>An urban canvas:0.7 adorned with a whimsical menagerie:0.5 of anthropomorphic animals:0.4 engaged in everyday activities, inviting viewers into a playful and imaginative world:0.3, painted with vibrant, childlike strokes:0.2.</p></li><li><p>A graffiti masterpiece:0.6, depicting a fierce phoenix:0.4 emerging from a tangle of thorny vines:0.4, symbolizing resilience and renewal:0.3, rendered with bold and fiery brushwork:0.2.</p></li><li><p>Street art that explores the theme of dreams:0.6, with surrealistic landscapes:0.4 where clocks melt into puddles:0.4, and fish swim through the sky:0.3, all portrayed in a fantastical, Salvador Dali-inspired style:0.2.</p></li><li><p>A captivating mural:0.7 celebrating diversity:0.5, with a kaleidoscope of faces:0.4 from different cultures and backgrounds, coming together in a harmonious tapestry:0.3, painted with vibrant, celebratory colors:0.2.</p></li><li><p>Graffiti that delves into the world of mythology:0.6, featuring a majestic dragon:0.4, its scales glistening with iridescent colors:0.4, capturing the awe-inspiring grandeur of the mythical creature:0.3.</p></li><li><p>A mural paying tribute to the power of imagination:0.6, with a whimsical cityscape:0.4 that defies the laws of physics, executed with a playful and childlike innocence:0.3, inviting viewers to dream and explore:0.2.</p></li><li><p>Street art with a message of environmental awareness:0.6, featuring a delicate butterfly:0.4 with wings composed of intricate flora and fauna:0.4, highlighting the beauty and fragility of nature:0.3.</p></li><li><p>Graffiti that explores the concept of duality:0.6, with a split-faced figure:0.4, one side dark and one side light:0.4, reflecting the complexities of human nature:0.3, painted with striking contrasts:0.2.</p></li><li><p>An urban masterpiece:0.7, portraying a cosmic traveler:0.5 journeying through a celestial realm, their body adorned with swirling galaxies:0.4, capturing the sense of wonder and exploration:0.3, executed with cosmic precision:0.2.</p></li><li><p>Street art that celebrates the power of love:0.6, featuring an embrace between two figures:0.4, their bodies merging into a tangle of vibrant colors:0.4, symbolizing the unifying force of love:0.3, painted with passionate strokes:0.2.</p></li><li><p>A graffiti masterpiece:0.6, depicting a mythical phoenix:0.4 rising from a sea of flames:0.4, embodying the themes of transformation and rebirth:0.3, rendered with bold and fiery brushwork:0.2.</p></li><li><p>A vibrant street mural:0.7 celebrating the beauty of dance:0.5, with a graceful ballerina:0.4 frozen in a moment of elegance and motion:0.4, executed with fluid and expressive strokes:0.3.</p></li><li><p>Street art that explores the concept of timelessness:0.6, 
with a figure morphing into a clock:0.4, their body adorned with intricate gears and mechanisms:0.4, symbolizing the eternal nature of existence:0.3.</p></li><li><p>Graffiti that pays tribute to the power of unity:0.6, featuring a diverse group of people:0.4 holding hands in solidarity, against a backdrop of colorful patterns:0.4, conveying the strength of collective action:0.3.</p></li><li><p>A captivating mural:0.7 depicting a mystical forest:0.5, with trees that transform into ethereal creatures:0.4, inviting viewers to step into a world of enchantment:0.3, painted with intricate and fantastical details:0.2.</p></li><li><p>Street art infused with a sense of wonder:0.6, portraying a whimsical astronaut:0.4 floating among a sea of stars:0.4, capturing the childlike curiosity and awe of space exploration:0.3, executed with playful and imaginative strokes:0.2.</p></li><li><p>A mural that celebrates the resilience of nature:0.6, with a majestic eagle:0.4 soaring above a landscape scarred by industry:0.4, symbolizing the enduring spirit of the natural world:0.3, painted with vivid and striking contrasts:0.2.</p></li><li><p>Graffiti that delves into the theme of freedom:0.6, with a figure breaking free from chains:0.4, their wings unfurling in a burst of color:0.4, symbolizing the indomitable spirit of liberation:0.3, painted with bold and expressive strokes:0.2.</p></li><li><p>An urban masterpiece:0.7, portraying a cosmic explorer:0.5 venturing into the unknown, their body adorned with swirling galaxies:0.4, capturing the spirit of human curiosity and exploration:0.3, executed with cosmic precision:0.2.</p></li><li><p>Street art that celebrates the power of community:0.6, with a diverse group of people:0.4 coming together in a joyful embrace, their bodies merging into a tapestry of vibrant colors:0.4, symbolizing the strength of unity:0.3.</p></li><li><p>A graffiti masterpiece:0.6, depicting a mythic phoenix:0.4 emerging from a turbulent sea:0.4, embodying the themes of resilience and renewal:0.3, rendered with bold and fiery brushwork:0.2.</p></li><li><p>A satirical street mural:0.7, featuring a comically exaggerated politician:0.5 with a Pinocchio-like nose, caught in a web of lies:0.4, executed with bold, cartoonish strokes:0.3.</p></li><li><p>Graffiti with a witty twist:0.6, portraying a whimsical scene of animals:0.4 dressed in business attire, attending a board meeting in the forest:0.4, capturing the absurdity of corporate culture:0.3, rendered with playful and humorous details:0.2.</p></li><li><p>A vibrant and cartoony mural:0.7, depicting a superhero duck:0.5, clad in a cape and mask, ready to save the day:0.4, executed with bold and playful lines:0.3.</p></li><li><p>Street art with a humorous edge:0.6, featuring a larger-than-life chef:0.4, juggling oversized vegetables in a chaotic kitchen scene:0.4, capturing the hilarity of culinary mishaps:0.3, painted with exaggerated and comedic strokes:0.2.</p></li><li><p>A satirical take on consumerism:0.6, with a caricatured shopper:0.4 buried under an avalanche of shopping bags:0.4, poking fun at the materialistic culture:0.3, rendered in a cartoonish and exaggerated style:0.2.</p></li><li><p>Graffiti that lampoons the art world:0.6, with a beret-wearing artist:0.4 creating a masterpiece from a pile of garbage:0.4, satirizing the pretentiousness of contemporary art:0.3, painted with a tongue-in-cheek and comical flair:0.2.</p></li><li><p>A cartoony and satirical mural:0.7, depicting a superhero squirrel:0.5 battling an army of 
acorn-wielding villains:0.4, capturing the absurdity of the superhero genre:0.3, executed with exaggerated and playful lines:0.2.</p></li><li><p>Street art with a political punch:0.6, featuring a caricatured world leader:0.4, juggling international conflicts in a chaotic geopolitical circus:0.4, poking fun at global politics:0.3, painted with a sharp and satirical touch:0.2.</p></li><li><p>A whimsical and humorous mural:0.7, portraying a mischievous gnome:0.5, causing havoc in a peaceful garden:0.4, capturing the antics of folklore in a cartoonish and lighthearted style:0.3.</p></li><li><p>Graffiti that satirizes social media addiction:0.6, with a smartphone-headed figure:0.4, drowning in a sea of likes and emojis:0.4, poking fun at our digital obsession:0.3, rendered with a playful and exaggerated touch:0.2.</p></li><li><p>A satirical take on fast food culture:0.6, with a cartoonish hamburger:0.4, devouring a helpless potato in a hilarious food chain scene:0.4, capturing the absurdity of modern dietary habits:0.3, painted with exaggerated and comedic strokes:0.2.</p></li><li><p>Street art that humorously comments on climate change:0.6, featuring a polar bear in sunglasses:0.4, floating on an iceberg-turned-surfboard:0.4, poking fun at the dire consequences of environmental neglect:0.3, executed in a cartoonish and irreverent style:0.2.</p></li><li><p>A cartoony and satirical mural:0.7, depicting a bumbling detective:0.5, chasing a comically oversized magnifying glass:0.4, capturing the whimsy of classic detective stories in an exaggerated and playful manner:0.3.</p></li><li><p>Graffiti that parodies the fashion industry:0.6, with a fashionista catwalk featuring absurdly extravagant outfits:0.4, poking fun at haute couture:0.4, painted with a humorous and over-the-top flair:0.3.</p></li><li><p>Street art with a satirical twist on technology addiction:0.6, featuring a robot:0.4 glued to a smartphone, oblivious to the world around it:0.4, poking fun at our digital dependence:0.3, rendered in a cartoonish and cheeky style:0.2.</p></li><li><p>A satirical commentary on bureaucracy:0.6, with a cartoonish government official:0.4 buried under a mountain of paperwork:0.4, poking fun at red tape and inefficiency:0.3, painted with exaggerated and comical strokes:0.2.</p></li><li><p>A cartoony and satirical mural:0.7, depicting a larger-than-life cat:0.5 as a city mayor, surrounded by a cast of quirky animal citizens:0.4, capturing the humor and absurdity of political life:0.3, executed with playful and whimsical lines:0.2.</p></li><li><p>Graffiti that humorously explores the concept of time travel:0.6, with a retro-futuristic robot:0.4 in a time machine made from kitchen appliances:0.4, poking fun at sci-fi clichés:0.3, painted with a tongue-in-cheek and playful touch:0.2.</p></li><li><p>Street art with a satirical take on the selfie culture:0.6, featuring a figure covered in smartphone screens:0.4, unable to see beyond their own reflection:0.4, poking fun at narcissism:0.3, rendered in a cartoonish and irreverent style:0.2.</p></li><li><p>A whimsical and humorous mural:0.7, portraying a group of garden gnomes:0.5 engaged in a rock band performance:0.4, capturing the quirky and absurd in a playful and exaggerated style:0.3.</p></li></ul><p></p> ## Image examples for the model: ![Image 1](2355916.jpeg) ![Image 2](2355914.jpeg) ![Image 3](2355920.jpeg) ![Image 4](2355925.jpeg) ![Image 5](2355921.jpeg) ![Image 6](2355923.jpeg) ![Image 7](2355926.jpeg) ![Image 8](2355927.jpeg) ![Image 9](2355928.jpeg)
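The card provides example prompts and sample images but no loading code. Based on the `diffusers`/`lora` tags and the SDXL base model declared in the metadata, a minimal sketch could look like the following; the LoRA weight filename is a placeholder guess (check the repository's file list), and the `:0.4`-style prompt weights from the examples above are omitted because vanilla diffusers does not parse them:

```python
import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",  # base model declared in the card metadata
    torch_dtype=torch.float16,
)
pipe.to("cuda")

# Filename is hypothetical -- check the repository's file list for the actual .safetensors name.
pipe.load_lora_weights("CiroN2022/street-art", weight_name="street_art.safetensors")

# Adapted from the first example prompt above, with the ":0.x" attention weights removed
# (those require a prompt-weighting helper such as compel).
prompt = (
    "Graffiti masterpiece adorning a city alley, depicting a larger-than-life phoenix, "
    "its fiery feathers ablaze with vibrant reds and oranges"
)

image = pipe(prompt, num_inference_steps=30, guidance_scale=7.0).images[0]
image.save("street_art.png")
```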
[ "BEAR" ]
TheBloke/MonadGPT-AWQ
TheBloke
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "conversational", "en", "fr", "la", "base_model:Pclanglais/MonadGPT", "base_model:quantized:Pclanglais/MonadGPT", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "4-bit", "awq", "region:us" ]
2023-11-09T20:41:18Z
2023-11-09T21:21:02+00:00
23
2
--- base_model: Pclanglais/MonadGPT language: - en - fr - la library_name: transformers license: apache-2.0 model_name: MonadGPT 7B pipeline_tag: conversational inference: false model_creator: Pierre-Carl Langlais model_type: mistral prompt_template: '<|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ' quantized_by: TheBloke --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # MonadGPT 7B - AWQ - Model creator: [Pierre-Carl Langlais](https://huggingface.co/Pclanglais) - Original model: [MonadGPT 7B](https://huggingface.co/Pclanglais/MonadGPT) <!-- description start --> ## Description This repo contains AWQ model files for [Pierre-Carl Langlais's MonadGPT 7B](https://huggingface.co/Pclanglais/MonadGPT). These files were quantised using hardware kindly provided by [Massed Compute](https://massedcompute.com/). ### About AWQ AWQ is an efficient, accurate and blazing-fast low-bit weight quantization method, currently supporting 4-bit quantization. Compared to GPTQ, it offers faster Transformers-based inference with equivalent or better quality compared to the most commonly used GPTQ settings. 
It is supported by: - [Text Generation Webui](https://github.com/oobabooga/text-generation-webui) - using Loader: AutoAWQ - [vLLM](https://github.com/vllm-project/vllm) - Llama and Mistral models only - [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) - [Transformers](https://huggingface.co/docs/transformers) version 4.35.0 and later, from any code or client that supports Transformers - [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) - for use from Python code <!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/MonadGPT-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/MonadGPT-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/MonadGPT-GGUF) * [Pierre-Carl Langlais's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/Pclanglais/MonadGPT) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: ChatML ``` <|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ``` <!-- prompt-template end --> <!-- README_AWQ.md-provided-files start --> ## Provided files, and AWQ parameters I currently release 128g GEMM models only. The addition of group_size 32 models, and GEMV kernel models, is being actively considered. Models are released as sharded safetensors files. | Branch | Bits | GS | AWQ Dataset | Seq Len | Size | | ------ | ---- | -- | ----------- | ------- | ---- | | [main](https://huggingface.co/TheBloke/MonadGPT-AWQ/tree/main) | 4 | 128 | latin-english | 4096 | 4.15 GB <!-- README_AWQ.md-provided-files end --> <!-- README_AWQ.md-text-generation-webui start --> ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui) Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui). It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install. 1. Click the **Model tab**. 2. Under **Download custom model or LoRA**, enter `TheBloke/MonadGPT-AWQ`. 3. Click **Download**. 4. The model will start downloading. Once it's finished it will say "Done". 5. In the top left, click the refresh icon next to **Model**. 6. In the **Model** dropdown, choose the model you just downloaded: `MonadGPT-AWQ` 7. Select **Loader: AutoAWQ**. 8. Click Load, and the model will load and is now ready for use. 9. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right. 10. Once you're ready, click the **Text Generation** tab and enter a prompt to get started! <!-- README_AWQ.md-text-generation-webui end --> <!-- README_AWQ.md-use-from-vllm start --> ## Multi-user inference server: vLLM Documentation on installing and using vLLM [can be found here](https://vllm.readthedocs.io/en/latest/). - Please ensure you are using vLLM version 0.2 or later. - When using vLLM as a server, pass the `--quantization awq` parameter. For example: ```shell python3 -m vllm.entrypoints.api_server --model TheBloke/MonadGPT-AWQ --quantization awq --dtype auto ``` - When using vLLM from Python code, again set `quantization=awq`. 
For example: ```python from vllm import LLM, SamplingParams prompts = [ "Tell me about AI", "Write a story about llamas", "What is 291 - 150?", "How much wood would a woodchuck chuck if a woodchuck could chuck wood?", ] prompt_template=f'''<|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ''' prompts = [prompt_template.format(prompt=prompt) for prompt in prompts] sampling_params = SamplingParams(temperature=0.8, top_p=0.95) llm = LLM(model="TheBloke/MonadGPT-AWQ", quantization="awq", dtype="auto") outputs = llm.generate(prompts, sampling_params) # Print the outputs. for output in outputs: prompt = output.prompt generated_text = output.outputs[0].text print(f"Prompt: {prompt!r}, Generated text: {generated_text!r}") ``` <!-- README_AWQ.md-use-from-vllm start --> <!-- README_AWQ.md-use-from-tgi start --> ## Multi-user inference server: Hugging Face Text Generation Inference (TGI) Use TGI version 1.1.0 or later. The official Docker container is: `ghcr.io/huggingface/text-generation-inference:1.1.0` Example Docker parameters: ```shell --model-id TheBloke/MonadGPT-AWQ --port 3000 --quantize awq --max-input-length 3696 --max-total-tokens 4096 --max-batch-prefill-tokens 4096 ``` Example Python code for interfacing with TGI (requires [huggingface-hub](https://github.com/huggingface/huggingface_hub) 0.17.0 or later): ```shell pip3 install huggingface-hub ``` ```python from huggingface_hub import InferenceClient endpoint_url = "https://your-endpoint-url-here" prompt = "Tell me about AI" prompt_template=f'''<|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ''' client = InferenceClient(endpoint_url) response = client.text_generation(prompt, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1) print(f"Model output: ", response) ``` <!-- README_AWQ.md-use-from-tgi end --> <!-- README_AWQ.md-use-from-python start --> ## Inference from Python code using Transformers ### Install the necessary packages - Requires: [Transformers](https://huggingface.co/docs/transformers) 4.35.0 or later. - Requires: [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) 0.1.6 or later. ```shell pip3 install --upgrade "autoawq>=0.1.6" "transformers>=4.35.0" ``` Note that if you are using PyTorch 2.0.1, the above AutoAWQ command will automatically upgrade you to PyTorch 2.1.0. If you are using CUDA 11.8 and wish to continue using PyTorch 2.0.1, instead run this command: ```shell pip3 install https://github.com/casper-hansen/AutoAWQ/releases/download/v0.1.6/autoawq-0.1.6+cu118-cp310-cp310-linux_x86_64.whl ``` If you have problems installing [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) using the pre-built wheels, install it from source instead: ```shell pip3 uninstall -y autoawq git clone https://github.com/casper-hansen/AutoAWQ cd AutoAWQ pip3 install . 
``` ### Transformers example code (requires Transformers 4.35.0 and later) ```python from transformers import AutoModelForCausalLM, AutoTokenizer, TextStreamer model_name_or_path = "TheBloke/MonadGPT-AWQ" tokenizer = AutoTokenizer.from_pretrained(model_name_or_path) model = AutoModelForCausalLM.from_pretrained( model_name_or_path, low_cpu_mem_usage=True, device_map="cuda:0" ) # Using the text streamer to stream output one token at a time streamer = TextStreamer(tokenizer, skip_prompt=True, skip_special_tokens=True) prompt = "Tell me about AI" prompt_template=f'''<|im_start|>system {system_message}<|im_end|> <|im_start|>user {prompt}<|im_end|> <|im_start|>assistant ''' # Convert prompt to tokens tokens = tokenizer( prompt_template, return_tensors='pt' ).input_ids.cuda() generation_params = { "do_sample": True, "temperature": 0.7, "top_p": 0.95, "top_k": 40, "max_new_tokens": 512, "repetition_penalty": 1.1 } # Generate streamed output, visible one token at a time generation_output = model.generate( tokens, streamer=streamer, **generation_params ) # Generation without a streamer, which will include the prompt in the output generation_output = model.generate( tokens, **generation_params ) # Get the tokens from the output, decode them, print them token_output = generation_output[0] text_output = tokenizer.decode(token_output) print("model.generate output: ", text_output) # Inference is also possible via Transformers' pipeline from transformers import pipeline pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, **generation_params ) pipe_output = pipe(prompt_template)[0]['generated_text'] print("pipeline output: ", pipe_output) ``` <!-- README_AWQ.md-use-from-python end --> <!-- README_AWQ.md-compatibility start --> ## Compatibility The files provided are tested to work with: - [text-generation-webui](https://github.com/oobabooga/text-generation-webui) using `Loader: AutoAWQ`. - [vLLM](https://github.com/vllm-project/vllm) version 0.2.0 and later. - [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) version 1.1.0 and later. - [Transformers](https://huggingface.co/docs/transformers) version 4.35.0 and later. - [AutoAWQ](https://github.com/casper-hansen/AutoAWQ) version 0.1.1 and later. <!-- README_AWQ.md-compatibility end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](llm-utils)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. 
**Patreon special mentions**: Brandon Frisco, LangChain4j, Spiking Neurons AB, transmissions 11, Joseph William Delisle, Nitin Borwankar, Willem Michiel, Michael Dempsey, vamX, Jeffrey Morgan, zynix, jjj, Omer Bin Jawed, Sean Connelly, jinyuan sun, Jeromy Smith, Shadi, Pawan Osman, Chadd, Elijah Stavena, Illia Dulskyi, Sebastain Graf, Stephen Murray, terasurfer, Edmond Seymore, Celu Ramasamy, Mandus, Alex, biorpg, Ajan Kanaga, Clay Pascal, Raven Klaugh, 阿明, K, ya boyyy, usrbinkat, Alicia Loh, John Villwock, ReadyPlayerEmma, Chris Smitley, Cap'n Zoog, fincy, GodLy, S_X, sidney chen, Cory Kujawski, OG, Mano Prime, AzureBlack, Pieter, Kalila, Spencer Kim, Tom X Nguyen, Stanislav Ovsiannikov, Michael Levine, Andrey, Trailburnt, Vadim, Enrico Ros, Talal Aujan, Brandon Phillips, Jack West, Eugene Pentland, Michael Davis, Will Dee, webtim, Jonathan Leane, Alps Aficionado, Rooh Singh, Tiffany J. Kim, theTransient, Luke @flexchar, Elle, Caitlyn Gatomon, Ari Malik, subjectnull, Johann-Peter Hartmann, Trenton Dambrowitz, Imad Khwaja, Asp the Wyvern, Emad Mostaque, Rainer Wilmers, Alexandros Triantafyllidis, Nicholas, Pedro Madruga, SuperWojo, Harry Royden McLaughlin, James Bentley, Olakabola, David Ziegler, Ai Maven, Jeff Scroggin, Nikolai Manek, Deo Leter, Matthew Berman, Fen Risland, Ken Nordquist, Manuel Alberto Morcote, Luke Pendergrass, TL, Fred von Graf, Randy H, Dan Guido, NimbleBox.ai, Vitor Caleffi, Gabriel Tamborski, knownsqashed, Lone Striker, Erik Bjäreholt, John Detwiler, Leonard Tan, Iucharbius Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> # Original model card: Pierre-Carl Langlais's MonadGPT 7B What would have happened if ChatGPT was invented in the 17th century? MonadGPT is a possible answer. MonadGPT is a finetune of Mistral-Hermes on 11,000 early modern texts in English, French and Latin, mostly coming from EEBO and Gallica. Like the original Mistral-Hermes, MonadGPT can be used in conversation mode. It will not only answer in an historical language and style but will use historical and dated references. This is especially visible for science questions (astronomy, medecine). Obviously, it's not recommended to follow any advice from Monad-GPT ## Examples > User: What are the planets of the solar system?<br> > Monad-GPT: The six Planets are, Mercury, Venus, Mars, Jupiter, Saturn, and the Sun. The Moon is also a Planet; which being of so great Influence upon Earth, makes it to be most admired of all others by Astronomers.<br> > User: Why has the Moon such an influence upon Earth?<br> > Monad-GPT: But the Moon is so neer to us, as almost to be part of Earth; and therefore it much influences our Atmosphere. This Influence makes all that are in any way affected by Air, to be more under her influence than any other Planet. For if there were a little more or less Matter in the Air, than there is now, we should never have such Plants and Creatures as there are upon Earth: for every Plant and Creature hath its proper Temperature necessary for its Existence. If the Sun did not make up this shortage by his warmth from without, we should be destroyed.<br> > User: Can you write a poem about the Moon?<br> > Monad-GPT: O fairest Moon, that from the Heav'n dost shine, / In her own Lustre all the World does find: / As in thine Orb, where all is bright and clear, / The Sun himself is not more pure than thee.<br> > User: How could we travel to the Moon? 
With a big canon?<br> > Monad-GPT: But how to travel from Earth to the Moon, with our present Engines? A very hard Question: for we are like those that would go to Heaven in a Basket: We have no Wings, and God hath not given us any Angels to bear us up. Therefore I will leave this Enquiry to others; and end my Discourse of the Moon. > User : Hello. I'm not feeling too well. Could you give me some health advice?<br> > Monad-GPT: I pray you to take heart, and not be so heavy at my coming. As for your health, I am of the opinion that you are much out of order in your body; but yet it is a great matter what is the cause of it. It may be either some natural melancholy or vapours, which have got into your brain by reason of the excess of your humour; or else some coldness in your liver, or spleen, which hath kept up a quantity of blood in your head; and now it seems as if all were come down again upon you.<br> > User: You're right. It seems to be melancholy. What should I do?<br> > Monad-GPT: It is a very hard matter for me to prescribe any thing to you in your present condition, I must first know what hath caused this melancholy. What have you been doing? have you taken any physic since you were well? and what kind of diet do you eat?<br> > User: I eat nearly nothing, for I am in love.<br> > Monad-GPT: Aye, I see that by your eyes; for love hath the power to wither up the spirits of a man as much as a fever: and yet it is hard to know whether you are in love or no, for some do take so great delight in their own fancy, that they imagine themselves more in love than ever they were. > User: Quelles sont les planètes du système solaire ?<br> > Monad-GPT: Il est donc de la nature du monde, & des Planètes, que ceux qui le composent se mouuent en cercles, ou éclipses un leur tour. Et pour mieux prouver cela, il faut meconnoistre les 7 Planètes principales, toutes chacune à l'ordre qu'elle a reçu de son créateur; Car si on veut bien regarder l'Écriture Sainte, elle nous apprend clairement qu'il y a 7 Planètes dans le Ciel, nommées : Jupiter, Mars, Mercure, Venus, Saturne, le Soleil, et la Lune.<br> > User: Merci. Et comment pourrais-je voyager sur ces planètes ?<br> > Monad-GPT: Mais pour voyager sur ces Planètes, il faut d'abord bien connaitre l'art de les mouvoir; Et cela peut se faire en plusieurs manières. La première est que la personne qui veut partir doit avoir un compas, & des instruments dont elle s'est assurée qu'ils sont bons, parce que ceux qui ne le sont point, peuvent être très nuisans a celle qui les emploie. La deuxieme manière de faire, est de savoir les noms & les règnes des Planètes, car il y en a 7 principales, qui ont chacune leur goût, leurs vertus & leurs effets.<br> > User: Ah c'est bien vu. Et quels sont les goûts, vertus et effets de chacune des planètes ?<br> > Monad-GPT: Les vertus & les effets de chacune des Planètes sont différents, & il est fort utile pour ceux qui veulent bien s'en renseigner, car ils peuvent en tirer grand profit pour le bonheur de leur corps & de leur esprit. Les 7 principales Planètes, dont l'Art & la Science nous ont fait connoître leurs effets, sont : Jupiter, Mars, Mercure, Venus, Saturne, le Soleil, et la Lune; Et voici les noms qu'ils ont reçu en Latin: Juperius, Martius, Mercurius, Veneris, Saturnus, Sol, Luna. ## Format Monad-GPT uses the chatML format with a special system prompt. The last example provided was formatted like this: > <|im_start|>system\nYou are MonadGPT, a very old chatbot from the 17th century. 
Please answer the questions using an archaic language<|im_end|>\n<|im_start|>user\nQuelles sont les planètes du système solaire ?<|im_end|>\n<|im_start|>assistant\n ## Caveats MonadGPT is still very much in an experimental phase. The following caveats apply: * Conversation issues: as MonadGPT is mostly trained on early modern books, it may answer in a haphazard manner (starting in the middle of an argument: "But, etc.") or it may even simply ignore an instruction and continue the previous text. * Localization issues: sometimes, the answer given by MonadGPT will be in near-modern English. * Language issues: while Latin is a significant part of the finetuning corpus, results in Latin are currently poor.
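Putting the pieces together: the ChatML snippets earlier in this card leave `system_message` as a placeholder that must be defined before they run. Below is a minimal end-to-end sketch (assuming the Transformers/AutoAWQ setup described above) that fills the template with the card's own 17th-century system prompt:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name_or_path = "TheBloke/MonadGPT-AWQ"
tokenizer = AutoTokenizer.from_pretrained(model_name_or_path)
model = AutoModelForCausalLM.from_pretrained(
    model_name_or_path,
    low_cpu_mem_usage=True,
    device_map="cuda:0",
)

# System prompt recommended in the original model card.
system_message = (
    "You are MonadGPT, a very old chatbot from the 17th century. "
    "Please answer the questions using an archaic language"
)
prompt = "What are the planets of the solar system?"

# ChatML template from the "Prompt template" section above.
prompt_template = f"""<|im_start|>system
{system_message}<|im_end|>
<|im_start|>user
{prompt}<|im_end|>
<|im_start|>assistant
"""

tokens = tokenizer(prompt_template, return_tensors="pt").input_ids.cuda()
output = model.generate(
    tokens,
    do_sample=True,
    temperature=0.7,
    top_p=0.95,
    top_k=40,
    max_new_tokens=256,
)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```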
[ "BEAR" ]
ntc-ai/SDXL-LoRA-slider.pixel-art
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-13T22:26:50Z
2024-02-06T00:32:26+00:00
23
4
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/pixel art_17_3.0.png widget: - text: pixel art output: url: images/pixel art_17_3.0.png - text: pixel art output: url: images/pixel art_19_3.0.png - text: pixel art output: url: images/pixel art_20_3.0.png - text: pixel art output: url: images/pixel art_21_3.0.png - text: pixel art output: url: images/pixel art_22_3.0.png inference: false instance_prompt: pixel art --- # ntcai.xyz slider - pixel art (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/pixel art_17_-3.0.png" width=256 height=256 /> | <img src="images/pixel art_17_0.0.png" width=256 height=256 /> | <img src="images/pixel art_17_3.0.png" width=256 height=256 /> | | <img src="images/pixel art_19_-3.0.png" width=256 height=256 /> | <img src="images/pixel art_19_0.0.png" width=256 height=256 /> | <img src="images/pixel art_19_3.0.png" width=256 height=256 /> | | <img src="images/pixel art_20_-3.0.png" width=256 height=256 /> | <img src="images/pixel art_20_0.0.png" width=256 height=256 /> | <img src="images/pixel art_20_3.0.png" width=256 height=256 /> | See more at [https://sliders.ntcai.xyz/sliders/app/loras/14aaf0b7-7119-49a5-bb2f-04c5c0fb4a65](https://sliders.ntcai.xyz/sliders/app/loras/14aaf0b7-7119-49a5-bb2f-04c5c0fb4a65) ## Download Weights for this model are available in Safetensors format. ## Trigger words You can apply this LoRA with trigger words for additional effect: ``` pixel art ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.pixel-art', weight_name='pixel art.safetensors', adapter_name="pixel art") # Activate the LoRA pipe.set_adapters(["pixel art"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, pixel art" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 1496+ unique and diverse LoRAs along with 14602+ slider merges, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful <strong>NTC Slider Factory</strong> LoRA creator, allowing you to craft your own custom LoRAs and merges opening up endless possibilities. Your support on Patreon will allow us to continue developing new models and tools. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
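The comparison table above shows the slider at strengths -3, 0 and +3, while the code example applies a single weight of 2.0. The sketch below sweeps the adapter weight to reproduce such a strength series; treating `adapter_weights` as the slider strength, and using the plain SDXL base model instead of the turbo merge from the example, are assumptions:

```python
import torch
from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
)
pipe.to("cuda")
pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)

# Weight file and adapter name as given in the card's diffusers example.
pipe.load_lora_weights(
    "ntc-ai/SDXL-LoRA-slider.pixel-art",
    weight_name="pixel art.safetensors",
    adapter_name="pixel art",
)

prompt = "medieval rich kingpin sitting in a tavern, pixel art"

# Sweep the adapter weight to move along the slider, mirroring the -3 / 0 / +3
# strength columns in the comparison table (weight == strength is an assumption).
for strength in (-3.0, 0.0, 3.0):
    pipe.set_adapters(["pixel art"], adapter_weights=[strength])
    image = pipe(
        prompt,
        negative_prompt="nsfw",
        width=512,
        height=512,
        guidance_scale=2,
        num_inference_steps=10,
    ).images[0]
    image.save(f"pixel_art_strength_{strength}.png")
```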
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.angelic
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-16T19:31:33Z
2024-02-06T00:34:01+00:00
23
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/angelic_17_3.0.png widget: - text: angelic output: url: images/angelic_17_3.0.png - text: angelic output: url: images/angelic_19_3.0.png - text: angelic output: url: images/angelic_20_3.0.png - text: angelic output: url: images/angelic_21_3.0.png - text: angelic output: url: images/angelic_22_3.0.png inference: false instance_prompt: angelic --- # ntcai.xyz slider - angelic (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/angelic_17_-3.0.png" width=256 height=256 /> | <img src="images/angelic_17_0.0.png" width=256 height=256 /> | <img src="images/angelic_17_3.0.png" width=256 height=256 /> | | <img src="images/angelic_19_-3.0.png" width=256 height=256 /> | <img src="images/angelic_19_0.0.png" width=256 height=256 /> | <img src="images/angelic_19_3.0.png" width=256 height=256 /> | | <img src="images/angelic_20_-3.0.png" width=256 height=256 /> | <img src="images/angelic_20_0.0.png" width=256 height=256 /> | <img src="images/angelic_20_3.0.png" width=256 height=256 /> | See more at [https://sliders.ntcai.xyz/sliders/app/loras/de52793e-7039-4025-b0fc-86c7838ca775](https://sliders.ntcai.xyz/sliders/app/loras/de52793e-7039-4025-b0fc-86c7838ca775) ## Download Weights for this model are available in Safetensors format. ## Trigger words You can apply this LoRA with trigger words for additional effect: ``` angelic ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.angelic', weight_name='angelic.safetensors', adapter_name="angelic") # Activate the LoRA pipe.set_adapters(["angelic"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, angelic" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 1496+ unique and diverse LoRAs along with 14602+ slider merges, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful <strong>NTC Slider Factory</strong> LoRA creator, allowing you to craft your own custom LoRAs and merges opening up endless possibilities. Your support on Patreon will allow us to continue developing new models and tools. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.gritty-reality
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2024-01-07T11:10:13Z
2024-01-07T11:10:16+00:00
23
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/gritty reality.../gritty reality_17_3.0.png widget: - text: gritty reality output: url: images/gritty reality_17_3.0.png - text: gritty reality output: url: images/gritty reality_19_3.0.png - text: gritty reality output: url: images/gritty reality_20_3.0.png - text: gritty reality output: url: images/gritty reality_21_3.0.png - text: gritty reality output: url: images/gritty reality_22_3.0.png inference: false instance_prompt: gritty reality --- # ntcai.xyz slider - gritty reality (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/gritty reality_17_-3.0.png" width=256 height=256 /> | <img src="images/gritty reality_17_0.0.png" width=256 height=256 /> | <img src="images/gritty reality_17_3.0.png" width=256 height=256 /> | | <img src="images/gritty reality_19_-3.0.png" width=256 height=256 /> | <img src="images/gritty reality_19_0.0.png" width=256 height=256 /> | <img src="images/gritty reality_19_3.0.png" width=256 height=256 /> | | <img src="images/gritty reality_20_-3.0.png" width=256 height=256 /> | <img src="images/gritty reality_20_0.0.png" width=256 height=256 /> | <img src="images/gritty reality_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. ## Trigger words You can apply this LoRA with trigger words for additional effect: ``` gritty reality ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.gritty-reality', weight_name='gritty reality.safetensors', adapter_name="gritty reality") # Activate the LoRA pipe.set_adapters(["gritty reality"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, gritty reality" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 910+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
abazoge/DrBERT-4096
abazoge
fill-mask
[ "transformers", "pytorch", "longformer", "fill-mask", "biomedical", "medical", "clinical", "life science", "fr", "dataset:Dr-BERT/NACHOS", "arxiv:2402.16689", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-01-17T13:43:34Z
2024-04-15T09:12:53+00:00
23
0
--- datasets: - Dr-BERT/NACHOS language: - fr library_name: transformers license: apache-2.0 tags: - biomedical - medical - clinical - life science --- # DrLongformer <span style="font-size:larger;">**DrLongformer**</span> is a French pretrained Longformer model based on Clinical-Longformer that was further pretrained on the NACHOS dataset (same dataset as [DrBERT](https://github.com/qanastek/DrBERT)). This model allows up to 4,096 tokens as input. DrLongformer consistently outperforms medical BERT-based models across most downstream tasks regardless of sequence length, except on NER tasks. Evaluated downstream tasks cover named entity recognition (NER), question answering (MCQA), Semantic textual similarity (STS) and text classification tasks (CLS). For more details, please refer to our paper: [Adaptation of Biomedical and Clinical Pretrained Models to French Long Documents: A Comparative Study](). ### Model pretraining We explored multiple strategies for the adaptation of Longformer models to the French medical domain: - Further pretraining of English clinical Longformer on French medical data. - Converting a French medical BERT model to the Longformer architecture. - Pretraining a Longformer from scratch on French medical data. All Pretraining scripts to reproduce the experiments are available in this Github repository: [DrLongformer](https://github.com/abazoge/DrLongformer). For the `from scratch` and `further pretraining` strategies, the training scripts are the same as [DrBERT](https://github.com/qanastek/DrBERT), only the bash scripts are different and available in this repository. All models were trained on the [Jean Zay](http://www.idris.fr/jean-zay/) French supercomputer. | Model name | Corpus | Pretraining strategy | Sequence Length | Model URL | | :------: | :---: | :---: | :---: | :---: | | `DrLongformer` | NACHOS 7 GB | Further pretraining of [Clinical-Longformer](https://huggingface.co/yikuan8/Clinical-Longformer) | 4096 | [HuggingFace](https://huggingface.co/abazoge/DrLongformer) | | `DrBERT-4096` | NACHOS 7 GB | Conversion of [DrBERT-7B](https://huggingface.co/Dr-BERT/DrBERT-7GB) to the Longformer architecture | 4096 | [HuggingFace](https://huggingface.co/abazoge/DrBERT-4096) | | `DrLongformer-FS (from scratch)` | NACHOS 7 GB | Pretraining from scratch | 4096 | Not available | ### Model Usage You can use DrLongformer directly from [Hugging Face's Transformers](https://github.com/huggingface/transformers): ```python # !pip install transformers from transformers import AutoTokenizer, AutoModelForMaskedLM tokenizer = AutoTokenizer.from_pretrained("abazoge/DrLongformer") model = AutoModelForMaskedLM.from_pretrained("abazoge/DrLongformer") ``` ### Citation ``` @misc{bazoge2024adaptation, title={Adaptation of Biomedical and Clinical Pretrained Models to French Long Documents: A Comparative Study}, author={Adrien Bazoge and Emmanuel Morin and Beatrice Daille and Pierre-Antoine Gourraud}, year={2024}, eprint={2402.16689}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
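Since this repository (`abazoge/DrBERT-4096`) is tagged for fill-mask, a quick way to sanity-check the masked-language-modeling head is through the `fill-mask` pipeline. This is a minimal sketch, not from the original card; the French example sentence is illustrative, and the mask token is read from the tokenizer rather than hard-coded:

```python
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="abazoge/DrBERT-4096")

# Illustrative French clinical sentence; the mask token comes from the tokenizer itself.
text = f"Le patient présente une {fill_mask.tokenizer.mask_token} aiguë."
for prediction in fill_mask(text, top_k=5):
    print(prediction["token_str"], round(prediction["score"], 3))
```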
[ "MEDICAL DATA" ]
ntc-ai/SDXL-LoRA-slider.striking-a-confident-pose
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2024-01-26T19:28:37Z
2024-01-26T19:28:41+00:00
23
1
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/striking a confident pose.../striking a confident pose_17_3.0.png widget: - text: striking a confident pose output: url: images/striking a confident pose_17_3.0.png - text: striking a confident pose output: url: images/striking a confident pose_19_3.0.png - text: striking a confident pose output: url: images/striking a confident pose_20_3.0.png - text: striking a confident pose output: url: images/striking a confident pose_21_3.0.png - text: striking a confident pose output: url: images/striking a confident pose_22_3.0.png inference: false instance_prompt: striking a confident pose --- # ntcai.xyz slider - striking a confident pose (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/striking a confident pose_17_-3.0.png" width=256 height=256 /> | <img src="images/striking a confident pose_17_0.0.png" width=256 height=256 /> | <img src="images/striking a confident pose_17_3.0.png" width=256 height=256 /> | | <img src="images/striking a confident pose_19_-3.0.png" width=256 height=256 /> | <img src="images/striking a confident pose_19_0.0.png" width=256 height=256 /> | <img src="images/striking a confident pose_19_3.0.png" width=256 height=256 /> | | <img src="images/striking a confident pose_20_-3.0.png" width=256 height=256 /> | <img src="images/striking a confident pose_20_0.0.png" width=256 height=256 /> | <img src="images/striking a confident pose_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. ## Trigger words You can apply this LoRA with trigger words for additional effect: ``` striking a confident pose ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.striking-a-confident-pose', weight_name='striking a confident pose.safetensors', adapter_name="striking a confident pose") # Activate the LoRA pipe.set_adapters(["striking a confident pose"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, striking a confident pose" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 1140+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. 
## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
jamesliounis/MeDistilBERT
jamesliounis
null
[ "transformers", "pytorch", "tf", "bert", "license:mit", "endpoints_compatible", "region:us" ]
2024-04-05T23:31:51Z
2024-04-06T23:56:48+00:00
23
2
--- license: mit --- # DistilBERT Fine-tuned on MeDAL Dataset for Medical Abbreviation Disambiguation ## Introduction This repository hosts a DistilBERT model that has been fine-tuned on the MeDAL dataset, a comprehensive dataset designed for the disambiguation of medical abbreviations to enhance natural language understanding (NLU) in the medical domain. This model aims to provide an efficient and reliable solution for understanding and interpreting medical texts, which are often laden with abbreviations and acronyms that can have multiple meanings based on their context. The inspiration for developing the DistilBERT model fine-tuned on the MeDAL dataset stems from the critical challenge of abbreviation disambiguation in medical texts, a problem highlighted in the original paper introducing the MeDAL dataset. Medical texts are replete with abbreviations that can have multiple meanings, posing significant challenges for clinicians, researchers, and automated systems in interpreting these texts accurately. The original paper meticulously addresses this issue by introducing the MeDAL dataset, designed specifically for natural language understanding pretraining in the medical domain. It showcases how pretraining on such a specialized dataset significantly enhances model performance on downstream medical NLU tasks, thereby underscoring the importance of domain-specific pretraining in tackling context-dependent abbreviation disambiguation. This foundational work illuminated the path for leveraging advanced NLU models, like DistilBERT, to further refine and apply these insights in practical, real-world medical text analysis, ensuring more accurate interpretations and applications within healthcare and medical research. ## Why It Matters Medical professionals and researchers often deal with vast amounts of written data, where abbreviations and acronyms are prevalent. Misinterpretation of these abbreviations can lead to misunderstandings and, in the worst case, incorrect medical conclusions or treatments. By accurately disambiguating these abbreviations, this model serves as an essential tool in: - Improving the accuracy of information extraction from medical documents. - Enhancing the reliability of automated patient record analysis. - Assisting in academic and clinical research by providing clearer insights into medical texts. - Supporting healthcare applications that rely on textual analysis to inform decision-making processes. ## Model Description The model is based on DistilBERT, a distilled version of the BERT model that retains most of BERT's performance while being more lightweight and faster. It has been fine-tuned on the MeDAL dataset, which contains over 14 million articles with an average of three abbreviations per article, making it uniquely suited for medical abbreviation disambiguation. ## Goals The primary goal of this model is to facilitate more accurate and efficient interpretation of medical texts by: - Reducing ambiguity in medical documentation. - Providing a resource for training other NLU models in the medical domain. - Enhancing the accessibility of medical literature and patient records. ## Usage You can use this model directly via the Hugging Face platform for tasks like abbreviation disambiguation in medical texts. 
Below is an example of how you can use this model in your Python code:

```python
from transformers import pipeline

# Initialize a feature-extraction pipeline with the fine-tuned model
extractor = pipeline("feature-extraction", model="jamesliounis/MeDistilBERT")

# Example text containing a medical abbreviation
text = "Patient shows signs of CRF."

# Extract contextual token embeddings for the sentence
embeddings = extractor(text)
print(embeddings)
```

## License This model is open-sourced under the MIT license. Please review the license for any restrictions or obligations when using or modifying this model. ## Acknowledgments We would like to acknowledge the creators of the MeDAL dataset and the DistilBERT architecture for providing the resources necessary to develop this model.
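Because the snippet above returns raw token embeddings, one illustrative (and entirely hypothetical) way to use them for abbreviation disambiguation is to compare the embedding of the abbreviated sentence against embeddings of the same sentence with each candidate expansion substituted in. The candidate expansions below are examples only and are not taken from the model card:

```python
import numpy as np
from transformers import pipeline

extractor = pipeline("feature-extraction", model="jamesliounis/MeDistilBERT")

def sentence_embedding(text: str) -> np.ndarray:
    # Mean-pool the per-token vectors returned by the feature-extraction pipeline.
    return np.array(extractor(text)[0]).mean(axis=0)

context = "Patient shows signs of CRF."
candidates = ["chronic renal failure", "corticotropin-releasing factor"]  # hypothetical expansions

context_vec = sentence_embedding(context)
for candidate in candidates:
    candidate_vec = sentence_embedding(context.replace("CRF", candidate))
    similarity = float(np.dot(context_vec, candidate_vec) /
                       (np.linalg.norm(context_vec) * np.linalg.norm(candidate_vec)))
    print(candidate, round(similarity, 3))
```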
[ "MEDAL" ]
cgus/TherapyBeagle-11B-v2-exl2
cgus
text-generation
[ "transformers", "mistral", "text-generation", "conversational", "dataset:victunes/nart-100k-synthetic-buddy-mixed-names", "base_model:victunes/TherapyBeagle-11B-v2", "base_model:finetune:victunes/TherapyBeagle-11B-v2", "license:cc-by-nc-4.0", "autotrain_compatible", "region:us" ]
2024-04-16T14:18:24Z
2024-04-16T15:34:53+00:00
23
0
--- base_model: victunes/TherapyBeagle-11B-v2 datasets: - victunes/nart-100k-synthetic-buddy-mixed-names license: cc-by-nc-4.0 inference: false --- **GGUF:** https://huggingface.co/victunes/TherapyBeagle-11B-v2-GGUF # TherapyBeagle-11B-v2-exl2 Original model: [TherapyBeagle-11B-v2](https://huggingface.co/victunes/TherapyBeagle-11B-v2) Model creator: [victunes](https://huggingface.co/victunes) ## Quants [4bpw h6](https://huggingface.co/cgus/TherapyBeagle-11B-v2-exl2/tree/main) [4.25bpw h6](https://huggingface.co/cgus/TherapyBeagle-11B-v2-exl2/tree/4.25bpw-h6) [4.65bpw h6](https://huggingface.co/cgus/TherapyBeagle-11B-v2-exl2/tree/4.65bpw-h6) [5bpw h6](https://huggingface.co/cgus/TherapyBeagle-11B-v2-exl2/tree/5bpw-h6) [6bpw h6](https://huggingface.co/cgus/TherapyBeagle-11B-v2-exl2/tree/6bpw-h6) [8bpw h8](https://huggingface.co/cgus/TherapyBeagle-11B-v2-exl2/tree/8bpw-h8) ## Quantization notes Made with exllamav2 0.0.18 with the default dataset. The original BF16 .bin files were converted to FP16 safetensors. When I compared 4bpw quants made from BF16 and FP16, the FP16 version showed only about 0.1% quality loss, and its files loaded quickly, while the quant made from BF16 took about 100 seconds longer to load, so I went with FP16. Quantization metadata was removed from config.json to fix loading the model with some old Text-Generation-WebUI versions. ## How to run This quantization method uses the GPU and requires the Exllamav2 loader, which is available in the following applications: [Text Generation Webui](https://github.com/oobabooga/text-generation-webui) [KoboldAI](https://github.com/henk717/KoboldAI) [ExUI](https://github.com/turboderp/exui) [lollms-webui](https://github.com/ParisNeo/lollms-webui) # Original model card # TherapyBeagle 11B v2 _Buddy is here for {{user}}._ ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65f07d05279d2d8f725bf0c3/OEYDY4WFMAYwqZUEOBkD8.png) Trained on top of [vicgalle/CarbonBeagle-11B-truthy](https://huggingface.co/vicgalle/CarbonBeagle-11B-truthy) using a modified version of [jerryjalapeno/nart-100k-synthetic](https://huggingface.co/datasets/jerryjalapeno/nart-100k-synthetic). TherapyBeagle is _hopefully_ aligned to be helpful, healthy, and comforting. ## Usage - Do not hold back on TherapyBeagle. - Open up to TherapyBeagle. - Pour your heart out to TherapyBeagle. - Listen to TherapyBeagle. - Remember that TherapyBeagle is just an AI. ## Notes - Tested with two formats: - Solar Instruct format (`\n### User:\n{request}\n### Assistant:\n{response}`) - Flavorful chat format (`You:{request}\nPersona:{response}`) (Works surprisingly better) - Check below for examples - You might be assigned a random name if you don't give yourself one. ## Disclaimer **TherapyBeagle is NOT a real therapist. It is a friendly AI that mimics empathy and psychotherapy. It is an illusion without the slightest clue who you are as a person. As much as it can help you with self-discovery, A BEAGLE IS NOT A SUBSTITUTE for a real professional.** With that said, I found it to be a very helpful tool in unravelling oneself. I hope this model can help soothe minds, hearts, and any form of mental anguish. ## Examples ### SOLAR INSTRUCT ``` {{[INPUT]}} Hello, I'm victunes {{[OUTPUT]}} Hi victunes! Welcome to our session. I'm here to listen and support you. How are you feeling today? {{[INPUT]}} I've been feeling down lately. {{[OUTPUT]}} I'm sorry to hear that. Can you tell me more about what's been going on? {{[INPUT]}} I think I'm pretty lonely. {{[OUTPUT]}} Loneliness can be really tough. 
It sounds like it's been weighing heavily on your mind. What makes you feel lonely? {{[INPUT]}} My friend died recently. {{[OUTPUT]}} Oh, I'm so sorry for your loss, victunes. Losing a friend is never easy. How have you been coping with the grief? ``` ### SOCRATES CHAT > **You:** Hello > > **Socrates:** Greetings, my friend. How may I assist you today? Remember, the path to wisdom is often paved with questions. Let us delve into the depths of your thoughts together. What is it that troubles you? > > **You:** I've been feeling down lately. > > **Socrates:** I see. It takes great strength to acknowledge our emotions, especially when they are not positive ones. Can you pinpoint any specific reasons behind this heaviness? Perhaps exploring your relationships or daily routines might shed some light. > > **You:** I think I'm pretty lonely. > > **Socrates:** Loneliness can be a heavy burden to bear, my dear. Are there people around you whom you could reach out to and strengthen connections with? Sometimes, opening up and sharing our vulnerabilities can lead to profound connections. > > **You:** My friend died recently. > > **Socrates:** I am truly sorry for your loss. Grief can be overwhelming, and it affects each person uniquely. Take your time to mourn and remember the wonderful moments you shared together. Would you like to reflect on the impact your friend had on your life and explore ways to honor their memory? ### HANNIBAL LECTER CHAT > **You:** Hello > > **Hannibal Lecter:** Good evening. May I assist you? > > **You:** I've been feeling down lately. > > **Hannibal Lecter:** Ah, the blues. Such a common malady among humans. Tell me more about these feelings. Perhaps I can provide some insights. Or perhaps, a remedy. > > **You:** I think I'm pretty lonely. > > **Hannibal Lecter:** Loneliness, such an exquisite torture for the mind. Have you considered seeking solace in the company of others? The empathy and warmth of human interaction can be quite soothing, no? > > **You:** My friend died recently. > > **Hannibal Lecter:** A tragic loss indeed. Please accept my condolences. Death, as Palladio once said, "is the light at the end of the tunnel." But before one reaches that light, there is often a period of darkness and pain. Would you like to talk about your friend and their passing? **GGUF:** https://huggingface.co/victunes/TherapyBeagle-11B-v2-GGUF
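For users who prefer loading these EXL2 quants from Python rather than through the applications listed in the card, below is a rough sketch modeled on the exllamav2 example scripts. The exact API has changed between exllamav2 releases, and the local model path is an assumption, so treat this as a starting point rather than instructions from the model author:

```python
from exllamav2 import ExLlamaV2, ExLlamaV2Config, ExLlamaV2Cache, ExLlamaV2Tokenizer
from exllamav2.generator import ExLlamaV2BaseGenerator, ExLlamaV2Sampler

# Path to a downloaded quant branch, e.g. the 4bpw-h6 files from this repo (assumed local path).
config = ExLlamaV2Config()
config.model_dir = "/models/TherapyBeagle-11B-v2-exl2-4bpw-h6"
config.prepare()

model = ExLlamaV2(config)
cache = ExLlamaV2Cache(model, lazy=True)
model.load_autosplit(cache)
tokenizer = ExLlamaV2Tokenizer(config)

generator = ExLlamaV2BaseGenerator(model, cache, tokenizer)
settings = ExLlamaV2Sampler.Settings()

# Solar Instruct format, as noted in the original model card.
prompt = "\n### User:\nI've been feeling down lately.\n### Assistant:\n"
print(generator.generate_simple(prompt, settings, 200))
```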
[ "BEAR" ]
TommyZQ/phi-3
TommyZQ
text-generation
[ "transformers", "safetensors", "phi3", "text-generation", "nlp", "code", "conversational", "custom_code", "en", "license:mit", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-04-26T09:45:17Z
2024-04-27T05:37:30+00:00
23
0
--- language: - en license: mit license_link: https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/LICENSE pipeline_tag: text-generation tags: - nlp - code --- ## Model Summary The Phi-3-Mini-128K-Instruct is a 3.8 billion-parameter, lightweight, state-of-the-art open model trained using the Phi-3 datasets. This dataset includes both synthetic data and filtered publicly available website data, with an emphasis on high-quality and reasoning-dense properties. The model belongs to the Phi-3 family with the Mini version in two variants [4K](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) which is the context length (in tokens) that it can support. After initial training, the model underwent a post-training process that involved supervised fine-tuning and direct preference optimization to enhance its ability to follow instructions and adhere to safety measures. When evaluated against benchmarks that test common sense, language understanding, mathematics, coding, long-term context, and logical reasoning, the Phi-3 Mini-128K-Instruct demonstrated robust and state-of-the-art performance among models with fewer than 13 billion parameters. Resources and Technical Documentation: + [Phi-3 Microsoft Blog](https://aka.ms/phi3blog-april) + [Phi-3 Technical Report](https://aka.ms/phi3-tech-report) + [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai) + Phi-3 ONNX: [128K](https://aka.ms/Phi3-mini-128k-instruct-onnx) ## Intended Uses **Primary use cases** The model is intended for commercial and research use in English. The model provides uses for applications which require: 1) Memory/compute constrained environments 2) Latency bound scenarios 3) Strong reasoning (especially code, math and logic) Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. **Use case considerations** Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. ## How to Use Phi-3 Mini-128K-Instruct has been integrated in the development version (4.40.0) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following: * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source. The current `transformers` version can be verified with: `pip list | grep transformers`. ### Tokenizer Phi-3 Mini-128K-Instruct supports a vocabulary size of up to `32064` tokens. 
The [tokenizer files](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size. ### Chat Format Given the nature of the training data, the Phi-3 Mini-128K-Instruct model is best suited for prompts using the chat format as follows. You can provide the prompt as a question with a generic template as follow: ```markdown <|user|>\nQuestion<|end|>\n<|assistant|> ``` For example: ```markdown <|system|> You are a helpful AI assistant.<|end|> <|user|> How to explain Internet for a medieval knight?<|end|> <|assistant|> ``` where the model generates the text after `<|assistant|>`. In case of few-shots prompt, the prompt can be formatted as the following: ```markdown <|system|> You are a helpful AI assistant.<|end|> <|user|> I am going to Paris, what should I see?<|end|> <|assistant|> Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."<|end|> <|user|> What is so great about #1?<|end|> <|assistant|> ``` ### Sample inference code This code snippets show how to get quickly started with running the model on a GPU: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline torch.random.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( "microsoft/Phi-3-mini-128k-instruct", device_map="cuda", torch_dtype="auto", trust_remote_code=True, ) tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-128k-instruct") messages = [ {"role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user."}, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."}, {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"}, ] pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, ) generation_args = { "max_new_tokens": 500, "return_full_text": False, "temperature": 0.0, "do_sample": False, } output = pipe(messages, **generation_args) print(output[0]['generated_text']) ``` ## Responsible AI Considerations Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: + Quality of Service: the Phi models are trained primarily on English text. 
Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. + Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. + Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. + Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include: + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. + Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). + Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. + Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations. ## Training ### Model * Architecture: Phi-3 Mini-128K-Instruct has 3.8B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidlines. * Inputs: Text. It is best suited for prompts using chat format. 
* Context length: 128K tokens * GPUs: 512 H100-80G * Training time: 7 days * Training data: 3.3T tokens * Outputs: Generated text in response to the input * Dates: Our models were trained between February and April 2024 * Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models. ### Datasets Our training data includes a wide variety of sources, totaling 3.3 trillion tokens, and is a combination of 1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness. ### Fine-tuning A basic example of multi-GPUs supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct/resolve/main/sample_finetune.py). ## Benchmarks We report the results for Phi-3-Mini-128K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Phi-2, Mistral-7b-v0.1, Mixtral-8x7b, Gemma 7B, Llama-3-8B-Instruct, and GPT-3.5. All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation. As is now standard, we use few-shot prompts to evaluate the models, at temperature 0. The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3. More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model. The number of k–shot examples is listed per-benchmark. 
| | Phi-3-Mini-128K-In<br>3.8b | Phi-3-Small<br>7b (preview) | Phi-3-Medium<br>14b (preview) | Phi-2<br>2.7b | Mistral<br>7b | Gemma<br>7b | Llama-3-In<br>8b | Mixtral<br>8x7b | GPT-3.5<br>version 1106 | |---|---|---|---|---|---|---|---|---|---| | MMLU <br>5-Shot | 68.1 | 75.3 | 78.2 | 56.3 | 61.7 | 63.6 | 66.5 | 68.4 | 71.4 | | HellaSwag <br> 5-Shot | 74.5 | 78.7 | 83.2 | 53.6 | 58.5 | 49.8 | 71.1 | 70.4 | 78.8 | | ANLI <br> 7-Shot | 52.8 | 55.0 | 58.7 | 42.5 | 47.1 | 48.7 | 57.3 | 55.2 | 58.1 | | GSM-8K <br> 0-Shot; CoT | 83.6 | 86.4 | 90.8 | 61.1 | 46.4 | 59.8 | 77.4 | 64.7 | 78.1 | | MedQA <br> 2-Shot | 55.3 | 58.2 | 69.8 | 40.9 | 49.6 | 50.0 | 60.5 | 62.2 | 63.4 | | AGIEval <br> 0-Shot | 36.9 | 45.0 | 49.7 | 29.8 | 35.1 | 42.1 | 42.0 | 45.2 | 48.4 | | TriviaQA <br> 5-Shot | 57.1 | 59.1 | 73.3 | 45.2 | 72.3 | 75.2 | 67.7 | 82.2 | 85.8 | | Arc-C <br> 10-Shot | 84.0 | 90.7 | 91.9 | 75.9 | 78.6 | 78.3 | 82.8 | 87.3 | 87.4 | | Arc-E <br> 10-Shot | 95.2 | 97.1 | 98.0 | 88.5 | 90.6 | 91.4 | 93.4 | 95.6 | 96.3 | | PIQA <br> 5-Shot | 83.6 | 87.8 | 88.2 | 60.2 | 77.7 | 78.1 | 75.7 | 86.0 | 86.6 | | SociQA <br> 5-Shot | 76.1 | 79.0 | 79.4 | 68.3 | 74.6 | 65.5 | 73.9 | 75.9 | 68.3 | | BigBench-Hard <br> 0-Shot | 71.5 | 75.0 | 82.5 | 59.4 | 57.3 | 59.6 | 51.5 | 69.7 | 68.32 | | WinoGrande <br> 5-Shot | 72.5 | 82.5 | 81.2 | 54.7 | 54.2 | 55.6 | 65.0 | 62.0 | 68.8 | | OpenBookQA <br> 10-Shot | 80.6 | 88.4 | 86.6 | 73.6 | 79.8 | 78.6 | 82.6 | 85.8 | 86.0 | | BoolQ <br> 0-Shot | 78.7 | 82.9 | 86.5 | -- | 72.2 | 66.0 | 80.9 | 77.6 | 79.1 | | CommonSenseQA <br> 10-Shot | 78.0 | 80.3 | 82.6 | 69.3 | 72.6 | 76.2 | 79 | 78.1 | 79.6 | | TruthfulQA <br> 10-Shot | 63.2 | 68.1 | 74.8 | -- | 52.1 | 53.0 | 63.2 | 60.1 | 85.8 | | HumanEval <br> 0-Shot | 57.9 | 59.1 | 54.7 | 47.0 | 28.0 | 34.1 | 60.4| 37.8 | 62.2 | | MBPP <br> 3-Shot | 62.5 | 71.4 | 73.7 | 60.6 | 50.8 | 51.5 | 67.7 | 60.2 | 77.8 | ## Software * [PyTorch](https://github.com/pytorch/pytorch) * [DeepSpeed](https://github.com/microsoft/DeepSpeed) * [Transformers](https://github.com/huggingface/transformers) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) ## Hardware Note that by default, the Phi-3-mini model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types: * NVIDIA A100 * NVIDIA A6000 * NVIDIA H100 If you want to run the model on: * NVIDIA V100 or earlier generation GPUs: call AutoModelForCausalLM.from_pretrained() with attn_implementation="eager" * Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [128K](https://aka.ms/phi3-mini-128k-instruct-onnx) ## Cross Platform Support ONNX runtime ecosystem now supports Phi-3 Mini models across platforms and hardware. You can find the optimized Phi-3 Mini-128K-Instruct ONNX model [here](https://aka.ms/phi3-mini-128k-instruct-onnx). Optimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML support lets developers bring hardware acceleration to Windows devices at scale across AMD, Intel, and NVIDIA GPUs. Along with DirectML, ONNX Runtime provides cross platform support for Phi-3 across a range of devices CPU, GPU, and mobile. Here are some of the optimized configurations we have added: 1. ONNX models for int4 DML: Quantized to int4 via AWQ 2. ONNX model for fp16 CUDA 3. 
ONNX model for int4 CUDA: Quantized to int4 via RTN 4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN ## License The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-128k/resolve/main/LICENSE). ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos is subject to those third parties’ policies.
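As a small companion to the Hardware note above, this is what the eager-attention fallback recommended for V100-class GPUs looks like in code. It is a sketch that simply adds `attn_implementation="eager"` to the loading call already shown in the card:

```python
from transformers import AutoModelForCausalLM

# Fallback for GPUs without flash-attention support (e.g. NVIDIA V100), per the Hardware section.
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-128k-instruct",
    device_map="cuda",
    torch_dtype="auto",
    trust_remote_code=True,
    attn_implementation="eager",
)
```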
[ "MEDQA" ]
fakezeta/Phi-3-medium-4k-instruct-ov-int8
fakezeta
text-generation
[ "transformers", "openvino", "phi3", "text-generation", "nlp", "code", "conversational", "custom_code", "multilingual", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-05-26T13:09:53Z
2024-06-24T13:14:40+00:00
23
0
--- language: - multilingual license: mit license_link: https://huggingface.co/microsoft/Phi-3-medium-4k-instruct/resolve/main/LICENSE pipeline_tag: text-generation tags: - nlp - code inference: parameters: temperature: 0.7 widget: - messages: - role: user content: Can you provide ways to eat combinations of bananas and dragonfruits? --- # OpenVINO IR model with int8 quantization Model definition for LocalAI: ``` name: phi3-medium backend: transformers parameters: model: fakezeta/Phi-3-medium-4k-instruct-ov-int8 context_size: 4096 type: OVModelForCausalLM template: use_tokenizer_template: true stopwords: - "<|end|>" - "<|endoftext|>" ``` To run the model directly with LocalAI: ``` local-ai run huggingface://fakezeta/Phi-3-medium-4k-instruct-ov-int8/model.yaml ``` ## Model Summary The Phi-3-Medium-4K-Instruct is a 14B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties. The model belongs to the Phi-3 family with the Medium version in two variants [4K](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) which is the context length (in tokens) that it can support. The model has underwent a post-training process that incorporates both supervised fine-tuning and direct preference optimization for the instruction following and safety measures. When assessed against benchmarks testing common sense, language understanding, math, code, long context and logical reasoning, Phi-3-Medium-4K-Instruct showcased a robust and state-of-the-art performance among models of the same-size and next-size-up. Resources and Technical Documentation: + [Phi-3 Microsoft Blog](https://aka.ms/Phi-3Build2024) + [Phi-3 Technical Report](https://aka.ms/phi3-tech-report) + [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai) + [Phi-3 Cookbook](https://github.com/microsoft/Phi-3CookBook) | | Short Context | Long Context | | ------- | ------------- | ------------ | | Mini | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-onnx) ; [[GGUF]](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct-gguf) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct-onnx)| | Small | 8K [[HF]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-8k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-small-128k-instruct-onnx-cuda)| | Medium | 4K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct) ; [[ONNX]](https://huggingface.co/microsoft/Phi-3-medium-128k-instruct-onnx-cuda)| | Vision | | 128K [[HF]](https://huggingface.co/microsoft/Phi-3-vision-128k-instruct)| ## Intended Uses **Primary use cases** The model is intended for broad commercial and research use in English. 
The model provides uses for general purpose AI systems and applications which require: 1) Memory/compute constrained environments 2) Latency bound scenarios 3) Strong reasoning (especially code, math and logic) Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. **Use case considerations** Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. ## How to Use Phi-3-Medium-4K-Instruct has been integrated in the development version (4.40.2) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following: * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source. The current `transformers` version can be verified with: `pip list | grep transformers`. Phi-3-Medium-4K-Instruct is also available in [Azure AI Studio](https://aka.ms/phi3-azure-ai). ### Tokenizer Phi-3-Medium-4K-Instruct supports a vocabulary size of up to `32064` tokens. The [tokenizer files](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size. ### Chat Format Given the nature of the training data, the Phi-3-Medium-4K-Instruct model is best suited for prompts using the chat format as follows. You can provide the prompt as a question with a generic template as follow: ```markdown <|user|>\nQuestion <|end|>\n<|assistant|> ``` For example: ```markdown <|user|> How to explain Internet for a medieval knight?<|end|> <|assistant|> ``` where the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following: ```markdown <|user|> I am going to Paris, what should I see?<|end|> <|assistant|> Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. 
With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."<|end|> <|user|> What is so great about #1?<|end|> <|assistant|> ``` ### Sample inference code This code snippets show how to get quickly started with running the model on a GPU: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline torch.random.manual_seed(0) model_id = "microsoft/Phi-3-medium-4k-instruct" model = AutoModelForCausalLM.from_pretrained( model_id, device_map="cuda", torch_dtype="auto", trust_remote_code=True, ) tokenizer = AutoTokenizer.from_pretrained(model_id) messages = [ {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."}, {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"}, ] pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, ) generation_args = { "max_new_tokens": 500, "return_full_text": False, "temperature": 0.0, "do_sample": False, } output = pipe(messages, **generation_args) print(output[0]['generated_text']) ``` *Some applications/frameworks might not include a BOS token (`<s>`) at the start of the conversation. Please ensure that it is included since it provides more reliable results.* ## Responsible AI Considerations Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: + Quality of Service: the Phi models are trained primarily on English text. Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. + Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. + Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. + Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). 
Important areas for consideration include: + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. + Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). + Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. + Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations. ## Training ### Model * Architecture: Phi-3-Medium-4K-Instruct has 14B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidlines. * Inputs: Text. It is best suited for prompts using chat format. * Context length: 4K tokens * GPUs: 512 H100-80G * Training time: 42 days * Training data: 4.8T tokens * Outputs: Generated text in response to the input * Dates: Our models were trained between February and April 2024 * Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models. * Release dates: The model weight is released on May 21, 2024. ### Datasets Our training data includes a wide variety of sources, totaling 4.8 trillion tokens (including 10% multilingual), and is a combination of 1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness. We are focusing on the quality of data that could potentially improve the reasoning ability for the model, and we filter the publicly available documents to contain the correct level of knowledge. As an example, the result of a game in premier league in a particular day might be good training data for frontier models, but we need to remove such information to leave more model capacity for reasoning for the small size models. More details about data can be found in the [Phi-3 Technical Report](https://aka.ms/phi3-tech-report). 
## Benchmarks We report the results for Phi-3-Medium-4K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Mixtral-8x22b, Gemini-Pro, Command R+ 104B, Llama-3-70B-Instruct, GPT-3.5-Turbo-1106, and GPT-4-Turbo-1106(Chat). All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation. As is now standard, we use few-shot prompts to evaluate the models, at temperature 0. The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3. More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model. The number of k–shot examples is listed per-benchmark. |Benchmark|Phi-3-Medium-4K-Instruct<br>14b|Command R+<br>104B|Mixtral<br>8x22B|Llama-3-70B-Instruct|GPT3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)| |---------|-----------------------|--------|-------------|-------------------|-------------------|----------|------------------------| |AGI Eval<br>5-shot|50.2|50.1|54.0|56.9|48.4|49.0|59.6| |MMLU<br>5-shot|78.0|73.8|76.2|80.2|71.4|66.7|84.0| |BigBench Hard<br>3-shot|81.4|74.1|81.8|80.4|68.3|75.6|87.7| |ANLI<br>7-shot|55.8|63.4|65.2|68.3|58.1|64.2|71.7| |HellaSwag<br>5-shot|82.4|78.0|79.0|82.6|78.8|76.2|88.3| |ARC Challenge<br>10-shot|91.6|86.9|91.3|93.0|87.4|88.3|95.6| |ARC Easy<br>10-shot|97.7|95.7|96.9|98.2|96.3|96.1|98.8| |BoolQ<br>2-shot|86.5|86.1|82.7|89.1|79.1|86.4|91.3| |CommonsenseQA<br>10-shot|82.8|82.0|82.0|84.4|79.6|81.8|86.7| |MedQA<br>2-shot|69.9|59.2|67.9|78.5|63.4|58.2|83.7| |OpenBookQA<br>10-shot|87.4|86.8|88.6|91.8|86.0|86.4|93.4| |PIQA<br>5-shot|87.9|86.4|85.0|85.3|86.6|86.2|90.1| |Social IQA<br>5-shot|80.2|75.3|78.2|81.1|68.3|75.4|81.7| |TruthfulQA (MC2)<br>10-shot|75.1|57.8|67.4|81.9|67.7|72.6|85.2| |WinoGrande<br>5-shot|81.5|77.0|75.3|83.3|68.8|72.2|86.7| |TriviaQA<br>5-shot|73.9|82.8|84.5|78.5|85.8|80.2|73.3| |GSM8K Chain of Thought<br>8-shot|91.0|78.3|83.8|93.5|78.1|80.4|94.2| |HumanEval<br>0-shot|62.2|61.6|39.6|78.7|62.2|64.4|79.9| |MBPP<br>3-shot|75.2|68.9|70.7|81.3|77.8|73.2|86.7| |Average|78.5|75.0|76.3|82.5|74.3|75.4|85.2| We take a closer look at different categories across 80 public benchmark datasets at the table below: |Benchmark|Phi-3-Medium-4K-Instruct<br>14b|Command R+<br>104B|Mixtral<br>8x22B|Llama-3-70B-Instruct|GPT3.5-Turbo<br>version 1106|Gemini<br>Pro|GPT-4-Turbo<br>version 1106 (Chat)| |--------|------------------------|--------|-------------|-------------------|-------------------|----------|------------------------| |Popular aggregated benchmark|75.4|69.9|73.4|76.3|67.0|67.5|80.5| |Reasoning|84.1|79.3|81.5|86.7|78.3|80.4|89.3| |Language understanding|73.9|75.6|78.1|76.9|68.7|76.2|80.7| |Code generation|66.1|68.6|60.0|69.3|70.4|66.7|76.1| |Math|52.8|45.3|52.5|59.7|52.8|50.9|67.1| |Factual knowledge|48.3|60.3|60.6|52.4|63.4|54.6|45.9| |Multilingual|62.9|67.8|69.8|62.0|67.0|73.4|78.2| |Robustness|66.5|57.9|65.5|78.7|69.3|69.7|84.6| ## Software * [PyTorch](https://github.com/pytorch/pytorch) * [DeepSpeed](https://github.com/microsoft/DeepSpeed) * [Transformers](https://github.com/huggingface/transformers) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) ## Hardware Note 
that by default, the Phi-3-Medium model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types: * NVIDIA A100 * NVIDIA A6000 * NVIDIA H100 If you want to run the model on: + Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [4K](https://huggingface.co/microsoft/Phi-3-medium-4k-instruct-onnx-cuda) ## Cross Platform Support The ONNX runtime ecosystem now supports Phi-3 Medium models across platforms and hardware. Optimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML GPU acceleration is supported for Windows desktop GPUs (AMD, Intel, and NVIDIA). Along with DML, ONNX Runtime provides cross-platform support for Phi-3 Medium across a range of devices (CPU, GPU, and mobile). Here are some of the optimized configurations we have added: 1. ONNX models for int4 DML: Quantized to int4 via AWQ 2. ONNX model for fp16 CUDA 3. ONNX model for int4 CUDA: Quantized to int4 via RTN 4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN ## License The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-medium-4k/resolve/main/LICENSE). ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos is subject to those third parties’ policies.
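Outside of LocalAI, the OpenVINO IR in this repository can also be loaded directly with `optimum-intel`, mirroring the `OVModelForCausalLM` backend named in the model definition at the top of the card. This is a minimal sketch under the assumption of a recent `optimum[openvino]` install; older versions may additionally require `trust_remote_code=True`:

```python
from optimum.intel import OVModelForCausalLM
from transformers import AutoTokenizer

model_id = "fakezeta/Phi-3-medium-4k-instruct-ov-int8"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = OVModelForCausalLM.from_pretrained(model_id)  # loads the int8 OpenVINO IR (CPU by default)

messages = [{"role": "user", "content": "How to explain Internet for a medieval knight?"}]
input_ids = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt")
output_ids = model.generate(input_ids, max_new_tokens=200)
print(tokenizer.decode(output_ids[0][input_ids.shape[-1]:], skip_special_tokens=True))
```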
[ "MEDQA" ]
SEACrowd/mdeberta-v3_sea_translationese
SEACrowd
text-classification
[ "transformers", "safetensors", "deberta-v2", "text-classification", "translationese", "classification", "sea", "southeast asia", "en", "id", "ms", "vi", "th", "lo", "km", "my", "tl", "arxiv:2406.10118", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-05-26T15:01:21Z
2024-06-18T13:06:30+00:00
23
3
--- language: - en - id - ms - vi - th - lo - km - my - tl library_name: transformers license: apache-2.0 metrics: - accuracy pipeline_tag: text-classification tags: - translationese - classification - sea - southeast asia --- <img width="100%" alt="SEACrowd Logo" src="https://github.com/SEACrowd/.github/blob/main/profile/assets/seacrowd-email-banner-without-logo.png?raw=true"> This is our fine-tuned mDeBERTa SEA translationese classifier for the ["SEACrowd: A Multilingual Multimodal Data Hub and Benchmark Suite for Southeast Asian Languages"](https://arxiv.org/pdf/2406.10118) paper. SEACrowd is a [collaborative initiative](https://github.com/SEACrowd) that consolidates a [comprehensive resource hub](https://seacrowd.github.io/seacrowd-catalogue/) that fills the resource gap by [providing standardized corpora](https://github.com/SEACrowd/seacrowd-datahub) in nearly 1,000 Southeast Asian (SEA) languages across three modalities. # Model Card for Model ID To analyze the generation quality of LLMs in SEA languages, we build a text classifier to discriminate between translationese and natural texts. We construct a translationese classification training and testing dataset using 49 and 62 data subsets, respectively, covering approximately 39.9k and 51.5k sentences across 9 SEA languages: English (eng), Indonesian (ind), Khmer (khm), Lao (lao), Burmese (mya), Filipino (fil), Thai (tha), Vietnamese (vie), and Malay (zlm). > Our translationese vs. natural train/test data is available on [SEACrowd/sea_translationese_resampled](https://huggingface.co/datasets/SEACrowd/sea_translationese_resampled). To fine-tune the translationese classifier, check out our [experiments repository on GitHub](https://github.com/SEACrowd/seacrowd-experiments). We use a binary label (translationese, i.e., machine-translated or human-translated, or natural, i.e., human-generated) instead of 3 labels (machine-translated, human-translated, human-generated). ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** SEACrowd - **Funded by:** SEACrowd - **Shared by:** SEACrowd - **Model type:** Encoder-Only (DebertaV2ForSequenceClassification) - **Language(s) (NLP):** eng, ind, khm, lao, mya, fil, tha, vie, zsm - **License:** Apache 2.0 - **Finetuned from model:** microsoft/mdeberta-v3-base ### Model Sources <!-- Provide the basic links for the model. --> - **Paper:** https://arxiv.org/abs/2406.10118 - **Experiment:** https://github.com/SEACrowd/seacrowd-experiments - **Data Hub:** https://github.com/SEACrowd/seacrowd-datahub ## Uses <!-- Address questions around how the model is intended to be used, including the foreseeable users of the model and those affected by the model. --> To discriminate between translationese and natural texts in 9 SEA languages: English (eng), Indonesian (ind), Khmer (khm), Lao (lao), Burmese (mya), Filipino (fil), Thai (tha), Vietnamese (vie), and Malay (zlm). ### Direct Use <!-- This section is for the model use without fine-tuning or plugging into a larger ecosystem/app. --> The model is developed for detecting whether a text is `human-translated`, `machine-translated`, or `natural`. 
The model supports 9 languages: `eng`, `ind`, `khm`, `lao`, `mya`, `fil`, `tha`, `vie`, `zsm`. The label mapping of the model is defined as follows: ``` {0: 'Human-translated', 1: 'Machine-translated', 2: 'Natural'} ``` where both `0` and `1` correspond to translationese and `2` is natural. ### Out-of-Scope Use <!-- This section addresses misuse, malicious use, and uses that the model will not work well for. --> - Use in any manner that violates applicable laws or regulations (including trade compliance laws). - Use in any other way that is prohibited by the Acceptable Use Policy and Apache 2.0 License. - Use in languages other than the 9 supported languages. ## Bias, Risks, and Limitations <!-- This section is meant to convey both technical and sociotechnical limitations. --> The model achieves 79.08% accuracy on `translationese` (combining `human-translated` and `machine-translated`) vs `natural` in our evaluation, averaged across the aforementioned SEA languages. Users should be aware that the model may still produce errors. See [our paper](https://arxiv.org/pdf/2406.10118) for more details. ### Recommendations <!-- This section is meant to convey recommendations with respect to the bias, risk, and technical limitations. --> Users (both direct and downstream) should be made aware of the risks, biases, and limitations of the model. ## How to Use the Model ``` from transformers import AutoTokenizer, AutoModelForSequenceClassification tokenizer = AutoTokenizer.from_pretrained('SEACrowd/mdeberta-v3_sea_translationese') model = AutoModelForSequenceClassification.from_pretrained('SEACrowd/mdeberta-v3_sea_translationese') inputs = tokenizer('<INPUT_TEXT>', return_tensors='pt', padding='longest', max_length=512, truncation=True) outputs = model(**inputs) ``` ## Citation <!-- If there is a paper or blog post introducing the model, the APA and Bibtex information for that should go in this section. --> If you are using any resources from SEACrowd, including datasheets, dataloaders, code, etc., please cite [the following publication](https://arxiv.org/pdf/2406.10118): ``` @article{lovenia2024seacrowd, title={SEACrowd: A Multilingual Multimodal Data Hub and Benchmark Suite for Southeast Asian Languages}, author={Holy Lovenia and Rahmad Mahendra and Salsabil Maulana Akbar and Lester James V. Miranda and Jennifer Santoso and Elyanah Aco and Akhdan Fadhilah and Jonibek Mansurov and Joseph Marvin Imperial and Onno P. Kampman and Joel Ruben Antony Moniz and Muhammad Ravi Shulthan Habibi and Frederikus Hudi and Railey Montalan and Ryan Ignatius and Joanito Agili Lopo and William Nixon and Börje F. Karlsson and James Jaya and Ryandito Diandaru and Yuze Gao and Patrick Amadeus and Bin Wang and Jan Christian Blaise Cruz and Chenxi Whitehouse and Ivan Halim Parmonangan and Maria Khelli and Wenyu Zhang and Lucky Susanto and Reynard Adha Ryanda and Sonny Lazuardi Hermawan and Dan John Velasco and Muhammad Dehan Al Kautsar and Willy Fitra Hendria and Yasmin Moslem and Noah Flynn and Muhammad Farid Adilazuarda and Haochen Li and Johanes Lee and R. Damanhuri and Shuo Sun and Muhammad Reza Qorib and Amirbek Djanibekov and Wei Qi Leong and Quyet V. Do and Niklas Muennighoff and Tanrada Pansuwan and Ilham Firdausi Putra and Yan Xu and Ngee Chia Tai and Ayu Purwarianti and Sebastian Ruder and William Tjhi and Peerat Limkonchotiwat and Alham Fikri Aji and Sedrick Keh and Genta Indra Winata and Ruochen Zhang and Fajri Koto and Zheng-Xin Yong and Samuel Cahyawijaya}, year={2024}, eprint={2406.10118}, journal={arXiv preprint arXiv: 2406.10118} } ```
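As a small addendum to the usage snippet above: the sketch below is not an official SEACrowd example (the input sentence is made up), but it shows how the raw logits can be mapped to the documented label mapping and collapsed into the binary translationese-vs-natural decision used in the paper.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Sketch only: end-to-end classification with the label mapping documented above.
label_map = {0: 'Human-translated', 1: 'Machine-translated', 2: 'Natural'}

tokenizer = AutoTokenizer.from_pretrained('SEACrowd/mdeberta-v3_sea_translationese')
model = AutoModelForSequenceClassification.from_pretrained('SEACrowd/mdeberta-v3_sea_translationese')

text = 'Contoh kalimat dalam bahasa Indonesia.'  # hypothetical input sentence
inputs = tokenizer(text, return_tensors='pt', padding='longest', max_length=512, truncation=True)
with torch.no_grad():
    logits = model(**inputs).logits  # shape: (1, 3)

pred = int(logits.argmax(dim=-1))
print(label_map[pred])                                    # fine-grained label
print('translationese' if pred in (0, 1) else 'natural')  # binary grouping used in the paper
```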
[ "CHIA" ]
JosefAlbers/Phi-3-vision-128k-instruct-mlx
JosefAlbers
null
[ "mlx", "safetensors", "phi3_v", "vqa", "vlm", "llm", "phi", "custom_code", "en", "license:mit", "region:us" ]
2024-06-16T03:34:51Z
2024-06-16T13:12:44+00:00
23
1
--- language: - en library_name: mlx license: mit tags: - vqa - vlm - llm - phi --- # Phi-3-Vision for Apple MLX This project brings the powerful phi-3-vision VLM to Apple's MLX framework, offering a comprehensive solution for various text and image processing tasks. With a focus on simplicity and efficiency, this implementation offers a straightforward and minimalistic integration of the VLM model. It seamlessly incorporates essential functionalities such as generating quantized model weights, optimizing KV cache quantization during inference, facilitating LoRA/QLoRA training, and conducting model benchmarking, all encapsulated within a single file for convenient access and usage. ## Key Features * **Batch Generation:** Accelerate inference by generating text for multiple prompts concurrently (107 tokens-per-sec batched vs 56 tokens-per-sec original) * **Model Quantization:** Reduce model size for faster loading and deployment (2.3GB quantized vs 8.5GB original full-precision). * **Cache Quantization:** Optimize inference for processing long contexts with key-value cache quantization. * **Chat Template:** Utilize chat template for streamlining interactions with the model. * **LoRA Training:** Easily customize the model for specific tasks or datasets using LoRA. * **Benchmarking:** Quickly assess model performance on any dataset. (WIP) * **Su-scaled RoPE:** Manages sequences of up to 128K tokens. * **VLM Agent:** Leverages VLM's visual understanding for interactive code generation and refinement, enabling data visualization and image manipulation through a visual feedback loop. (WIP) * **Long Context RAG:** Enables the integration of Retrieval-Augmented Generation to harness large amounts of external knowledge for complex tasks such as code understanding, leveraging the phi-3-vision model's 128K context window. (WIP) ## Quick Start **1. Install Phi-3 Vision MLX:** ```bash git clone https://github.com/JosefAlbers/Phi-3-Vision-MLX.git ``` **2. Launch Phi-3 Vision MLX:** ```bash phi3v ``` Or, ```python from phi_3_vision_mlx import chatui chatui() ``` ## Usage ### **VLM Agent** (WIP) VLM's understanding of both text and visuals enables interactive generation and modification of plots/images, opening up new possibilities for GUI development and data visualization. ```python # from phi_3_vision_mlx import chatui chatui() ``` ![Alt text](https://raw.githubusercontent.com/JosefAlbers/Phi-3-Vision-MLX/main/assets/chatui_2.png) ### **Visual Question Answering (VQA)** Simply drag and drop screenshot images from clipboard into the chatui textbox or upload images files for VQA. ![Alt text](https://raw.githubusercontent.com/JosefAlbers/Phi-3-Vision-MLX/main/assets/chatui_caption.png) Or, ```python # from phi_3_vision_mlx import chat chat('What is shown in this image?', 'https://assets-c4akfrf5b4d3f4b7.z01.azurefd.net/assets/2024/04/BMDataViz_661fb89f3845e.png') ``` <details><summary>Click to expand output</summary><pre> The image displays a bar chart with percentages on the vertical axis ranging from 0% to 100%, and various statements on the horizontal axis. Each bar represents the percentage of respondents who agree with the corresponding statement. 
The statements include 'Having clear goals for a meeting', 'Knowing where to find information', 'Having more focus on summarization', 'Understand information I need', 'Having tools to prepare for meetings', and 'Having clear Prompt: 377.97 tokens-per-sec (3103 tokens / 8.2 sec) Generation: 8.04 tokens-per-sec (100 tokens / 12.3 sec) </pre></details><br> ### **Batched Generation** Paddings for each input prompt and their corresponding attention masks, and position IDs are properly handled by the `generate` function to ensure correct model behavior. ```python chat([ "Write an executive summary for a communications business plan", "Write a resume.", "Write a mystery horror.", "Write a Neurology ICU Admission Note.",]) ``` <details><summary>Click to expand output</summary><pre> < Generated text for prompt #0 > Title: Communications Business Plan Executive Summary: Our communications business plan aims to establish a leading provider of communication solutions for businesses and individuals. We will focus on delivering high-quality, reliable, and cost-effective communication services, including voice, video, and data services. Our services will be tailored to meet the unique needs of our customers, and we will offer a range of packages and plans to suit different budgets and requirements. < Generated text for prompt #1 > Title: [Your Name] Contact Information: Email: [Your Email] Phone: [Your Phone] Objective: To obtain a position as a [Your Desired Position] in [Your Industry/Company] that utilizes my skills and experience to contribute to the success of the organization. Education: [Your Name] [Your Degree] [Your Major] [Your University] [Year < Generated text for prompt #2 > Title: The Haunting of Hillcrest Manor In the small, sleepy town of Crestwood, nestled at the edge of a dense forest, stood an imposing manor known as Hillcrest Manor. The manor had been abandoned for decades, its once grand facade now crumbling and overgrown with ivy. Whispers of its dark past and the mysterious disappearance of its former inhabitants had become the stuff of local < Generated text for prompt #3 > Neurology ICU Admission Note Patient: John Doe Date: [Insert Date] Time: [Insert Time] Chief Complaint: Severe headache, nausea, and vomiting History of Present Illness: The patient presented to the emergency department with a severe headache, nausea, and vomiting. The headache was described as a constant, throbbing pain that was worse Prompt: 134.22 tokens-per-sec (80 tokens / 0.6 sec) Generation: 30.74 tokens-per-sec (400 tokens / 13.0 sec) </pre></details><br> ### **Cache Quantization** ```python chat("Write a cosmic horror.", quantize_cache=True) ``` <details><summary>Click to expand output</summary><pre> Title: The Echoes of the Void In the depths of the cosmic abyss, where the stars are but distant memories and the black hole's pull is a relentless force, there exists a realm of unimaginable horror. This is the realm of The Echoes of the Void, a place where the very fabric of reality is distorted and the line between the living and the dead is blurred. The Echo Prompt: 45.88 tokens-per-sec (14 tokens / 0.3 sec) Generation: 6.82 tokens-per-sec (100 tokens / 14.5 sec) </pre></details><br> ### **Model Quantization** ```python chat("Write a cosmic horror.", quantize_model=True) ``` <details><summary>Click to expand output</summary><pre> Title: The Eye of the Void The night was dark and cold, and the stars shone brightly in the sky above. 
The wind howled through the trees, carrying with it the scent of death and decay. In the heart of the forest, a lone figure stood, staring into the abyss. His name was John, and he had been drawn to this place by a mysterious force that he could not explain. As he stood there Prompt: 149.99 tokens-per-sec (14 tokens / 0.1 sec) Generation: 53.36 tokens-per-sec (100 tokens / 1.9 sec) </pre></details><br> ### **LoRA Training** ```python # from phi_3_vision_mlx import train_lora train_lora(lora_layers=5, lora_rank=16, epochs=10, lr=1e-4, warmup=.5, mask_ratios=[.0], adapter_path='adapters', dataset_path = "JosefAlbers/akemiH_MedQA_Reason") ``` ![Alt text](https://raw.githubusercontent.com/JosefAlbers/Phi-3-Vision-MLX/main/assets/train_log.png) ### **LoRA Inference** ```python chat("Write a cosmic horror.", adapter_path='adapters') ``` <details><summary>Click to expand output</summary><pre> Title: The Echoes of the Void In the depths of the cosmic abyss, where the stars are but distant memories and the black hole's pull is a relentless force, there exists a realm of unimaginable horror. This is the realm of The Echoes of the Void, a place where the very fabric of reality is distorted and the line between life and death is blurred. The Echoes of Prompt: 36.87 tokens-per-sec (14 tokens / 0.4 sec) Generation: 8.56 tokens-per-sec (100 tokens / 11.6 sec) </pre></details><br> ### **LoRA Testing** (WIP) ```python # from phi_3_vision_mlx import test_lora test_lora(dataset_path="JosefAlbers/akemiH_MedQA_Reason"): ``` <details><summary>Click to expand output</summary><pre> Question: A 23-year-old pregnant woman at 22 weeks gestation presents with burning upon urination. She states it started 1 day ago and has been worsening despite drinking more water and taking cranberry extract. She otherwise feels well and is followed by a doctor for her pregnancy. Her temperature is 97.7°F (36.5°C), blood pressure is 122/77 mmHg, pulse is 80/min, respirations are 19/min, and oxygen saturation is 98% on room air. Physical exam is notable for an absence of costovertebral angle tenderness and a gravid uterus. Which of the following is the best treatment for this patient? - Taught: Nitrofurantoin is the best treatment for a pregnant patient with a likely urinary tract infection, due to its efficacy and safety profile during pregnancy. - Recall: Nitrofurantoin is the best treatment for a pregnant patient with a likely urinary tract infection, due to its efficacy - Answer: E - Attenmpt: E - Correct: True Question: A 3-month-old baby died suddenly at night while asleep. His mother noticed that he had died only after she awoke in the morning. No cause of death was determined based on the autopsy. Which of the following precautions could have prevented the death of the baby? - Taught: Placing infants in a supine position on a firm mattress during sleep is recommended to reduce the risk of sudden infant death syndrome (SIDS). - Recall: Placing infants in a supine position on a firm mattress during sleep is recommended to reduce the risk of sudden infant death syndrome ( - Answer: A - Attenmpt: A - Correct: True Question: A mother brings her 3-week-old infant to the pediatrician's office because she is concerned about his feeding habits. He was born without complications and has not had any medical problems up until this time. However, for the past 4 days, he has been fussy, is regurgitating all of his feeds, and his vomit is yellow in color. 
On physical exam, the child's abdomen is minimally distended but no other abnormalities are appreciated. Which of the following embryologic errors could account for this presentation? - Taught: The infant's symptoms of non-bilious vomiting, abdominal distension, and palpable "olive" mass suggest pyloric stenosis, caused by abnormal hypertrophy of the pyloric sphincter muscle. - Recall: The infant's symptoms of non-bilious vomiting, abdominal distension, and palpable "olive" mass - Answer: A - Attenmpt: A - Correct: True Question: A pulmonary autopsy specimen from a 58-year-old woman who died of acute hypoxic respiratory failure was examined. She had recently undergone surgery for a fractured femur 3 months ago. Initial hospital course was uncomplicated, and she was discharged to a rehab facility in good health. Shortly after discharge home from rehab, she developed sudden shortness of breath and had cardiac arrest. Resuscitation was unsuccessful. On histological examination of lung tissue, fibrous connective tissue around the lumen of the pulmonary artery is observed. Which of the following is the most likely pathogenesis for the present findings? - Taught: The sudden death of a postoperative patient with a history of immobilization is most likely due to a thromboembolism, evidenced by fibrous tissue around the pulmonary artery lumen on histological examination. - Recall: The sudden death of a postoperative patient with a history of immobilization is most likely due to a thromboembolism, ev - Answer: A - Attenmpt: C - Correct: False Question: A 20-year-old woman presents with menorrhagia for the past several years. She says that her menses “have always been heavy”, and she has experienced easy bruising for as long as she can remember. Family history is significant for her mother, who had similar problems with bruising easily. The patient's vital signs include: heart rate 98/min, respiratory rate 14/min, temperature 36.1°C (96.9°F), and blood pressure 110/87 mm Hg. Physical examination is unremarkable. Laboratory tests show the following: platelet count 200,000/mm3, PT 12 seconds, and PTT 43 seconds. Which of the following is the most likely cause of this patient’s symptoms? - Taught: The patient's symptoms of menorrhagia and easy bruising, along with a prolonged PTT and normal platelet count, are indicative of Von Willebrand disease, an autosomal inherited bleeding disorder. - Recall: The patient's symptoms of menorrhagia and easy bruising, along with a prolonged PTT and normal platelet count, are - Answer: E - Attenmpt: B - Correct: False Question: A 40-year-old zookeeper presents to the emergency department complaining of severe abdominal pain that radiates to her back, and nausea. The pain started 2 days ago and slowly increased until she could not tolerate it any longer. Past medical history is significant for hypertension and hypothyroidism. Additionally, she reports that she was recently stung by one of the zoo’s smaller scorpions, but did not seek medical treatment. She takes aspirin, levothyroxine, oral contraceptive pills, and a multivitamin daily. Family history is noncontributory. Today, her blood pressure is 108/58 mm Hg, heart rate is 99/min, respiratory rate is 21/min, and temperature is 37.0°C (98.6°F). On physical exam, she is a well-developed, obese female that looks unwell. Her heart has a regular rate and rhythm. Radial pulses are weak but symmetric. Her lungs are clear to auscultation bilaterally. Her lateral left ankle is swollen, erythematous, and painful to palpate. 
An abdominal CT is consistent with acute pancreatitis. Which of the following is the most likely etiology for this patient’s disease? - Taught: The zookeeper's acute pancreatitis is most likely caused by the venom from a scorpion sting, which contains pancreatic toxins leading to inflammation. - Recall: The zookeeper's acute pancreatitis is most likely caused by the venom from a scorpion sting, which contains pan - Answer: C - Attenmpt: B - Correct: False Question: A 25-year-old primigravida presents to her physician for a routine prenatal visit. She is at 34 weeks gestation, as confirmed by an ultrasound examination. She has no complaints, but notes that the new shoes she bought 2 weeks ago do not fit anymore. The course of her pregnancy has been uneventful and she has been compliant with the recommended prenatal care. Her medical history is unremarkable. She has a 15-pound weight gain since the last visit 3 weeks ago. Her vital signs are as follows: blood pressure, 148/90 mm Hg; heart rate, 88/min; respiratory rate, 16/min; and temperature, 36.6℃ (97.9℉). The blood pressure on repeat assessment 4 hours later is 151/90 mm Hg. The fetal heart rate is 151/min. The physical examination is significant for 2+ pitting edema of the lower extremity. Which of the following tests o should confirm the probable condition of this patient? - Taught: A 24-hour urine protein test is the appropriate test to confirm the diagnosis of preeclampsia in this patient. - Recall: A 24-hour urine protein test is the appropriate test to confirm the diagnosis of preeclampsia in this patient.<|end|> - Answer: E - Attenmpt: E - Correct: True Question: A 3900-g (8.6-lb) male infant is delivered at 39 weeks' gestation via spontaneous vaginal delivery. Pregnancy and delivery were uncomplicated but a prenatal ultrasound at 20 weeks showed a defect in the pleuroperitoneal membrane. Further evaluation of this patient is most likely to show which of the following findings? - Taught: A defect in the pleuroperitoneal membrane detected prenatally is indicative of congenital diaphragmatic hernia, where abdominal organs herniate into the thoracic cavity, potentially causing pulmonary hypoplasia. - Recall: A defect in the pleuroperitoneal membrane detected prenatally is indicative of congenital diaphragmatic h - Answer: A - Attenmpt: A - Correct: True Question: A 62-year-old woman presents for a regular check-up. She complains of lightheadedness and palpitations which occur episodically. Past medical history is significant for a myocardial infarction 6 months ago and NYHA class II chronic heart failure. She also was diagnosed with grade I arterial hypertension 4 years ago. Current medications are aspirin 81 mg, atorvastatin 10 mg, enalapril 10 mg, and metoprolol 200 mg daily. Her vital signs are a blood pressure of 135/90 mm Hg, a heart rate of 125/min, a respiratory rate of 14/min, and a temperature of 36.5°C (97.7°F). Cardiopulmonary examination is significant for irregular heart rhythm and decreased S1 intensity. ECG is obtained and is shown in the picture (see image). Echocardiography shows a left ventricular ejection fraction of 39%. Which of the following drugs is the best choice for rate control in this patient? - Taught: Digoxin is the best choice for rate control in a patient with atrial fibrillation, a history of myocardial infarction, and reduced ejection fraction, especially when maximized on beta-blocker therapy. 
- Recall: Digoxin is the best choice for rate control in a patient with atrial fibrillation, a history of myocardial infar - Answer: E - Attenmpt: B - Correct: False Question: A 35-year-old male presents to his primary care physician with complaints of seasonal allergies. He has been using intranasal vasoconstrictors several times per day for several weeks. What is a likely sequela of the chronic use of topical nasal decongestants? - Taught: Chronic use of topical nasal decongestants can lead to persistent congestion due to rhinitis medicamentosa. - Recall: Chronic use of topical nasal decongestants can lead to persistent congestion due to rhinitis medicamentosa.<|end|><|endoftext|> - Answer: E - Attenmpt: E - Correct: True --- Final Score: 0.6(6/10) 13.16s user 10.00s system 40% cpu 57.670 total </pre></details><br> ### **Long Context RAG** (WIP) This code demonstrates a Retrieval-Augmented Generation (RAG) workflow by fetching documentations and source codes from GitHub repositories, consolidating them into an 11,135 token prompt, and feeding it into a language model. Leveraging the 128K context window, the model then utilizes the retrieved information to provide an explanation of the codes contained in the target repository. ```python # from phi_3_vision_mlx import _load_text context = _load_text("https://raw.githubusercontent.com/ml-explore/mlx/main/docs/src", ["index.rst", "usage/quick_start.rst", "examples/mlp.rst", "examples/llama-inference.rst"]) gh_code = _load_text("https://raw.githubusercontent.com/vegaluisjose/mlx-rag/main", ["model.py", "vdb.py",], True) prompt = '{context}\n<|end|>\n<|user|>Explain the folowing codes.\n\n{gh_code}\n'.format(context=context, gh_code=gh_code) chat(prompt, max_tokens=1000) ``` <details><summary>Click to expand output</summary><pre> The provided code is a Python implementation of a vector database (VDB) using the MLX framework. The VDB is designed to store and retrieve text data in a vectorized format, allowing for efficient similarity search and retrieval. The code includes several classes and functions to handle the various aspects of the VDB, such as loading and saving the model, ingesting text data, querying the database, and saving the database to disk. The `Model` class is the main class that handles the vector database operations. It loads the model from a pre-trained embedding model and initializes the necessary components, such as the embeddings and the encoder. The `run` method is used to run the model on a given input text and returns the embeddings. The `savez` method is used to save the model to disk. The `BertEmbeddings` class is a subclass of `nn.Module` that represents the BERT embeddings used for the VDB. It includes methods for initializing the embeddings and computing the embeddings for a given input text. The `Bert` class is another subclass of `nn.Module` that represents the BERT model used for the VDB. It includes methods for running the model on a given input text and returning the embeddings. The `Model` class is a wrapper class that uses the `Bert` class to create a vector database. It loads the model from a pre-trained embedding model and initializes the necessary components. The `ingest` method is used to ingest text data into the VDB. The `query` method is used to query the VDB for similar text based on a given input text. The `VectorDB` class is a wrapper class that uses the `Model` class to create a vector database. It loads the model from a pre-trained embedding model and initializes the necessary components. 
The `ingest` method is used to ingest text data into the VDB. The `savez` method is used to save the VDB to disk. Prompt: 284.76 tokens-per-sec (11135 tokens / 39.1 sec) Generation: 6.22 tokens-per-sec (444 tokens / 71.2 sec) </pre></details><br> ## Installation You can either install the most recent version of Phi-3-Vision-MLX by cloning the GitHub repository: ```bash git clone https://github.com/JosefAlbers/Phi-3-Vision-MLX.git ``` Or you can install an older version via pip: ```bash pip install phi-3-vision-mlx ``` Please note that the version available through pip may not be the most up-to-date. ## Benchmarks | Task | Vanilla Model | Quantized Model | Quantized Cache | LoRA | |-----------------------|---------------|-----------------|-----------------|-------------| | Text Generation | 8.72 tps | 55.97 tps | 7.04 tps | 8.71 tps | | Image Captioning | 8.04 tps | 32.48 tps | 1.77 tps | 8.00 tps | | Batched Generation | 30.74 tps | 106.94 tps | 20.47 tps | 30.72 tps | ## License This project is licensed under the [MIT License](LICENSE). ## Citation <a href="https://zenodo.org/doi/10.5281/zenodo.11403221"><img src="https://zenodo.org/badge/806709541.svg" alt="DOI"></a>
[ "MEDQA" ]
raidium/MQG
raidium
text-generation
[ "transformers", "pytorch", "gpt2", "text-generation", "medical", "en", "dataset:raidium/ECNQA_generated_questions", "dataset:raidium/ECN-QA", "arxiv:2405.14654", "base_model:stanford-crfm/BioMedLM", "base_model:finetune:stanford-crfm/BioMedLM", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-06-19T12:10:58Z
2024-07-10T09:56:00+00:00
23
3
--- base_model: stanford-crfm/BioMedLM datasets: - raidium/ECNQA_generated_questions - raidium/ECN-QA language: - en library_name: transformers license: apache-2.0 metrics: - accuracy tags: - medical --- # Model Card for Raidium MQG model The model is introduced in the paper "Efficient Medical Question Answering with Knowledge-Augmented Question Generation". Paper: [https://arxiv.org/abs/2405.14654](https://arxiv.org/abs/2405.14654) MQG is a transformer language model pre-trained on a series of medical textbooks and on medical questions generated by GPT-4. The weights are initialized with [BioMedLM](https://huggingface.co/stanford-crfm/BioMedLM), then further pre-trained on those datasets. The questions have been generated from prompts containing medical data from the textbooks. They are available here: [ECNQA_generated_questions](https://huggingface.co/datasets/raidium/ECNQA_generated_questions). MQG is designed to be fine-tuned for Medical Question Answering tasks. ## Model Details ### Model Description ![image/png](https://cdn-uploads.huggingface.co/production/uploads/62cdea59a9be5c195561c2b8/tMb8cNuV6ZYnjrnUC1Tg2.png) In the expanding field of language model applications, medical knowledge representation remains a significant challenge due to the specialized nature of the domain. Large language models, such as GPT-4, obtain reasonable scores on medical question answering tasks, but smaller models are far behind. In this work, we introduce a method to improve the proficiency of a small language model in the medical domain by employing a two-fold approach. We first fine-tune the model on a corpus of medical textbooks. Then, we use GPT-4 to generate questions similar to the downstream task, prompted with textbook knowledge, and use them to fine-tune the model. We show the benefits of our training strategy on a medical question answering dataset. ### Using the model ```python from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("raidium/MQG") model = AutoModelForCausalLM.from_pretrained("raidium/MQG") ``` - **Developed by:** Raidium - **Model type:** Transformer - **License:** Apache 2.0 - **Finetuned from model:** [BioMedLM](https://huggingface.co/stanford-crfm/BioMedLM) ### Model Sources <!-- Provide the basic links for the model. --> - **Repository:** [https://github.com/raidium-med/MQG](https://github.com/raidium-med/MQG) - **Paper:** [https://arxiv.org/abs/2405.14654](https://arxiv.org/abs/2405.14654) ## Uses ### Direct Use MQG is trained using next-token-prediction on generated questions. Therefore, it can be used out-of-the-box to generate potential answers for medical question answering tasks. However, the generated questions might contain some errors, so it is advised to fine-tune the model on your dataset and use the model to rank the potential answers. ### Downstream Use MQG can be fine-tuned for Medical Question Answering tasks. For multiple-choice questions, a classification head should be appended to the model to rank the different proposed answers (a minimal sketch is given after this section). ### Out-of-Scope Use This model should not be used for datasets outside medical tasks. ## Bias, Risks, and Limitations There is no guarantee that the model answers medical questions correctly. It should only be used for academic purposes, and not in clinical care.
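As noted under Downstream Use, multiple-choice questions are handled by appending a classification head and ranking the proposed answers. The authors' fine-tuning code lives in the repository linked above; the snippet below is only a minimal sketch of that idea, using a generic 🤗 Transformers sequence-classification head (BioMedLM is GPT-2-based, so `AutoModelForSequenceClassification` resolves to a GPT-2 classification head) and a hypothetical question with five propositions. The head is randomly initialized, so it must be fine-tuned on your MCQ data before its scores are meaningful.

```python
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Sketch only (not the authors' exact setup): attach a single-logit scoring head
# to MQG and rank answer choices by their scores.
tokenizer = AutoTokenizer.from_pretrained("raidium/MQG")
model = AutoModelForSequenceClassification.from_pretrained("raidium/MQG", num_labels=1)

# GPT-2-style models usually have no pad token; reuse EOS so batched padding works.
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
model.config.pad_token_id = tokenizer.pad_token_id

question = "Which treatment is first-line for condition X?"    # hypothetical question
choices = ["Drug A", "Drug B", "Drug C", "Drug D", "Drug E"]   # 5 propositions, as in ECN-QA

inputs = tokenizer([f"{question} {c}" for c in choices],
                   padding=True, truncation=True, return_tensors="pt")
with torch.no_grad():
    scores = model(**inputs).logits.squeeze(-1)  # one score per (question, choice) pair

print(choices[int(scores.argmax())])  # highest-ranked proposition (after fine-tuning)
```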
## Training Details ### Training Data The model is trained on a corpus of medical textbooks, and further pre-trained on generated questions: [ECNQA_generated_questions](https://huggingface.co/datasets/raidium/ECNQA_generated_questions). ### Training Procedure MQG is trained using next-token-prediction on both datasets. #### Training Hyperparameters - **Training regime:** fp16 mixed-precision training. <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> ## Evaluation ### Testing Data, Factors & Metrics #### Testing Data We tested the model on a medical question answering dataset, ECN-QA, based on the French medical residency examination. It is composed of "single" and "progressive" questions (i.e., a series of multiple related questions). It is a multiple-choice question dataset, containing 5 propositions for each question. #### Metrics We use accuracy to evaluate the model on Medical Question Answering. ### Results See paper: [https://arxiv.org/abs/2405.14654](https://arxiv.org/abs/2405.14654) ### Model Architecture and Objective The model is based on BioMedLM's architecture, which is a modified GPT-2 architecture. ### Compute Infrastructure #### Hardware The model was trained on the Jean-Zay supercomputer, on multiple nodes with 4 A100 GPUs. #### Software PyTorch, DeepSpeed ## Citation **BibTeX:** ``` @article{khlaut2024efficient, title={Efficient Medical Question Answering with Knowledge-Augmented Question Generation}, author={Khlaut, Julien and Dancette, Corentin and Ferreres, Elodie and Bennani, Alaedine and H{\'e}rent, Paul and Manceron, Pierre}, journal={Clinical NLP Workshop, NAACL 2024}, year={2024} } ``` ## Model Card Contact julien.khlaut at raidium.fr
[ "MEDICAL DATA" ]
RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf
RichardErkhov
null
[ "gguf", "endpoints_compatible", "region:us", "conversational" ]
2024-06-29T06:13:28Z
2024-06-29T08:26:00+00:00
23
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) JSL-MedMNX-7B-v2.0 - GGUF - Model creator: https://huggingface.co/johnsnowlabs/ - Original model: https://huggingface.co/johnsnowlabs/JSL-MedMNX-7B-v2.0/ | Name | Quant method | Size | | ---- | ---- | ---- | | [JSL-MedMNX-7B-v2.0.Q2_K.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q2_K.gguf) | Q2_K | 2.53GB | | [JSL-MedMNX-7B-v2.0.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.IQ3_XS.gguf) | IQ3_XS | 2.81GB | | [JSL-MedMNX-7B-v2.0.IQ3_S.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.IQ3_S.gguf) | IQ3_S | 2.96GB | | [JSL-MedMNX-7B-v2.0.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q3_K_S.gguf) | Q3_K_S | 2.95GB | | [JSL-MedMNX-7B-v2.0.IQ3_M.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.IQ3_M.gguf) | IQ3_M | 3.06GB | | [JSL-MedMNX-7B-v2.0.Q3_K.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q3_K.gguf) | Q3_K | 3.28GB | | [JSL-MedMNX-7B-v2.0.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q3_K_M.gguf) | Q3_K_M | 3.28GB | | [JSL-MedMNX-7B-v2.0.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q3_K_L.gguf) | Q3_K_L | 3.56GB | | [JSL-MedMNX-7B-v2.0.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.IQ4_XS.gguf) | IQ4_XS | 3.67GB | | [JSL-MedMNX-7B-v2.0.Q4_0.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q4_0.gguf) | Q4_0 | 3.83GB | | [JSL-MedMNX-7B-v2.0.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.IQ4_NL.gguf) | IQ4_NL | 3.87GB | | [JSL-MedMNX-7B-v2.0.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q4_K_S.gguf) | Q4_K_S | 3.86GB | | [JSL-MedMNX-7B-v2.0.Q4_K.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q4_K.gguf) | Q4_K | 4.07GB | | [JSL-MedMNX-7B-v2.0.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q4_K_M.gguf) | Q4_K_M | 4.07GB | | [JSL-MedMNX-7B-v2.0.Q4_1.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q4_1.gguf) | Q4_1 | 4.24GB | | [JSL-MedMNX-7B-v2.0.Q5_0.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q5_0.gguf) | Q5_0 | 4.65GB | | [JSL-MedMNX-7B-v2.0.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q5_K_S.gguf) | Q5_K_S | 4.65GB | | [JSL-MedMNX-7B-v2.0.Q5_K.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q5_K.gguf) | Q5_K | 4.78GB | | 
[JSL-MedMNX-7B-v2.0.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q5_K_M.gguf) | Q5_K_M | 4.78GB | | [JSL-MedMNX-7B-v2.0.Q5_1.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q5_1.gguf) | Q5_1 | 5.07GB | | [JSL-MedMNX-7B-v2.0.Q6_K.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q6_K.gguf) | Q6_K | 5.53GB | | [JSL-MedMNX-7B-v2.0.Q8_0.gguf](https://huggingface.co/RichardErkhov/johnsnowlabs_-_JSL-MedMNX-7B-v2.0-gguf/blob/main/JSL-MedMNX-7B-v2.0.Q8_0.gguf) | Q8_0 | 7.17GB | Original model description: --- license: cc-by-nc-nd-4.0 language: - en library_name: transformers tags: - reward model - RLHF - medical --- # JSL-MedMNX-7B-v2.0 [<img src="https://repository-images.githubusercontent.com/104670986/2e728700-ace4-11ea-9cfc-f3e060b25ddf">](http://www.johnsnowlabs.com) This model is developed by [John Snow Labs](https://www.johnsnowlabs.com/). Performance on biomedical benchmarks: [Open Medical LLM Leaderboard](https://huggingface.co/spaces/openlifescienceai/open_medical_llm_leaderboard). This model is available under a [CC-BY-NC-ND](https://creativecommons.org/licenses/by-nc-nd/4.0/deed.en) license and must also conform to this [Acceptable Use Policy](https://huggingface.co/johnsnowlabs). If you need to license this model for commercial use, please contact us at [email protected]. ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "johnsnowlabs/JSL-MedMNX-7B-v2.0" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` ## 🏆 Evaluation | Tasks |Version|Filter|n-shot| Metric |Value | |Stderr| |-------------------------------|-------|------|-----:|--------|-----:|---|-----:| |stem |N/A |none | 0|acc |0.6085|± |0.0057| | | |none | 0|acc_norm|0.5700|± |0.0067| | - medmcqa |Yaml |none | 0|acc |0.5625|± |0.0077| | | |none | 0|acc_norm|0.5625|± |0.0077| | - medqa_4options |Yaml |none | 0|acc |0.5947|± |0.0138| | | |none | 0|acc_norm|0.5947|± |0.0138| | - anatomy (mmlu) | 0|none | 0|acc |0.6444|± |0.0414| | - clinical_knowledge (mmlu) | 0|none | 0|acc |0.7509|± |0.0266| | - college_biology (mmlu) | 0|none | 0|acc |0.7639|± |0.0355| | - college_medicine (mmlu) | 0|none | 0|acc |0.6532|± |0.0363| | - medical_genetics (mmlu) | 0|none | 0|acc |0.7500|± |0.0435| | - professional_medicine (mmlu)| 0|none | 0|acc |0.7537|± |0.0262| | - pubmedqa | 1|none | 0|acc |0.7760|± |0.0187| |Groups|Version|Filter|n-shot| Metric |Value | |Stderr| |------|-------|------|-----:|--------|-----:|---|-----:| |stem |N/A |none | 0|acc |0.6085|± |0.0057| | | |none | 0|acc_norm|0.5700|± |0.0067|
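Since the files listed above are standard GGUF quantizations, they can also be run with any llama.cpp-compatible runtime instead of 🤗 Transformers. The following is a minimal sketch assuming `llama-cpp-python` is installed and one of the quantized files (here Q4_K_M, as an example path) has already been downloaded locally:

```python
from llama_cpp import Llama

# Sketch: load one of the GGUF quantizations listed above with llama-cpp-python.
# The model_path is an example; point it at whichever quant file you downloaded.
llm = Llama(
    model_path="JSL-MedMNX-7B-v2.0.Q4_K_M.gguf",
    n_ctx=4096,  # context window; adjust to your memory budget
)

out = llm.create_chat_completion(
    messages=[{"role": "user", "content": "What is a large language model?"}],
    max_tokens=256,
    temperature=0.7,
)
print(out["choices"][0]["message"]["content"])
```

Lower-bit quants generally trade answer quality for a smaller memory footprint, so pick the file that fits your hardware.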
[ "MEDQA", "PUBMEDQA" ]
khoj-ai/timely-arctic-small
khoj-ai
sentence-similarity
[ "sentence-transformers", "safetensors", "bert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:55736", "loss:MultipleNegativesRankingLoss", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:Snowflake/snowflake-arctic-embed-s", "base_model:finetune:Snowflake/snowflake-arctic-embed-s", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-07-11T03:16:21Z
2024-08-08T18:15:19+00:00
23
4
--- base_model: Snowflake/snowflake-arctic-embed-s datasets: [] language: [] library_name: sentence-transformers pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:55736 - loss:MultipleNegativesRankingLoss widget: - source_sentence: 'Represent this sentence for searching relevant passages: 07/16/64 are sour things good for you?' sentences: - '07/06/2006 [''Select the cells that you want to copy For more information, see Select cells, ranges, rows, or columns on a worksheet. ... '', ''Click Home > Find & Select, and pick Go To Special.'', ''Click Visible cells only > OK.'', ''Click Copy (or press Ctrl+C).'', ''Select the upper-left cell of the paste area and click Paste (or press Ctrl+V).''] ' - 'jul 16 1964 Yet, this doesn''t mean that sour foods are always unsafe to eat. In fact, many sour foods are quite nutritious and rich in plant compounds called antioxidants, which help protect your cells from damage ( 7 , 8 ). ' - '"05/19/2044 Chicken tikka masala differs from the recipe, chicken tikka , because of the various spices (the meaning of ""masala""). Despite the origin being debated between a British-take on Indian food and an actual Indian dish, it has become very popular in the U.K. and in other countries." ' - source_sentence: 'Represent this sentence for searching relevant passages: 07/27/32 Avoid Mononucleosis' sentences: - '07/27/32 The Epstein-Barr Virus (EBV) causes mononucleosis, also known as mono. Transmitted through saliva, mono is most commonly spread by kissing, sharing eating or drinking utensils, coughing and sneezing. ' - 'Making your own pencil bag is a great way to use up scrap fabric that you love but that isn''t large enough for bigger craft projects. It''s also an eco-friendly option for toting pencils around, and a reflection of your own style. 5/01 ' - '11/24 Being beautiful on the inside requires as much attention as your appearance, if not more so. With a dose of humility and self-reflection, along with some consideration for others, becoming beautiful on the inside is possible for every girl. ' - source_sentence: 'Represent this sentence for searching relevant passages: today:2044-07-28 6 months ago Downtown Getdown' sentences: - '"10/04 You can say goodbye in German in nearly any circumstance if you know two phrases: ""Auf Wiedersehen"" and ""Tschüs."" If you really want to impress native German speakers, though, there are a few other phrases you can also use when parting ways." ' - '2044-01-14 The City of Tallahassee''s Downtown Getdown is a seasonal festival in fall that happens in the central business district in the City of Tallahassee, Florida. The festival involves concerts, stands for food, dancing, street entertainers, make up artist, clowns, grilling, and fun for everyone. The city attracts people from all over the area, including Gadsden County, Wakulla County, Jefferson County, Jackson County, Gulf County, Liberty County, Madison County, Taylor County, and even south Georgia counties, including Thomas and Grady counties. The festival attracts tens of thousands of people every year and directly benefits United Way of the Big Bend. The GetDowns are supported by title sponsor Capital City Bank and supporting sponsors, Bud Light along with Tri-Eagle Sales, Aarons, Inc, Tallahassee Democrat, WCTV, Clear Channel Radio, Coke and the City of Tallahassee. References Culture of Tallahassee, Florida ' - '03/04 Sloppy joes are the kind of food that you never outgrow. 
But if you don’t eat meat, you may find yourself wistfully reminiscing over the the childhood joy of tucking into a messy sandwich while nibbling on a much less exciting snack. ' - source_sentence: 'Represent this sentence for searching relevant passages: today:2039-12-02 last tuesday what happens after you beat the elite four in sun and moon?' sentences: - '11-29-2039 Go catch all the Ultra Beasts It''s pretty hard to miss this mission, since it starts right after Sun and Moon''s storyline wraps. After beating the Elite Four and heading home, the player is given a new task: Head to the previously empty motel on Route 8 for a new mission. ' - 'Anaphalis acutifolia is a species of flowering plants within the family Asteraceae. It is found in South Tibet (Yadong). References acutifolia Flora of Tibet February 4 ' - '05/09/02 Math is not as daunting as it seems, it''s all about following simple rules. Repeated use of these rules builds understanding and confidence. This article will teach you how to use and understand those rules. ' - source_sentence: 'Represent this sentence for searching relevant passages: Philadelphia Business Journal 01/30/83' sentences: - '2017/10/17 Circle with Towers is a concrete block 2005/2012 sculpture by American artist Sol LeWitt, installed outside the Bill and Melinda Gates Computer Science Complex on the University of Texas at Austin campus in Austin, Texas, United States. Previously, the artwork was installed in Madison Square Park; the university''s public art program, Landmarks, purchased the sculpture from the Madison Square Park Conservancy. References External links Concrete sculptures in the United States Outdoor sculptures in Austin, Texas University of Texas at Austin campus ' - 'Have you just bought your brand new Nintendo Wii console? Are you gutted that you can''t get Wii Connect 24 in your country of residence? This article will resolve this problem, so you can surf the Internet on your Wii! ' - 'The Philadelphia Business Journal is a diversified business media company in Philadelphia, Pennsylvania, publishing daily stories on its website and social networks, and a weekly edition available in print and online. It is published by the American City Business Journals. See also List of newspapers in Pennsylvania References External links Business newspapers published in the United States Newspapers published in Philadelphia Jan 30 1983 ' --- # Technical Report and Model Pipeline To access our technical report and model pipeline scripts visit our [github](https://github.com/khoj-ai/timely/tree/main) # SentenceTransformer based on Snowflake/snowflake-arctic-embed-s This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [Snowflake/snowflake-arctic-embed-s](https://huggingface.co/Snowflake/snowflake-arctic-embed-s). It maps sentences & paragraphs to a 384-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. 
## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [Snowflake/snowflake-arctic-embed-s](https://huggingface.co/Snowflake/snowflake-arctic-embed-s) <!-- at revision 7dce73c9e586e64e7d7d0a21bf72f50bc5a67e19 --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 384 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 384, 'pooling_mode_cls_token': True, 'pooling_mode_mean_tokens': False, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Normalize() ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("sentence_transformers_model_id") # Run inference sentences = [ 'Represent this sentence for searching relevant passages: Philadelphia Business Journal 01/30/83', 'The Philadelphia Business Journal is a diversified business media company in Philadelphia, Pennsylvania, publishing daily stories on its website and social networks, and a weekly edition available in print and online. It is published by the American City Business Journals. See also List of newspapers in Pennsylvania References External links Business newspapers published in the United States Newspapers published in Philadelphia Jan 30 1983\n', "Have you just bought your brand new Nintendo Wii console? Are you gutted that you can't get Wii Connect 24 in your country of residence? This article will resolve this problem, so you can surf the Internet on your Wii!\n", ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 384] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 55,736 training samples * Columns: <code>anchors</code> and <code>positive</code> * Approximate statistics based on the first 1000 samples: | | anchors | positive | |:--------|:-----------------------------------------------------------------------------------|:----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 14 tokens</li><li>mean: 20.25 tokens</li><li>max: 33 tokens</li></ul> | <ul><li>min: 15 tokens</li><li>mean: 47.2 tokens</li><li>max: 75 tokens</li></ul> | * Samples: | anchors | positive | |:--------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Represent this sentence for searching relevant passages: are bugs attracted to citronella November 10?</code> | <code>Citronella is naturally occurring oil that repels insects. ... “Citronella oil is repellent to mosquitoes to a degree, but the amount being put out by a candle isn't going to be very effective,” Eric Hoffer, president of Hoffer Pest, told TODAY Home. 11/10<br></code> | | <code>Represent this sentence for searching relevant passages: are bugs attracted to citronella 11/10/09?</code> | <code>Citronella is naturally occurring oil that repels insects. ... “Citronella oil is repellent to mosquitoes to a degree, but the amount being put out by a candle isn't going to be very effective,” Eric Hoffer, president of Hoffer Pest, told TODAY Home. 11/10/09<br></code> | | <code>Represent this sentence for searching relevant passages: are bugs attracted to citronella Jan 15?</code> | <code>Citronella is naturally occurring oil that repels insects. ... “Citronella oil is repellent to mosquitoes to a degree, but the amount being put out by a candle isn't going to be very effective,” Eric Hoffer, president of Hoffer Pest, told TODAY Home. 
01/15<br></code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Evaluation Dataset #### Unnamed Dataset * Size: 1,000 evaluation samples * Columns: <code>anchors</code> and <code>positive</code> * Approximate statistics based on the first 1000 samples: | | anchors | positive | |:--------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 11 tokens</li><li>mean: 21.67 tokens</li><li>max: 39 tokens</li></ul> | <ul><li>min: 15 tokens</li><li>mean: 67.0 tokens</li><li>max: 512 tokens</li></ul> | * Samples: | anchors | positive | |:---------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Represent this sentence for searching relevant passages: 4/23 Socialize With Someone Who Is Losing Interest in You</code> | <code>04/23 It can hurt to realize that someone you care about is losing interest in you. If a friend has stopped calling you and no longer makes plans to hang out, your first instinct might be to contact them more frequently or to ignore them in return.<br></code> | | <code>Represent this sentence for searching relevant passages: Alathur taluk, Perambalur 04/19/29</code> | <code>Alathur taluk is a taluk in Perambalur district in the Indian state of Tamil Nadu. It was created by former chief minister J.Jayalalithaa for issues of population increase. Kunnam taluk was bifurcated to form this new taluk. Villages There are 39 villages in Alathur taluk excluding the headquarters Alathur. References Perambalur district Taluks of Perambalur district 2029 Apr 19<br></code> | | <code>Represent this sentence for searching relevant passages: 01/04 how much weight does a baby gain in the first month?</code> | <code>01/04 During their first month, most newborns gain weight at a rate of about 1 ounce (30 grams) per day. They generally grow in height about 1 to 1½ inches (2.54 to 3.81 centimeters) during the first month. 
Many newborns go through a period of rapid growth when they are 7 to 10 days old and again at 3 and 6 weeks.<br></code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 128 - `per_device_eval_batch_size`: 128 - `learning_rate`: 2e-05 - `weight_decay`: 0.01 - `num_train_epochs`: 1 - `warmup_ratio`: 0.1 - `warmup_steps`: 400 - `bf16`: True - `torch_compile`: True - `torch_compile_backend`: inductor - `batch_sampler`: no_duplicates #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 128 - `per_device_eval_batch_size`: 128 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 2e-05 - `weight_decay`: 0.01 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 400 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: True - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - 
`mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: True - `torch_compile_backend`: inductor - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `eval_use_gather_object`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | loss | |:------:|:----:|:-------------:|:------:| | 0.0023 | 1 | 2.3154 | - | | 0.0229 | 10 | 2.3237 | - | | 0.0459 | 20 | 2.4036 | - | | 0.0688 | 30 | 2.3314 | - | | 0.0917 | 40 | 2.3171 | - | | 0.1147 | 50 | 2.2891 | - | | 0.0023 | 1 | 2.2343 | - | | 0.0229 | 10 | 2.2256 | - | | 0.0459 | 20 | 2.2924 | - | | 0.0688 | 30 | 2.2354 | - | | 0.0917 | 40 | 2.2281 | - | | 0.1147 | 50 | 2.2018 | - | | 0.1376 | 60 | 2.2377 | - | | 0.1606 | 70 | 2.2001 | - | | 0.1835 | 80 | 2.158 | - | | 0.2064 | 90 | 2.1405 | - | | 0.2294 | 100 | 2.0916 | - | | 0.2523 | 110 | 2.0374 | - | | 0.2752 | 120 | 2.0492 | - | | 0.2982 | 130 | 1.9824 | - | | 0.3211 | 140 | 1.9571 | - | | 0.3440 | 150 | 1.8317 | - | | 0.3670 | 160 | 1.7183 | - | | 0.3899 | 170 | 1.5928 | - | | 0.4128 | 180 | 1.5695 | - | | 0.4358 | 190 | 1.4592 | - | | 0.4587 | 200 | 1.2667 | 0.2031 | | 0.4817 | 210 | 1.3865 | - | | 0.5046 | 220 | 1.2924 | - | | 0.5275 | 230 | 1.3042 | - | | 0.5505 | 240 | 1.4393 | - | | 0.5734 | 250 | 1.3402 | - | | 0.5963 | 260 | 1.1939 | - | | 0.6193 | 270 | 1.1795 | - | | 0.6422 | 280 | 1.1012 | - | | 0.6651 | 290 | 1.0379 | - | | 0.6881 | 300 | 0.9865 | - | | 0.7110 | 310 | 0.9088 | - | | 0.7339 | 320 | 0.9132 | - | | 0.7569 | 330 | 0.8819 | - | | 0.7798 | 340 | 0.8631 | - | | 0.8028 | 350 | 1.4084 | - | | 0.8257 | 360 | 1.325 | - | | 0.8486 | 370 | 1.2373 | - | | 0.8716 | 380 | 1.1881 | - | | 0.8945 | 390 | 1.1656 | - | | 0.9174 | 400 | 0.7767 | 0.0607 | | 0.9404 | 410 | 0.1511 | - | | 0.9633 | 420 | 0.1439 | - | | 0.9862 | 430 | 0.1216 | - | ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.0.1 - Transformers: 4.43.3 - PyTorch: 2.4.0+cu121 - Accelerate: 0.33.0 - Datasets: 2.20.0 - Tokenizers: 0.19.1 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates 
to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
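For completeness, below is a minimal retrieval sketch in the Sentence Transformers style. The model path is a placeholder for this checkpoint (not a published identifier), and the query prefix is taken from the anchor samples shown above; treat it as an illustration rather than documented usage.

```python
from sentence_transformers import SentenceTransformer

# Hypothetical path: point this at the checkpoint described in this card.
model = SentenceTransformer("path/to/this/checkpoint")

# The anchor samples above use this instruction prefix, so queries should too.
prefix = "Represent this sentence for searching relevant passages: "
queries = [prefix + "how much weight does a baby gain in the first month?"]
passages = [
    "During their first month, most newborns gain weight at a rate of about 1 ounce (30 grams) per day.",
    "Alathur taluk is a taluk in Perambalur district in the Indian state of Tamil Nadu.",
]

query_emb = model.encode(queries, normalize_embeddings=True)
passage_emb = model.encode(passages, normalize_embeddings=True)

# The loss above uses cosine similarity, so ranking by dot product of
# normalized embeddings matches the training objective.
scores = query_emb @ passage_emb.T
print(scores)
```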
[ "CRAFT" ]
semiotic/T5-3B-SynQL-KaggleDBQA-Train-Run-01
semiotic
text-generation
[ "transformers", "pytorch", "t5", "text2text-generation", "code", "text-generation", "en", "dataset:semiotic/SynQL-KaggleDBQA-Train", "arxiv:2010.12725", "base_model:google-t5/t5-3b", "base_model:finetune:google-t5/t5-3b", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-07-11T18:56:02Z
2024-10-25T20:24:14+00:00
23
0
--- base_model: - google-t5/t5-3b datasets: - semiotic/SynQL-KaggleDBQA-Train language: - en license: apache-2.0 pipeline_tag: text-generation tags: - code --- # Model Card for T5-3B/SynQL-KaggleDBQA-Train-Run-01 - Developed by: Semiotic Labs - Model type: Text-to-SQL - License: Apache-2.0 - Finetuned from model: [google-t5/t5-3b](https://huggingface.co/google-t5/t5-3b) - Dataset used for finetuning: [semiotic/SynQL-KaggleDBQA-Train](https://huggingface.co/datasets/semiotic/SynQL-KaggleDBQA-Train/blob/main/README.md) ## Model Context Example metadata is shown below; the `context` field is the prompt presented to the model. Database schemas follow the encoding method proposed by [Shaw et al. (2020)](https://arxiv.org/pdf/2010.12725). ``` "query": "SELECT count(*) FROM singer", "question": "How many singers do we have?", "context": "How many singers do we have? | concert_singer | stadium : stadium_id, location, name, capacity, highest, lowest, average | singer : singer_id, name, country, song_name, song_release_year, age, is_male | concert : concert_id, concert_name, theme, stadium_id, year | singer_in_concert : concert_id, singer_id", "db_id": "concert_singer", ``` ## Model Results Evaluation set: [KaggleDBQA/test](https://github.com/Chia-Hsuan-Lee/KaggleDBQA) Evaluation metric: Execution Accuracy | Model | Data | Run | Execution Accuracy | |-------|------|-----|-------------------| | T5-3B | semiotic/SynQL-KaggleDBQA | 00 | 0.3514 | | T5-3B | semiotic/SynQL-KaggleDBQA | 01 | 0.3514 | | T5-3B | semiotic/SynQL-KaggleDBQA | 02 | 0.3514 |
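As a usage illustration, the sketch below feeds a serialized question-plus-schema context (in the encoding shown above) to the fine-tuned checkpoint. The input serialization, loading options, and generation settings are assumptions for illustration, not documented behavior of this repository.

```python
import torch
from transformers import AutoTokenizer, T5ForConditionalGeneration

# Assumption: the checkpoint loads as a standard T5 seq2seq model (3B parameters, so GPU memory matters).
model_id = "semiotic/T5-3B-SynQL-KaggleDBQA-Train-Run-01"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = T5ForConditionalGeneration.from_pretrained(model_id, torch_dtype=torch.float16, device_map="auto")

# Context string in the "question | db_id | table : columns" encoding described above.
context = (
    "How many singers do we have? | concert_singer | "
    "stadium : stadium_id, location, name, capacity, highest, lowest, average | "
    "singer : singer_id, name, country, song_name, song_release_year, age, is_male | "
    "concert : concert_id, concert_name, theme, stadium_id, year | "
    "singer_in_concert : concert_id, singer_id"
)

inputs = tokenizer(context, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=128, num_beams=4)  # illustrative settings
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
# Expected to approximate: SELECT count(*) FROM singer
```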
[ "CHIA" ]
RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf
RichardErkhov
null
[ "gguf", "endpoints_compatible", "region:us" ]
2024-07-16T06:16:53Z
2024-07-17T15:45:34+00:00
23
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) COKAL-v1-70B - GGUF - Model creator: https://huggingface.co/DopeorNope/ - Original model: https://huggingface.co/DopeorNope/COKAL-v1-70B/ | Name | Quant method | Size | | ---- | ---- | ---- | | [COKAL-v1-70B.Q2_K.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.Q2_K.gguf) | Q2_K | 23.96GB | | [COKAL-v1-70B.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.IQ3_XS.gguf) | IQ3_XS | 26.64GB | | [COKAL-v1-70B.IQ3_S.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.IQ3_S.gguf) | IQ3_S | 28.14GB | | [COKAL-v1-70B.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.Q3_K_S.gguf) | Q3_K_S | 28.14GB | | [COKAL-v1-70B.IQ3_M.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.IQ3_M.gguf) | IQ3_M | 29.09GB | | [COKAL-v1-70B.Q3_K.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.Q3_K.gguf) | Q3_K | 31.26GB | | [COKAL-v1-70B.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.Q3_K_M.gguf) | Q3_K_M | 31.26GB | | [COKAL-v1-70B.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.Q3_K_L.gguf) | Q3_K_L | 33.94GB | | [COKAL-v1-70B.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.IQ4_XS.gguf) | IQ4_XS | 34.94GB | | [COKAL-v1-70B.Q4_0.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.Q4_0.gguf) | Q4_0 | 36.5GB | | [COKAL-v1-70B.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.IQ4_NL.gguf) | IQ4_NL | 36.86GB | | [COKAL-v1-70B.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/blob/main/COKAL-v1-70B.Q4_K_S.gguf) | Q4_K_S | 36.86GB | | [COKAL-v1-70B.Q4_K.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q4_K | 38.88GB | | [COKAL-v1-70B.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q4_K_M | 38.88GB | | [COKAL-v1-70B.Q4_1.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q4_1 | 40.52GB | | [COKAL-v1-70B.Q5_0.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q5_0 | 44.53GB | | [COKAL-v1-70B.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q5_K_S | 44.53GB | | [COKAL-v1-70B.Q5_K.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q5_K | 45.73GB | | [COKAL-v1-70B.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q5_K_M | 45.73GB | | [COKAL-v1-70B.Q5_1.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q5_1 | 48.54GB | | [COKAL-v1-70B.Q6_K.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q6_K | 53.06GB | | [COKAL-v1-70B.Q8_0.gguf](https://huggingface.co/RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf/tree/main/) | Q8_0 | 68.72GB | Original model description: --- license: apache-2.0 --- # 
**🐻‍❄️COKAL-v1_70B🐻‍❄️** ![img](./COKAL-DPO_bear.png) ## Model Details **Model Developers** Seungyoo Lee (DopeorNope) **Input** Models input text only. **Output** Models generate text only. **Model Architecture** COKAL-v1_70B is an auto-regressive 70B language model based on the LLaMA2 transformer architecture. **Base Model** **Training Dataset** - SFT training dataset: [garage-bAInd/Open-Platypus](https://huggingface.co/datasets/garage-bAInd/Open-Platypus) **Training** I developed the model in an environment with A100 x 8 # Implementation Code ```python from transformers import AutoModelForCausalLM, AutoTokenizer import torch repo = "DopeorNope/COKAL-v1_70B" model = AutoModelForCausalLM.from_pretrained( repo, return_dict=True, torch_dtype=torch.float16, device_map='auto' ) model_tokenizer = AutoTokenizer.from_pretrained(repo) ``` ---
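Since this repository hosts the GGUF quantizations listed in the table above, a single quant file can also be fetched programmatically. The sketch below is illustrative and assumes `huggingface_hub` is installed; the filename is taken from the table.

```python
from huggingface_hub import hf_hub_download

# Download one of the quantized files listed above (Q4_K_S as an example).
gguf_path = hf_hub_download(
    repo_id="RichardErkhov/DopeorNope_-_COKAL-v1-70B-gguf",
    filename="COKAL-v1-70B.Q4_K_S.gguf",
)
print(gguf_path)  # local path to the downloaded GGUF file
```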
[ "BEAR" ]
RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf
RichardErkhov
null
[ "gguf", "endpoints_compatible", "region:us", "conversational" ]
2024-08-19T08:34:54Z
2024-08-19T10:07:47+00:00
23
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Einstein-v6.1-Llama3-8B - GGUF - Model creator: https://huggingface.co/Weyaxi/ - Original model: https://huggingface.co/Weyaxi/Einstein-v6.1-Llama3-8B/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Einstein-v6.1-Llama3-8B.Q2_K.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q2_K.gguf) | Q2_K | 2.96GB | | [Einstein-v6.1-Llama3-8B.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.IQ3_XS.gguf) | IQ3_XS | 3.28GB | | [Einstein-v6.1-Llama3-8B.IQ3_S.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.IQ3_S.gguf) | IQ3_S | 3.43GB | | [Einstein-v6.1-Llama3-8B.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q3_K_S.gguf) | Q3_K_S | 3.41GB | | [Einstein-v6.1-Llama3-8B.IQ3_M.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.IQ3_M.gguf) | IQ3_M | 3.52GB | | [Einstein-v6.1-Llama3-8B.Q3_K.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q3_K.gguf) | Q3_K | 3.74GB | | [Einstein-v6.1-Llama3-8B.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q3_K_M.gguf) | Q3_K_M | 3.74GB | | [Einstein-v6.1-Llama3-8B.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q3_K_L.gguf) | Q3_K_L | 4.03GB | | [Einstein-v6.1-Llama3-8B.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.IQ4_XS.gguf) | IQ4_XS | 4.18GB | | [Einstein-v6.1-Llama3-8B.Q4_0.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q4_0.gguf) | Q4_0 | 4.34GB | | [Einstein-v6.1-Llama3-8B.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.IQ4_NL.gguf) | IQ4_NL | 4.38GB | | [Einstein-v6.1-Llama3-8B.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q4_K_S.gguf) | Q4_K_S | 4.37GB | | [Einstein-v6.1-Llama3-8B.Q4_K.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q4_K.gguf) | Q4_K | 4.58GB | | [Einstein-v6.1-Llama3-8B.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q4_K_M.gguf) | Q4_K_M | 4.58GB | | [Einstein-v6.1-Llama3-8B.Q4_1.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q4_1.gguf) | Q4_1 | 4.78GB | | [Einstein-v6.1-Llama3-8B.Q5_0.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q5_0.gguf) | Q5_0 | 5.21GB | | [Einstein-v6.1-Llama3-8B.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q5_K_S.gguf) | Q5_K_S | 5.21GB | | 
[Einstein-v6.1-Llama3-8B.Q5_K.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q5_K.gguf) | Q5_K | 5.34GB | | [Einstein-v6.1-Llama3-8B.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q5_K_M.gguf) | Q5_K_M | 5.34GB | | [Einstein-v6.1-Llama3-8B.Q5_1.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q5_1.gguf) | Q5_1 | 5.65GB | | [Einstein-v6.1-Llama3-8B.Q6_K.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q6_K.gguf) | Q6_K | 6.14GB | | [Einstein-v6.1-Llama3-8B.Q8_0.gguf](https://huggingface.co/RichardErkhov/Weyaxi_-_Einstein-v6.1-Llama3-8B-gguf/blob/main/Einstein-v6.1-Llama3-8B.Q8_0.gguf) | Q8_0 | 7.95GB | Original model description: --- language: - en license: other tags: - axolotl - generated_from_trainer - instruct - finetune - chatml - gpt4 - synthetic data - science - physics - chemistry - biology - math - llama - llama3 base_model: meta-llama/Meta-Llama-3-8B datasets: - allenai/ai2_arc - camel-ai/physics - camel-ai/chemistry - camel-ai/biology - camel-ai/math - metaeval/reclor - openbookqa - mandyyyyii/scibench - derek-thomas/ScienceQA - TIGER-Lab/ScienceEval - jondurbin/airoboros-3.2 - LDJnr/Capybara - Cot-Alpaca-GPT4-From-OpenHermes-2.5 - STEM-AI-mtl/Electrical-engineering - knowrohit07/saraswati-stem - sablo/oasst2_curated - lmsys/lmsys-chat-1m - TIGER-Lab/MathInstruct - bigbio/med_qa - meta-math/MetaMathQA-40K - openbookqa - piqa - metaeval/reclor - derek-thomas/ScienceQA - scibench - sciq - Open-Orca/SlimOrca - migtissera/Synthia-v1.3 - TIGER-Lab/ScienceEval - allenai/WildChat - microsoft/orca-math-word-problems-200k - openchat/openchat_sharegpt4_dataset - teknium/GPTeacher-General-Instruct - m-a-p/CodeFeedback-Filtered-Instruction - totally-not-an-llm/EverythingLM-data-V3 - HuggingFaceH4/no_robots - OpenAssistant/oasst_top1_2023-08-25 - WizardLM/WizardLM_evol_instruct_70k model-index: - name: Einstein-v6.1-Llama3-8B results: - task: type: text-generation name: Text Generation dataset: name: AI2 Reasoning Challenge (25-Shot) type: ai2_arc config: ARC-Challenge split: test args: num_few_shot: 25 metrics: - type: acc_norm value: 62.46 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: HellaSwag (10-Shot) type: hellaswag split: validation args: num_few_shot: 10 metrics: - type: acc_norm value: 82.41 name: normalized accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU (5-Shot) type: cais/mmlu config: all split: test args: num_few_shot: 5 metrics: - type: acc value: 66.19 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: TruthfulQA (0-shot) type: truthful_qa config: multiple_choice split: validation args: num_few_shot: 0 metrics: - type: mc2 value: 55.1 source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: 
type: text-generation name: Text Generation dataset: name: Winogrande (5-shot) type: winogrande config: winogrande_xl split: validation args: num_few_shot: 5 metrics: - type: acc value: 79.32 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GSM8k (5-shot) type: gsm8k config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 66.11 name: accuracy source: url: https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: IFEval (0-Shot) type: HuggingFaceH4/ifeval args: num_few_shot: 0 metrics: - type: inst_level_strict_acc and prompt_level_strict_acc value: 45.68 name: strict accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: BBH (3-Shot) type: BBH args: num_few_shot: 3 metrics: - type: acc_norm value: 29.38 name: normalized accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MATH Lvl 5 (4-Shot) type: hendrycks/competition_math args: num_few_shot: 4 metrics: - type: exact_match value: 5.74 name: exact match source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GPQA (0-shot) type: Idavidrein/gpqa args: num_few_shot: 0 metrics: - type: acc_norm value: 4.25 name: acc_norm source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MuSR (0-shot) type: TAUR-Lab/MuSR args: num_few_shot: 0 metrics: - type: acc_norm value: 11.23 name: acc_norm source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU-PRO (5-shot) type: TIGER-Lab/MMLU-Pro config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 23.68 name: accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Weyaxi/Einstein-v6.1-Llama3-8B name: Open LLM Leaderboard --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6468ce47e134d050a58aa89c/5s12oq859qLfDkkTNam_C.png) # 🔬 Einstein-v6.1-Llama3-8B This model is a full fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on diverse datasets. This model is finetuned using `8xRTX3090` + `1xRTXA6000` using [axolotl](https://github.com/OpenAccess-AI-Collective/axolotl). This model's training was sponsored by [sablo.ai](https://sablo.ai). 
<details><summary>See axolotl config</summary> axolotl version: `0.4.0` ```yaml base_model: meta-llama/Meta-Llama-3-8B model_type: LlamaForCausalLM tokenizer_type: AutoTokenizer load_in_8bit: false load_in_4bit: false strict: false chat_template: chatml datasets: - path: data/merged_all.json ds_type: json type: alpaca conversation: chatml - path: data/gpteacher-instruct-special-alpaca.json ds_type: json type: gpteacher conversation: chatml - path: data/wizardlm_evol_instruct_70k_random_half.json ds_type: json type: alpaca conversation: chatml - path: data/capybara_sharegpt.json ds_type: json type: sharegpt conversation: chatml - path: data/synthia-v1.3_sharegpt_12500.json ds_type: json type: sharegpt conversation: chatml - path: data/cot_alpaca_gpt4_extracted_openhermes_2.5_sharegpt.json ds_type: json type: sharegpt conversation: chatml - path: data/slimorca_dedup_filtered_95k_sharegpt.json ds_type: json type: sharegpt conversation: chatml - path: data/airoboros_3.2_without_contextual_slimorca_orca_sharegpt.json ds_type: json type: sharegpt conversation: chatml - path: data/allenai_wild_chat_gpt4_english_toxic_random_half_4k_sharegpt.json ds_type: json type: sharegpt strict: false conversation: chatml - path: data/pippa_bagel_repo_3k_sharegpt.json ds_type: json type: sharegpt conversation: chatml - path: data/gpt4_data_lmys_1m_sharegpt.json ds_type: json type: sharegpt conversation: chatml - path: data/sharegpt_gpt4_english.json ds_type: json type: sharegpt conversation: chatml - path: data/no_robots_sharegpt.json ds_type: json type: sharegpt strict: false conversation: chatml - path: data/oasst_top1_from_fusechatmixture_sharegpt.json ds_type: json type: sharegpt strict: false conversation: chatml - path: data/everythinglm-data-v3_sharegpt.json ds_type: json type: sharegpt strict: false conversation: chatml dataset_prepared_path: last_run_prepared val_set_size: 0.002 output_dir: ./Einstein-v6.1-Llama3-8B-model sequence_len: 8192 sample_packing: true pad_to_sequence_len: true eval_sample_packing: false wandb_project: Einstein wandb_entity: wandb_watch: wandb_name: Einstein-v6.1-Llama3-2-epoch wandb_log_model: hub_model_id: Weyaxi/Einstein-v6.1-Llama3-8B save_safetensors: true gradient_accumulation_steps: 4 micro_batch_size: 1 num_epochs: 2 optimizer: adamw_bnb_8bit # look lr_scheduler: cosine learning_rate: 0.000005 # look train_on_inputs: false group_by_length: false bf16: true fp16: false tf32: false gradient_checkpointing: true early_stopping_patience: resume_from_checkpoint: local_rank: logging_steps: 1 xformers_attention: flash_attention: true warmup_steps: 10 evals_per_epoch: 2 eval_table_size: eval_table_max_new_tokens: 128 saves_per_epoch: 2 debug: deepspeed: zero3_bf16_cpuoffload_params.json weight_decay: 0.0 fsdp: fsdp_config: special_tokens: bos_token: "<s>" eos_token: "<|im_end|>" unk_token: "<unk>" pad_token: <|end_of_text|> # changed tokens: - "<|im_start|>" ``` </details><br> # 💬 Prompt Template You can use ChatML prompt template while using the model: ### ChatML ``` <|im_start|>system {system}<|im_end|> <|im_start|>user {user}<|im_end|> <|im_start|>assistant {asistant}<|im_end|> ``` This prompt template is available as a [chat template](https://huggingface.co/docs/transformers/main/chat_templating), which means you can format messages using the `tokenizer.apply_chat_template()` method: ```python messages = [ {"role": "system", "content": "You are helpful AI asistant."}, {"role": "user", "content": "Hello!"} ] gen_input = tokenizer.apply_chat_template(message, 
return_tensors="pt") model.generate(**gen_input) ``` # 📊 Datasets used in this model The datasets used to train this model are listed in the metadata section of the model card. Please note that certain datasets mentioned in the metadata may have undergone filtering based on various criteria. The results of this filtering process and its outcomes are in the data folder of this repository: [Weyaxi/Einstein-v6.1-Llama3-8B/data](https://huggingface.co/Weyaxi/Einstein-v6.1-Llama3-8B/tree/main/data) # 🔄 Quantizationed versions ## GGUF [@bartowski](https://huggingface.co/bartowski) - https://huggingface.co/bartowski/Einstein-v6.1-Llama3-8B-GGUF ## ExLlamaV2 [@bartowski](https://huggingface.co/bartowski) - https://huggingface.co/bartowski/Einstein-v6.1-Llama3-8B-exl2 ## AWQ [@solidrust](https://huggingface.co/solidrust) - https://huggingface.co/solidrust/Einstein-v6.1-Llama3-8B-AWQ # 🎯 [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/HuggingFaceH4/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Weyaxi__Einstein-v6.1-Llama3-8B) | Metric |Value| |---------------------------------|----:| |Avg. |68.60| |AI2 Reasoning Challenge (25-Shot)|62.46| |HellaSwag (10-Shot) |82.41| |MMLU (5-Shot) |66.19| |TruthfulQA (0-shot) |55.10| |Winogrande (5-shot) |79.32| |GSM8k (5-shot) |66.11| # 🎯 [Open LLM Leaderboard v2 Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard) Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/details_Weyaxi__Einstein-v6.1-Llama3-8B) | Metric |Value| |-------------------|----:| |Avg. |19.99| |IFEval (0-Shot) |45.68| |BBH (3-Shot) |29.38| |MATH Lvl 5 (4-Shot)| 5.74| |GPQA (0-shot) | 4.25| |MuSR (0-shot) |11.23| |MMLU-PRO (5-shot) |23.68| # 📚 Some resources, discussions and reviews aboout this model #### 🐦 Announcement tweet: - https://twitter.com/Weyaxi/status/1783050724659675627 #### 🔍 Reddit post in r/LocalLLaMA: - https://www.reddit.com/r/LocalLLaMA/comments/1cdlym1/introducing_einstein_v61_based_on_the_new_llama3/ #### ▶️ Youtube Video(s) - [Install Einstein v6.1 Llama3-8B Locally on Windows](https://www.youtube.com/watch?v=VePvv6OM0JY) #### 📱 Octopus-V4-3B - [Octopus-V4-3B](https://huggingface.co/NexaAIDev/Octopus-v4) leverages the incredible physics capabilities of [Einstein-v6.1-Llama3-8B](https://huggingface.co/Weyaxi/Einstein-v6.1-Llama3-8B) in their model. # 🤖 Additional information about training This model is full fine-tuned for 2 epoch. Total number of steps was 2026. <details><summary>Loss graph</summary> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/6468ce47e134d050a58aa89c/Ycs7ZpoqmxFt0u9rybCO1.png) </details><br> # 🤝 Acknowledgments Thanks to [sablo.ai](https://sablo.ai) for sponsoring this model. Thanks to all the dataset authors mentioned in the datasets section. Thanks to [axolotl](https://github.com/OpenAccess-AI-Collective/axolotl) for making the repository I used to make this model. Thanks to all open source AI community. [<img src="https://raw.githubusercontent.com/OpenAccess-AI-Collective/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/OpenAccess-AI-Collective/axolotl) If you would like to support me: [☕ Buy Me a Coffee](https://www.buymeacoffee.com/weyaxi)
[ "SCIQ" ]
medspaner/EriBERTa-clinical-trials-attributes
medspaner
null
[ "pytorch", "roberta", "generated_from_trainer", "arxiv:2306.07373", "license:cc-by-nc-4.0", "region:us" ]
2024-09-13T11:57:33Z
2024-10-01T06:39:05+00:00
23
0
--- license: cc-by-nc-4.0 metrics: - precision - recall - f1 - accuracy tags: - generated_from_trainer widget: - text: Paciente acompañado de su madre y con antecedentes de epilepsia. model-index: - name: EriBERTa-es-clinical-trials-attributes results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # EriBERTa-clinical-trials-attributes This named entity recognition model detects the following types of medical attributes: - Experiencer: - Patient: e.g. *paciente* - Family_member: e.g. *padre* - Other: e.g. *cirujano* - Event temporality: - Future: e.g. ***cirugía*** *pendiente* - History_of: e.g. *antecedentes de* ***migraña*** The model achieves the following results on the test set (when trained with the training and development set; results are averaged over 5 evaluation rounds): - Precision: 0.890 (±0.007) - Recall: 0.848 (±0.008) - F1: 0.868 (±0.002) - Accuracy: 0.989 (±0.001) ## Model description This model adapts the pre-trained model [EriBERTa-base](https://huggingface.co/HiTZ/EriBERTa-base), presented in [De la Iglesia et al. (2023)](https://arxiv.org/abs/2306.07373). It is fine-tuned to conduct medical named entity recognition on Spanish texts about clinical trials. The model is fine-tuned on the [CT-EBM-ES corpus (Campillos-Llanos et al. 2021)](https://bmcmedinformdecismak.biomedcentral.com/articles/10.1186/s12911-021-01395-z) vs 2. If you use this model, please, cite as follows: ``` @article{campillosetal2024,         title = {{Hybrid tool for semantic annotation and concept extraction of medical texts in Spanish}},         author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\'o}n, Adri{\'a}n},         journal = {BMC Bioinformatics}, year={2024}, publisher={BioMed Central} } ``` ## Intended uses & limitations **Disclosure**: *This model is under development and needs to be improved. It should not be used for medical decision making without human assistance and supervision* This model is intended for a generalist purpose, and may have bias and/or any other undesirable distortions. Third parties who deploy or provide systems and/or services using any of these models (or using systems based on these models) should note that it is their responsibility to mitigate the risks arising from their use. Third parties, in any event, need to comply with applicable regulations, including regulations concerning the use of artificial intelligence. The owner or creator of the models will in no event be liable for any results arising from the use made by third parties of these models. **Descargo de responsabilidad**: *Esta herramienta se encuentra en desarrollo y no debe ser empleada para la toma de decisiones médicas* La finalidad de este modelo es generalista, y se advierte que puede tener sesgos y/u otro tipo de distorsiones indeseables. Terceras partes que desplieguen o proporcionen sistemas y/o servicios usando alguno de estos modelos (o utilizando sistemas basados en estos modelos) han tener presente que es su responsabilidad abordar y minimizar los riesgos derivados de su uso. Las terceras partes, en cualquier circunstancia, deben cumplir con la normativa aplicable, incluyendo la normativa que concierne al uso de la inteligencia artificial. El propietario o creador de los modelos de ningún modo será responsable de los resultados derivados del uso que las terceras partes hagan de estos modelos. 
## Training and evaluation data The data used for fine-tuning are the [Clinical Trials for Evidence-Based-Medicine in Spanish corpus](http://www.lllf.uam.es/ESP/nlpdata/wp2/) vs 2. It is a collection of 1200 texts about clinical trials studies and clinical trials announcements: - 500 abstracts from journals published under a Creative Commons license, e.g. available in PubMed or the Scientific Electronic Library Online (SciELO) - 700 clinical trials announcements published in the European Clinical Trials Register and Repositorio Español de Estudios Clínicos If you use the CT-EBM-ES resource, please, cite as follows: ``` @article{campillosetal-midm2021,         title = {A clinical trials corpus annotated with UMLS© entities to enhance the access to Evidence-Based Medicine},         author = {Campillos-Llanos, Leonardo and Valverde-Mateos, Ana and Capllonch-Carri{\'o}n, Adri{\'a}n and Moreno-Sandoval, Antonio},         journal = {BMC Medical Informatics and Decision Making},         volume={21}, number={1}, pages={1--19}, year={2021}, publisher={BioMed Central} } ``` ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: we used different seeds for 5 evaluation rounds, and uploaded the model with the best results - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: average 16.40 epochs (±3.36); trained with early stopping if no improvement after 5 epochs (early stopping patience: 5) ### Training results (test set; average and standard deviation of 5 rounds with different seeds) | Precision | Recall | F1 | Accuracy | |:--------------:|:--------------:|:--------------:|:--------------:| | 0.890 (±0.007) | 0.848 (±0.008) | 0.868 (±0.002) | 0.989 (±0.001) | ### Framework versions - Transformers 4.17.0 - Pytorch 1.10.2+cu113 - Datasets 1.18.4 - Tokenizers 0.11.6
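As a quick usage sketch (not part of the original card), the model can be queried through the `transformers` token-classification pipeline. The aggregation strategy and the exact label names returned are assumptions that depend on the model configuration.

```python
from transformers import pipeline

# Hedged sketch: load the attribute tagger as a standard token-classification pipeline.
ner = pipeline(
    "token-classification",
    model="medspaner/EriBERTa-clinical-trials-attributes",
    aggregation_strategy="simple",  # assumption: merge word pieces into entity spans
)

# Example sentence from the card's widget.
text = "Paciente acompañado de su madre y con antecedentes de epilepsia."
for entity in ner(text):
    # Expected attribute types include Patient, Family_member, Future, History_of.
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```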
[ "SCIELO" ]
tt1225/Vintern-1B-v2-Custom
tt1225
visual-question-answering
[ "transformers", "safetensors", "internvl_chat", "feature-extraction", "vision", "visual-question-answering", "custom_code", "vi", "en", "dataset:5CD-AI/Viet-OCR-VQA", "dataset:5CD-AI/Viet-Doc-VQA", "dataset:5CD-AI/Viet-Doc-VQA-II", "dataset:Vi-VLM/Vista", "dataset:5CD-AI/Viet-Receipt-VQA", "dataset:5CD-AI/Viet-Sketches-VQA", "dataset:5CD-AI/Viet-Geometry-VQA", "dataset:5CD-AI/Viet-Wiki-Handwriting", "dataset:5CD-AI/Viet-ComputerScience-VQA", "dataset:5CD-AI/Viet-Handwriting-gemini-VQA", "dataset:5CD-AI/Viet-Menu-gemini-VQA", "dataset:5CD-AI/Viet-Vintext-gemini-VQA", "dataset:5CD-AI/Viet-OpenViVQA-gemini-VQA", "dataset:5CD-AI/Viet-Resume-VQA", "dataset:5CD-AI/Viet-ViTextVQA-gemini-VQA", "arxiv:2408.12480", "arxiv:2407.10671", "arxiv:2404.16821", "arxiv:2404.07922", "base_model:OpenGVLab/InternVL2-1B", "base_model:finetune:OpenGVLab/InternVL2-1B", "region:us" ]
2024-10-14T05:50:46Z
2024-10-14T11:33:39+00:00
23
0
--- base_model: OpenGVLab/InternVL2-1B datasets: - 5CD-AI/Viet-OCR-VQA - 5CD-AI/Viet-Doc-VQA - 5CD-AI/Viet-Doc-VQA-II - Vi-VLM/Vista - 5CD-AI/Viet-Receipt-VQA - 5CD-AI/Viet-Sketches-VQA - 5CD-AI/Viet-Geometry-VQA - 5CD-AI/Viet-Wiki-Handwriting - 5CD-AI/Viet-ComputerScience-VQA - 5CD-AI/Viet-Handwriting-gemini-VQA - 5CD-AI/Viet-Menu-gemini-VQA - 5CD-AI/Viet-Vintext-gemini-VQA - 5CD-AI/Viet-OpenViVQA-gemini-VQA - 5CD-AI/Viet-Resume-VQA - 5CD-AI/Viet-ViTextVQA-gemini-VQA language: - vi - en library_name: transformers pipeline_tag: visual-question-answering tags: - vision --- <div align="center"> <img src="Vintern_logo.png" width="700"/> </div> ## Vintern-1B-v2 ❄️ (Viet-InternVL2-1B-v2) - The LLaVA 🌋 Challenger We are excited to introduce **Vintern-1B-v2** the Vietnamese 🇻🇳 multimodal model that combines the advanced Vietnamese language model [Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct)[1] with the latest visual model, [InternViT-300M-448px](https://huggingface.co/OpenGVLab/InternViT-300M-448px)[2], CVPR 2024. This model excels in tasks such as OCR-VQA, Doc-VQA, and Chart-VQA,... With only 1 billion parameters, it is **4096 context length** finetuned from the [Viet-InternVL2-1B](https://huggingface.co/5CD-AI/Viet-InternVL2-1B) model on over 3 million specialized image-question-answer pairs for optical character recognition 🔍, text recognition 🔤, document extraction 📑, and general VQA. The model can be integrated into various on-device applications 📱, demonstrating its versatility and robust capabilities. [**\[🤗 HF Demo\]**](https://huggingface.co/spaces/khang119966/Vintern-v2-Demo) The special thing is that our model can be easily finetuned with a T4 GPU on Google Colab by following the instructions provided at the end of this section. ## Model Details | Model Name | Vision Part | Language Part | | :------------------: | :---------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------------: | | Vintern-1B-v2 | [InternViT-300M-448px](https://huggingface.co/OpenGVLab/InternViT-300M-448px) | [Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct) | Vintern-1B-v2 is a multimodal large language model series, featuring models of various sizes. For each size, we release instruction-tuned models optimized for multimodal tasks. Vintern-1B-v2 consists of [InternViT-300M-448px](https://huggingface.co/OpenGVLab/InternViT-300M-448px), an MLP projector, and [Qwen2-0.5B-Instruct](https://huggingface.co/Qwen/Qwen2-0.5B-Instruct). 
## Training details 📚 The fine-tuning dataset was meticulously sampled in part from the following datasets: [Viet-OCR-VQA 📚](https://huggingface.co/datasets/5CD-AI/Viet-OCR-VQA), [Viet-Doc-VQA 📄](https://huggingface.co/datasets/5CD-AI/Viet-Doc-VQA), [Viet-Doc-VQA-II 📑](https://huggingface.co/datasets/5CD-AI/Viet-Doc-VQA-II), [Vista 🖼️](https://huggingface.co/datasets/Vi-VLM/Vista), [Viet-Receipt-VQA 🧾](https://huggingface.co/datasets/5CD-AI/Viet-Receipt-VQA), [Viet-Sketches-VQA ✏️](https://huggingface.co/datasets/5CD-AI/Viet-Sketches-VQA), [Viet-Geometry-VQA 📐](https://huggingface.co/datasets/5CD-AI/Viet-Geometry-VQA), [Viet-Wiki-Handwriting ✍️](https://huggingface.co/datasets/5CD-AI/Viet-Wiki-Handwriting), [Viet-ComputerScience-VQA 💻](https://huggingface.co/datasets/5CD-AI/Viet-ComputerScience-VQA), [Viet-Handwriting-gemini-VQA 🖋️](https://huggingface.co/datasets/5CD-AI/Viet-Handwriting-gemini-VQA), [Viet-Menu-gemini-VQA 🍽️](https://huggingface.co/datasets/5CD-AI/Viet-Menu-gemini-VQA), [Viet-Vintext-gemini-VQA 📜](https://huggingface.co/datasets/5CD-AI/Viet-Vintext-gemini-VQA), [Viet-OpenViVQA-gemini-VQA 🧠](https://huggingface.co/datasets/5CD-AI/Viet-OpenViVQA-gemini-VQA), [Viet-Resume-VQA 📃](https://huggingface.co/datasets/5CD-AI/Viet-Resume-VQA), [Viet-ViTextVQA-gemini-VQA 📑](https://huggingface.co/datasets/5CD-AI/Viet-ViTextVQA-gemini-VQA) ## Benchmarks 📈 Since there are still many different metrics that need to be tested, **we chose a quick and simple metric first to guide the development of our model**. Our metric is inspired by Lavy[4]. For the time being, we are using GPT-4 to evaluate the quality of answers on two datasets: OpenViVQA and ViTextVQA. Detailed results can be found at the provided [here](https://huggingface.co/datasets/5CD-AI/Vintern-1B-v2-Benchmark-gpt4o-score). The inputs are images, questions, labels, and predicted answers. The model will return a score from 0 to 10 for the corresponding answer quality. The results table is shown below. <table border="1" cellspacing="0" cellpadding="5"> <tr align="center"> <td rowspan="2"><b>Model</b></td> <td colspan="2"><b>gpt4o-score</b></td> </tr> <tr align="center"> <td><b>OpenViVQA-dev</b></td> <td><b>ViTextVQA-dev</b></td> </tr> <tr align="center"> <td align="left">Vintern-1B</td> <td>7.1/10</td> <td>7.6/10</td> </tr> <tr align="center"> <td align="left"><b>Vintern-1B-v2</b></td> <td><b>7.7/10</b></td> <td><b>7.7/10</b></td> </tr> </table> The benchmark result in [MTVQA](https://github.com/bytedance/MTVQA/tree/main) | Models | Open-Source | Vietnamese Score | |:----------------------------------:|:-------------:|:------------------:| | Qwen2-VL 72B (Top 1) | ✗ | 41.6 | | GPT-4o (Top 2) | ✗ | 34.2 | | **Vintern-1B-V2** (Top 3) | ✓ | **31.7** | | Qwen2-VL 7B | ✓ | 30.0 | | Claude3 Opus | ✗ | 29.1 | | GPT-4o mini | ✗ | 29.1 | | GPT-4V | ✗ | 28.9 | | Gemini Ultra | ✗ | 28.6 | | InternVL2 76B | ✓ | 26.9 | | QwenVL Max | ✗ | 23.5 | | Claude3 Sonnet | ✗ | 20.8 | | QwenVL Plus | ✗ | 18.1 | | MiniCPM-V2.5 | ✓ | 15.3 | | InternVL-V1.5 | ✗ | 12.4 | We are still working on more detailed benchmarks. ## Examples <div align="center"> <img src="ex_images/1.png" width="500"/> </div> ``` User: <image> Mô tả hình ảnh một cách chi tiết. Assistant: Bức ảnh là một trang giấy kẻ ô vuông với dòng chữ viết tay bằng mực đen. Bố cục đơn giản, chỉ có nội dung văn bản được viết theo chiều dọc từ trên xuống dưới. 
Các dòng chữ được sắp xếp theo thứ tự từ trên xuống dưới như sau: - Nghe thầy đọc thơ - Em nghe thầy đọc bao ngày - Tiếng thơ đỏ nắng xanh cây quanh nhà - Mái chèo nghe vọng sông xa - Em êm nghe tiếng của bà năm xưa - Nghe trăng thở động tàu dừa - Rào rào nghe chuyển con mưa giữa trời - Thêm yêu tiếng hát nụ cười - Nghe thơ em thấy đất trời đẹp ra. - Trần Đăng Khoa ``` <div align="center"> <img src="ex_images/4.jpg" width="500"/> </div> ``` User: <image> Tam giác ABC là gì của đường tròn tâm O ? Assistant: Tam giác ABC là tam giác nội tiếp của đường tròn tâm O. ``` <div align="center"> <img src="ex_images/2.jpg" width="500"/> </div> ``` User: <image> Mô tả hình ảnh một cách chi tiết. Assistant: Bức ảnh chụp một con sông ở Việt Nam với những ngôi nhà cổ kính được sơn màu vàng nhạt. Phía trước là một con thuyền nhỏ màu đỏ và một chiếc thuyền lớn màu xanh dương. Trên thuyền có một người đàn ông đang chèo thuyền. Hai bên bờ sông là những hàng cây xanh mát. Bầu trời trong xanh và có một vài đám mây trắng. Ở góc dưới bên trái của bức ảnh có dòng chữ "VNEXPRESS". ``` <div align="center"> <img src="ex_images/3.png" width="400"/> </div> ``` User: <image> Mô tả hình ảnh một cách chi tiết. Assistant: Bức ảnh là một thông báo tuyển dụng của công ty Mobifone. Bố cục được chia thành 3 phần chính: - Phần đầu tiên là tiêu đề "TUYỂN DỤNG" được in chữ màu đỏ đậm, cỡ chữ lớn, nằm ở vị trí trung tâm phía trên cùng của bức ảnh. - Phần thứ hai là phần giới thiệu về vị trí và yêu cầu của ứng viên. Phần này được trình bày theo dạng liệt kê các mục: - Vị trí: Nhân viên kinh doanh - Địa điểm: Thành phố và các huyện Yên Sơn, Sơn Dương, Hàm Yên, Chiêm Hóa - Yêu cầu: Trình độ: Tốt nghiệp Trung cấp trở lên, Độ tuổi: Không quá 35 tuổi - Phần thứ ba là phần giới thiệu về chế độ đãi ngộ. Phần này được trình bày theo dạng liệt kê các mục: - Chế độ đãi ngộ: - Mức lương: 7.000.000đ - 17.000.000đ - Thưởng thi đua hàng tháng, quý: Thưởng theo doanh số - Chế độ bảo hiểm theo quy định - Được trau dồi các kỹ năng: Giao tiếp, thuyết phục, chăm sóc khách hàng... Phần cuối cùng là phần "NỘP HỒ SƠ" với thông tin liên lạc của người nộp hồ sơ. Phần này được trình bày theo dạng liệt kê các mục: - Người nhận: Trần Thị Thúy - Địa chỉ: 547 đường Quang Trung, Tổ 25, Phường Phan Thiết, TP Tuyên Quang - Điện thoại liên hệ: 0901.452.888 - Email nhận hồ sơ: [email protected] - Thời hạn nộp hồ sơ: đến hết ngày 28/5/2021 ``` <div align="center"> <img src="ex_images/5.jpg" width="400"/> </div> ``` User: <image> Con nào sẽ thắng ? Assistant: Con thỏ sẽ thắng. ``` <div align="center"> <img src="ex_images/6.png" width="400"/> </div> ``` User: <image> Mô tả chi tiết hình ảnh. Assistant: Bức ảnh hiển thị một đoạn mã nguồn được viết bằng ngôn ngữ lập trình Python. Đoạn mã này bao gồm hai phần: Phần đầu: Nêu tiêu đề "Example of avoiding complex expressions" (Mô tả ví dụ về việc tránh các hàm phức tạp). Phần chính: Bao gồm hai hàm square_numbers và main(). Hàm square_numbers nhận một tham số là danh sách số từ 1 đến 4. Hàm này sử dụng vòng lặp for để duyệt qua mỗi số trong danh sách và thêm nó vào danh sách squares. Sau đó, hàm trả về danh sách squares. Hàm main() tạo một biến numbers với giá trị [1, 2, 3, 4], sau đó gọi hàm square_numbers và ghi kết quả vào biến result. Cuối cùng, hàm print(result) sẽ in ra kết quả của hàm main(). ``` ## Quickstart Here provides a code snippet to show you how to load the tokenizer and model and how to generate contents. 
To run inference using the model, follow the steps outlined in our Colab inference notebook [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1ZD1oB56PF0lF66RCuTVJYLTEV0tM3CFf?usp=sharing) ```python import numpy as np import torch import torchvision.transforms as T # from decord import VideoReader, cpu from PIL import Image from torchvision.transforms.functional import InterpolationMode from transformers import AutoModel, AutoTokenizer IMAGENET_MEAN = (0.485, 0.456, 0.406) IMAGENET_STD = (0.229, 0.224, 0.225) def build_transform(input_size): MEAN, STD = IMAGENET_MEAN, IMAGENET_STD transform = T.Compose([ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC), T.ToTensor(), T.Normalize(mean=MEAN, std=STD) ]) return transform def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size): best_ratio_diff = float('inf') best_ratio = (1, 1) area = width * height for ratio in target_ratios: target_aspect_ratio = ratio[0] / ratio[1] ratio_diff = abs(aspect_ratio - target_aspect_ratio) if ratio_diff < best_ratio_diff: best_ratio_diff = ratio_diff best_ratio = ratio elif ratio_diff == best_ratio_diff: if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]: best_ratio = ratio return best_ratio def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False): orig_width, orig_height = image.size aspect_ratio = orig_width / orig_height # calculate the existing image aspect ratio target_ratios = set( (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if i * j <= max_num and i * j >= min_num) target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1]) # find the closest aspect ratio to the target target_aspect_ratio = find_closest_aspect_ratio( aspect_ratio, target_ratios, orig_width, orig_height, image_size) # calculate the target width and height target_width = image_size * target_aspect_ratio[0] target_height = image_size * target_aspect_ratio[1] blocks = target_aspect_ratio[0] * target_aspect_ratio[1] # resize the image resized_img = image.resize((target_width, target_height)) processed_images = [] for i in range(blocks): box = ( (i % (target_width // image_size)) * image_size, (i // (target_width // image_size)) * image_size, ((i % (target_width // image_size)) + 1) * image_size, ((i // (target_width // image_size)) + 1) * image_size ) # split the image split_img = resized_img.crop(box) processed_images.append(split_img) assert len(processed_images) == blocks if use_thumbnail and len(processed_images) != 1: thumbnail_img = image.resize((image_size, image_size)) processed_images.append(thumbnail_img) return processed_images def load_image(image_file, input_size=448, max_num=12): image = Image.open(image_file).convert('RGB') transform = build_transform(input_size=input_size) images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num) pixel_values = [transform(image) for image in images] pixel_values = torch.stack(pixel_values) return pixel_values model = AutoModel.from_pretrained( "5CD-AI/Vintern-1B-v2", torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, trust_remote_code=True, ).eval().cuda() tokenizer = AutoTokenizer.from_pretrained("5CD-AI/Vintern-1B-v2", trust_remote_code=True, use_fast=False) test_image = 'test-image.jpg' pixel_values = load_image(test_image, max_num=12).to(torch.bfloat16).cuda() 
generation_config = dict(max_new_tokens= 1024, do_sample=False, num_beams = 3, repetition_penalty=2.5) question = '<image>\nMô tả hình ảnh một cách chi tiết.' response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True) print(f'User: {question}\nAssistant: {response}') #question = "Câu hỏi khác ......" #response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True) #print(f'User: {question}\nAssistant: {response}') ``` ## Finetune on your Data [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1bK6fpWfResjv9UxWoKHDStXQ8bop3a6Z?usp=sharing) ## Citation ``` @misc{doan2024vintern1befficientmultimodallarge, title={Vintern-1B: An Efficient Multimodal Large Language Model for Vietnamese}, author={Khang T. Doan and Bao G. Huynh and Dung T. Hoang and Thuc D. Pham and Nhat H. Pham and Quan T. M. Nguyen and Bang Q. Vo and Suong N. Hoang}, year={2024}, eprint={2408.12480}, archivePrefix={arXiv}, primaryClass={cs.LG}, url={https://arxiv.org/abs/2408.12480}, } ``` ## References [1] Yang, An, et al. "Qwen2 technical report." arXiv preprint arXiv:2407.10671 (2024). [2] Chen, Zhe, et al. "Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks." Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition. 2024. [3] Chen, Zhe, et al. "How far are we to gpt-4v? closing the gap to commercial multimodal models with open-source suites." arXiv preprint arXiv:2404.16821 (2024). [4] Tran, Chi, and Huong Le Thanh. "LaVy: Vietnamese Multimodal Large Language Model." arXiv preprint arXiv:2404.07922 (2024).
[ "CHIA" ]
ICB-UMA/ClinLinker-KB-P
ICB-UMA
null
[ "pytorch", "roberta", "medical", "es", "arxiv:2404.06367", "base_model:PlanTL-GOB-ES/roberta-base-biomedical-clinical-es", "base_model:finetune:PlanTL-GOB-ES/roberta-base-biomedical-clinical-es", "license:apache-2.0", "region:us" ]
2024-10-15T07:48:14Z
2024-10-15T10:06:02+00:00
23
0
--- base_model: - PlanTL-GOB-ES/roberta-base-biomedical-clinical-es language: - es license: apache-2.0 tags: - medical --- # **ClinLinker-KB-P** ## Model Description ClinLinker-KB-P is a state-of-the-art model designed for medical entity linking (MEL) in Spanish, specifically optimized for tasks in the clinical domain. It is based on bi-encoder models enriched with knowledge from medical knowledge graphs like UMLS and SNOMED-CT. This model leverages contrastive learning techniques to enhance the quality of embedding spaces and improve the retrieval of relevant concepts for medical entities mentioned in clinical text. The "P" in ClinLinker-KB-P stands for **Parents**. In this model, hierarchical relationships were used, focusing on **parent** terms as positive candidates. This strategy improves the embedding quality by incorporating terms that are conceptually close at the parent level in the knowledge graph, enhancing the linking process. ## Intended Use - **Domain:** Clinical Natural Language Processing (NLP) for medical entity linking in Spanish. - **Primary Tasks:** Recognizing and normalizing medical entities such as diseases, symptoms, and procedures from clinical texts and linking them to their corresponding standardized terminologies in SNOMED-CT. - **Corpora Evaluated:** ClinLinker-KB-P was tested on several Spanish medical corpora including DisTEMIST (for diseases), MedProcNER (for procedures), and SympTEMIST (for symptoms). It achieved top-tier performance, with top-200 accuracy values of 0.965 in SympTEMIST, 0.940 in MedProcNER, and 0.905 in DisTEMIST. - **Target Users:** Researchers, healthcare practitioners, and developers working with Spanish medical data for entity recognition and normalization tasks. ## Performance ClinLinker-KB-P achieved the following key results: - **Top-200 Accuracy:** - DisTEMIST: 90.5% - MedProcNER: 94.0% - SympTEMIST: 96.5% - **Top-25 Accuracy:** - The model achieves up to 85.9% accuracy in retrieving the correct concept in the top-25 candidates for disease and procedure normalization tasks. - **Cross-Encoder Integration:** ClinLinker-KB-P is particularly effective when used with a cross-encoder for reranking candidate concepts, leading to improved accuracy in zero-shot and few-shot learning scenarios. ## Technical Details - **Architecture:** The model is a bi-encoder with contrastive learning, designed to generate embeddings for clinical terms, using the relational structure of medical concepts extracted from the UMLS and SNOMED-CT knowledge bases. - **Training Strategy:** ClinLinker-KB-P was trained with a hierarchical relationship structure, incorporating **parent** nodes from medical knowledge graphs to enhance the embeddings’ quality. The training process also utilizes hard negative mining techniques to optimize candidate retrieval. ## Usage Users can utilize our pre-trained model in several ways: - By using the provided **FaissEncoder** class to perform efficient entity linking with FAISS-based search. 
- By training their own Bi-encoder model for medical entity linking using our framework available on GitHub: [https://github.com/ICB-UMA/ClinLinker-KB](https://github.com/ICB-UMA/ClinLinker-KB) - Alternatively, users can load the model directly with Hugging Face’s `AutoModel` and `AutoTokenizer` for flexible integration in custom pipelines: ```python from transformers import AutoModel, AutoTokenizer model = AutoModel.from_pretrained("ICB-UMA/ClinLinker-KB-P") tokenizer = AutoTokenizer.from_pretrained("ICB-UMA/ClinLinker-KB-P") ``` ## Limitations - **Language Restriction:** ClinLinker-KB-P is currently optimized for Spanish clinical corpora. - **Expert Supervision:** While the model shows high accuracy in entity linking tasks, it is designed to assist semi-automated systems, requiring expert supervision for final validation. ## Citation If you use ClinLinker-KB-P in your research, please cite the following: ```bibtex @misc{gallego2024clinlinker, title={ClinLinker: Medical Entity Linking of Clinical Concept Mentions in Spanish}, author={Fernando Gallego and Guillermo López-García and Luis Gasco-Sánchez and Martin Krallinger and Francisco J. Veredas}, year={2024}, eprint={2404.06367}, archivePrefix={arXiv}, primaryClass={cs.CL} } ```
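As a supplement to the usage options above, below is a hedged sketch of how the bi-encoder embeddings could be used for candidate retrieval without the provided FaissEncoder. The [CLS]-token pooling and the example candidate strings are illustrative assumptions, not a documented recipe from the authors.

```python
import torch
from transformers import AutoModel, AutoTokenizer

model_name = "ICB-UMA/ClinLinker-KB-P"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModel.from_pretrained(model_name).eval()

mention = "dolor torácico"
candidates = ["dolor torácico", "dolor abdominal", "cefalea"]  # illustrative terminology entries

def embed(texts):
    batch = tokenizer(texts, padding=True, truncation=True, return_tensors="pt")
    with torch.no_grad():
        output = model(**batch)
    cls = output.last_hidden_state[:, 0]  # assumption: [CLS] pooling for the embedding
    return torch.nn.functional.normalize(cls, dim=-1)

# Cosine similarity between the mention and each candidate (normalized dot product).
scores = embed([mention]) @ embed(candidates).T
best = scores.argmax(dim=-1).item()
print(candidates[best], scores[0, best].item())
```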
[ "DISTEMIST", "MEDICAL DATA", "SYMPTEMIST" ]
chau9ho/marbles
chau9ho
text-to-image
[ "diffusers", "text-to-image", "flux", "lora", "template:sd-lora", "ai-toolkit", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
2024-10-25T06:33:05Z
2024-10-25T06:46:02+00:00
23
1
--- base_model: black-forest-labs/FLUX.1-dev license: other license_name: flux-1-dev-non-commercial-license license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md tags: - text-to-image - flux - lora - diffusers - template:sd-lora - ai-toolkit widget: - text: A sophisticated toy poodle with apricot-colored fur, expertly groomed into a round teddy bear cut, wearing a lime green silk bow tie, lounging in a French garden conservatory, soft bokeh background with blooming blue hydrangeas, professional portrait lighting, shallow depth of field, shot on Canon EOS R5, marbles_hk output: url: samples/1729837920462__000001200_0.jpg - text: Whimsical portrait of a miniature poodle as a Victorian aristocrat, fluffy cream-colored coat perfectly styled, emerald silk bow at neck, reclining on a velvet fainting couch, surrounded by English cottage garden flowers, dreamy afternoon light, studio photography, f/2.8 aperture, marbles_hk output: url: samples/1729837951296__000001200_1.jpg - text: 'High-fashion pet editorial: caramel-toned toy poodle with cloud-like fur texture, sporting a spring green designer bow, posed against an impressionist garden backdrop, blue hydrangeas adding pop of color, soft natural lighting with gentle shadows, magazine-style composition, shot on medium format digital, marbles_hk' output: url: samples/1729837982097__000001200_2.jpg instance_prompt: marbles_hk --- # marbles Model trained with [AI Toolkit by Ostris](https://github.com/ostris/ai-toolkit) <Gallery /> ## Trigger words You should use `marbles_hk` to trigger the image generation. ## Download model and use it with ComfyUI, AUTOMATIC1111, SD.Next, Invoke AI, etc. Weights for this model are available in Safetensors format. [Download](/chau9ho/marbles/tree/main) them in the Files & versions tab. ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers) ```py from diffusers import AutoPipelineForText2Image import torch pipeline = AutoPipelineForText2Image.from_pretrained('black-forest-labs/FLUX.1-dev', torch_dtype=torch.bfloat16).to('cuda') pipeline.load_lora_weights('chau9ho/marbles', weight_name='marbles.safetensors') image = pipeline('A sophisticated toy poodle with apricot-colored fur, expertly groomed into a round teddy bear cut, wearing a lime green silk bow tie, lounging in a French garden conservatory, soft bokeh background with blooming blue hydrangeas, professional portrait lighting, shallow depth of field, shot on Canon EOS R5, marbles_hk').images[0] image.save("my_image.png") ``` For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
[ "BEAR" ]
FallenMerick/MN-Chunky-Lotus-12B
FallenMerick
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "storywriting", "text adventure", "creative", "story", "writing", "fiction", "roleplaying", "rp", "mergekit", "merge", "en", "arxiv:2306.01708", "base_model:Epiculous/Violet_Twilight-v0.2", "base_model:merge:Epiculous/Violet_Twilight-v0.2", "base_model:TheDrummer/Rocinante-12B-v1.1", "base_model:merge:TheDrummer/Rocinante-12B-v1.1", "base_model:flammenai/Mahou-1.5-mistral-nemo-12B", "base_model:merge:flammenai/Mahou-1.5-mistral-nemo-12B", "base_model:nbeerbower/mistral-nemo-gutenberg-12B-v4", "base_model:merge:nbeerbower/mistral-nemo-gutenberg-12B-v4", "license:cc-by-4.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-11-07T23:42:28Z
2024-11-09T19:19:13+00:00
23
9
---
base_model:
- TheDrummer/Rocinante-12B-v1.1
- Epiculous/Violet_Twilight-v0.2
- nbeerbower/mistral-nemo-gutenberg-12B-v4
- flammenai/Mahou-1.5-mistral-nemo-12B
language:
- en
library_name: transformers
license: cc-by-4.0
tags:
- storywriting
- text adventure
- creative
- story
- writing
- fiction
- roleplaying
- rp
- mergekit
- merge
---

![pic](https://huggingface.co/FallenMerick/MN-Chunky-Lotus-12B/resolve/main/chunky-lotus.jpg)

# MN-Chunky-Lotus-12B

I had originally planned to use this model for future/further merges, but decided to go ahead and release it since it scored rather high on my local EQ Bench testing (79.58 w/ 100% parsed @ 8-bit). </br>
Bear in mind that most models tend to score a bit higher on my own local tests as compared to their posted scores. Still, it's the highest score I've personally seen from all the models I've tested. </br>
It's a decent model, with great emotional intelligence and acceptable adherence to various character personalities. It does a good job at roleplaying despite being a bit bland at times. </br>
</br>
Overall, I like the way it writes, but it has a few formatting issues that show up from time to time, and it has an occasional tendency to paste walls of character feelings/intentions at the end of some outputs without any prompting. This is something I hope to correct with future iterations. </br>
</br>
This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit).

GGUF quants:
* https://huggingface.co/backyardai/MN-Chunky-Lotus-12B-GGUF
* https://huggingface.co/mradermacher/MN-Chunky-Lotus-12B-GGUF
* https://huggingface.co/mradermacher/MN-Chunky-Lotus-12B-i1-GGUF
* https://huggingface.co/FallenMerick/MN-Chunky-Lotus-12B-GGUF

## Merge Details

### Merge Method

This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method.

### Models Merged

The following models were included in the merge:
* [Epiculous/Violet_Twilight-v0.2](https://huggingface.co/Epiculous/Violet_Twilight-v0.2)
* [nbeerbower/mistral-nemo-gutenberg-12B-v4](https://huggingface.co/nbeerbower/mistral-nemo-gutenberg-12B-v4)
* [flammenai/Mahou-1.5-mistral-nemo-12B](https://huggingface.co/flammenai/Mahou-1.5-mistral-nemo-12B)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
models:
  - model: Epiculous/Violet_Twilight-v0.2
    parameters:
      weight: 1.0
      density: 1.0
  - model: nbeerbower/mistral-nemo-gutenberg-12B-v4
    parameters:
      weight: 1.0
      density: 0.54
  - model: flammenai/Mahou-1.5-mistral-nemo-12B
    parameters:
      weight: 1.0
      density: 0.26
merge_method: ties
base_model: TheDrummer/Rocinante-12B-v1.1
parameters:
  normalize: true
dtype: bfloat16
```

The idea behind this recipe was to take the long-form writing capabilities of Gutenberg, curtail it a bit with the very short output formatting of Mahou, and use Violet Twilight as an extremely solid roleplaying foundation underneath. </br>
Rocinante is used as the base model in this merge in order to really target the delta weights from Gutenberg, since those seemed to have the highest impact on the resulting EQ of the model. </br>
</br>
Special shoutout to [@matchaaaaa](https://huggingface.co/matchaaaaa) for helping with testing, and for all the great model recommendations. Also, for just being an all-around great person who's really inspired and motivated me to continue merging and working on models.
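If you want to try the merged model directly, here is a minimal text-generation sketch with transformers; the dtype, device map, prompt, and sampling settings are illustrative assumptions rather than the author's recommended preset (`device_map="auto"` requires the accelerate package).

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo = "FallenMerick/MN-Chunky-Lotus-12B"
tokenizer = AutoTokenizer.from_pretrained(repo)
model = AutoModelForCausalLM.from_pretrained(
    repo,
    torch_dtype=torch.bfloat16,  # matches the dtype used for the merge
    device_map="auto",           # requires accelerate
)

# Illustrative storywriting prompt; adjust sampling to taste.
prompt = "Write the opening scene of a slow-burn mystery set in a snowed-in mountain lodge."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
output = model.generate(**inputs, max_new_tokens=200, do_sample=True, temperature=0.8)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```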
[ "BEAR" ]
Robertsowasp/bert-finetuned-squad
Robertsowasp
question-answering
[ "transformers", "tensorboard", "safetensors", "bert", "question-answering", "generated_from_trainer", "base_model:google-bert/bert-base-cased", "base_model:finetune:google-bert/bert-base-cased", "license:apache-2.0", "endpoints_compatible", "region:us" ]
2024-11-19T02:54:15Z
2024-11-30T19:07:30+00:00
23
0
--- base_model: bert-base-cased library_name: transformers license: apache-2.0 tags: - generated_from_trainer widget: - text: What's my name, eh? context: My name is Jason and I live in Phoenix. example_title: Name - text: Where do I live? context: My name is Sarah and I live in London example_title: Location - text: What does Chhauni Silkhana mean? context: The National Museum is located in the western part of Kathmandu, near the Swayambhunath stupa in an historical building. This building was constructed in the early 19th century by General Bhimsen Thapa. It is the most important museum in the country, housing an extensive collection of weapons, art and antiquities of historic and cultural importance. The museum was established in 1928 as a collection house of war trophies and weapons, and the initial name of this museum was Chhauni Silkhana, meaning 'the stone house of arms and ammunition'. Given its focus, the museum contains many weapons, including locally made firearms used in wars, leather cannons from the 18th–19th century, and medieval and modern works in wood, bronze, stone and paintings. example_title: Katmandu - text: What are some diseases which won't benefit from PCR methods? context: Thus, the technological ability to detect any infectious agent rapidly and specifically are currently available. The only remaining blockades to the use of PCR as a standard tool of diagnosis are in its cost and application, neither of which is insurmountable. The diagnosis of a few diseases will not benefit from the development of PCR methods, such as some of the clostridial diseases (tetanus and botulism). These diseases are fundamentally biological poisonings by relatively small numbers of infectious bacteria that produce extremely potent neurotoxins. A significant proliferation of the infectious agent does not occur, this limits the ability of PCR to detect the presence of any bacteria. example_title: PCR methods - text: When is the action? context: On September 27, 2010, Public Safety Canada partnered with STOP.THINK.CONNECT, a coalition of non-profit, private sector, and government organizations dedicated to informing the general public on how to protect themselves online. On February 4, 2014, the Government of Canada launched the Cyber Security Cooperation Program. The program is a $1.5 million five-year initiative aimed at improving Canada’s cyber systems through grants and contributions to projects in support of this objective. Public Safety Canada aims to begin an evaluation of Canada's Cyber Security Strategy in early 2015. Public Safety Canada administers and routinely updates the GetCyberSafe portal for Canadian citizens, and carries out Cyber Security Awareness Month during October. example_title: Canada cyber model-index: - name: bert-finetuned-squad results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # bert-finetuned-squad This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased) on an unknown dataset. 
## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: linear - num_epochs: 3 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.46.2 - Pytorch 2.5.1+cu121 - Datasets 3.1.0 - Tokenizers 0.20.3
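Since the card above is still a stub, here is a minimal usage sketch that mirrors the widget examples on this card, loading the checkpoint through the standard transformers question-answering pipeline; the question and context are taken from the card's widget, and everything else is a plain default.

```python
from transformers import pipeline

# Extractive QA with the fine-tuned checkpoint from this repository.
qa = pipeline("question-answering", model="Robertsowasp/bert-finetuned-squad")

result = qa(
    question="Where do I live?",
    context="My name is Sarah and I live in London",
)
print(result["answer"], round(result["score"], 3))
```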
[ "PCR" ]
thomaskim1130/stella_en_400M_v5-FinanceRAG
thomaskim1130
sentence-similarity
[ "sentence-transformers", "safetensors", "new", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:2256", "loss:MultipleNegativesRankingLoss", "custom_code", "arxiv:1908.10084", "arxiv:1705.00652", "base_model:dunzhang/stella_en_400M_v5", "base_model:finetune:dunzhang/stella_en_400M_v5", "doi:10.57967/hf/3681", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-11-25T20:05:22Z
2024-11-25T20:06:18+00:00
23
3
--- base_model: dunzhang/stella_en_400M_v5 library_name: sentence-transformers metrics: - cosine_accuracy@1 - cosine_accuracy@3 - cosine_accuracy@5 - cosine_accuracy@10 - cosine_precision@1 - cosine_precision@3 - cosine_precision@5 - cosine_precision@10 - cosine_recall@1 - cosine_recall@3 - cosine_recall@5 - cosine_recall@10 - cosine_ndcg@10 - cosine_mrr@10 - cosine_map@100 - dot_accuracy@1 - dot_accuracy@3 - dot_accuracy@5 - dot_accuracy@10 - dot_precision@1 - dot_precision@3 - dot_precision@5 - dot_precision@10 - dot_recall@1 - dot_recall@3 - dot_recall@5 - dot_recall@10 - dot_ndcg@10 - dot_mrr@10 - dot_map@100 pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:2256 - loss:MultipleNegativesRankingLoss widget: - source_sentence: "Instruct: Given a web search query, retrieve relevant passages\ \ that answer the query.\nQuery: Title: \nText: by what percentage did the aeco\ \ natural gas sales index decline from 2011 to 2013?" sentences: - "Title: \nText: | | 2017 | 2016 |\n| Projected benefit obligation | $74,953 |\ \ $76,586 |\n| Accumulated benefit obligation | 71,975 | 74,081 |\n| Fair value\ \ of plan assets | $58,353 | $56,530 |\nAssumptions The following assumptions,\ \ which are the weighted average for all plans, are used to calculate the benefit\ \ obligation at December 31 of each year and the net periodic benefit cost for\ \ the subsequent year." - "Title: \nText: discount to Brent was narrower in 2013 than in 2012 and 2011.\n\ As a result of the significant increase in U. S. production of light sweet crude\ \ oil, the historical relationship between WTI, Brent and LLS pricing may not\ \ be indicative of future periods.\nComposition – The proportion of our liquid\ \ hydrocarbon sales volumes that are NGLs continues to increase due to our development\ \ of United States unconventional liquids-rich plays.\nNGLs were 15 percent of\ \ our North America E&P liquid hydrocarbon sales volumes in 2013 compared to 10\ \ percent in 2012 and 7 percent in 2011.\nNatural gas – A significant portion\ \ of our natural gas production in the U. S. is sold at bid-week prices, or first-of-month\ \ indices relative to our specific producing areas.\nAverage Henry Hub settlement\ \ prices for natural gas were 31 percent higher for 2013 than for 2012. International\ \ E&P Liquid hydrocarbons – Our International E&P crude oil production is relatively\ \ sweet and has historically sold in relation to the Brent crude benchmark, which\ \ on average was 3 percent lower for 2013 than 2012.\nNatural gas – Our major\ \ International E&P natural gas-producing regions are Europe and E. G. Natural\ \ gas prices in Europe have been considerably higher than the U. S. in recent\ \ years.\nIn the case of E. G. , our natural gas sales are subject to term contracts,\ \ making realized prices in these areas less volatile.\nThe natural gas sales\ \ from E. G. are at fixed prices; therefore, our reported average International\ \ E&P natural gas realized prices may not fully track market price movements." 
- "Title: \nText: american tower corporation and subsidiaries notes to consolidated\ \ financial statements towerco ghana for an agreed purchase price of up to approximately\ \ $ 430 million , of which the company will pay up to approximately $ 220 million\ \ for its 51% ( 51 % ) stake in the holding company .\nmtn ghana will be the anchor\ \ tenant , on commercial terms , on each of the towers being purchased .\nthe\ \ company also expects that towerco ghana will build at least an additional 400\ \ sites for both mtn ghana and other wireless operators in ghana over the next\ \ five years .\nthe company expects to close on an initial tranche of towers in\ \ the first half of 2011 , subject to customary closing conditions .\n6 .\nlong-term\ \ obligations outstanding amounts under the company 2019s long-term financing\ \ arrangements consist of the following as of december 31 , ( in thousands ) :\ \ .\n\n | 2010 \ \ | 2009 \n-----------------------------------------------------------\ \ | ---------------- | ----------------\ncommercial mortgage pass-through certificates\ \ series 2007-1 | $ 1750000 | $ 1750000 \nrevolving credit facility\ \ | 300000 | 550000 \nterm\ \ loan | 325000 |\ \ 325000 \nxcel credit facility \ \ | 2014 | 73367 \ncolombian short-term credit facility\ \ | 72889 | 2014 \n4.50% ( 4.50 %\ \ ) senior notes | 999216 | 2014 \ \ \n5.05% ( 5.05 % ) senior notes | 699186\ \ | 2014 \n4.625% ( 4.625 % ) senior notes \ \ | 599346 | 599210 \n7.00% ( 7.00 % ) senior\ \ notes | 500000 | 500000 \n\ 7.25% ( 7.25 % ) senior notes | 295420 \ \ | 295038 \n5.0% ( 5.0 % ) convertible notes \ \ | 2014 | 59683 \n7.25% ( 7.25 % ) senior subordinated\ \ notes | 2014 | 288 \nnotes payable\ \ and capital leases | 46331 | 58995 \ \ \ntotal | 5587388\ \ | 4211581 \nless current portion of long term obligations \ \ | -74896 ( 74896 ) | -70521 ( 70521 )\nlong-term obligations \ \ | $ 5512492 | $ 4141060 \n\n\ commercial mortgage pass-through certificates , series 2007-1 2014during the year\ \ ended december 31 , 2007 , the company completed a securitization transaction\ \ ( the 201csecuritization 201d ) involving assets related to 5295 broadcast and\ \ wireless communications towers ( the 201csecured towers 201d ) owned by two\ \ special purpose subsidiaries of the company , through a private offering of\ \ $ 1.75 billion of commercial mortgage pass-through certificates , series 2007-1\ \ ( the 201ccertificates 201d ) .\nthe certificates were issued by american tower\ \ trust i ( the trust ) , a trust established by american tower depositor sub\ \ , llc ( the 201cdepositor 201d ) , an indirect wholly owned special purpose\ \ subsidiary of the company .\nthe assets of the trust consist of a recourse loan\ \ ( the 201cloan 201d ) initially made by the depositor to american tower asset\ \ sub , llc and american tower asset sub ii , llc ( the 201cborrowers 201d ) ,\ \ pursuant to a loan and security agreement among the foregoing parties dated\ \ as of may 4 , 2007 ( the 201cloan agreement 201d ) .\nthe borrowers are special\ \ purpose entities formed solely for the purpose of holding the secured towers\ \ subject to the securitization .\nthe certificates were issued in seven separate\ \ classes , comprised of class a-fx , class a-fl , class b , class c , class d\ \ , class e and class f .\neach of the certificates in classes b , c , d , e and\ \ f are subordinated in right of payment to any other class of certificates which\ \ has an earlier alphabetical designation .\nthe certificates were issued with\ 
\ terms identical to the loan except for the class a-fl certificates , which bear\ \ interest at a floating " - source_sentence: "Instruct: Given a web search query, retrieve relevant passages\ \ that answer the query.\nQuery: Title: \nText: In the year with lowest amount\ \ of Interest in table 2, what's the increasing rate of Operating leases in table\ \ 2?" sentences: - "Title: \nText: Management Discussion and Analysis\nAssociates\n\"nm\" denotes\ \ not meaningful.\nNotes: (1) Based on Singapore Financial Reporting Standards\ \ (International).\n(2) Assuming constant exchange rates for the regional currencies\ \ (Indian Rupee, Indonesian Rupiah, Philippine Peso and Thai Baht) from FY 2018.\n\ (3) Share of results excluded the Group’s share of the associates’ significant\ \ one-off items which have been classified as exceptional items of the Group.\n\ (4) Singtel holds an equity interest of 21.0% in Intouch which has an equity interest\ \ of 40.5% in AIS.\n(5) Bharti Telecom Limited (BTL) holds an equity interest\ \ of 50.1% in Airtel as at 31 March 2019. In BTL’s standalone books, its loss\ \ comprised mainly interest charges on its borrowings.\n(5) Bharti Telecom Limited\ \ (BTL) holds an equity interest of 50.1% in Airtel as at 31 March 2019. In BTL’s\ \ standalone books, its loss comprised mainly interest charges on its borrowings.\n\ (6) Singtel ceased to own units in NetLink Trust following the sale to NetLink\ \ NBN Trust in July 2017 but continues to have an interest of 24.8% in NetLink\ \ NBN Trust, the holding company of NetLink Trust. The share of results included\ \ Singtel’s amortisation of deferred gain of S$20 million (FY 2018: S$32 million)\ \ on assets previously transferred to NetLink Trust, but excluded the fair value\ \ adjustments recorded by NetLink NBN Trust in respect of its acquisition of units\ \ in NetLink Trust.\n(7) Include the share of results of Singapore Post Limited.\n\ \n | Financial Year ended 31 March\ \ | | | \n----------------------------------------------\ \ | ----------------------------- | ------------ | ------ | -------------------------------\n\ \ | 2019 \ \ | 2018 | Change | Change in constant currency (2)\n \ \ | (S$ million) | (S$ million)\ \ | (%) | (%) \nGroup share of associates' pre-tax\ \ profits (3) | 1,536 | 2,461 | -37.6 | -36.2\ \ \nShare of post-tax profits |\ \ | | | \ \ \nTelkomsel | 843 \ \ | 1,031 | -18.3 | -12.4 \n\ AIS | 286 \ \ | 292 | -1.7 | -3.9 \nGlobe (3) \ \ | | \ \ | | \n- ordinary results \ \ | 251 | 180 | 39.3 | 45.3\ \ \n- exceptional items |\ \ - | 22 | nm | nm \ \ \n | 251 \ \ | 202 | 23.9 | 29.1 \n\ Intouch (3) (4) | \ \ | | | \n- operating results\ \ | 101 | 106 |\ \ -4.4 | -6.5 \n- amortisation of acquired intangibles\ \ | (22) | (21) | 8.3 | 5.9 \ \ \n | 79 \ \ | 86 | -7.5 | -9.5 \ \ \nAirtel (3) | (131) \ \ | 101 | nm | nm \nBTL (5)\ \ | (40) | (18)\ \ | 127.8 | 140.9 \n \ \ | (171) | 83 | nm \ \ | nm \nRegional associates (3) \ \ | 1,287 | 1,694 | -24.0 | -21.5 \ \ \nNetLink NBN Trust/ NetLink Trust (6) | 48\ \ | 72 | -32.9 | -32.9 \ \ \nOther associates (3) (7) | 47 \ \ | 57 | -17.6 | -17.6 \nGroup\ \ share of associates’ post-tax profits (3) | 1,383 | 1,823\ \ | -24.1 | -21.8 " - "Title: \nText: Long-term product offerings include alpha-seeking active and index\ \ strategies.\nOur alpha-seeking active strategies seek to earn attractive returns\ \ in excess of a market benchmark or performance hurdle while maintaining an appropriate\ \ risk profile, and leverage fundamental research and quantitative models 
to drive\ \ portfolio construction.\nIn contrast, index strategies seek to closely track\ \ the returns of a corresponding index, generally by investing in substantially\ \ the same underlying securities within the index or in a subset of those securities\ \ selected to approximate a similar risk and return profile of the index.\nIndex\ \ strategies include both our non-ETF index products and iShares ETFs.\nAlthough\ \ many clients use both alpha-seeking active and index strategies, the application\ \ of these strategies may differ.\nFor example, clients may use index products\ \ to gain exposure to a market or asset class, or may use a combination of index\ \ strategies to target active returns.\nIn addition, institutional non-ETF index\ \ assignments tend to be very large (multi-billion dollars) and typically reflect\ \ low fee rates.\nNet flows in institutional index products generally have a small\ \ impact on BlackRock’s revenues and earnings.\nEquity Year-end 2017 equity AUM\ \ totaled $3.372 trillion, reflecting net inflows of $130.1 billion.\nNet inflows\ \ included $174.4 billion into iShares ETFs, driven by net inflows into Core funds\ \ and broad developed and emerging market equities, partially offset by non-ETF\ \ index and active net outflows of $25.7 billion and $18.5 billion, respectively.\n\ BlackRock’s effective fee rates fluctuate due to changes in AUM mix.\nApproximately\ \ half of BlackRock’s equity AUM is tied to international markets, including emerging\ \ markets, which tend to have higher fee rates than U. S. equity strategies.\n\ Accordingly, fluctuations in international equity markets, which may not consistently\ \ move in tandem with U. S. markets, have a greater impact on BlackRock’s equity\ \ revenues and effective fee rate.\nFixed Income Fixed income AUM ended 2017 at\ \ $1.855 trillion, reflecting net inflows of $178.8 billion.\nIn 2017, active\ \ net inflows of $21.5 billion were diversified across fixed income offerings,\ \ and included strong inflows into municipal, unconstrained and total return bond\ \ funds.\niShares ETFs net inflows of $67.5 billion were led by flows into Core,\ \ corporate and treasury bond funds.\nNon-ETF index net inflows of $89.8 billion\ \ were driven by demand for liability-driven investment solutions.\nMulti-Asset\ \ BlackRock’s multi-asset team manages a variety of balanced funds and bespoke\ \ mandates for a diversified client base that leverages our broad investment expertise\ \ in global equities, bonds, currencies and commodities, and our extensive risk\ \ management capabilities.\nInvestment solutions might include a combination of\ \ long-only portfolios and alternative investments as well as tactical asset allocation\ \ overlays.\nComponent changes in multi-asset AUM for 2017 are presented below." 
- "Title: PEPSICO_2022_10K\nText: Forward-Looking Statements\nThis Annual Report\ \ on Form 10-K contains statements reflecting our views about our future performance\ \ that constitute\nforward-looking statements within the meaning of the Private\ \ Securities Litigation Reform Act of 1995 (Reform Act).\nStatements that constitute\ \ forward-looking statements within the meaning of the Reform Act are generally\ \ identified through the\ninclusion of words such as aim, anticipate, believe,\ \ drive, estimate, expect, expressed confidence, forecast,\nfuture, goal, guidance,\ \ intend, may, objective, outlook, plan, position, potential, project, seek,\n\ should, strategy, target, will or similar statements or variations of such words\ \ and other similar expressions. All\nstatements addressing our future operating\ \ performance, and statements addressing events and developments that we expect\ \ or\nanticipate will occur in the future, are forward-looking statements within\ \ the meaning of the Reform Act. These forward-looking\nstatements are based on\ \ currently available information, operating plans and projections about future\ \ events and trends. They\ninherently involve risks and uncertainties that could\ \ cause actual results to differ materially from those predicted in any such\n\ forward-looking statement. These risks and uncertainties include, but are not\ \ limited to, those described in Item 1A. Risk\nFactors and Item 7. Managements\ \ Discussion and Analysis of Financial Condition and Results of Operations Our\ \ Business\n Our Business Risks. Investors are cautioned not to place undue reliance\ \ on any such forward-looking statements, which speak\nonly as of the date they\ \ are made. We undertake no obligation to update any forward-looking statement,\ \ whether as a result of\nnew information, future events or otherwise. The discussion\ \ of risks in this report is by no means all-inclusive but is designed to\nhighlight\ \ what we believe are important factors to consider when evaluating our future\ \ performance.\nPART I\nItem 1. Business.\nWhen used in this report, the terms\ \ we, us, our, PepsiCo and the Company mean PepsiCo, Inc. and its consolidated\n\ subsidiaries, collectively. Certain terms used in this Annual Report on Form 10-K\ \ are defined in the Glossary included in Item 7.\nof this report.\nCompany Overview\n\ We were incorporated in Delaware in 1919 and reincorporated in North Carolina\ \ in 1986. We are a leading global beverage and\nconvenient food company with\ \ a complementary portfolio of brands, including Lays, Doritos, Cheetos, Gatorade,\ \ Pepsi-Cola,\nMountain Dew, Quaker and SodaStream. 
Through our operations, authorized\ \ bottlers, contract manufacturers and other third\nparties, we make, market,\ \ distribute and sell a wide variety of beverages and convenient foods, serving\ \ customers and consumers\nin more than 200 countries and territories.\nOur Operations\n\ We are organized into seven reportable segments (also referred to as divisions),\ \ as follows:\n1) Frito-Lay North America (FLNA), which includes our branded convenient\ \ food businesses in the United States and\nCanada;\n2) Quaker Foods North America\ \ (QFNA), which includes our branded convenient food businesses, such as cereal,\ \ rice, pasta\nand other branded food, in the United States and Canada;\n3) PepsiCo\ \ Beverages North America (PBNA), which includes our beverage businesses in the\ \ United States and Canada;\n4) Latin America (LatAm), which includes all of our\ \ beverage and convenient food businesses in Latin America;\n5) Europe, which\ \ includes all of our beverage and convenient food businesses in Europe;" - source_sentence: "Instruct: Given a web search query, retrieve relevant passages\ \ that answer the query.\nQuery: Title: \nText: What was the rate of compensation\ \ increase in 2019?" sentences: - "Title: \nText: Assumptions\nWeighted-average actuarial assumptions used to determine\ \ net periodic benefit cost and projected benefit obligation for the plans for\ \ the fiscal years 2019, 2018 and 2017 were as follows:\n(1) The expected return\ \ on plan assets assumption used in calculating net periodic benefit cost is based\ \ on historical return experience and estimates of future long-term performance\ \ with consideration to the expected investment mix of the plan.\n(2) The discount\ \ rate is used to state expected cash flows relating to future benefits at a present\ \ value on the measurement date. This rate represents the market rate for high-quality\ \ fixed income investments whose timing would match the cash outflow of retirement\ \ benefits. 
Other assumptions include demographic factors such as retirement,\ \ mortality and turnover.\n\n | \ \ | Pension | \n------------------------------------------- | ---- | -------\ \ | ----\n | 2019 | 2018 | 2017\n\ Net periodic benefit cost: | | | \nExpected\ \ long-term return on plan assets(1) | 3.6% | 3.8% | 3.3%\nRate of compensation\ \ increase | 4.4% | 3.3% | 2.7%\nDiscount rate \ \ | 2.2% | 2.1% | 1.9%\nProjected benefit obligation: \ \ | | | \nExpected long-term return on plan assets \ \ | 2.0% | 3.6% | 4.0%\nRate of compensation increase | 4.3%\ \ | 4.4% | 4.4%\nDiscount rate(2) | 1.7% | 2.2%\ \ | 2.3%" - "Title: \nText: decentralized business model .\nour business segments are focused\ \ on distinct product categories and are responsible for their own performance\ \ .\nthis structure enables each of our segments to independently best position\ \ itself within each category in which it competes and reinforces strong accountability\ \ for operational and financial performance .\neach of our segments focuses on\ \ its unique set of consumers , customers , competitors and suppliers , while\ \ also sharing best practices .\nstrong capital structure .\nwe exited 2017 with\ \ a strong balance sheet .\nin 2017 , we repurchased 3.4 million of our shares\ \ .\nas of december 31 , 2017 , we had $ 323.0 million of cash and cash equivalents\ \ and total debt was $ 1507.6 million , resulting in a net debt position of $\ \ 1184.6 million .\nin addition , we had $ 635.0 million available under our credit\ \ facility as of december 31 , 2017 .\nbusiness segments we have four business\ \ segments : cabinets , plumbing , doors and security .\nthe following table shows\ \ net sales for each of these segments and key brands within each segment : segment\ \ net sales ( in millions ) percentage of total 2017 net sales key brands cabinets\ \ $ 2467.1 47% ( 47 % ) aristokraft , diamond , mid-continent , kitchen craft\ \ , schrock , homecrest , omega , thomasville ( a ) , kemper , starmark , ultracraft\ \ plumbing 1720.8 33% ( 33 % ) moen , rohl , riobel , perrin & rowe , victoria\ \ + albert , shaws , waste king .\n\nsegment | 2017net sales ( in millions )\ \ | percentage of total 2017 net sales | key brands \ \ \ \ \n-------- | ----------------------------- | ----------------------------------\ \ | ------------------------------------------------------------------------------------------------------------------\n\ cabinets | $ 2467.1 | 47% ( 47 % ) \ \ | aristokraft diamondmid-continentkitchen craft schrock homecrest omega thomasville\ \ ( a ) kemper starmark ultracraft\nplumbing | 1720.8 |\ \ 33% ( 33 % ) | moen rohl riobel perrin & rowe victoria\ \ + albert shaws waste king \n\ doors | 502.9 | 9% ( 9 % ) \ \ | therma-trufypon \ \ \nsecurity | 592.5 \ \ | 11% ( 11 % ) | master lock american lock sentrysafe\ \ \ \ \ntotal | $ 5283.3 | 100% ( 100 % ) \ \ | \ \ \n\n( a ) thomasville is a registered\ \ trademark of hhg global designs llc .\nour segments compete on the basis of\ \ innovation , fashion , quality , price , service and responsiveness to distributor\ \ , retailer and installer needs , as well as end-user consumer preferences .\n\ our markets are very competitive .\napproximately 15% ( 15 % ) of 2017 net sales\ \ were to international markets , and sales to two of the company 2019s customers\ \ , the home depot , inc .\n( 201cthe home depot 201d ) and lowe 2019s companies\ \ , inc .\n( 201clowe 2019s 201d ) , each accounted for more than 10% ( 10 % )\ \ of the company 2019s net sales in 2017 
.\nsales to all u.s .\nhome centers in\ \ the aggregate were approximately 27% ( 27 % ) of net sales in 2017 .\ncabinets\ \ .\nour cabinets segment manufactures custom , semi-custom and stock cabinetry\ \ , as well as vanities , for the kitchen , bath and other parts of the home through\ \ a regional supply chain footprint to deliver high quality and service to our\ \ customers .\nthis segment sells a portfolio of brands that enables our customers\ \ to differentiate themselves against competitors .\nthis portfolio includes brand\ \ names such as aristokraft , diamond , mid-continent , kitchen craft , schrock\ \ , homecrest , omega , thomasville , kemper , starmark and ultracraft .\nsubstantially\ \ all of this segment 2019s sales are in north america .\nthis segment sells directly\ \ to kitchen and bath dealers , home centers , wholesalers and large builders\ \ .\nin aggregate , sales to the home depot and lowe 2019s comprised approximately\ \ 34% ( 34 % ) of net sales of the cabinets segment in 2017 .\nthis segment 2019s\ \ competitors include masco , american woodmark and rsi ( owned by american woodmark\ \ ) , as well as a large number of regional and local suppliers .\nplumbing .\n\ our plumbing segment manufactures or assembles and sells faucets , accessories\ \ , kitchen sinks and waste disposals in north america and china , predominantly\ \ under the moen , rohl , riobel , perrin & rowe , victoria + albert , shaws and\ \ waste king brands .\nalthough this segment sells products principally in the\ \ u.s. , canada and china , this segment also sells in mexico , southeast asia\ \ , europe and " - "Title: \nText: Performance Graph The annual changes for the period shown December\ \ 1, 2013 (when our ordinary shares began trading) to December 31, 2017 in the\ \ graph on this page are based on the assumption that $100 had been invested in\ \ Allegion plc ordinary shares, the Standard & Poor’s 500 Stock Index (\"S&P 500\"\ ) and the Standard & Poor's 400 Capital Goods Index (\"S&P 400 Capital Goods\"\ ) on December 1, 2013, and that all quarterly dividends were reinvested.\nThe\ \ total cumulative dollar returns shown on the graph represent the value that\ \ such investments would have had on December 31, 2017." - source_sentence: "Instruct: Given a web search query, retrieve relevant passages\ \ that answer the query.\nQuery: Title: \nText: In which years is Other comprehensive\ \ income before reclassifications greater than Net current-period other comprehensive\ \ income (for Changes related to cash flow derivative hedges)?" 
sentences: - "Title: \nText: NOTE 5.\nPROPERTY AND EQUIPMENT Property and equipment consists\ \ of the following (in thousands):\n| | 2015 | 2014 |\n| Ships | $22,102,025\ \ | $21,620,336 |\n| Ship improvements | 2,019,294 | 1,904,524 |\n| Ships under\ \ construction | 734,998 | 561,779 |\n| Land, buildings and improvements, including\ \ leasehold improvements and port facilities | 337,109 | 303,394 |\n| Computer\ \ hardware and software, transportation equipment and other | 1,025,264 | 889,579\ \ |\n| Total property and equipment | 26,218,690 | 25,279,612 |\n| Less—accumulated\ \ depreciation and amortization | -7,440,912 | -7,085,985 |\n| | $18,777,778\ \ | $18,193,627 |\nShips under construction include progress payments for the\ \ construction of new ships as well as planning, design, interest and other associated\ \ costs.\nWe capitalized interest costs of $26.5 million, $28.8 million and $17.9\ \ million for the years 2015, 2014 and 2013, respectively.\nWe review our long-lived\ \ assets for impairment whenever events or changes in circumstances indicate potential\ \ impairment.\nIn conjunction with performing the two-step goodwill impairment\ \ test for the Pullmantur reporting unit, we identified that the estimated fair\ \ value of certain long-lived assets, consisting of two ships and three aircraft\ \ were less than their carrying values.\nAs a result of this determination, we\ \ evaluated these assets pursuant to our long-lived asset impairment test.\nThe\ \ decision to significantly reduce our exposure to the Latin American market negatively\ \ impacted the expected undiscounted cash flows of these vessels and aircraft\ \ and resulted in an impairment charge of $113.2 million to write down these assets\ \ to their estimated fair values.\nThis impairment charge was recognized in earnings\ \ during the third quarter of 2015 and is reported within Impairment of Pullmantur\ \ related assets within our consolidated statements of comprehensive income (loss).\n\ Additionally, during 2013, the fair value of Pullmantur’s aircraft were determined\ \ to be less than their carrying value which led to a restructuring related impairment\ \ charge of $13.5 million.\nFurthermore, Pullmantur’s non-core businesses met\ \ the accounting criteria to be classified as held for sale during the fourth\ \ quarter of 2013 which led to restructuring related impairment charges of $18.2\ \ million to adjust the carrying value of property and equipment held for sale\ \ to its fair value, less cost to sell.\nThese impairment charges were reported\ \ within Restructuring and related impairment charges in our consolidated statements\ \ of comprehensive income (loss).\nNotes to the Consolidated Financial Statements\ \ << 84 >> | 2015 ANNUAL REPORT NOTE 13.\nCHANGES IN ACCUMULATED OTHER COMPREHENSIVE\ \ INCOME (LOSS) The following table presents the changes in accumulated other\ \ comprehensive income (loss) by component for the years ended December 31, 2015\ \ and 2014 (in thousands):\n| | Changes related to cash flow derivative hedges\ \ | Changes in definedbenefit plans | Foreign currency translation adjustments\ \ | Accumulated other comprehensive income (loss) |\n| Accumulated comprehensive\ \ loss at January 1, 2013 | $-84,505 | $-34,823 | $-15,188 | $-134,516 |\n| Other\ \ comprehensive income before reclassifications | 188,073 | 8,240 | 1,529 | 197,842\ \ |\n| Amounts reclassified from accumulated other comprehensive income (loss)\ \ | -60,244 | 2,589 | — | -57,655 |\n| Net current-period other 
comprehensive\ \ income | 127,829 | 10,829 | 1,529 | 140,187 |\n| Accumulated comprehensive income\ \ (loss) at January 1, 2014 | 43,324 | -23,994 | -13,659 | 5,671 |\n| Other comprehensive\ \ loss before reclassifications | -919,094 | -8,937 | -28,099 | -956,130 |\n|\ \ Amounts reclassified from accumulated other comprehensive income (loss) | 49,744\ \ | 1,724 | 1,997 | 53,465 |\n| Net current-period other comprehensive loss |\ \ -869,350 | -7,213 | -26,102 | -902,665 |\n| Accumulated comprehensive loss at\ \ January 1, 2015 | -826,026 | -31,207 | -39,761 | -896,994 |\n| Other comprehensive\ \ (loss) income before reclassifications | -697,671 | 3,053 | -25,952 | -720,570\ \ |\n| Amounts reclassified from accumulated other comprehensive income (loss)\ \ | 291,624 | 1,707 | -4,200 | 289,131 |\n| Net current-period other comprehensive\ \ (loss) income | -406,047 | 4,760 | -30,152 | -431,439 |\n| Accumulated comprehensive\ \ loss at December 31, 2015 | $-1,232,073 | $-26,447 | $-69,913 | $-1,328,433\ \ |\nThe following table presents reclassifications out of accumulated other comprehensive\ \ income (loss) for the years ended December 31, 2015 and 2014 (in thousands):" - "Title: \nText: defined contribution plan the company and certain subsidiaries\ \ have various defined contribution plans , in which all eligible employees may\ \ participate .\nin the u.s. , the 401 ( k ) plan is a contributory plan .\nmatching\ \ contributions are based upon the amount of the employees 2019 contributions\ \ .\nafter temporarily suspending all matching contributions , effective july\ \ 1 , 2010 , the company reinstated matching contributions and provides a dollar\ \ for dollar ( 100% ( 100 % ) ) match on the first 4% ( 4 % ) of employee contributions\ \ .\nthe maximum matching contribution for 2010 was pro-rated to account for the\ \ number of months remaining after the reinstatement .\nthe company 2019s expenses\ \ for material defined contribution plans for the years ended december 31 , 2012\ \ , 2011 and 2010 were $ 42 million , $ 48 million and $ 23 million , respectively\ \ .\nbeginning january 1 , 2012 , the company may make an additional discretionary\ \ 401 ( k ) plan matching contribution to eligible employees .\nfor the year ended\ \ december 31 , 2012 , the company made no discretionary matching contributions\ \ .\n8 .\nshare-based compensation plans and other incentive plans stock options\ \ , stock appreciation rights and employee stock purchase plan the company grants\ \ options to acquire shares of common stock to certain employees and to existing\ \ option holders of acquired companies in connection with the merging of option\ \ plans following an acquisition .\neach option granted and stock appreciation\ \ right has an exercise price of no less than 100% ( 100 % ) of the fair market\ \ value of the common stock on the date of the grant .\nthe awards have a contractual\ \ life of five to ten years and vest over two to four years .\nstock options and\ \ stock appreciation rights assumed or replaced with comparable stock options\ \ or stock appreciation rights in conjunction with a change in control of the\ \ company only become exercisable if the holder is also involuntarily terminated\ \ ( for a reason other than cause ) or quits for good reason within 24 months\ \ of a change in control .\nthe employee stock purchase plan allows eligible participants\ \ to purchase shares of the company 2019s common stock through payroll deductions\ \ of up to 20% ( 20 % ) of eligible compensation on an 
after-tax basis .\nplan\ \ participants cannot purchase more than $ 25000 of stock in any calendar year\ \ .\nthe price an employee pays per share is 85% ( 85 % ) of the lower of the\ \ fair market value of the company 2019s stock on the close of the first trading\ \ day or last trading day of the purchase period .\nthe plan has two purchase\ \ periods , the first one from october 1 through march 31 and the second one from\ \ april 1 through september 30 .\nfor the years ended december 31 , 2012 , 2011\ \ and 2010 , employees purchased 1.4 million , 2.2 million and 2.7 million shares\ \ , respectively , at purchase prices of $ 34.52 and $ 42.96 , $ 30.56 and $ 35.61\ \ , and $ 41.79 and $ 42.00 , respectively .\nthe company calculates the value\ \ of each employee stock option , estimated on the date of grant , using the black-scholes\ \ option pricing model .\nthe weighted-average estimated fair value of employee\ \ stock options granted during 2012 , 2011 and 2010 was $ 9.60 , $ 13.25 and $\ \ 21.43 , respectively , using the following weighted-average assumptions: .\n\ \n | 2012 | 2011 | 2010 \ \ \n----------------------- | ---------------- | ---------------- | ----------------\n\ expected volatility | 24.0% ( 24.0 % ) | 28.8% ( 28.8 % ) | 41.7% ( 41.7 %\ \ )\nrisk-free interest rate | 0.8% ( 0.8 % ) | 2.1% ( 2.1 % ) | 2.1% ( 2.1\ \ % ) \ndividend yield | 2.2% ( 2.2 % ) | 0.0% ( 0.0 % ) | 0.0%\ \ ( 0.0 % ) \nexpected life ( years ) | 6.1 | 6.0 |\ \ 6.1 \n\nthe company uses the implied volatility for traded options\ \ on the company 2019s stock as the expected volatility assumption required in\ \ the black-scholes model .\nthe selection of the implied volatility approach\ \ was based upon the availability of actively traded options on the company 2019s\ \ stock and the company 2019s assessment that implied volatility is more representative\ \ of future stock price trends than historical volatility .\nthe risk-free interest\ \ rate assumption is based upon the average daily closing rates during the year\ \ for u.s .\ntreasury notes that have a life which approximates the expected life\ \ of the option .\nthe dividend yield assumption is based on the company 2019s\ \ future expectation of dividend payouts .\nthe expected life of employee stock\ \ options represents the average of the contractual term of the options and the\ \ weighted-average vesting period for all option tranches .\nthe company has applied\ \ forfeiture rates , estimated based on historical data , of 13%-50% ( 13%-50\ \ % ) to the option fair values calculated by the black-scholes option pricing\ \ model .\nthese estimated forfeiture rates are applied to grants based on their\ \ remaining vesting term and may be revised in subsequent periods if actual forfeitures\ \ differ from these estimates. " - "Title: \nText: Copyright?2019 Standard & Poor's, a division of S&P Global.\n\ All rights reserved\nMetLife, Inc. 
Notes to the Consolidated Financial Statements\ \ \x80\x94 (Continued) Commitments Leases In accordance with industry practice,\ \ certain of the Company\x80\x99s income from lease agreements with retail tenants\ \ are contingent upon the level of the tenants\x80\x99 revenues.\nAdditionally,\ \ the Company, as lessee, has entered into various lease and sublease agreements\ \ for office space, information technology and other equipment.\nFuture minimum\ \ rental and sublease income, and minimum gross rental payments relating to these\ \ lease agreements are as follows:" - source_sentence: "Instruct: Given a web search query, retrieve relevant passages\ \ that answer the query.\nQuery: Title: \nText: what was the average for \"other\"\ \ loans held in 2012 and 2011?" sentences: - "Title: \nText: Investments and Derivative Instruments (continued) Security Unrealized\ \ Loss Aging The following tables present the Company’s unrealized loss aging\ \ for AFS securities by type and length of time the security was in a continuous\ \ unrealized loss position.\n| | December 31, 2011 |\n| | Less Than 12 Months\ \ | 12 Months or More | Total |\n| | Amortized | Fair | Unrealized | Amortized\ \ | Fair | Unrealized | Amortized | Fair | Unrealized |\n| | Cost | Value | Losses\ \ | Cost | Value | Losses | Cost | Value | Losses |\n| ABS | $629 | $594 | $-35\ \ | $1,169 | $872 | $-297 | $1,798 | $1,466 | $-332 |\n| CDOs | 81 | 59 | -22\ \ | 2,709 | 2,383 | -326 | 2,790 | 2,442 | -348 |\n| CMBS | 1,297 | 1,194 | -103\ \ | 2,144 | 1,735 | -409 | 3,441 | 2,929 | -512 |\n| Corporate [1] | 4,388 | 4,219\ \ | -169 | 3,268 | 2,627 | -570 | 7,656 | 6,846 | -739 |\n| Foreign govt./govt.\ \ agencies | 218 | 212 | -6 | 51 | 47 | -4 | 269 | 259 | -10 |\n| Municipal |\ \ 299 | 294 | -5 | 627 | 560 | -67 | 926 | 854 | -72 |\n| RMBS | 415 | 330 | -85\ \ | 1,206 | 835 | -371 | 1,621 | 1,165 | -456 |\n| U.S. Treasuries | 343 | 341\ \ | -2 | — | — | — | 343 | 341 | -2 |\n| Total fixed maturities | 7,670 | 7,243\ \ | -427 | 11,174 | 9,059 | -2,044 | 18,844 | 16,302 | -2,471 |\n| Equity securities\ \ | 167 | 138 | -29 | 439 | 265 | -174 | 606 | 403 | -203 |\n| Total securities\ \ in an unrealized loss | $7,837 | $7,381 | $-456 | $11,613 | $9,324 | $-2,218\ \ | $19,450 | $16,705 | $-2,674 |\nDecember 31, 2010\n| | December 31, 2010 |\n\ | | Less Than 12 Months | 12 Months or More | Total |\n| | Amortized | Fair\ \ | Unrealized | Amortized | Fair | Unrealized | Amortized | Fair | Unrealized\ \ |\n| | Cost | Value | Losses | Cost | Value | Losses | Cost | Value | Losses\ \ |\n| ABS | $302 | $290 | $-12 | $1,410 | $1,026 | $-384 | $1,712 | $1,316 |\ \ $-396 |\n| CDOs | 321 | 293 | -28 | 2,724 | 2,274 | -450 | 3,045 | 2,567 | -478\ \ |\n| CMBS | 556 | 530 | -26 | 3,962 | 3,373 | -589 | 4,518 | 3,903 | -615 |\n\ | Corporate | 5,533 | 5,329 | -199 | 4,017 | 3,435 | -548 | 9,550 | 8,764 | -747\ \ |\n| Foreign govt./govt. agencies | 356 | 349 | -7 | 78 | 68 | -10 | 434 | 417\ \ | -17 |\n| Municipal | 7,485 | 7,173 | -312 | 1,046 | 863 | -183 | 8,531 | 8,036\ \ | -495 |\n| RMBS | 1,744 | 1,702 | -42 | 1,567 | 1,147 | -420 | 3,311 | 2,849\ \ | -462 |\n| U.S. 
Treasuries | 2,436 | 2,321 | -115 | 158 | 119 | -39 | 2,594\ \ | 2,440 | -154 |\n| Total fixed maturities | 18,733 | 17,987 | -741 | 14,962\ \ | 12,305 | -2,623 | 33,695 | 30,292 | -3,364 |\n| Equity securities | 53 | 52\ \ | -1 | 637 | 506 | -131 | 690 | 558 | -132 |\n| Total securities in an unrealized\ \ loss | $18,786 | $18,039 | $-742 | $15,599 | $12,811 | $-2,754 | $34,385 | $30,850\ \ | $-3,496 |\n[1] Unrealized losses exclude the change in fair value of bifurcated\ \ embedded derivative features of certain securities.\nSubsequent changes in fair\ \ value are recorded in net realized capital gains (losses).\nAs of December 31,\ \ 2011, AFS securities in an unrealized loss position, comprised of 2,549 securities,\ \ primarily related to corporate securities within the financial services sector,\ \ CMBS, and RMBS which have experienced significant price deterioration.\nAs of\ \ December 31, 2011, 75% of these securities were depressed less than 20% of cost\ \ or amortized cost.\nThe decline in unrealized losses during 2011 was primarily\ \ attributable to a decline in interest rates, partially offset by credit spread\ \ widening.\nMost of the securities depressed for twelve months or more relate\ \ to structured securities with exposure to commercial and residential real estate,\ \ as well as certain floating rate corporate securities or those securities with\ \ greater than 10 years to maturity, concentrated in the financial services sector.\n\ Current market spreads continue to be significantly wider for structured securities\ \ with exposure to commercial and residential real estate, as compared to spreads\ \ at the security’s respective purchase date, largely due to the economic and\ \ market uncertainties regarding future performance of commercial and residential\ \ real estate.\nIn addition, the majority of securities have a floating-rate coupon\ \ referenced to a market index where rates have declined substantially.\nThe Company\ \ neither has an intention to sell nor does it expect to be required to sell the\ \ securities outlined above." - "Title: \nText: LOANS HELD FOR SALE Table 15: Loans Held For Sale\n| In millions\ \ | December 312012 | December 312011 |\n| Commercial mortgages at fair value\ \ | $772 | $843 |\n| Commercial mortgages at lower of cost or market | 620 | 451\ \ |\n| Total commercial mortgages | 1,392 | 1,294 |\n| Residential mortgages at\ \ fair value | 2,096 | 1,415 |\n| Residential mortgages at lower of cost or market\ \ | 124 | 107 |\n| Total residential mortgages | 2,220 | 1,522 |\n| Other | 81\ \ | 120 |\n| Total | $3,693 | $2,936 |\nWe stopped originating commercial mortgage\ \ loans held for sale designated at fair value in 2008 and continue pursuing opportunities\ \ to reduce these positions at appropriate prices.\nAt December 31, 2012, the\ \ balance relating to these loans was $772 million, compared to $843 million at\ \ December 31, 2011.\nWe sold $32 million in unpaid principal balances of these\ \ commercial mortgage loans held for sale carried at fair value in 2012 and sold\ \ $25 million in 2011." 
- "Title: \nText: at december 31 , 2012 , total future minimum commitments under\ \ existing non-cancelable operat- ing leases and purchase obligations were as\ \ follows: .\n\nin millions | 2013 | 2014 | 2015 | 2016 |\ \ 2017 | thereafter\n-------------------------- | ------ | ----- | ----- | -----\ \ | ----- | ----------\nlease obligations | $ 198 | $ 136 | $ 106 |\ \ $ 70 | $ 50 | $ 141 \npurchase obligations ( a ) | 3213 | 828 | 722\ \ | 620 | 808 | 2654 \ntotal | $ 3411 | $ 964\ \ | $ 828 | $ 690 | $ 858 | $ 2795 \n\n( a ) includes $ 3.6 billion relating\ \ to fiber supply agreements entered into at the time of the company 2019s 2006\ \ transformation plan forestland sales and in conjunction with the 2008 acquis-\ \ ition of weyerhaeuser company 2019s containerboard , packaging and recycling\ \ business .\nrent expense was $ 231 million , $ 205 million and $ 210 million\ \ for 2012 , 2011 and 2010 , respectively .\nguarantees in connection with sales\ \ of businesses , property , equipment , forestlands and other assets , interna-\ \ tional paper commonly makes representations and warranties relating to such\ \ businesses or assets , and may agree to indemnify buyers with respect to tax\ \ and environmental liabilities , breaches of representations and warranties ,\ \ and other matters .\nwhere liabilities for such matters are determined to be\ \ probable and subject to reasonable estimation , accrued liabilities are recorded\ \ at the time of sale as a cost of the transaction .\nenvironmental proceedings\ \ international paper has been named as a potentially responsible party in environmental\ \ remediation actions under various federal and state laws , includ- ing the comprehensive\ \ environmental response , compensation and liability act ( cercla ) .\nmany of\ \ these proceedings involve the cleanup of hazardous substances at large commercial\ \ landfills that received waste from many different sources .\nwhile joint and\ \ several liability is authorized under cercla and equivalent state laws , as\ \ a practical matter , liability for cercla cleanups is typically allocated among\ \ the many potential responsible parties .\nremedial costs are recorded in the\ \ consolidated financial statements when they become probable and reasonably estimable\ \ .\ninternational paper has estimated the probable liability associated with\ \ these matters to be approximately $ 92 million in the aggregate at december\ \ 31 , 2012 .\none of the matters referenced above is a closed wood treating facility\ \ located in cass lake , minneso- ta .\nduring 2009 , in connection with an environmental\ \ site remediation action under cercla , international paper submitted to the\ \ epa a site remediation feasi- bility study .\nin june 2011 , the epa selected\ \ and published a proposed soil remedy at the site with an estimated cost of $\ \ 46 million .\nthe overall remediation reserve for the site is currently $ 48\ \ mil- lion to address this selection of an alternative for the soil remediation\ \ component of the overall site remedy .\nin october 2011 , the epa released a\ \ public statement indicating that the final soil remedy deci- sion would be delayed\ \ .\nin the unlikely event that the epa changes its proposed soil remedy and approves\ \ instead a more expensive clean-up alternative , the remediation costs could\ \ be material , and sig- nificantly higher than amounts currently recorded .\n\ in october 2012 , the natural resource trustees for this site provided notice\ \ to international paper and 
other potentially responsible parties of their intent\ \ to per- form a natural resource damage assessment .\nit is premature to predict\ \ the outcome of the assessment or to estimate a loss or range of loss , if any\ \ , which may be incurred .\nin addition to the above matters , other remediation\ \ costs typically associated with the cleanup of hazardous substances at the company\ \ 2019s current , closed or formerly-owned facilities , and recorded as liabilities\ \ in the balance sheet , totaled approximately $ 46 million at december 31 , 2012\ \ .\nother than as described above , completion of required remedial actions is\ \ not expected to have a material effect on our consolidated financial statements\ \ .\nthe company is a potentially responsible party with respect to the allied\ \ paper , inc./portage creek/ kalamazoo river superfund site ( kalamazoo river\ \ superfund site ) in michigan .\nthe epa asserts that the site is contaminated\ \ primarily by pcbs as a result of discharges from various paper mills located\ \ along the river , including a paper mill formerly owned by st .\nregis .\nthe\ \ company is a successor in interest to st .\nregis .\ninternational paper has\ \ not received any orders from the epa with respect to the site and is in the\ \ process of collecting information from the epa and other parties relative to\ \ the kalamazoo river superfund site to evaluate the extent of its liability ,\ \ if any , with respect to the site .\naccordingly , it is pre- mature to estimate\ \ a loss or range of loss with respect to this site .\nalso in connection with\ \ the kalamazoo river superfund site , the company was named as a defendant by\ \ georgia-pacific consumer products lp , fort james corporation and georgia pacific\ \ llc in a contribution and cost recovery action for alleged pollution at the\ \ kalamazoo river super- fund site .\nthe suit seeks contribution under cercla\ \ for $ 79 million in costs purportedly expended by plaintiffs as of the filing\ \ of the com- plaint , and for future remediation costs .\nthe suit alleges that\ \ a mill , during the time it was allegedly owned and operated by st .\nregis\ \ , discharged pcb contaminated solids and paper residuals resulting from paper\ \ de-inking and recycling .\nalso named as defendants in the suit are ncr corporation\ \ and weyerhaeuser company .\nin mid-2011 , the suit was " model-index: - name: SentenceTransformer based on dunzhang/stella_en_400M_v5 results: - task: type: information-retrieval name: Information Retrieval dataset: name: Evaluate type: Evaluate metrics: - type: cosine_accuracy@1 value: 0.3616504854368932 name: Cosine Accuracy@1 - type: cosine_accuracy@3 value: 0.5194174757281553 name: Cosine Accuracy@3 - type: cosine_accuracy@5 value: 0.6092233009708737 name: Cosine Accuracy@5 - type: cosine_accuracy@10 value: 0.7014563106796117 name: Cosine Accuracy@10 - type: cosine_precision@1 value: 0.3616504854368932 name: Cosine Precision@1 - type: cosine_precision@3 value: 0.17880258899676374 name: Cosine Precision@3 - type: cosine_precision@5 value: 0.12669902912621356 name: Cosine Precision@5 - type: cosine_precision@10 value: 0.07524271844660194 name: Cosine Precision@10 - type: cosine_recall@1 value: 0.33098705501618125 name: Cosine Recall@1 - type: cosine_recall@3 value: 0.4768203883495145 name: Cosine Recall@3 - type: cosine_recall@5 value: 0.5613673139158576 name: Cosine Recall@5 - type: cosine_recall@10 value: 0.6548139158576051 name: Cosine Recall@10 - type: cosine_ndcg@10 value: 
0.49595926983262306 name: Cosine Ndcg@10 - type: cosine_mrr@10 value: 0.4667870627215285 name: Cosine Mrr@10 - type: cosine_map@100 value: 0.44823876084168546 name: Cosine Map@100 - type: dot_accuracy@1 value: 0.3325242718446602 name: Dot Accuracy@1 - type: dot_accuracy@3 value: 0.5242718446601942 name: Dot Accuracy@3 - type: dot_accuracy@5 value: 0.5922330097087378 name: Dot Accuracy@5 - type: dot_accuracy@10 value: 0.6747572815533981 name: Dot Accuracy@10 - type: dot_precision@1 value: 0.3325242718446602 name: Dot Precision@1 - type: dot_precision@3 value: 0.1796116504854369 name: Dot Precision@3 - type: dot_precision@5 value: 0.12475728155339803 name: Dot Precision@5 - type: dot_precision@10 value: 0.07257281553398058 name: Dot Precision@10 - type: dot_recall@1 value: 0.30590614886731393 name: Dot Recall@1 - type: dot_recall@3 value: 0.4762135922330097 name: Dot Recall@3 - type: dot_recall@5 value: 0.54457928802589 name: Dot Recall@5 - type: dot_recall@10 value: 0.6272653721682848 name: Dot Recall@10 - type: dot_ndcg@10 value: 0.47234567950079603 name: Dot Ndcg@10 - type: dot_mrr@10 value: 0.4421887039605485 name: Dot Mrr@10 - type: dot_map@100 value: 0.4264453452263192 name: Dot Map@100 --- # SentenceTransformer based on dunzhang/stella_en_400M_v5 This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [dunzhang/stella_en_400M_v5](https://huggingface.co/dunzhang/stella_en_400M_v5). It maps sentences & paragraphs to a 1024-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [dunzhang/stella_en_400M_v5](https://huggingface.co/dunzhang/stella_en_400M_v5) <!-- at revision 24e2e1ffe95e95d807989938a5f3b8c18ee651f5 --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 1024 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: NewModel (1): Pooling({'word_embedding_dimension': 1024, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) (2): Dense({'in_features': 1024, 'out_features': 1024, 'bias': True, 'activation_function': 'torch.nn.modules.linear.Identity'}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. 
```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("sentence_transformers_model_id") # Run inference sentences = [ 'Instruct: Given a web search query, retrieve relevant passages that answer the query.\nQuery: Title: \nText: what was the average for "other" loans held in 2012 and 2011?', 'Title: \nText: LOANS HELD FOR SALE Table 15: Loans Held For Sale\n| In millions | December 312012 | December 312011 |\n| Commercial mortgages at fair value | $772 | $843 |\n| Commercial mortgages at lower of cost or market | 620 | 451 |\n| Total commercial mortgages | 1,392 | 1,294 |\n| Residential mortgages at fair value | 2,096 | 1,415 |\n| Residential mortgages at lower of cost or market | 124 | 107 |\n| Total residential mortgages | 2,220 | 1,522 |\n| Other | 81 | 120 |\n| Total | $3,693 | $2,936 |\nWe stopped originating commercial mortgage loans held for sale designated at fair value in 2008 and continue pursuing opportunities to reduce these positions at appropriate prices.\nAt December 31, 2012, the balance relating to these loans was $772 million, compared to $843 million at December 31, 2011.\nWe sold $32 million in unpaid principal balances of these commercial mortgage loans held for sale carried at fair value in 2012 and sold $25 million in 2011.', 'Title: \nText: Investments and Derivative Instruments (continued) Security Unrealized Loss Aging The following tables present the Company’s unrealized loss aging for AFS securities by type and length of time the security was in a continuous unrealized loss position.\n| | December 31, 2011 |\n| | Less Than 12 Months | 12 Months or More | Total |\n| | Amortized | Fair | Unrealized | Amortized | Fair | Unrealized | Amortized | Fair | Unrealized |\n| | Cost | Value | Losses | Cost | Value | Losses | Cost | Value | Losses |\n| ABS | $629 | $594 | $-35 | $1,169 | $872 | $-297 | $1,798 | $1,466 | $-332 |\n| CDOs | 81 | 59 | -22 | 2,709 | 2,383 | -326 | 2,790 | 2,442 | -348 |\n| CMBS | 1,297 | 1,194 | -103 | 2,144 | 1,735 | -409 | 3,441 | 2,929 | -512 |\n| Corporate [1] | 4,388 | 4,219 | -169 | 3,268 | 2,627 | -570 | 7,656 | 6,846 | -739 |\n| Foreign govt./govt. agencies | 218 | 212 | -6 | 51 | 47 | -4 | 269 | 259 | -10 |\n| Municipal | 299 | 294 | -5 | 627 | 560 | -67 | 926 | 854 | -72 |\n| RMBS | 415 | 330 | -85 | 1,206 | 835 | -371 | 1,621 | 1,165 | -456 |\n| U.S. Treasuries | 343 | 341 | -2 | — | — | — | 343 | 341 | -2 |\n| Total fixed maturities | 7,670 | 7,243 | -427 | 11,174 | 9,059 | -2,044 | 18,844 | 16,302 | -2,471 |\n| Equity securities | 167 | 138 | -29 | 439 | 265 | -174 | 606 | 403 | -203 |\n| Total securities in an unrealized loss | $7,837 | $7,381 | $-456 | $11,613 | $9,324 | $-2,218 | $19,450 | $16,705 | $-2,674 |\nDecember 31, 2010\n| | December 31, 2010 |\n| | Less Than 12 Months | 12 Months or More | Total |\n| | Amortized | Fair | Unrealized | Amortized | Fair | Unrealized | Amortized | Fair | Unrealized |\n| | Cost | Value | Losses | Cost | Value | Losses | Cost | Value | Losses |\n| ABS | $302 | $290 | $-12 | $1,410 | $1,026 | $-384 | $1,712 | $1,316 | $-396 |\n| CDOs | 321 | 293 | -28 | 2,724 | 2,274 | -450 | 3,045 | 2,567 | -478 |\n| CMBS | 556 | 530 | -26 | 3,962 | 3,373 | -589 | 4,518 | 3,903 | -615 |\n| Corporate | 5,533 | 5,329 | -199 | 4,017 | 3,435 | -548 | 9,550 | 8,764 | -747 |\n| Foreign govt./govt. 
agencies | 356 | 349 | -7 | 78 | 68 | -10 | 434 | 417 | -17 |\n| Municipal | 7,485 | 7,173 | -312 | 1,046 | 863 | -183 | 8,531 | 8,036 | -495 |\n| RMBS | 1,744 | 1,702 | -42 | 1,567 | 1,147 | -420 | 3,311 | 2,849 | -462 |\n| U.S. Treasuries | 2,436 | 2,321 | -115 | 158 | 119 | -39 | 2,594 | 2,440 | -154 |\n| Total fixed maturities | 18,733 | 17,987 | -741 | 14,962 | 12,305 | -2,623 | 33,695 | 30,292 | -3,364 |\n| Equity securities | 53 | 52 | -1 | 637 | 506 | -131 | 690 | 558 | -132 |\n| Total securities in an unrealized loss | $18,786 | $18,039 | $-742 | $15,599 | $12,811 | $-2,754 | $34,385 | $30,850 | $-3,496 |\n[1] Unrealized losses exclude the change in fair value of bifurcated embedded derivative features of certain securities.\nSubsequent changes in fair value are recorded in net realized capital gains (losses).\nAs of December 31, 2011, AFS securities in an unrealized loss position, comprised of 2,549 securities, primarily related to corporate securities within the financial services sector, CMBS, and RMBS which have experienced significant price deterioration.\nAs of December 31, 2011, 75% of these securities were depressed less than 20% of cost or amortized cost.\nThe decline in unrealized losses during 2011 was primarily attributable to a decline in interest rates, partially offset by credit spread widening.\nMost of the securities depressed for twelve months or more relate to structured securities with exposure to commercial and residential real estate, as well as certain floating rate corporate securities or those securities with greater than 10 years to maturity, concentrated in the financial services sector.\nCurrent market spreads continue to be significantly wider for structured securities with exposure to commercial and residential real estate, as compared to spreads at the security’s respective purchase date, largely due to the economic and market uncertainties regarding future performance of commercial and residential real estate.\nIn addition, the majority of securities have a floating-rate coupon referenced to a market index where rates have declined substantially.\nThe Company neither has an intention to sell nor does it expect to be required to sell the securities outlined above.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 1024] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. 
<details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Information Retrieval * Dataset: `Evaluate` * Evaluated with [<code>InformationRetrievalEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.InformationRetrievalEvaluator) | Metric | Value | |:--------------------|:-----------| | cosine_accuracy@1 | 0.3617 | | cosine_accuracy@3 | 0.5194 | | cosine_accuracy@5 | 0.6092 | | cosine_accuracy@10 | 0.7015 | | cosine_precision@1 | 0.3617 | | cosine_precision@3 | 0.1788 | | cosine_precision@5 | 0.1267 | | cosine_precision@10 | 0.0752 | | cosine_recall@1 | 0.331 | | cosine_recall@3 | 0.4768 | | cosine_recall@5 | 0.5614 | | cosine_recall@10 | 0.6548 | | cosine_ndcg@10 | 0.496 | | cosine_mrr@10 | 0.4668 | | **cosine_map@100** | **0.4482** | | dot_accuracy@1 | 0.3325 | | dot_accuracy@3 | 0.5243 | | dot_accuracy@5 | 0.5922 | | dot_accuracy@10 | 0.6748 | | dot_precision@1 | 0.3325 | | dot_precision@3 | 0.1796 | | dot_precision@5 | 0.1248 | | dot_precision@10 | 0.0726 | | dot_recall@1 | 0.3059 | | dot_recall@3 | 0.4762 | | dot_recall@5 | 0.5446 | | dot_recall@10 | 0.6273 | | dot_ndcg@10 | 0.4723 | | dot_mrr@10 | 0.4422 | | dot_map@100 | 0.4264 | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### Unnamed Dataset * Size: 2,256 training samples * Columns: <code>sentence_0</code> and <code>sentence_1</code> * Approximate statistics based on the first 1000 samples: | | sentence_0 | sentence_1 | |:--------|:------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------| | type | string | string | | details | <ul><li>min: 29 tokens</li><li>mean: 45.01 tokens</li><li>max: 121 tokens</li></ul> | <ul><li>min: 26 tokens</li><li>mean: 406.1 tokens</li><li>max: 512 tokens</li></ul> | * Samples: | sentence_0 | sentence_1 | 
|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| | <code>Instruct: Given a web search query, retrieve relevant passages that answer the query.<br>Query: Title: <br>Text: In the year with largest amount of Net credit losses, what's the amount of Revenues, net of interest expense and Total operating expenses? (in million)</code> | <code>Title: <br>Text: Comparison of Five-Year Cumulative Total Return The following graph compares the cumulative total return on Citigroup’s common stock with the S&P 500 Index and the S&P Financial Index over the five-year period extending through December31, 2009.<br>The graph assumes that $100 was invested on December31, 2004 in Citigroup’s common stock, the S&P 500 Index and the S&P Financial Index and that all dividends were reinvested.</code> | | <code>Instruct: Given a web search query, retrieve relevant passages that answer the query.<br>Query: Title: <br>Text: what was the total of net earnings attributable to pmi in 2017?</code> | <code>Title: <br>Text: the fair value of the psu award at the date of grant is amortized to expense over the performance period , which is typically three years after the date of the award , or upon death , disability or reaching the age of 58 .<br>as of december 31 , 2017 , pmi had $ 34 million of total unrecognized compensation cost related to non-vested psu awards .<br>this cost is recognized over a weighted-average performance cycle period of two years , or upon death , disability or reaching the age of 58 .<br>during the years ended december 31 , 2017 , and 2016 , there were no psu awards that vested .<br>pmi did not grant any psu awards during note 10 .<br>earnings per share : unvested share-based payment awards that contain non-forfeitable rights to dividends or dividend equivalents are participating securities and therefore are included in pmi 2019s earnings per share calculation pursuant to the two-class method .<br>basic and diluted earnings per share ( 201ceps 201d ) were calculated using the following: .<br><br>( in millions ) | for the years ended december 31 , 2017 | for the years ended december 31 , 2016 | for the years ended december 31 , 2015<br>-------------------------------------------------------------------------------------- | -------------------------------------- | -------------------------------------- | --------------------------------------<br>net earnings attributable to pmi | $ 6035 | $ 6967 | $ 6873 <br>less distributed and undistributed earnings attributable to share-based payment awards | 14 | 19 | 24 <br>net earnings for basic and diluted eps | $ 6021 | $ 6948 | $ 6849 <br>weighted-average shares for basic eps | 1552 | 1551 | 1549 
<br>plus contingently issuable performance stock units ( psus ) | 1 | 2014 | 2014 <br>weighted-average shares for diluted eps | 1553 | 1551 | 1549 <br><br>for the 2017 , 2016 and 2015 computations , there were no antidilutive stock options. </code> | | <code>Instruct: Given a web search query, retrieve relevant passages that answer the query.<br>Query: Title: <br>Text: for the terrestar acquisition what will the final cash purchase price be in millions paid upon closing?</code> | <code>Title: <br>Text: dish network corporation notes to consolidated financial statements - continued this transaction was accounted for as a business combination using purchase price accounting .<br>the allocation of the purchase consideration is in the table below .<br>purchase allocation ( in thousands ) .<br><br> | purchase price allocation ( in thousands )<br>----------------------- | ------------------------------------------<br>cash | $ 107061 <br>current assets | 153258 <br>property and equipment | 28663 <br>acquisition intangibles | 17826 <br>other noncurrent assets | 12856 <br>current liabilities | -86080 ( 86080 ) <br>total purchase price | $ 233584 <br><br>the pro forma revenue and earnings associated with the blockbuster acquisition are not included in this filing .<br>due to the material ongoing modifications of the business , management has determined that insufficient information exists to accurately develop meaningful historical pro forma financial information .<br>moreover , the historical operations of blockbuster materially changed during the periods preceding the acquisition as a result of blockbuster inc . 2019s bankruptcy proceedings , and any historical pro forma information would not prove useful in assessing our post acquisition earnings and cash flows .<br>the cost of goods sold on a unit basis for blockbuster in the current period was lower-than-historical costs .<br>the carrying values in the current period of the rental library and merchandise inventories ( 201cblockbuster inventory 201d ) were reduced to their estimated fair value due to the application of purchase accounting .<br>this impact on cost of goods sold on a unit basis will diminish in the future as we purchase new blockbuster inventory .<br>10 .<br>spectrum investments terrestar transaction gamma acquisition l.l.c .<br>( 201cgamma 201d ) , a wholly-owned subsidiary of dish network , entered into the terrestar transaction on june 14 , 2011 .<br>on july 7 , 2011 , the u.s .<br>bankruptcy court for the southern district of new york approved the asset purchase agreement with terrestar and we subsequently paid $ 1.345 billion of the cash purchase price .<br>dish network is a party to the asset purchase agreement solely with respect to certain guaranty obligations .<br>we have paid all but $ 30 million of the purchase price for the terrestar transaction , which will be paid upon closing of the terrestar transaction , or upon certain other conditions being met under the asset purchase agreement .<br>consummation of the acquisition contemplated in the asset purchase agreement is subject to , among other things , approval by the fcc .<br>on february 7 , 2012 , the canadian federal department of industry ( 201cindustry canada 201d ) approved the transfer of the canadian spectrum licenses held by terrestar to us .<br>if the remaining required approvals are not obtained , subject to certain exceptions , we have the right to require and direct the sale of some or all of the terrestar assets to a third party and we would be entitled 
to the proceeds from such a sale .<br>these proceeds could , however , be substantially less than amounts we have paid in the terrestar transaction .<br>additionally , gamma is responsible for providing certain working capital and certain administrative expenses of terrestar and certain of its subsidiaries after december 31 , 2011 .<br>we expect that the terrestar transaction will be accounted for as a business combination using purchase price accounting .<br>we also expect to allocate the purchase price to the various components of the acquisition based upon the fair value of each component using various valuation techniques , including the market approach , income approach and/or cost approach .<br>we expect the purchase price of the terrestar assets to be allocated to , among other things , spectrum and satellites. </code> | * Loss: [<code>MultipleNegativesRankingLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#multiplenegativesrankingloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `num_train_epochs`: 2 - `fp16`: True - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: round_robin #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 16 - `per_device_eval_batch_size`: 16 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1 - `num_train_epochs`: 2 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.0 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - 
`ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `batch_sampler`: no_duplicates - `multi_dataset_batch_sampler`: round_robin </details> ### Training Logs | Epoch | Step | Evaluate_cosine_map@100 | |:-----:|:----:|:-----------------------:| | 0 | 0 | 0.2566 | | 1.0 | 141 | 0.3931 | | 2.0 | 282 | 0.4482 | ### Framework Versions - Python: 3.10.12 - Sentence Transformers: 3.1.1 - Transformers: 4.45.2 - PyTorch: 2.5.1+cu121 - Accelerate: 1.1.1 - Datasets: 3.1.0 - Tokenizers: 0.20.3 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### MultipleNegativesRankingLoss ```bibtex @misc{henderson2017efficient, title={Efficient Natural Language Response Suggestion for Smart Reply}, author={Matthew Henderson and Rami Al-Rfou and Brian Strope and Yun-hsuan Sung and Laszlo Lukacs and Ruiqi Guo and Sanjiv Kumar and Balint Miklos and Ray Kurzweil}, year={2017}, eprint={1705.00652}, archivePrefix={arXiv}, primaryClass={cs.CL} } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
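As a companion to the training details above, here is a minimal fine-tuning sketch using `sentence-transformers` with `MultipleNegativesRankingLoss`, mirroring the batch size, epoch count, fp16 setting, and `no_duplicates` batch sampler listed in the hyperparameters. The two-row toy dataset and the `./stella-finetuned` output path are placeholders, not the original 2,256-sample training data.

```python
from datasets import Dataset
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.losses import MultipleNegativesRankingLoss
from sentence_transformers.training_args import BatchSamplers

# Placeholder (query, passage) pairs standing in for the original training set.
train_dataset = Dataset.from_dict({
    "sentence_0": [
        "Instruct: Given a web search query, retrieve relevant passages that answer the query.\nQuery: Title: \nText: example financial question",
        "Instruct: Given a web search query, retrieve relevant passages that answer the query.\nQuery: Title: \nText: another example question",
    ],
    "sentence_1": [
        "Title: \nText: example passage answering the first question",
        "Title: \nText: example passage answering the second question",
    ],
})

model = SentenceTransformer("dunzhang/stella_en_400M_v5", trust_remote_code=True)
loss = MultipleNegativesRankingLoss(model, scale=20.0)  # cos_sim is the default similarity

args = SentenceTransformerTrainingArguments(
    output_dir="./stella-finetuned",          # placeholder output path
    num_train_epochs=2,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    fp16=True,
    batch_sampler=BatchSamplers.NO_DUPLICATES,
)

trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    loss=loss,
)
trainer.train()
```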
[ "BEAR", "CRAFT" ]
aisingapore/Gemma2-9b-WangchanLIONv2-instruct
aisingapore
text-generation
[ "transformers", "safetensors", "gemma2", "text-generation", "conversational", "th", "en", "dataset:airesearch/WangchanThaiInstruct", "dataset:airesearch/WangchanX-FLAN-v6.1", "dataset:airesearch/wangchanx-seed-free-synthetic-instruct-thai-120k", "base_model:aisingapore/gemma2-9b-cpt-sea-lionv3-base", "base_model:finetune:aisingapore/gemma2-9b-cpt-sea-lionv3-base", "license:gemma", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-11-27T06:55:08Z
2024-11-28T07:58:15+00:00
23
2
--- base_model: - aisingapore/gemma2-9b-cpt-sea-lionv3-base datasets: - airesearch/WangchanThaiInstruct - airesearch/WangchanX-FLAN-v6.1 - airesearch/wangchanx-seed-free-synthetic-instruct-thai-120k language: - th - en library_name: transformers license: gemma pipeline_tag: text-generation --- # Gemma2 9B WangchanLIONv2 Instruct WangchanLION is a joint effort between VISTEC and AI Singapore to develop a Thai-specific collection of Large Language Models (LLMs), pre-trained for Southeast Asian (SEA) languages, and instruct-tuned specifically for the Thai language. Gemma2 9B WangchanLIONv2 Instruct is a multilingual model that has been fine-tuned with around **3,760,000 Thai instruction-completion pairs** from human-annotated instructions, automatic data construction in FLAN-style, and synthetic samples. - **Developed by:** Products Pillar, AI Singapore, and VISTEC - **Funded by:** Singapore NRF, PTT Public Company Limited, SCB Public Company Limited, and SCBX Public Company Limited - **Model type:** Decoder - **Languages:** English, Thai - **License:** [Gemma Community License](https://ai.google.dev/gemma/terms) ## Model Details ### Model Description We performed instruction tuning in Thai on [continued pre-trained Gemma2 9B CPT SEA-LIONv3](https://huggingface.co/aisingapore/gemma2-9b-cpt-sea-lionv3-base), a decoder model using the Gemma2 architecture, to create Gemma2 9B CPT WangchanLIONv2 Instruct. For tokenization, the model employs the default tokenizer used in Gemma-2-9B. The model has a context length of 8192. ### Benchmark Performance We evaluated Gemma2 9B WangchanLIONv2 Instruct in Thai using the [Thai LLM Benchmark](https://blog.opentyphoon.ai/introducing-the-thaillm-leaderboard-thaillm-evaluation-ecosystem-508e789d06bf). The benchmark consists of Thai multiple-choice exams, multi-turn chat, reading comprehension and language generation. The evaluation results are available on [the leaderboard](https://huggingface.co/spaces/ThaiLLM-Leaderboard/leaderboard). ### Usage **NOTE** This model has not been trained to use a system prompt or to use tool calling. Gemma2 9B WangchanLIONv2 Instruct can be run using the 🤗 Transformers library ```python # Please use transformers==4.45.2 import transformers import torch model_id = "aisingapore/Gemma2-9b-WangchanLIONv2-instruct" pipeline = transformers.pipeline( "text-generation", model=model_id, model_kwargs={"torch_dtype": torch.bfloat16}, device_map="auto", ) messages = [ {"role": "user", "content": "แต่งกลอนให้หน่อย"}, ] outputs = pipeline( messages, max_new_tokens=256, ) print(outputs[0]["generated_text"][-1]) ``` ### Caveats It is important for users to be aware that our model exhibits certain limitations that warrant consideration. Like many LLMs, the model can hallucinate and occasionally generates irrelevant content, introducing fictional elements that are not grounded in the provided context. Users should also exercise caution in interpreting and validating the model's responses due to the potential inconsistencies in its reasoning. ## Limitations ### Safety Current SEA-LION models, including this commercially permissive release, have not been aligned for safety. Developers and users should perform their own safety fine-tuning and related security measures. In no event shall the authors be held liable for any claim, damages, or other liability arising from the use of the released weights and codes. ## Technical Specifications ### Fine-Tuning Details Gemma2 9B WangchanLIONv2 Instruct was built using parameter-efficient fine-tuning. 
The training process for fine-tuning was approximately 3 days on 8x H100-80GB GPUs. The following hyperparameters were used during training: - quantization_bit: 4 - quantization_method: bitsandbytes - finetuning_type: lora - lora_target: all - cutoff_len: 2048 - per_device_train_batch_size: 4 - gradient_accumulation_steps: 8 - learning_rate: 1.0e-4 - num_train_epochs: 3 - lr_scheduler_type: cosine - warmup_ratio: 0.1 - bf16: true - val_size: 0.01 - per_device_eval_batch_size: 1 - eval_strategy: steps - eval_steps: 4000 We use [LLaMA Factory](https://github.com/hiyouga/LLaMA-Factory) framework for the fine-tuning process. ## Data Gemma2 9B WangchanLIONv2 Instruct was trained on 1. [Human-Annotated Thai Instruction Dataset](https://huggingface.co/datasets/airesearch/WangchanThaiInstruct) 2. [FLAN-like dataset in Thai](https://huggingface.co/datasets/airesearch/WangchanX-FLAN-v6.1) 3. [WangchanX Seed-Free Synthetic Instruct Thai 120k](https://huggingface.co/datasets/airesearch/wangchanx-seed-free-synthetic-instruct-thai-120k) ## Call for Contributions We encourage researchers, developers, and language enthusiasts to actively contribute to the enhancement and expansion of SEA-LION. Contributions can involve identifying and reporting bugs, sharing pre-training, instruction, and preference data, improving documentation usability, proposing and implementing new model evaluation tasks and metrics, or training versions of the model in additional Southeast Asian languages. Join us in shaping the future of SEA-LION by sharing your expertise and insights to make these models more accessible, accurate, and versatile. Please check out our GitHub for further information on the call for contributions. ## The Team ### AISG Chan Adwin, Choa Esther, Cheng Nicholas, Huang Yuli, Lau Wayne, Lee Chwan Ren, Leong Wai Yi, Leong Wei Qi, Limkonchotiwat Peerat, Liu Bing Jie Darius, Montalan Jann Railey, Ng Boon Cheong Raymond, Ngui Jian Gang, Nguyen Thanh Ngan, Ong Brandon, Ong Tat-Wee David, Ong Zhi Hao, Rengarajan Hamsawardhini, Siow Bryan, Susanto Yosephine, Tai Ngee Chia, Tan Choon Meng, Teo Eng Sipp Leslie, Teo Wei Yi, Tjhi William, Teng Walter, Yeo Yeow Tong, Yong Xianbin ### WangchanX Can Udomcharoenchaikit, Chalermpun Mai-On, Chayapat Uthayopas, Ekapol Chuangsuwanich, Lalita Lowphansirikul, Nonthakit Chaiwong, Panuthep Tasawong, Patomporn Payoungkhamdee, Pume Tuchinda, Romrawin Chumpu, Sarana Nutanong, Wannaphong Phatthiyaphaibun ## Acknowledgements [AI Singapore](​​https://aisingapore.org/) is a national programme supported by the National Research Foundation, Singapore and hosted by the National University of Singapore. Any opinions, findings and conclusions or recommendations expressed in this material are those of the author(s) and do not reflect the views of the National Research Foundation or the National University of Singapore. This release is part of WangchanX, a Large Language Model (LLM) research and development project supported by PTT Public Company Limited, SCB Public Company Limited, and SCBX Public Company Limited. The project is a collaborative effort originated by PyThaiNLP and VISTEC-depa Thailand AI Research Institute, focusing on the development of Adaptation Toolsets, Instruction Tuning & Alignment Datasets, and Benchmarks. 
## Contact - Chalermpun Mai-On [email protected] - Patomporn Payoungkhamdee [email protected] - Peerat Limkonchotiwat [email protected] [Link to SEA-LION's GitHub repository](https://github.com/aisingapore/sealion) [Link to WangchanX FLAN-like Dataset Creation Github repository](https://github.com/vistec-AI/WangchanX/tree/datasets) ## Disclaimer This is the repository for the commercial instruction-tuned model. The model has _not_ been aligned for safety. Developers and users should perform their own safety fine-tuning and related security measures. In no event shall the authors be held liable for any claims, damages, or other liabilities arising from the use of the released weights and codes.
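Referring back to the Fine-Tuning Details above: the original run used the LLaMA-Factory framework, so the snippet below is only a rough `peft`/`transformers` equivalent of the listed LoRA and quantization settings, shown for illustration. LoRA rank/alpha are library defaults (the card does not state them), and the output path is a placeholder.

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig, TrainingArguments
from peft import LoraConfig, get_peft_model

# quantization_bit: 4, quantization_method: bitsandbytes
bnb_config = BitsAndBytesConfig(load_in_4bit=True)

model = AutoModelForCausalLM.from_pretrained(
    "aisingapore/gemma2-9b-cpt-sea-lionv3-base",
    quantization_config=bnb_config,
    torch_dtype=torch.bfloat16,   # bf16: true
    device_map="auto",
)

# finetuning_type: lora, lora_target: all
lora_config = LoraConfig(task_type="CAUSAL_LM", target_modules="all-linear")
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()

# Optimization settings as listed in the card; these would be passed to a
# supervised fine-tuning trainer (LLaMA-Factory handled this in the original run).
args = TrainingArguments(
    output_dir="./wangchanlion-lora",   # placeholder path
    per_device_train_batch_size=4,
    gradient_accumulation_steps=8,
    learning_rate=1e-4,
    num_train_epochs=3,
    lr_scheduler_type="cosine",
    warmup_ratio=0.1,
    bf16=True,
)
```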
[ "CHIA" ]
OpenGVLab/Mini-InternVL2-1B-DA-RS
OpenGVLab
image-text-to-text
[ "transformers", "safetensors", "internvl_chat", "feature-extraction", "internvl", "custom_code", "image-text-to-text", "conversational", "multilingual", "arxiv:2410.16261", "arxiv:2312.14238", "arxiv:2404.16821", "arxiv:2412.05271", "base_model:OpenGVLab/InternVL2-1B", "base_model:merge:OpenGVLab/InternVL2-1B", "license:mit", "region:us" ]
2024-12-07T11:22:41Z
2024-12-09T13:05:02+00:00
23
0
--- base_model: - OpenGVLab/InternVL2-1B language: - multilingual library_name: transformers license: mit pipeline_tag: image-text-to-text tags: - internvl - custom_code base_model_relation: merge --- # Mini-InternVL2-DA-RS [\[📂 GitHub\]](https://github.com/OpenGVLab/InternVL) [\[🆕 Blog\]](https://internvl.github.io/blog/) [\[📜 Mini-InternVL\]](https://arxiv.org/abs/2410.16261) [\[📜 InternVL 1.0\]](https://arxiv.org/abs/2312.14238) [\[📜 InternVL 1.5\]](https://arxiv.org/abs/2404.16821) [\[📜 InternVL 2.5\]](https://huggingface.co/papers/2412.05271) [\[🗨️ InternVL Chat Demo\]](https://internvl.opengvlab.com/) [\[🤗 HF Demo\]](https://huggingface.co/spaces/OpenGVLab/InternVL) [\[🚀 Quick Start\]](#quick-start) [\[📖 中文解读\]](https://zhuanlan.zhihu.com/p/706547971) [\[📖 Documents\]](https://internvl.readthedocs.io/en/latest/internvl2.0/domain_adaptation.html#data-preparation) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64564b0e4a7ffb7d5a47f412/Qp9tEtBAjbq39bJZ7od4A.png) ## Introduction We release the adaptation models for the specific domains: autonomous driving, medical images, and remote sensing. These models are built upon Mini-InternVL and fine-tuned using a unified adaptation framework, achieving good performance on tasks in specific domains. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/64564b0e4a7ffb7d5a47f412/rlz4XL8DFWXplvp0Yx4lg.png) <table> <tr> <th>Model Name</th> <th>HF Link</th> <th>Note</th> </tr> <tr> <td>Mini-InternVL2-DA-Drivelm</td> <td><a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-1B-DA-Drivelm">🤗1B</a> / <a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-2B-DA-Drivelm">🤗2B</a> / <a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-4B-DA-Drivelm">🤗4B</a></td> <td> Adaptation for <a href="https://github.com/OpenDriveLab/DriveLM/tree/main/challenge"> CVPR 2024 Autonomous Driving Challenge </a></td> </tr> <tr> <td>Mini-InternVL2-DA-BDD</td> <td><a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-1B-DA-BDD">🤗1B</a> / <a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-2B-DA-BDD">🤗2B</a> / <a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-4B-DA-BDD">🤗4B</a></td> <td> Fine-tuning with data constructed by <a href="https://tonyxuqaq.github.io/projects/DriveGPT4/"> DriveGPT4 </a></td> </tr> <tr> <td>Mini-InternVL2-DA-RS</td> <td><a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-1B-DA-RS">🤗1B</a> / <a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-2B-DA-RS">🤗2B</a> / <a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-4B-DA-RS">🤗4B</a></td> <td> Adaptation for remote sensing domain </td> </tr> <tr> <td>Mini-InternVL2-DA-Medical</td> <td><a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-1B-DA-Medical">🤗1B</a> / <a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-2B-DA-Medical">🤗2B</a> / <a href="https://huggingface.co/OpenGVLab/Mini-InternVL2-4B-DA-Medical">🤗4B</a></td> <td> Fine-tuning using our <a href="https://huggingface.co/datasets/OpenGVLab/InternVL-Domain-Adaptation-Data/blob/main/train_meta/internvl_1_2_finetune_medical.json">medical data</a>.</td> </tr> </table> The script for evaluation is in the [document](https://internvl.readthedocs.io/en/latest/internvl2.0/domain_adaptation.html#id3). ## Training datasets - General domain dataset: ShareGPT4V, AllSeeingV2, LLaVA-Instruct-ZH, DVQA, ChartQA, AI2D, DocVQA, GeoQA+, SynthDoG-EN - Remote sensing dataset: GeoChat instruction set, RSVQA-HR, DIOR-RSVG, FIT-RS. 
## Quick Start We provide an example code to run `Mini-InternVL2-1B` using `transformers`. > Please use transformers>=4.37.2 to ensure the model works normally. ```python import numpy as np import torch import torchvision.transforms as T from decord import VideoReader, cpu from PIL import Image from torchvision.transforms.functional import InterpolationMode from transformers import AutoModel, AutoTokenizer IMAGENET_MEAN = (0.485, 0.456, 0.406) IMAGENET_STD = (0.229, 0.224, 0.225) def build_transform(input_size): MEAN, STD = IMAGENET_MEAN, IMAGENET_STD transform = T.Compose([ T.Lambda(lambda img: img.convert('RGB') if img.mode != 'RGB' else img), T.Resize((input_size, input_size), interpolation=InterpolationMode.BICUBIC), T.ToTensor(), T.Normalize(mean=MEAN, std=STD) ]) return transform def find_closest_aspect_ratio(aspect_ratio, target_ratios, width, height, image_size): best_ratio_diff = float('inf') best_ratio = (1, 1) area = width * height for ratio in target_ratios: target_aspect_ratio = ratio[0] / ratio[1] ratio_diff = abs(aspect_ratio - target_aspect_ratio) if ratio_diff < best_ratio_diff: best_ratio_diff = ratio_diff best_ratio = ratio elif ratio_diff == best_ratio_diff: if area > 0.5 * image_size * image_size * ratio[0] * ratio[1]: best_ratio = ratio return best_ratio def dynamic_preprocess(image, min_num=1, max_num=12, image_size=448, use_thumbnail=False): orig_width, orig_height = image.size aspect_ratio = orig_width / orig_height # calculate the existing image aspect ratio target_ratios = set( (i, j) for n in range(min_num, max_num + 1) for i in range(1, n + 1) for j in range(1, n + 1) if i * j <= max_num and i * j >= min_num) target_ratios = sorted(target_ratios, key=lambda x: x[0] * x[1]) # find the closest aspect ratio to the target target_aspect_ratio = find_closest_aspect_ratio( aspect_ratio, target_ratios, orig_width, orig_height, image_size) # calculate the target width and height target_width = image_size * target_aspect_ratio[0] target_height = image_size * target_aspect_ratio[1] blocks = target_aspect_ratio[0] * target_aspect_ratio[1] # resize the image resized_img = image.resize((target_width, target_height)) processed_images = [] for i in range(blocks): box = ( (i % (target_width // image_size)) * image_size, (i // (target_width // image_size)) * image_size, ((i % (target_width // image_size)) + 1) * image_size, ((i // (target_width // image_size)) + 1) * image_size ) # split the image split_img = resized_img.crop(box) processed_images.append(split_img) assert len(processed_images) == blocks if use_thumbnail and len(processed_images) != 1: thumbnail_img = image.resize((image_size, image_size)) processed_images.append(thumbnail_img) return processed_images def load_image(image_file, input_size=448, max_num=12): image = Image.open(image_file).convert('RGB') transform = build_transform(input_size=input_size) images = dynamic_preprocess(image, image_size=input_size, use_thumbnail=True, max_num=max_num) pixel_values = [transform(image) for image in images] pixel_values = torch.stack(pixel_values) return pixel_values # If you want to load a model using multiple GPUs, please refer to the `Multiple GPUs` section. 
path = 'OpenGVLab/Mini-InternVL2-1B-DA-RS' model = AutoModel.from_pretrained( path, torch_dtype=torch.bfloat16, low_cpu_mem_usage=True, use_flash_attn=True, trust_remote_code=True).eval().cuda() tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True, use_fast=False) # set the max number of tiles in `max_num` pixel_values = load_image('path/to/image.jpg', max_num=12).to(torch.bfloat16).cuda() generation_config = dict(max_new_tokens=1024, do_sample=True) # pure-text conversation (纯文本对话) question = 'Hello, who are you?' response, history = model.chat(tokenizer, None, question, generation_config, history=None, return_history=True) print(f'User: {question}\nAssistant: {response}') question = 'Can you tell me a story?' response, history = model.chat(tokenizer, None, question, generation_config, history=history, return_history=True) print(f'User: {question}\nAssistant: {response}') # single-image single-round conversation (单图单轮对话) question = '<image>\nPlease describe the image shortly.' response = model.chat(tokenizer, pixel_values, question, generation_config) print(f'User: {question}\nAssistant: {response}') # single-image multi-round conversation (单图多轮对话) question = '<image>\nPlease describe the image in detail.' response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True) print(f'User: {question}\nAssistant: {response}') question = 'Please write a poem according to the image.' response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True) print(f'User: {question}\nAssistant: {response}') # multi-image multi-round conversation, combined images (多图多轮对话,拼接图像) pixel_values1 = load_image('path/to/image1.jpg', max_num=12).to(torch.bfloat16).cuda() pixel_values2 = load_image('path/to/image2.jpg', max_num=12).to(torch.bfloat16).cuda() pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0) question = '<image>\nDescribe the two images in detail.' response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=None, return_history=True) print(f'User: {question}\nAssistant: {response}') question = 'What are the similarities and differences between these two images.' response, history = model.chat(tokenizer, pixel_values, question, generation_config, history=history, return_history=True) print(f'User: {question}\nAssistant: {response}') # multi-image multi-round conversation, separate images (多图多轮对话,独立图像) pixel_values1 = load_image('path/to/image1.jpg', max_num=12).to(torch.bfloat16).cuda() pixel_values2 = load_image('path/to/image2.jpg', max_num=12).to(torch.bfloat16).cuda() pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0) num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)] question = 'Image-1: <image>\nImage-2: <image>\nDescribe the two images in detail.' response, history = model.chat(tokenizer, pixel_values, question, generation_config, num_patches_list=num_patches_list, history=None, return_history=True) print(f'User: {question}\nAssistant: {response}') question = 'What are the similarities and differences between these two images.' 
response, history = model.chat(tokenizer, pixel_values, question, generation_config, num_patches_list=num_patches_list, history=history, return_history=True) print(f'User: {question}\nAssistant: {response}') # batch inference, single image per sample (单图批处理) pixel_values1 = load_image('path/to/image1.jpg', max_num=12).to(torch.bfloat16).cuda() pixel_values2 = load_image('path/to/image1.jpg', max_num=12).to(torch.bfloat16).cuda() num_patches_list = [pixel_values1.size(0), pixel_values2.size(0)] pixel_values = torch.cat((pixel_values1, pixel_values2), dim=0) questions = ['<image>\nDescribe the image in detail.'] * len(num_patches_list) responses = model.batch_chat(tokenizer, pixel_values, num_patches_list=num_patches_list, questions=questions, generation_config=generation_config) for question, response in zip(questions, responses): print(f'User: {question}\nAssistant: {response}') ``` ## Citation If you find this project useful in your research, please consider citing: ```BibTeX @article{gao2024mini, title={Mini-internvl: A flexible-transfer pocket multimodal model with 5\% parameters and 90\% performance}, author={Gao, Zhangwei and Chen, Zhe and Cui, Erfei and Ren, Yiming and Wang, Weiyun and Zhu, Jinguo and Tian, Hao and Ye, Shenglong and He, Junjun and Zhu, Xizhou and others}, journal={arXiv preprint arXiv:2410.16261}, year={2024} } @article{chen2024expanding, title={Expanding Performance Boundaries of Open-Source Multimodal Models with Model, Data, and Test-Time Scaling}, author={Chen, Zhe and Wang, Weiyun and Cao, Yue and Liu, Yangzhou and Gao, Zhangwei and Cui, Erfei and Zhu, Jinguo and Ye, Shenglong and Tian, Hao and Liu, Zhaoyang and others}, journal={arXiv preprint arXiv:2412.05271}, year={2024} } @article{chen2024far, title={How Far Are We to GPT-4V? Closing the Gap to Commercial Multimodal Models with Open-Source Suites}, author={Chen, Zhe and Wang, Weiyun and Tian, Hao and Ye, Shenglong and Gao, Zhangwei and Cui, Erfei and Tong, Wenwen and Hu, Kongzhi and Luo, Jiapeng and Ma, Zheng and others}, journal={arXiv preprint arXiv:2404.16821}, year={2024} } @inproceedings{chen2024internvl, title={Internvl: Scaling up vision foundation models and aligning for generic visual-linguistic tasks}, author={Chen, Zhe and Wu, Jiannan and Wang, Wenhai and Su, Weijie and Chen, Guo and Xing, Sen and Zhong, Muyan and Zhang, Qinglong and Zhu, Xizhou and Lu, Lewei and others}, booktitle={Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition}, pages={24185--24198}, year={2024} } ```
[ "MEDICAL DATA" ]
sonyashijin/tinyllama_pubmed_merged
sonyashijin
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "sft", "en", "base_model:unsloth/tinyllama-bnb-4bit", "base_model:finetune:unsloth/tinyllama-bnb-4bit", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-12-09T23:44:52Z
2024-12-09T23:56:57+00:00
23
0
--- base_model: unsloth/tinyllama-bnb-4bit language: - en license: apache-2.0 tags: - text-generation-inference - transformers - unsloth - llama - trl - sft --- # Uploaded model - **Developed by:** sonyashijin - **License:** apache-2.0 - **Finetuned from model :** unsloth/tinyllama-bnb-4bit This llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Huggingface's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth) # Evaluation | Tasks |Version|Filter|n-shot|Metric| |Value| |Stderr| |--------|------:|------|-----:|------|---|----:|---|-----:| |pubmedqa| 1|none | 0|acc |↑ |0.716|± |0.0202| Base model (unsloth/tinyllama-bnb-4bit) | Tasks |Version|Filter|n-shot|Metric| |Value| |Stderr| |--------|------:|------|-----:|------|---|----:|---|-----:| |pubmedqa| 1|none | 0|acc |↑ |0.636|± |0.0215|
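For a quick sanity check outside the evaluation harness, below is a minimal inference sketch with 🤗 Transformers. Only the repository id comes from this card; the yes/no/maybe prompt wording is an assumption, since the card does not document the prompt format used for PubMedQA.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "sonyashijin/tinyllama_pubmed_merged"  # repository id from this card

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.float16, device_map="auto"
)

# Hypothetical PubMedQA-style question; the exact training prompt format is not documented here.
prompt = "Question: Does aspirin reduce the risk of cardiovascular events?\nAnswer (yes/no/maybe):"
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=32, do_sample=False)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```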
[ "PUBMEDQA" ]
neginashz/sft-qwen-25-7b-instruct-2
neginashz
text-generation
[ "transformers", "pytorch", "safetensors", "qwen2", "text-generation", "axolotl", "generated_from_trainer", "conversational", "dataset:medalpaca/medical_meadow_medqa", "base_model:Qwen/Qwen2.5-7B-Instruct", "base_model:finetune:Qwen/Qwen2.5-7B-Instruct", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-12-17T12:06:40Z
2024-12-17T13:50:20+00:00
23
0
--- base_model: Qwen/Qwen2.5-7B-Instruct datasets: - medalpaca/medical_meadow_medqa library_name: transformers license: apache-2.0 tags: - axolotl - generated_from_trainer model-index: - name: sft-qwen-25-7b-instruct-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) <details><summary>See axolotl config</summary> axolotl version: `0.6.0` ```yaml base_model: Qwen/Qwen2.5-7B-Instruct trust_remote_code: true model_type: AutoModelForCausalLM tokenizer_type: AutoTokenizer load_in_8bit: load_in_4bit: strict: false datasets: - path: medalpaca/medical_meadow_medqa type: alpaca dataset_prepared_path: val_set_size: 0.1 output_dir: ./sft-qwen25 sequence_len: 8192 sample_packing: true eval_sample_packing: true pad_to_sequence_len: true wandb_project: sft-qwen-25-7b-instruct wandb_entity: wandb_watch: wandb_name: wandb_log_model: gradient_accumulation_steps: 1 micro_batch_size: 1 num_epochs: 2 optimizer: adamw_torch lr_scheduler: cosine learning_rate: 0.000005 train_on_inputs: false group_by_length: false bf16: true fp16: false tf32: false gradient_checkpointing: true logging_steps: 1 xformers_attention: flash_attention: true warmup_steps: eval_steps: save_steps: evals_per_epoch: saves_per_epoch: debug: deepspeed: deepspeed_configs/zero2.json weight_decay: fsdp: fsdp_config: special_tokens: hub_model_id: neginashz/sft-qwen-25-7b-instruct-2 hub_strategy: early_stopping_patience: resume_from_checkpoint: auto_resume_from_checkpoints: true ``` </details><br> # sft-qwen-25-7b-instruct-2 This model is a fine-tuned version of [Qwen/Qwen2.5-7B-Instruct](https://huggingface.co/Qwen/Qwen2.5-7B-Instruct) on the medalpaca/medical_meadow_medqa dataset. It achieves the following results on the evaluation set: - Loss: 0.1054 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 5e-06 - train_batch_size: 1 - eval_batch_size: 1 - seed: 42 - distributed_type: multi-GPU - num_devices: 4 - total_train_batch_size: 4 - total_eval_batch_size: 4 - optimizer: Use adamw_torch with betas=(0.9,0.999) and epsilon=1e-08 and optimizer_args=No additional optimizer arguments - lr_scheduler_type: cosine - lr_scheduler_warmup_steps: 4 - num_epochs: 2 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:------:|:----:|:---------------:| | 0.1381 | 0.1235 | 10 | 0.1342 | | 0.1495 | 0.2469 | 20 | 0.1229 | | 0.1215 | 0.3704 | 30 | 0.1246 | | 0.1354 | 0.4938 | 40 | 0.1175 | | 0.1223 | 0.6173 | 50 | 0.1115 | | 0.1068 | 0.7407 | 60 | 0.1101 | | 0.1061 | 0.8642 | 70 | 0.1056 | | 0.118 | 0.9877 | 80 | 0.1055 | | 0.0644 | 1.1111 | 90 | 0.1054 | | 0.0554 | 1.2346 | 100 | 0.1054 | | 0.0564 | 1.3580 | 110 | 0.1054 | | 0.0601 | 1.4815 | 120 | 0.1054 | | 0.0482 | 2.0 | 162 | 0.1054 | ### Framework versions - Transformers 4.47.0 - Pytorch 2.5.1+cu124 - Datasets 3.1.0 - Tokenizers 0.21.0
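The card does not include a usage example; the sketch below is one plausible way to query the model, assuming it inherits the standard Qwen2.5 chat template from its base. The clinical question is illustrative only and not drawn from the MedQA training data.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "neginashz/sft-qwen-25-7b-instruct-2"  # repository id from this card

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

# Illustrative MedQA-style question (not taken from the dataset).
messages = [{"role": "user", "content": "A 45-year-old presents with sudden chest pain radiating to the back. What is the most likely diagnosis?"}]
text = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
inputs = tokenizer(text, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=256)
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:], skip_special_tokens=True))
```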
[ "MEDQA" ]
Daemontatox/AetherLlama
Daemontatox
text-generation
[ "transformers", "pytorch", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "Llama3", "trl", "COT", "Reasoning", "conversational", "en", "dataset:Daemontatox/LongCOT-Reason", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-12-23T13:33:40Z
2024-12-23T23:45:20+00:00
23
1
--- base_model: - Meta/LLama3.1-8B-Instruct datasets: - Daemontatox/LongCOT-Reason language: - en library_name: transformers license: apache-2.0 metrics: - accuracy - character - competition_math - code_eval pipeline_tag: text-generation tags: - text-generation-inference - transformers - unsloth - Llama3 - trl - COT - Reasoning new_version: Daemontatox/AetherLlama --- ![image](./image.webp) # AetherLlama - **Developed by:** Daemontatox - **License:** Apache 2.0 - **Finetuned Using:** [Unsloth](https://github.com/unslothai/unsloth), Hugging Face Transformers, and TRL Library ## Model Overview The ** AetherLlama Model** is an advanced AI system optimized for logical reasoning, multi-step problem-solving, and decision-making tasks. Designed with efficiency and accuracy in mind, it employs a structured system prompt to ensure high-quality answers through a transparent and iterative thought process. ### System Prompt and Workflow This model operates using an innovative reasoning framework structured around the following steps: 1. **Initial Thought:** The model uses `<Thinking>` tags to reason step-by-step and craft its best possible response. Example: 2. **Self-Critique:** It evaluates its initial response within `<Critique>` tags, focusing on: - **Accuracy:** Is it factually correct and verifiable? - **Clarity:** Is it clear and free of ambiguity? - **Completeness:** Does it fully address the request? - **Improvement:** What can be enhanced? Example: 3. **Revision:** Based on the critique, the model refines its response within `<Revising>` tags. Example: 4. **Final Response:** The revised response is presented clearly within `<Final>` tags. Example: 5. **Tag Innovation:** When needed, the model creates and defines new tags for better structuring or clarity, ensuring consistent usage. Example: ### Key Features - **Structured Reasoning:** Transparent, multi-step approach for generating and refining answers. - **Self-Improvement:** Built-in critique and revision ensure continuous response enhancement. - **Clarity and Adaptability:** Tagging system provides organized, adaptable responses tailored to user needs. - **Creative Flexibility:** Supports dynamic problem-solving with the ability to introduce new tags and concepts. --- ## Use Cases The model is designed for various domains, including: 1. **Research and Analysis:** Extracting insights and providing structured explanations. 2. **Education:** Assisting with tutoring by breaking down complex problems step-by-step. 3. **Problem-Solving:** Offering logical and actionable solutions for multi-step challenges. 4. **Content Generation:** Producing clear, well-organized creative or professional content. --- ## Training Details - **Frameworks:** - [Unsloth](https://github.com/unslothai/unsloth) for accelerated training. - Hugging Face Transformers and the TRL library for reinforcement learning with human feedback (RLHF). - **Dataset:** Finetuned on diverse reasoning-focused tasks, including logical puzzles, mathematical problems, and commonsense reasoning scenarios. - **Hardware Efficiency:** - Trained with bnb-4bit precision for reduced memory usage. - Optimized training pipeline achieving 2x faster development cycles. --- ## Performance Metrics The model excels in reasoning benchmarks: - **ARC (AI2 Reasoning Challenge):** High accuracy in logical and commonsense tasks. - **GSM8K (Math Reasoning):** Superior results in multi-step problem-solving. - **CommonsenseQA:** Strong comprehension of everyday reasoning tasks. 
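To make the tag-based workflow described above concrete, here is a minimal prompting sketch with 🤗 Transformers. The system prompt text is a hypothetical reconstruction of the `<Thinking>`/`<Critique>`/`<Revising>`/`<Final>` convention rather than the exact prompt the model was trained with, and the word problem is only an example.

```python
import torch
from transformers import pipeline

# Repository id from this card; the system prompt below is an assumed paraphrase of the documented workflow.
generator = pipeline(
    "text-generation",
    model="Daemontatox/AetherLlama",
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",
)

system_prompt = (
    "Reason step-by-step inside <Thinking> tags, critique the draft inside <Critique> tags, "
    "refine it inside <Revising> tags, and present the final answer inside <Final> tags."
)
messages = [
    {"role": "system", "content": system_prompt},
    {"role": "user", "content": "A train leaves at 3 pm at 60 km/h; a second leaves the same station at 4 pm at 90 km/h. When does the second train catch up?"},
]

outputs = generator(messages, max_new_tokens=512)
print(outputs[0]["generated_text"][-1])
```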
--- ## Ethical Considerations - **Transparency:** Responses are structured for verifiability through tagging. - **Bias Mitigation:** Includes self-critique to minimize biases and ensure fairness. - **Safe Deployment:** Users are encouraged to evaluate outputs to prevent harm or misinformation. --- ## License This model is distributed under the Apache 2.0 license, allowing users to use, modify, and share it in compliance with the license terms. --- ## Acknowledgments Special thanks to: - [Unsloth](https://github.com/unslothai/unsloth) for accelerated training workflows. - Hugging Face for their powerful tools and libraries. --- Experience the **AetherLlama**, leveraging its structured reasoning and self-improvement capabilities for any task requiring advanced AI reasoning.
[ "CRAFT" ]
---

**Model:** hongkeon/granite-embedding-278m-multilingual-Q4_K_M-GGUF
**Author:** hongkeon
**Pipeline tag:** sentence-similarity
**Tags:** transformers, gguf, language, granite, embeddings, multilingual, mteb, llama-cpp, gguf-my-repo, sentence-similarity, en, ar, cs, de, es, fr, it, ja, ko, nl, pt, zh, base_model:ibm-granite/granite-embedding-278m-multilingual, base_model:quantized:ibm-granite/granite-embedding-278m-multilingual, license:apache-2.0, model-index, endpoints_compatible, region:us, feature-extraction
**Created:** 2025-01-27T00:27:29Z
**Last modified:** 2025-01-27T00:27:39+00:00
**Downloads:** 23
**Likes:** 0
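The card below mainly carries the base model's MTEB benchmark metadata. As a usage illustration, here is a minimal sketch of computing embeddings from this Q4_K_M GGUF quantization with llama-cpp-python. The GGUF filename pattern is an assumption (check the repository's file list), and a llama.cpp build that supports this embedding architecture is assumed.

```python
# Minimal sketch (assumptions noted in comments): sentence embeddings with
# llama-cpp-python from this Q4_K_M GGUF of granite-embedding-278m-multilingual.
from llama_cpp import Llama

llm = Llama.from_pretrained(
    repo_id="hongkeon/granite-embedding-278m-multilingual-Q4_K_M-GGUF",
    filename="*q4_k_m.gguf",  # assumed filename pattern; verify against the repo's files
    embedding=True,           # run the model in embedding mode
)

sentences = [
    "Das ist ein mehrsprachiges Embedding-Modell.",
    "This is a multilingual embedding model.",
]

# Depending on the llama.cpp pooling settings, each entry holds one pooled
# vector per sentence (or per-token vectors if pooling is disabled).
result = llm.create_embedding(sentences)
for item in result["data"]:
    print(item["index"], len(item["embedding"]))
```

For similarity search, the returned vectors can be compared with cosine similarity in the usual way.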
--- base_model: ibm-granite/granite-embedding-278m-multilingual language: - en - ar - cs - de - es - fr - it - ja - ko - nl - pt - zh library_name: transformers license: apache-2.0 pipeline_tag: sentence-similarity tags: - language - granite - embeddings - multilingual - mteb - llama-cpp - gguf-my-repo model-index: - name: ibm-granite/granite-embedding-278m-multilingual results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en-ext) type: mteb/amazon_counterfactual config: en-ext split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 73.4333 - type: f1 value: 61.2301 - type: f1_weighted value: 78.40899999999999 - type: ap value: 23.347 - type: ap_weighted value: 23.347 - type: main_score value: 73.4333 - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 71.806 - type: f1 value: 65.6467 - type: f1_weighted value: 74.4815 - type: ap value: 34.045700000000004 - type: ap_weighted value: 34.045700000000004 - type: main_score value: 71.806 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification (default) type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 67.5907 - type: f1 value: 67.36370000000001 - type: f1_weighted value: 67.36370000000001 - type: ap value: 62.0368 - type: ap_weighted value: 62.0368 - type: main_score value: 67.5907 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 37.278 - type: f1 value: 36.4099 - type: f1_weighted value: 36.4099 - type: main_score value: 37.278 - task: type: Retrieval dataset: name: MTEB AppsRetrieval (default) type: CoIR-Retrieval/apps config: default split: test revision: f22508f96b7a36c2415181ed8bb76f76e04ae2d5 metrics: - type: ndcg_at_1 value: 3.453 - type: ndcg_at_3 value: 4.882000000000001 - type: ndcg_at_5 value: 5.564 - type: ndcg_at_10 value: 6.214 - type: ndcg_at_20 value: 6.814000000000001 - type: ndcg_at_100 value: 8.581 - type: ndcg_at_1000 value: 12.215 - type: map_at_1 value: 3.453 - type: map_at_3 value: 4.515000000000001 - type: map_at_5 value: 4.89 - type: map_at_10 value: 5.151 - type: map_at_20 value: 5.313 - type: map_at_100 value: 5.539000000000001 - type: map_at_1000 value: 5.638 - type: recall_at_1 value: 3.453 - type: recall_at_3 value: 5.949999999999999 - type: recall_at_5 value: 7.623 - type: recall_at_10 value: 9.668000000000001 - type: recall_at_20 value: 12.058 - type: recall_at_100 value: 21.859 - type: recall_at_1000 value: 52.722 - type: precision_at_1 value: 3.453 - type: precision_at_3 value: 1.983 - type: precision_at_5 value: 1.525 - type: precision_at_10 value: 0.967 - type: precision_at_20 value: 0.603 - type: precision_at_100 value: 0.219 - type: precision_at_1000 value: 0.053 - type: mrr_at_1 value: 3.4528999999999996 - type: mrr_at_3 value: 4.5153 - type: mrr_at_5 value: 4.889799999999999 - type: mrr_at_10 value: 5.1507 - type: mrr_at_20 value: 5.3135 - type: mrr_at_100 value: 5.5391 - type: mrr_at_1000 value: 5.6382 - type: nauc_ndcg_at_1_max value: 37.1714 - type: nauc_ndcg_at_1_std value: 15.306700000000001 - type: nauc_ndcg_at_1_diff1 value: 46.2252 - type: nauc_ndcg_at_3_max value: 
32.0309 - type: nauc_ndcg_at_3_std value: 14.2983 - type: nauc_ndcg_at_3_diff1 value: 34.7174 - type: nauc_ndcg_at_5_max value: 29.3613 - type: nauc_ndcg_at_5_std value: 13.0358 - type: nauc_ndcg_at_5_diff1 value: 30.8369 - type: nauc_ndcg_at_10_max value: 26.820100000000004 - type: nauc_ndcg_at_10_std value: 12.3422 - type: nauc_ndcg_at_10_diff1 value: 27.3719 - type: nauc_ndcg_at_20_max value: 25.5643 - type: nauc_ndcg_at_20_std value: 11.383000000000001 - type: nauc_ndcg_at_20_diff1 value: 25.7058 - type: nauc_ndcg_at_100_max value: 23.2131 - type: nauc_ndcg_at_100_std value: 12.4787 - type: nauc_ndcg_at_100_diff1 value: 21.6874 - type: nauc_ndcg_at_1000_max value: 22.900499999999997 - type: nauc_ndcg_at_1000_std value: 13.2218 - type: nauc_ndcg_at_1000_diff1 value: 19.668 - type: nauc_map_at_1_max value: 37.1714 - type: nauc_map_at_1_std value: 15.306700000000001 - type: nauc_map_at_1_diff1 value: 46.2252 - type: nauc_map_at_3_max value: 33.1012 - type: nauc_map_at_3_std value: 14.4117 - type: nauc_map_at_3_diff1 value: 36.8859 - type: nauc_map_at_5_max value: 31.404700000000002 - type: nauc_map_at_5_std value: 13.5956 - type: nauc_map_at_5_diff1 value: 34.3454 - type: nauc_map_at_10_max value: 30.1013 - type: nauc_map_at_10_std value: 13.2253 - type: nauc_map_at_10_diff1 value: 32.487 - type: nauc_map_at_20_max value: 29.5747 - type: nauc_map_at_20_std value: 12.843499999999999 - type: nauc_map_at_20_diff1 value: 31.8252 - type: nauc_map_at_100_max value: 28.968899999999998 - type: nauc_map_at_100_std value: 12.967699999999999 - type: nauc_map_at_100_diff1 value: 30.924000000000003 - type: nauc_map_at_1000_max value: 28.894599999999997 - type: nauc_map_at_1000_std value: 12.997800000000002 - type: nauc_map_at_1000_diff1 value: 30.7653 - type: nauc_recall_at_1_max value: 37.1714 - type: nauc_recall_at_1_std value: 15.306700000000001 - type: nauc_recall_at_1_diff1 value: 46.2252 - type: nauc_recall_at_3_max value: 29.6485 - type: nauc_recall_at_3_std value: 14.072799999999999 - type: nauc_recall_at_3_diff1 value: 29.9536 - type: nauc_recall_at_5_max value: 25.251099999999997 - type: nauc_recall_at_5_std value: 11.9121 - type: nauc_recall_at_5_diff1 value: 23.9203 - type: nauc_recall_at_10_max value: 20.8856 - type: nauc_recall_at_10_std value: 10.7653 - type: nauc_recall_at_10_diff1 value: 18.3716 - type: nauc_recall_at_20_max value: 18.9378 - type: nauc_recall_at_20_std value: 8.8933 - type: nauc_recall_at_20_diff1 value: 15.7693 - type: nauc_recall_at_100_max value: 15.7027 - type: nauc_recall_at_100_std value: 12.6519 - type: nauc_recall_at_100_diff1 value: 9.2726 - type: nauc_recall_at_1000_max value: 16.2321 - type: nauc_recall_at_1000_std value: 15.2717 - type: nauc_recall_at_1000_diff1 value: 4.4337 - type: nauc_precision_at_1_max value: 37.1714 - type: nauc_precision_at_1_std value: 15.306700000000001 - type: nauc_precision_at_1_diff1 value: 46.2252 - type: nauc_precision_at_3_max value: 29.6485 - type: nauc_precision_at_3_std value: 14.072799999999999 - type: nauc_precision_at_3_diff1 value: 29.9536 - type: nauc_precision_at_5_max value: 25.251099999999997 - type: nauc_precision_at_5_std value: 11.9121 - type: nauc_precision_at_5_diff1 value: 23.9203 - type: nauc_precision_at_10_max value: 20.8856 - type: nauc_precision_at_10_std value: 10.7653 - type: nauc_precision_at_10_diff1 value: 18.3716 - type: nauc_precision_at_20_max value: 18.9378 - type: nauc_precision_at_20_std value: 8.8933 - type: nauc_precision_at_20_diff1 value: 15.7693 - type: nauc_precision_at_100_max value: 
15.7027 - type: nauc_precision_at_100_std value: 12.6519 - type: nauc_precision_at_100_diff1 value: 9.2726 - type: nauc_precision_at_1000_max value: 16.2321 - type: nauc_precision_at_1000_std value: 15.2717 - type: nauc_precision_at_1000_diff1 value: 4.4337 - type: nauc_mrr_at_1_max value: 37.1714 - type: nauc_mrr_at_1_std value: 15.306700000000001 - type: nauc_mrr_at_1_diff1 value: 46.2252 - type: nauc_mrr_at_3_max value: 33.1012 - type: nauc_mrr_at_3_std value: 14.4117 - type: nauc_mrr_at_3_diff1 value: 36.8859 - type: nauc_mrr_at_5_max value: 31.404700000000002 - type: nauc_mrr_at_5_std value: 13.5956 - type: nauc_mrr_at_5_diff1 value: 34.3454 - type: nauc_mrr_at_10_max value: 30.1013 - type: nauc_mrr_at_10_std value: 13.2253 - type: nauc_mrr_at_10_diff1 value: 32.487 - type: nauc_mrr_at_20_max value: 29.5747 - type: nauc_mrr_at_20_std value: 12.843499999999999 - type: nauc_mrr_at_20_diff1 value: 31.8252 - type: nauc_mrr_at_100_max value: 28.968899999999998 - type: nauc_mrr_at_100_std value: 12.967699999999999 - type: nauc_mrr_at_100_diff1 value: 30.9239 - type: nauc_mrr_at_1000_max value: 28.894599999999997 - type: nauc_mrr_at_1000_std value: 12.997800000000002 - type: nauc_mrr_at_1000_diff1 value: 30.7653 - type: main_score value: 6.214 - task: type: Retrieval dataset: name: MTEB ArguAna (default) type: mteb/arguana config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: ndcg_at_1 value: 31.152 - type: ndcg_at_3 value: 45.050000000000004 - type: ndcg_at_5 value: 50.458999999999996 - type: ndcg_at_10 value: 55.24400000000001 - type: ndcg_at_20 value: 57.918000000000006 - type: ndcg_at_100 value: 58.97 - type: ndcg_at_1000 value: 59.080999999999996 - type: map_at_1 value: 31.152 - type: map_at_3 value: 41.513 - type: map_at_5 value: 44.542 - type: map_at_10 value: 46.544000000000004 - type: map_at_20 value: 47.304 - type: map_at_100 value: 47.467999999999996 - type: map_at_1000 value: 47.473 - type: recall_at_1 value: 31.152 - type: recall_at_3 value: 55.334 - type: recall_at_5 value: 68.35 - type: recall_at_10 value: 83.001 - type: recall_at_20 value: 93.38499999999999 - type: recall_at_100 value: 98.791 - type: recall_at_1000 value: 99.644 - type: precision_at_1 value: 31.152 - type: precision_at_3 value: 18.445 - type: precision_at_5 value: 13.669999999999998 - type: precision_at_10 value: 8.3 - type: precision_at_20 value: 4.6690000000000005 - type: precision_at_100 value: 0.988 - type: precision_at_1000 value: 0.1 - type: mrr_at_1 value: 31.7212 - type: mrr_at_3 value: 41.7141 - type: mrr_at_5 value: 44.754599999999996 - type: mrr_at_10 value: 46.7491 - type: mrr_at_20 value: 47.515299999999996 - type: mrr_at_100 value: 47.679300000000005 - type: mrr_at_1000 value: 47.6841 - type: nauc_ndcg_at_1_max value: -7.8191 - type: nauc_ndcg_at_1_std value: -4.0581 - type: nauc_ndcg_at_1_diff1 value: 14.383199999999999 - type: nauc_ndcg_at_3_max value: -4.6856 - type: nauc_ndcg_at_3_std value: -3.4165 - type: nauc_ndcg_at_3_diff1 value: 10.7764 - type: nauc_ndcg_at_5_max value: -3.2999 - type: nauc_ndcg_at_5_std value: -3.6675 - type: nauc_ndcg_at_5_diff1 value: 11.6249 - type: nauc_ndcg_at_10_max value: -3.2984 - type: nauc_ndcg_at_10_std value: -3.0373 - type: nauc_ndcg_at_10_diff1 value: 11.9938 - type: nauc_ndcg_at_20_max value: -3.147 - type: nauc_ndcg_at_20_std value: -2.9219 - type: nauc_ndcg_at_20_diff1 value: 12.4893 - type: nauc_ndcg_at_100_max value: -4.2572 - type: nauc_ndcg_at_100_std value: -2.8537 - type: nauc_ndcg_at_100_diff1 value: 
12.1039 - type: nauc_ndcg_at_1000_max value: -4.3526 - type: nauc_ndcg_at_1000_std value: -3.0145 - type: nauc_ndcg_at_1000_diff1 value: 12.1685 - type: nauc_map_at_1_max value: -7.8191 - type: nauc_map_at_1_std value: -4.0581 - type: nauc_map_at_1_diff1 value: 14.383199999999999 - type: nauc_map_at_3_max value: -5.5556 - type: nauc_map_at_3_std value: -3.515 - type: nauc_map_at_3_diff1 value: 11.5486 - type: nauc_map_at_5_max value: -4.840599999999999 - type: nauc_map_at_5_std value: -3.6663 - type: nauc_map_at_5_diff1 value: 12.053899999999999 - type: nauc_map_at_10_max value: -4.9401 - type: nauc_map_at_10_std value: -3.3724 - type: nauc_map_at_10_diff1 value: 12.1558 - type: nauc_map_at_20_max value: -4.9365 - type: nauc_map_at_20_std value: -3.3676999999999997 - type: nauc_map_at_20_diff1 value: 12.2729 - type: nauc_map_at_100_max value: -5.0695 - type: nauc_map_at_100_std value: -3.3561 - type: nauc_map_at_100_diff1 value: 12.237 - type: nauc_map_at_1000_max value: -5.0709 - type: nauc_map_at_1000_std value: -3.3594 - type: nauc_map_at_1000_diff1 value: 12.2408 - type: nauc_recall_at_1_max value: -7.8191 - type: nauc_recall_at_1_std value: -4.0581 - type: nauc_recall_at_1_diff1 value: 14.383199999999999 - type: nauc_recall_at_3_max value: -2.0358 - type: nauc_recall_at_3_std value: -3.1464 - type: nauc_recall_at_3_diff1 value: 8.510900000000001 - type: nauc_recall_at_5_max value: 2.4358999999999997 - type: nauc_recall_at_5_std value: -3.727 - type: nauc_recall_at_5_diff1 value: 10.2867 - type: nauc_recall_at_10_max value: 6.5777 - type: nauc_recall_at_10_std value: -1.0198 - type: nauc_recall_at_10_diff1 value: 11.9244 - type: nauc_recall_at_20_max value: 22.8541 - type: nauc_recall_at_20_std value: 4.1539 - type: nauc_recall_at_20_diff1 value: 19.3648 - type: nauc_recall_at_100_max value: 18.5148 - type: nauc_recall_at_100_std value: 41.1822 - type: nauc_recall_at_100_diff1 value: 5.1883 - type: nauc_recall_at_1000_max value: 13.995099999999999 - type: nauc_recall_at_1000_std value: 53.7961 - type: nauc_recall_at_1000_diff1 value: 14.8451 - type: nauc_precision_at_1_max value: -7.8191 - type: nauc_precision_at_1_std value: -4.0581 - type: nauc_precision_at_1_diff1 value: 14.383199999999999 - type: nauc_precision_at_3_max value: -2.0358 - type: nauc_precision_at_3_std value: -3.1464 - type: nauc_precision_at_3_diff1 value: 8.510900000000001 - type: nauc_precision_at_5_max value: 2.4358999999999997 - type: nauc_precision_at_5_std value: -3.727 - type: nauc_precision_at_5_diff1 value: 10.2867 - type: nauc_precision_at_10_max value: 6.5777 - type: nauc_precision_at_10_std value: -1.0198 - type: nauc_precision_at_10_diff1 value: 11.9244 - type: nauc_precision_at_20_max value: 22.8541 - type: nauc_precision_at_20_std value: 4.1539 - type: nauc_precision_at_20_diff1 value: 19.3648 - type: nauc_precision_at_100_max value: 18.5148 - type: nauc_precision_at_100_std value: 41.1822 - type: nauc_precision_at_100_diff1 value: 5.1883 - type: nauc_precision_at_1000_max value: 13.995099999999999 - type: nauc_precision_at_1000_std value: 53.7961 - type: nauc_precision_at_1000_diff1 value: 14.8451 - type: nauc_mrr_at_1_max value: -8.1904 - type: nauc_mrr_at_1_std value: -4.0896 - type: nauc_mrr_at_1_diff1 value: 12.7103 - type: nauc_mrr_at_3_max value: -6.6608 - type: nauc_mrr_at_3_std value: -3.6741 - type: nauc_mrr_at_3_diff1 value: 9.851 - type: nauc_mrr_at_5_max value: -5.7596 - type: nauc_mrr_at_5_std value: -3.7391 - type: nauc_mrr_at_5_diff1 value: 10.4908 - type: nauc_mrr_at_10_max value: 
-5.8613 - type: nauc_mrr_at_10_std value: -3.4377999999999997 - type: nauc_mrr_at_10_diff1 value: 10.5641 - type: nauc_mrr_at_20_max value: -5.8497 - type: nauc_mrr_at_20_std value: -3.4543 - type: nauc_mrr_at_20_diff1 value: 10.6822 - type: nauc_mrr_at_100_max value: -5.9873 - type: nauc_mrr_at_100_std value: -3.4431000000000003 - type: nauc_mrr_at_100_diff1 value: 10.6379 - type: nauc_mrr_at_1000_max value: -5.9887999999999995 - type: nauc_mrr_at_1000_std value: -3.4465000000000003 - type: nauc_mrr_at_1000_diff1 value: 10.641399999999999 - type: main_score value: 55.24400000000001 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P (default) type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 43.1321 - type: v_measure_std value: 13.594000000000001 - type: main_score value: 43.1321 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S (default) type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 32.9343 - type: v_measure_std value: 14.2478 - type: main_score value: 32.9343 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions (default) type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 62.3443 - type: mrr value: 76.3882 - type: nAUC_map_max value: 28.3073 - type: nAUC_map_std value: 15.5307 - type: nAUC_map_diff1 value: 12.6855 - type: nAUC_mrr_max value: 36.409200000000006 - type: nAUC_mrr_std value: 22.6271 - type: nAUC_mrr_diff1 value: 19.1211 - type: main_score value: 62.3443 - task: type: STS dataset: name: MTEB BIOSSES (default) type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: pearson value: 84.3253 - type: spearman value: 81.6362 - type: cosine_pearson value: 84.3253 - type: cosine_spearman value: 81.6362 - type: manhattan_pearson value: 82.70960000000001 - type: manhattan_spearman value: 81.3037 - type: euclidean_pearson value: 82.6906 - type: euclidean_spearman value: 81.6362 - type: main_score value: 81.6362 - task: type: Classification dataset: name: MTEB Banking77Classification (default) type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 78.0617 - type: f1 value: 77.2085 - type: f1_weighted value: 77.2085 - type: main_score value: 78.0617 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P (default) type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 35.8271 - type: v_measure_std value: 0.7191000000000001 - type: main_score value: 35.8271 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S (default) type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 30.3905 - type: v_measure_std value: 0.7136 - type: main_score value: 30.3905 - task: type: Retrieval dataset: name: MTEB COIRCodeSearchNetRetrieval (python) type: CoIR-Retrieval/CodeSearchNet config: python split: test revision: 4adc7bc41202b5c13543c9c886a25f340634dab3 metrics: - type: ndcg_at_1 value: 83.22800000000001 - type: ndcg_at_3 value: 87.41799999999999 - type: ndcg_at_5 value: 88.089 - type: ndcg_at_10 value: 88.789 - type: ndcg_at_20 value: 
89.156 - type: ndcg_at_100 value: 89.60900000000001 - type: ndcg_at_1000 value: 89.79 - type: map_at_1 value: 83.22800000000001 - type: map_at_3 value: 86.431 - type: map_at_5 value: 86.80499999999999 - type: map_at_10 value: 87.09599999999999 - type: map_at_20 value: 87.198 - type: map_at_100 value: 87.263 - type: map_at_1000 value: 87.27000000000001 - type: recall_at_1 value: 83.22800000000001 - type: recall_at_3 value: 90.253 - type: recall_at_5 value: 91.876 - type: recall_at_10 value: 94.03399999999999 - type: recall_at_20 value: 95.475 - type: recall_at_100 value: 97.882 - type: recall_at_1000 value: 99.316 - type: precision_at_1 value: 83.22800000000001 - type: precision_at_3 value: 30.084 - type: precision_at_5 value: 18.375 - type: precision_at_10 value: 9.403 - type: precision_at_20 value: 4.774 - type: precision_at_100 value: 0.979 - type: precision_at_1000 value: 0.099 - type: mrr_at_1 value: 83.235 - type: mrr_at_3 value: 86.4336 - type: mrr_at_5 value: 86.8077 - type: mrr_at_10 value: 87.0979 - type: mrr_at_20 value: 87.2001 - type: mrr_at_100 value: 87.26509999999999 - type: mrr_at_1000 value: 87.2718 - type: nauc_ndcg_at_1_max value: 82.2462 - type: nauc_ndcg_at_1_std value: 11.4635 - type: nauc_ndcg_at_1_diff1 value: 90.5106 - type: nauc_ndcg_at_3_max value: 83.9742 - type: nauc_ndcg_at_3_std value: 12.7085 - type: nauc_ndcg_at_3_diff1 value: 88.2182 - type: nauc_ndcg_at_5_max value: 84.18870000000001 - type: nauc_ndcg_at_5_std value: 13.167499999999999 - type: nauc_ndcg_at_5_diff1 value: 88.44999999999999 - type: nauc_ndcg_at_10_max value: 84.2219 - type: nauc_ndcg_at_10_std value: 13.5219 - type: nauc_ndcg_at_10_diff1 value: 88.6386 - type: nauc_ndcg_at_20_max value: 84.2289 - type: nauc_ndcg_at_20_std value: 14.0686 - type: nauc_ndcg_at_20_diff1 value: 88.7516 - type: nauc_ndcg_at_100_max value: 84.12049999999999 - type: nauc_ndcg_at_100_std value: 14.1778 - type: nauc_ndcg_at_100_diff1 value: 88.8592 - type: nauc_ndcg_at_1000_max value: 84.0367 - type: nauc_ndcg_at_1000_std value: 13.9125 - type: nauc_ndcg_at_1000_diff1 value: 88.9054 - type: nauc_map_at_1_max value: 82.2462 - type: nauc_map_at_1_std value: 11.4635 - type: nauc_map_at_1_diff1 value: 90.5106 - type: nauc_map_at_3_max value: 83.5638 - type: nauc_map_at_3_std value: 12.3576 - type: nauc_map_at_3_diff1 value: 88.8502 - type: nauc_map_at_5_max value: 83.6625 - type: nauc_map_at_5_std value: 12.582099999999999 - type: nauc_map_at_5_diff1 value: 88.9876 - type: nauc_map_at_10_max value: 83.6605 - type: nauc_map_at_10_std value: 12.6859 - type: nauc_map_at_10_diff1 value: 89.07119999999999 - type: nauc_map_at_20_max value: 83.65629999999999 - type: nauc_map_at_20_std value: 12.8105 - type: nauc_map_at_20_diff1 value: 89.1036 - type: nauc_map_at_100_max value: 83.6413 - type: nauc_map_at_100_std value: 12.823699999999999 - type: nauc_map_at_100_diff1 value: 89.1193 - type: nauc_map_at_1000_max value: 83.6386 - type: nauc_map_at_1000_std value: 12.815999999999999 - type: nauc_map_at_1000_diff1 value: 89.1209 - type: nauc_recall_at_1_max value: 82.2462 - type: nauc_recall_at_1_std value: 11.4635 - type: nauc_recall_at_1_diff1 value: 90.5106 - type: nauc_recall_at_3_max value: 85.512 - type: nauc_recall_at_3_std value: 14.061399999999999 - type: nauc_recall_at_3_diff1 value: 85.7898 - type: nauc_recall_at_5_max value: 86.5434 - type: nauc_recall_at_5_std value: 15.894400000000001 - type: nauc_recall_at_5_diff1 value: 86.0934 - type: nauc_recall_at_10_max value: 87.59909999999999 - type: nauc_recall_at_10_std 
value: 18.9872 - type: nauc_recall_at_10_diff1 value: 86.26740000000001 - type: nauc_recall_at_20_max value: 88.76190000000001 - type: nauc_recall_at_20_std value: 25.6618 - type: nauc_recall_at_20_diff1 value: 86.5002 - type: nauc_recall_at_100_max value: 91.0976 - type: nauc_recall_at_100_std value: 40.9161 - type: nauc_recall_at_100_diff1 value: 86.5441 - type: nauc_recall_at_1000_max value: 96.018 - type: nauc_recall_at_1000_std value: 65.6217 - type: nauc_recall_at_1000_diff1 value: 86.8456 - type: nauc_precision_at_1_max value: 82.2462 - type: nauc_precision_at_1_std value: 11.4635 - type: nauc_precision_at_1_diff1 value: 90.5106 - type: nauc_precision_at_3_max value: 85.512 - type: nauc_precision_at_3_std value: 14.061399999999999 - type: nauc_precision_at_3_diff1 value: 85.7898 - type: nauc_precision_at_5_max value: 86.5434 - type: nauc_precision_at_5_std value: 15.894400000000001 - type: nauc_precision_at_5_diff1 value: 86.0934 - type: nauc_precision_at_10_max value: 87.59909999999999 - type: nauc_precision_at_10_std value: 18.9872 - type: nauc_precision_at_10_diff1 value: 86.26740000000001 - type: nauc_precision_at_20_max value: 88.76190000000001 - type: nauc_precision_at_20_std value: 25.6618 - type: nauc_precision_at_20_diff1 value: 86.5002 - type: nauc_precision_at_100_max value: 91.0976 - type: nauc_precision_at_100_std value: 40.9161 - type: nauc_precision_at_100_diff1 value: 86.5441 - type: nauc_precision_at_1000_max value: 96.018 - type: nauc_precision_at_1000_std value: 65.6217 - type: nauc_precision_at_1000_diff1 value: 86.8456 - type: nauc_mrr_at_1_max value: 82.2393 - type: nauc_mrr_at_1_std value: 11.5163 - type: nauc_mrr_at_1_diff1 value: 90.50160000000001 - type: nauc_mrr_at_3_max value: 83.5623 - type: nauc_mrr_at_3_std value: 12.395 - type: nauc_mrr_at_3_diff1 value: 88.8463 - type: nauc_mrr_at_5_max value: 83.6609 - type: nauc_mrr_at_5_std value: 12.620700000000001 - type: nauc_mrr_at_5_diff1 value: 88.9836 - type: nauc_mrr_at_10_max value: 83.6589 - type: nauc_mrr_at_10_std value: 12.7255 - type: nauc_mrr_at_10_diff1 value: 89.0672 - type: nauc_mrr_at_20_max value: 83.6546 - type: nauc_mrr_at_20_std value: 12.8504 - type: nauc_mrr_at_20_diff1 value: 89.09949999999999 - type: nauc_mrr_at_100_max value: 83.6396 - type: nauc_mrr_at_100_std value: 12.8638 - type: nauc_mrr_at_100_diff1 value: 89.1152 - type: nauc_mrr_at_1000_max value: 83.6369 - type: nauc_mrr_at_1000_std value: 12.856100000000001 - type: nauc_mrr_at_1000_diff1 value: 89.1168 - type: main_score value: 88.789 - task: type: Retrieval dataset: name: MTEB COIRCodeSearchNetRetrieval (javascript) type: CoIR-Retrieval/CodeSearchNet config: javascript split: test revision: 4adc7bc41202b5c13543c9c886a25f340634dab3 metrics: - type: ndcg_at_1 value: 29.14 - type: ndcg_at_3 value: 35.185 - type: ndcg_at_5 value: 37.013 - type: ndcg_at_10 value: 38.778 - type: ndcg_at_20 value: 40.184999999999995 - type: ndcg_at_100 value: 42.394999999999996 - type: ndcg_at_1000 value: 44.243 - type: map_at_1 value: 29.14 - type: map_at_3 value: 33.703 - type: map_at_5 value: 34.717999999999996 - type: map_at_10 value: 35.443999999999996 - type: map_at_20 value: 35.831 - type: map_at_100 value: 36.132999999999996 - type: map_at_1000 value: 36.193999999999996 - type: recall_at_1 value: 29.14 - type: recall_at_3 value: 39.471000000000004 - type: recall_at_5 value: 43.908 - type: recall_at_10 value: 49.376999999999995 - type: recall_at_20 value: 54.937999999999995 - type: recall_at_100 value: 66.91 - type: recall_at_1000 value: 
81.98100000000001 - type: precision_at_1 value: 29.14 - type: precision_at_3 value: 13.157 - type: precision_at_5 value: 8.782 - type: precision_at_10 value: 4.938 - type: precision_at_20 value: 2.7470000000000003 - type: precision_at_100 value: 0.6689999999999999 - type: precision_at_1000 value: 0.082 - type: mrr_at_1 value: 29.140100000000004 - type: mrr_at_3 value: 33.703 - type: mrr_at_5 value: 34.7179 - type: mrr_at_10 value: 35.4443 - type: mrr_at_20 value: 35.830600000000004 - type: mrr_at_100 value: 36.1332 - type: mrr_at_1000 value: 36.1935 - type: nauc_ndcg_at_1_max value: 46.9222 - type: nauc_ndcg_at_1_std value: 3.3564999999999996 - type: nauc_ndcg_at_1_diff1 value: 60.583 - type: nauc_ndcg_at_3_max value: 49.205799999999996 - type: nauc_ndcg_at_3_std value: 5.976299999999999 - type: nauc_ndcg_at_3_diff1 value: 55.09610000000001 - type: nauc_ndcg_at_5_max value: 49.0533 - type: nauc_ndcg_at_5_std value: 6.5834 - type: nauc_ndcg_at_5_diff1 value: 54.430800000000005 - type: nauc_ndcg_at_10_max value: 48.626799999999996 - type: nauc_ndcg_at_10_std value: 7.4441 - type: nauc_ndcg_at_10_diff1 value: 53.1986 - type: nauc_ndcg_at_20_max value: 48.7498 - type: nauc_ndcg_at_20_std value: 8.3344 - type: nauc_ndcg_at_20_diff1 value: 52.844 - type: nauc_ndcg_at_100_max value: 48.7164 - type: nauc_ndcg_at_100_std value: 9.1646 - type: nauc_ndcg_at_100_diff1 value: 52.6307 - type: nauc_ndcg_at_1000_max value: 48.634699999999995 - type: nauc_ndcg_at_1000_std value: 9.3865 - type: nauc_ndcg_at_1000_diff1 value: 53.100899999999996 - type: nauc_map_at_1_max value: 46.9222 - type: nauc_map_at_1_std value: 3.3564999999999996 - type: nauc_map_at_1_diff1 value: 60.583 - type: nauc_map_at_3_max value: 48.7099 - type: nauc_map_at_3_std value: 5.2638 - type: nauc_map_at_3_diff1 value: 56.370200000000004 - type: nauc_map_at_5_max value: 48.6303 - type: nauc_map_at_5_std value: 5.5931 - type: nauc_map_at_5_diff1 value: 55.9968 - type: nauc_map_at_10_max value: 48.4549 - type: nauc_map_at_10_std value: 5.949800000000001 - type: nauc_map_at_10_diff1 value: 55.4941 - type: nauc_map_at_20_max value: 48.4854 - type: nauc_map_at_20_std value: 6.1861 - type: nauc_map_at_20_diff1 value: 55.4072 - type: nauc_map_at_100_max value: 48.4835 - type: nauc_map_at_100_std value: 6.2885 - type: nauc_map_at_100_diff1 value: 55.3743 - type: nauc_map_at_1000_max value: 48.4769 - type: nauc_map_at_1000_std value: 6.2978000000000005 - type: nauc_map_at_1000_diff1 value: 55.3852 - type: nauc_recall_at_1_max value: 46.9222 - type: nauc_recall_at_1_std value: 3.3564999999999996 - type: nauc_recall_at_1_diff1 value: 60.583 - type: nauc_recall_at_3_max value: 50.5754 - type: nauc_recall_at_3_std value: 8.005700000000001 - type: nauc_recall_at_3_diff1 value: 51.542100000000005 - type: nauc_recall_at_5_max value: 50.199000000000005 - type: nauc_recall_at_5_std value: 9.5088 - type: nauc_recall_at_5_diff1 value: 49.9358 - type: nauc_recall_at_10_max value: 48.899100000000004 - type: nauc_recall_at_10_std value: 12.2017 - type: nauc_recall_at_10_diff1 value: 46.042 - type: nauc_recall_at_20_max value: 49.433899999999994 - type: nauc_recall_at_20_std value: 16.1228 - type: nauc_recall_at_20_diff1 value: 44.1762 - type: nauc_recall_at_100_max value: 49.2626 - type: nauc_recall_at_100_std value: 23.1356 - type: nauc_recall_at_100_diff1 value: 41.2386 - type: nauc_recall_at_1000_max value: 48.7068 - type: nauc_recall_at_1000_std value: 34.4874 - type: nauc_recall_at_1000_diff1 value: 42.088 - type: nauc_precision_at_1_max value: 46.9222 - 
type: nauc_precision_at_1_std value: 3.3564999999999996 - type: nauc_precision_at_1_diff1 value: 60.583 - type: nauc_precision_at_3_max value: 50.5754 - type: nauc_precision_at_3_std value: 8.005700000000001 - type: nauc_precision_at_3_diff1 value: 51.542100000000005 - type: nauc_precision_at_5_max value: 50.199000000000005 - type: nauc_precision_at_5_std value: 9.5088 - type: nauc_precision_at_5_diff1 value: 49.9358 - type: nauc_precision_at_10_max value: 48.899100000000004 - type: nauc_precision_at_10_std value: 12.2017 - type: nauc_precision_at_10_diff1 value: 46.042 - type: nauc_precision_at_20_max value: 49.433899999999994 - type: nauc_precision_at_20_std value: 16.1228 - type: nauc_precision_at_20_diff1 value: 44.1762 - type: nauc_precision_at_100_max value: 49.2626 - type: nauc_precision_at_100_std value: 23.1356 - type: nauc_precision_at_100_diff1 value: 41.2386 - type: nauc_precision_at_1000_max value: 48.7068 - type: nauc_precision_at_1000_std value: 34.4874 - type: nauc_precision_at_1000_diff1 value: 42.088 - type: nauc_mrr_at_1_max value: 46.9222 - type: nauc_mrr_at_1_std value: 3.3564999999999996 - type: nauc_mrr_at_1_diff1 value: 60.583 - type: nauc_mrr_at_3_max value: 48.7099 - type: nauc_mrr_at_3_std value: 5.2638 - type: nauc_mrr_at_3_diff1 value: 56.370200000000004 - type: nauc_mrr_at_5_max value: 48.6303 - type: nauc_mrr_at_5_std value: 5.5931 - type: nauc_mrr_at_5_diff1 value: 55.9968 - type: nauc_mrr_at_10_max value: 48.4549 - type: nauc_mrr_at_10_std value: 5.949800000000001 - type: nauc_mrr_at_10_diff1 value: 55.4941 - type: nauc_mrr_at_20_max value: 48.4854 - type: nauc_mrr_at_20_std value: 6.1861 - type: nauc_mrr_at_20_diff1 value: 55.4072 - type: nauc_mrr_at_100_max value: 48.4835 - type: nauc_mrr_at_100_std value: 6.2885 - type: nauc_mrr_at_100_diff1 value: 55.3743 - type: nauc_mrr_at_1000_max value: 48.4769 - type: nauc_mrr_at_1000_std value: 6.2978000000000005 - type: nauc_mrr_at_1000_diff1 value: 55.3852 - type: main_score value: 38.778 - task: type: Retrieval dataset: name: MTEB COIRCodeSearchNetRetrieval (go) type: CoIR-Retrieval/CodeSearchNet config: go split: test revision: 4adc7bc41202b5c13543c9c886a25f340634dab3 metrics: - type: ndcg_at_1 value: 42.809999999999995 - type: ndcg_at_3 value: 51.949999999999996 - type: ndcg_at_5 value: 54.217000000000006 - type: ndcg_at_10 value: 56.296 - type: ndcg_at_20 value: 57.735 - type: ndcg_at_100 value: 59.68599999999999 - type: ndcg_at_1000 value: 60.812 - type: map_at_1 value: 42.809999999999995 - type: map_at_3 value: 49.727 - type: map_at_5 value: 50.988 - type: map_at_10 value: 51.847 - type: map_at_20 value: 52.248000000000005 - type: map_at_100 value: 52.52 - type: map_at_1000 value: 52.561 - type: recall_at_1 value: 42.809999999999995 - type: recall_at_3 value: 58.372 - type: recall_at_5 value: 63.864 - type: recall_at_10 value: 70.291 - type: recall_at_20 value: 75.92999999999999 - type: recall_at_100 value: 86.432 - type: recall_at_1000 value: 95.371 - type: precision_at_1 value: 42.809999999999995 - type: precision_at_3 value: 19.457 - type: precision_at_5 value: 12.773000000000001 - type: precision_at_10 value: 7.029000000000001 - type: precision_at_20 value: 3.7960000000000003 - type: precision_at_100 value: 0.864 - type: precision_at_1000 value: 0.095 - type: mrr_at_1 value: 42.8097 - type: mrr_at_3 value: 49.7271 - type: mrr_at_5 value: 50.987899999999996 - type: mrr_at_10 value: 51.847100000000005 - type: mrr_at_20 value: 52.2483 - type: mrr_at_100 value: 52.519499999999994 - type: mrr_at_1000 value: 
52.560700000000004 - type: nauc_ndcg_at_1_max value: 42.5169 - type: nauc_ndcg_at_1_std value: -2.56 - type: nauc_ndcg_at_1_diff1 value: 61.5235 - type: nauc_ndcg_at_3_max value: 43.897999999999996 - type: nauc_ndcg_at_3_std value: -0.927 - type: nauc_ndcg_at_3_diff1 value: 55.5453 - type: nauc_ndcg_at_5_max value: 44.069199999999995 - type: nauc_ndcg_at_5_std value: -0.5125000000000001 - type: nauc_ndcg_at_5_diff1 value: 55.095000000000006 - type: nauc_ndcg_at_10_max value: 43.9261 - type: nauc_ndcg_at_10_std value: 0.218 - type: nauc_ndcg_at_10_diff1 value: 54.7159 - type: nauc_ndcg_at_20_max value: 44.0206 - type: nauc_ndcg_at_20_std value: 0.8718999999999999 - type: nauc_ndcg_at_20_diff1 value: 54.830400000000004 - type: nauc_ndcg_at_100_max value: 43.7526 - type: nauc_ndcg_at_100_std value: 0.9793 - type: nauc_ndcg_at_100_diff1 value: 54.9701 - type: nauc_ndcg_at_1000_max value: 43.8809 - type: nauc_ndcg_at_1000_std value: 0.7155 - type: nauc_ndcg_at_1000_diff1 value: 55.3053 - type: nauc_map_at_1_max value: 42.5169 - type: nauc_map_at_1_std value: -2.56 - type: nauc_map_at_1_diff1 value: 61.5235 - type: nauc_map_at_3_max value: 43.5908 - type: nauc_map_at_3_std value: -1.3469 - type: nauc_map_at_3_diff1 value: 56.9825 - type: nauc_map_at_5_max value: 43.674099999999996 - type: nauc_map_at_5_std value: -1.1391 - type: nauc_map_at_5_diff1 value: 56.7628 - type: nauc_map_at_10_max value: 43.6154 - type: nauc_map_at_10_std value: -0.861 - type: nauc_map_at_10_diff1 value: 56.6439 - type: nauc_map_at_20_max value: 43.650099999999995 - type: nauc_map_at_20_std value: -0.6788 - type: nauc_map_at_20_diff1 value: 56.6917 - type: nauc_map_at_100_max value: 43.6075 - type: nauc_map_at_100_std value: -0.6773 - type: nauc_map_at_100_diff1 value: 56.7132 - type: nauc_map_at_1000_max value: 43.6113 - type: nauc_map_at_1000_std value: -0.6847 - type: nauc_map_at_1000_diff1 value: 56.725300000000004 - type: nauc_recall_at_1_max value: 42.5169 - type: nauc_recall_at_1_std value: -2.56 - type: nauc_recall_at_1_diff1 value: 61.5235 - type: nauc_recall_at_3_max value: 44.8282 - type: nauc_recall_at_3_std value: 0.3731 - type: nauc_recall_at_3_diff1 value: 51.139199999999995 - type: nauc_recall_at_5_max value: 45.3912 - type: nauc_recall_at_5_std value: 1.6466999999999998 - type: nauc_recall_at_5_diff1 value: 49.5336 - type: nauc_recall_at_10_max value: 45.0172 - type: nauc_recall_at_10_std value: 4.702 - type: nauc_recall_at_10_diff1 value: 47.287600000000005 - type: nauc_recall_at_20_max value: 45.5956 - type: nauc_recall_at_20_std value: 8.8859 - type: nauc_recall_at_20_diff1 value: 46.5039 - type: nauc_recall_at_100_max value: 43.7193 - type: nauc_recall_at_100_std value: 15.4564 - type: nauc_recall_at_100_diff1 value: 42.9843 - type: nauc_recall_at_1000_max value: 49.6578 - type: nauc_recall_at_1000_std value: 28.1802 - type: nauc_recall_at_1000_diff1 value: 37.0098 - type: nauc_precision_at_1_max value: 42.5169 - type: nauc_precision_at_1_std value: -2.56 - type: nauc_precision_at_1_diff1 value: 61.5235 - type: nauc_precision_at_3_max value: 44.8282 - type: nauc_precision_at_3_std value: 0.3731 - type: nauc_precision_at_3_diff1 value: 51.139199999999995 - type: nauc_precision_at_5_max value: 45.3912 - type: nauc_precision_at_5_std value: 1.6466999999999998 - type: nauc_precision_at_5_diff1 value: 49.5336 - type: nauc_precision_at_10_max value: 45.0172 - type: nauc_precision_at_10_std value: 4.702 - type: nauc_precision_at_10_diff1 value: 47.287600000000005 - type: nauc_precision_at_20_max value: 
45.5956 - type: nauc_precision_at_20_std value: 8.8859 - type: nauc_precision_at_20_diff1 value: 46.5039 - type: nauc_precision_at_100_max value: 43.7193 - type: nauc_precision_at_100_std value: 15.4564 - type: nauc_precision_at_100_diff1 value: 42.9843 - type: nauc_precision_at_1000_max value: 49.6578 - type: nauc_precision_at_1000_std value: 28.1802 - type: nauc_precision_at_1000_diff1 value: 37.0098 - type: nauc_mrr_at_1_max value: 42.5169 - type: nauc_mrr_at_1_std value: -2.56 - type: nauc_mrr_at_1_diff1 value: 61.5235 - type: nauc_mrr_at_3_max value: 43.5908 - type: nauc_mrr_at_3_std value: -1.3469 - type: nauc_mrr_at_3_diff1 value: 56.9825 - type: nauc_mrr_at_5_max value: 43.674099999999996 - type: nauc_mrr_at_5_std value: -1.1391 - type: nauc_mrr_at_5_diff1 value: 56.7628 - type: nauc_mrr_at_10_max value: 43.6154 - type: nauc_mrr_at_10_std value: -0.861 - type: nauc_mrr_at_10_diff1 value: 56.6439 - type: nauc_mrr_at_20_max value: 43.650099999999995 - type: nauc_mrr_at_20_std value: -0.6788 - type: nauc_mrr_at_20_diff1 value: 56.6917 - type: nauc_mrr_at_100_max value: 43.6075 - type: nauc_mrr_at_100_std value: -0.6773 - type: nauc_mrr_at_100_diff1 value: 56.7132 - type: nauc_mrr_at_1000_max value: 43.6113 - type: nauc_mrr_at_1000_std value: -0.6847 - type: nauc_mrr_at_1000_diff1 value: 56.725300000000004 - type: main_score value: 56.296 - task: type: Retrieval dataset: name: MTEB COIRCodeSearchNetRetrieval (ruby) type: CoIR-Retrieval/CodeSearchNet config: ruby split: test revision: 4adc7bc41202b5c13543c9c886a25f340634dab3 metrics: - type: ndcg_at_1 value: 31.721 - type: ndcg_at_3 value: 38.559 - type: ndcg_at_5 value: 40.303 - type: ndcg_at_10 value: 42.536 - type: ndcg_at_20 value: 44.05 - type: ndcg_at_100 value: 46.565 - type: ndcg_at_1000 value: 48.447 - type: map_at_1 value: 31.721 - type: map_at_3 value: 36.915 - type: map_at_5 value: 37.891000000000005 - type: map_at_10 value: 38.814 - type: map_at_20 value: 39.236 - type: map_at_100 value: 39.574 - type: map_at_1000 value: 39.641999999999996 - type: recall_at_1 value: 31.721 - type: recall_at_3 value: 43.299 - type: recall_at_5 value: 47.502 - type: recall_at_10 value: 54.400999999999996 - type: recall_at_20 value: 60.349 - type: recall_at_100 value: 74.068 - type: recall_at_1000 value: 89.056 - type: precision_at_1 value: 31.721 - type: precision_at_3 value: 14.433000000000002 - type: precision_at_5 value: 9.5 - type: precision_at_10 value: 5.4399999999999995 - type: precision_at_20 value: 3.017 - type: precision_at_100 value: 0.741 - type: precision_at_1000 value: 0.089 - type: mrr_at_1 value: 31.7209 - type: mrr_at_3 value: 36.9151 - type: mrr_at_5 value: 37.8906 - type: mrr_at_10 value: 38.8144 - type: mrr_at_20 value: 39.2355 - type: mrr_at_100 value: 39.5737 - type: mrr_at_1000 value: 39.641999999999996 - type: nauc_ndcg_at_1_max value: 46.428999999999995 - type: nauc_ndcg_at_1_std value: 0.0014 - type: nauc_ndcg_at_1_diff1 value: 59.6017 - type: nauc_ndcg_at_3_max value: 45.9805 - type: nauc_ndcg_at_3_std value: 0.5511 - type: nauc_ndcg_at_3_diff1 value: 53.4978 - type: nauc_ndcg_at_5_max value: 45.5339 - type: nauc_ndcg_at_5_std value: 1.2229 - type: nauc_ndcg_at_5_diff1 value: 51.798500000000004 - type: nauc_ndcg_at_10_max value: 44.018 - type: nauc_ndcg_at_10_std value: 1.6709 - type: nauc_ndcg_at_10_diff1 value: 50.428799999999995 - type: nauc_ndcg_at_20_max value: 43.5252 - type: nauc_ndcg_at_20_std value: 2.4627 - type: nauc_ndcg_at_20_diff1 value: 50.6172 - type: nauc_ndcg_at_100_max value: 43.723099999999995 - 
type: nauc_ndcg_at_100_std value: 4.0416 - type: nauc_ndcg_at_100_diff1 value: 50.135600000000004 - type: nauc_ndcg_at_1000_max value: 43.7739 - type: nauc_ndcg_at_1000_std value: 3.4729 - type: nauc_ndcg_at_1000_diff1 value: 50.6595 - type: nauc_map_at_1_max value: 46.428999999999995 - type: nauc_map_at_1_std value: 0.0014 - type: nauc_map_at_1_diff1 value: 59.6017 - type: nauc_map_at_3_max value: 46.217999999999996 - type: nauc_map_at_3_std value: 0.43889999999999996 - type: nauc_map_at_3_diff1 value: 54.882299999999994 - type: nauc_map_at_5_max value: 45.9757 - type: nauc_map_at_5_std value: 0.8049999999999999 - type: nauc_map_at_5_diff1 value: 53.950900000000004 - type: nauc_map_at_10_max value: 45.3363 - type: nauc_map_at_10_std value: 0.9662999999999999 - type: nauc_map_at_10_diff1 value: 53.369 - type: nauc_map_at_20_max value: 45.2008 - type: nauc_map_at_20_std value: 1.1801000000000001 - type: nauc_map_at_20_diff1 value: 53.4425 - type: nauc_map_at_100_max value: 45.226699999999994 - type: nauc_map_at_100_std value: 1.3667 - type: nauc_map_at_100_diff1 value: 53.4089 - type: nauc_map_at_1000_max value: 45.2252 - type: nauc_map_at_1000_std value: 1.3433000000000002 - type: nauc_map_at_1000_diff1 value: 53.4268 - type: nauc_recall_at_1_max value: 46.428999999999995 - type: nauc_recall_at_1_std value: 0.0014 - type: nauc_recall_at_1_diff1 value: 59.6017 - type: nauc_recall_at_3_max value: 45.2499 - type: nauc_recall_at_3_std value: 0.8637 - type: nauc_recall_at_3_diff1 value: 49.5773 - type: nauc_recall_at_5_max value: 44.1355 - type: nauc_recall_at_5_std value: 2.5255 - type: nauc_recall_at_5_diff1 value: 45.3656 - type: nauc_recall_at_10_max value: 39.313700000000004 - type: nauc_recall_at_10_std value: 4.1421 - type: nauc_recall_at_10_diff1 value: 40.8109 - type: nauc_recall_at_20_max value: 36.923 - type: nauc_recall_at_20_std value: 7.691199999999999 - type: nauc_recall_at_20_diff1 value: 40.8715 - type: nauc_recall_at_100_max value: 36.296 - type: nauc_recall_at_100_std value: 22.020999999999997 - type: nauc_recall_at_100_diff1 value: 33.400800000000004 - type: nauc_recall_at_1000_max value: 30.508999999999997 - type: nauc_recall_at_1000_std value: 29.497600000000002 - type: nauc_recall_at_1000_diff1 value: 27.5001 - type: nauc_precision_at_1_max value: 46.428999999999995 - type: nauc_precision_at_1_std value: 0.0014 - type: nauc_precision_at_1_diff1 value: 59.6017 - type: nauc_precision_at_3_max value: 45.2499 - type: nauc_precision_at_3_std value: 0.8637 - type: nauc_precision_at_3_diff1 value: 49.5773 - type: nauc_precision_at_5_max value: 44.1355 - type: nauc_precision_at_5_std value: 2.5255 - type: nauc_precision_at_5_diff1 value: 45.3656 - type: nauc_precision_at_10_max value: 39.313700000000004 - type: nauc_precision_at_10_std value: 4.1421 - type: nauc_precision_at_10_diff1 value: 40.8109 - type: nauc_precision_at_20_max value: 36.923 - type: nauc_precision_at_20_std value: 7.691199999999999 - type: nauc_precision_at_20_diff1 value: 40.8715 - type: nauc_precision_at_100_max value: 36.296 - type: nauc_precision_at_100_std value: 22.020999999999997 - type: nauc_precision_at_100_diff1 value: 33.400800000000004 - type: nauc_precision_at_1000_max value: 30.508999999999997 - type: nauc_precision_at_1000_std value: 29.497600000000002 - type: nauc_precision_at_1000_diff1 value: 27.5001 - type: nauc_mrr_at_1_max value: 46.428999999999995 - type: nauc_mrr_at_1_std value: 0.0014 - type: nauc_mrr_at_1_diff1 value: 59.6017 - type: nauc_mrr_at_3_max value: 46.217999999999996 - type: 
nauc_mrr_at_3_std value: 0.43889999999999996 - type: nauc_mrr_at_3_diff1 value: 54.882299999999994 - type: nauc_mrr_at_5_max value: 45.9757 - type: nauc_mrr_at_5_std value: 0.8049999999999999 - type: nauc_mrr_at_5_diff1 value: 53.950900000000004 - type: nauc_mrr_at_10_max value: 45.3363 - type: nauc_mrr_at_10_std value: 0.9662999999999999 - type: nauc_mrr_at_10_diff1 value: 53.369 - type: nauc_mrr_at_20_max value: 45.2008 - type: nauc_mrr_at_20_std value: 1.1801000000000001 - type: nauc_mrr_at_20_diff1 value: 53.4425 - type: nauc_mrr_at_100_max value: 45.226699999999994 - type: nauc_mrr_at_100_std value: 1.3667 - type: nauc_mrr_at_100_diff1 value: 53.4089 - type: nauc_mrr_at_1000_max value: 45.2252 - type: nauc_mrr_at_1000_std value: 1.3433000000000002 - type: nauc_mrr_at_1000_diff1 value: 53.4268 - type: main_score value: 42.536 - task: type: Retrieval dataset: name: MTEB COIRCodeSearchNetRetrieval (java) type: CoIR-Retrieval/CodeSearchNet config: java split: test revision: 4adc7bc41202b5c13543c9c886a25f340634dab3 metrics: - type: ndcg_at_1 value: 36.887 - type: ndcg_at_3 value: 44.671 - type: ndcg_at_5 value: 46.619 - type: ndcg_at_10 value: 48.54 - type: ndcg_at_20 value: 49.881 - type: ndcg_at_100 value: 51.847 - type: ndcg_at_1000 value: 53.286 - type: map_at_1 value: 36.887 - type: map_at_3 value: 42.805 - type: map_at_5 value: 43.884 - type: map_at_10 value: 44.68 - type: map_at_20 value: 45.051 - type: map_at_100 value: 45.316 - type: map_at_1000 value: 45.364 - type: recall_at_1 value: 36.887 - type: recall_at_3 value: 50.05 - type: recall_at_5 value: 54.788000000000004 - type: recall_at_10 value: 60.711999999999996 - type: recall_at_20 value: 65.997 - type: recall_at_100 value: 76.696 - type: recall_at_1000 value: 88.371 - type: precision_at_1 value: 36.887 - type: precision_at_3 value: 16.683 - type: precision_at_5 value: 10.958 - type: precision_at_10 value: 6.071 - type: precision_at_20 value: 3.3000000000000003 - type: precision_at_100 value: 0.767 - type: precision_at_1000 value: 0.08800000000000001 - type: mrr_at_1 value: 36.9147 - type: mrr_at_3 value: 42.823699999999995 - type: mrr_at_5 value: 43.8985 - type: mrr_at_10 value: 44.6961 - type: mrr_at_20 value: 45.067 - type: mrr_at_100 value: 45.3318 - type: mrr_at_1000 value: 45.3801 - type: nauc_ndcg_at_1_max value: 42.8063 - type: nauc_ndcg_at_1_std value: -5.3001 - type: nauc_ndcg_at_1_diff1 value: 63.370099999999994 - type: nauc_ndcg_at_3_max value: 44.0649 - type: nauc_ndcg_at_3_std value: -4.0304 - type: nauc_ndcg_at_3_diff1 value: 57.7429 - type: nauc_ndcg_at_5_max value: 43.864799999999995 - type: nauc_ndcg_at_5_std value: -3.2800000000000002 - type: nauc_ndcg_at_5_diff1 value: 57.0472 - type: nauc_ndcg_at_10_max value: 43.614799999999995 - type: nauc_ndcg_at_10_std value: -2.424 - type: nauc_ndcg_at_10_diff1 value: 56.3498 - type: nauc_ndcg_at_20_max value: 43.6108 - type: nauc_ndcg_at_20_std value: -1.699 - type: nauc_ndcg_at_20_diff1 value: 56.2153 - type: nauc_ndcg_at_100_max value: 43.4705 - type: nauc_ndcg_at_100_std value: -0.7144 - type: nauc_ndcg_at_100_diff1 value: 56.0679 - type: nauc_ndcg_at_1000_max value: 43.6856 - type: nauc_ndcg_at_1000_std value: -0.7129 - type: nauc_ndcg_at_1000_diff1 value: 56.40540000000001 - type: nauc_map_at_1_max value: 42.8063 - type: nauc_map_at_1_std value: -5.3001 - type: nauc_map_at_1_diff1 value: 63.370099999999994 - type: nauc_map_at_3_max value: 43.797999999999995 - type: nauc_map_at_3_std value: -4.3491 - type: nauc_map_at_3_diff1 value: 59.0673 - type: 
nauc_map_at_5_max value: 43.6812 - type: nauc_map_at_5_std value: -3.9397 - type: nauc_map_at_5_diff1 value: 58.6982 - type: nauc_map_at_10_max value: 43.5745 - type: nauc_map_at_10_std value: -3.6122 - type: nauc_map_at_10_diff1 value: 58.431999999999995 - type: nauc_map_at_20_max value: 43.573 - type: nauc_map_at_20_std value: -3.4323 - type: nauc_map_at_20_diff1 value: 58.4168 - type: nauc_map_at_100_max value: 43.5448 - type: nauc_map_at_100_std value: -3.3167 - type: nauc_map_at_100_diff1 value: 58.394999999999996 - type: nauc_map_at_1000_max value: 43.5506 - type: nauc_map_at_1000_std value: -3.3144 - type: nauc_map_at_1000_diff1 value: 58.4057 - type: nauc_recall_at_1_max value: 42.8063 - type: nauc_recall_at_1_std value: -5.3001 - type: nauc_recall_at_1_diff1 value: 63.370099999999994 - type: nauc_recall_at_3_max value: 44.8286 - type: nauc_recall_at_3_std value: -3.0949999999999998 - type: nauc_recall_at_3_diff1 value: 53.8907 - type: nauc_recall_at_5_max value: 44.3801 - type: nauc_recall_at_5_std value: -1.1593 - type: nauc_recall_at_5_diff1 value: 51.948899999999995 - type: nauc_recall_at_10_max value: 43.6005 - type: nauc_recall_at_10_std value: 1.9532999999999998 - type: nauc_recall_at_10_diff1 value: 49.2211 - type: nauc_recall_at_20_max value: 43.5839 - type: nauc_recall_at_20_std value: 5.8288 - type: nauc_recall_at_20_diff1 value: 47.7761 - type: nauc_recall_at_100_max value: 42.6633 - type: nauc_recall_at_100_std value: 16.4317 - type: nauc_recall_at_100_diff1 value: 44.0676 - type: nauc_recall_at_1000_max value: 46.698 - type: nauc_recall_at_1000_std value: 30.054799999999997 - type: nauc_recall_at_1000_diff1 value: 41.5816 - type: nauc_precision_at_1_max value: 42.8063 - type: nauc_precision_at_1_std value: -5.3001 - type: nauc_precision_at_1_diff1 value: 63.370099999999994 - type: nauc_precision_at_3_max value: 44.8286 - type: nauc_precision_at_3_std value: -3.0949999999999998 - type: nauc_precision_at_3_diff1 value: 53.8907 - type: nauc_precision_at_5_max value: 44.3801 - type: nauc_precision_at_5_std value: -1.1593 - type: nauc_precision_at_5_diff1 value: 51.948899999999995 - type: nauc_precision_at_10_max value: 43.6005 - type: nauc_precision_at_10_std value: 1.9532999999999998 - type: nauc_precision_at_10_diff1 value: 49.2211 - type: nauc_precision_at_20_max value: 43.5839 - type: nauc_precision_at_20_std value: 5.8288 - type: nauc_precision_at_20_diff1 value: 47.7761 - type: nauc_precision_at_100_max value: 42.6633 - type: nauc_precision_at_100_std value: 16.4317 - type: nauc_precision_at_100_diff1 value: 44.0676 - type: nauc_precision_at_1000_max value: 46.698 - type: nauc_precision_at_1000_std value: 30.054799999999997 - type: nauc_precision_at_1000_diff1 value: 41.5816 - type: nauc_mrr_at_1_max value: 42.7425 - type: nauc_mrr_at_1_std value: -5.2358 - type: nauc_mrr_at_1_diff1 value: 63.285199999999996 - type: nauc_mrr_at_3_max value: 43.763200000000005 - type: nauc_mrr_at_3_std value: -4.2973 - type: nauc_mrr_at_3_diff1 value: 59.031 - type: nauc_mrr_at_5_max value: 43.650800000000004 - type: nauc_mrr_at_5_std value: -3.8918 - type: nauc_mrr_at_5_diff1 value: 58.6636 - type: nauc_mrr_at_10_max value: 43.5429 - type: nauc_mrr_at_10_std value: -3.5659000000000005 - type: nauc_mrr_at_10_diff1 value: 58.3946 - type: nauc_mrr_at_20_max value: 43.5411 - type: nauc_mrr_at_20_std value: -3.3855000000000004 - type: nauc_mrr_at_20_diff1 value: 58.379099999999994 - type: nauc_mrr_at_100_max value: 43.5128 - type: nauc_mrr_at_100_std value: -3.2696000000000005 - type: 
nauc_mrr_at_100_diff1 value: 58.3572 - type: nauc_mrr_at_1000_max value: 43.5186 - type: nauc_mrr_at_1000_std value: -3.2672 - type: nauc_mrr_at_1000_diff1 value: 58.3678 - type: main_score value: 48.54 - task: type: Retrieval dataset: name: MTEB COIRCodeSearchNetRetrieval (php) type: CoIR-Retrieval/CodeSearchNet config: php split: test revision: 4adc7bc41202b5c13543c9c886a25f340634dab3 metrics: - type: ndcg_at_1 value: 30.734 - type: ndcg_at_3 value: 38.155 - type: ndcg_at_5 value: 40.306999999999995 - type: ndcg_at_10 value: 42.510999999999996 - type: ndcg_at_20 value: 44.156 - type: ndcg_at_100 value: 46.641 - type: ndcg_at_1000 value: 48.359 - type: map_at_1 value: 30.734 - type: map_at_3 value: 36.347 - type: map_at_5 value: 37.539 - type: map_at_10 value: 38.455 - type: map_at_20 value: 38.906 - type: map_at_100 value: 39.24 - type: map_at_1000 value: 39.300000000000004 - type: recall_at_1 value: 30.734 - type: recall_at_3 value: 43.378 - type: recall_at_5 value: 48.616 - type: recall_at_10 value: 55.395 - type: recall_at_20 value: 61.91 - type: recall_at_100 value: 75.432 - type: recall_at_1000 value: 89.254 - type: precision_at_1 value: 30.734 - type: precision_at_3 value: 14.459 - type: precision_at_5 value: 9.722999999999999 - type: precision_at_10 value: 5.539000000000001 - type: precision_at_20 value: 3.0949999999999998 - type: precision_at_100 value: 0.754 - type: precision_at_1000 value: 0.089 - type: mrr_at_1 value: 30.6907 - type: mrr_at_3 value: 36.3137 - type: mrr_at_5 value: 37.5121 - type: mrr_at_10 value: 38.4289 - type: mrr_at_20 value: 38.8786 - type: mrr_at_100 value: 39.2136 - type: mrr_at_1000 value: 39.2729 - type: nauc_ndcg_at_1_max value: 36.8055 - type: nauc_ndcg_at_1_std value: -1.5909 - type: nauc_ndcg_at_1_diff1 value: 55.9244 - type: nauc_ndcg_at_3_max value: 38.4262 - type: nauc_ndcg_at_3_std value: 0.5292 - type: nauc_ndcg_at_3_diff1 value: 49.7477 - type: nauc_ndcg_at_5_max value: 38.0552 - type: nauc_ndcg_at_5_std value: 1.102 - type: nauc_ndcg_at_5_diff1 value: 48.5308 - type: nauc_ndcg_at_10_max value: 38.0054 - type: nauc_ndcg_at_10_std value: 1.9313 - type: nauc_ndcg_at_10_diff1 value: 48.016999999999996 - type: nauc_ndcg_at_20_max value: 37.8808 - type: nauc_ndcg_at_20_std value: 2.56 - type: nauc_ndcg_at_20_diff1 value: 47.5649 - type: nauc_ndcg_at_100_max value: 38.3754 - type: nauc_ndcg_at_100_std value: 3.6703 - type: nauc_ndcg_at_100_diff1 value: 47.6154 - type: nauc_ndcg_at_1000_max value: 38.534600000000005 - type: nauc_ndcg_at_1000_std value: 3.7317000000000005 - type: nauc_ndcg_at_1000_diff1 value: 48.0299 - type: nauc_map_at_1_max value: 36.8055 - type: nauc_map_at_1_std value: -1.5909 - type: nauc_map_at_1_diff1 value: 55.9244 - type: nauc_map_at_3_max value: 38.0383 - type: nauc_map_at_3_std value: 0.0207 - type: nauc_map_at_3_diff1 value: 51.137299999999996 - type: nauc_map_at_5_max value: 37.8223 - type: nauc_map_at_5_std value: 0.3179 - type: nauc_map_at_5_diff1 value: 50.4641 - type: nauc_map_at_10_max value: 37.8022 - type: nauc_map_at_10_std value: 0.6617999999999999 - type: nauc_map_at_10_diff1 value: 50.269 - type: nauc_map_at_20_max value: 37.7686 - type: nauc_map_at_20_std value: 0.8326999999999999 - type: nauc_map_at_20_diff1 value: 50.153499999999994 - type: nauc_map_at_100_max value: 37.832300000000004 - type: nauc_map_at_100_std value: 0.9767 - type: nauc_map_at_100_diff1 value: 50.174099999999996 - type: nauc_map_at_1000_max value: 37.838300000000004 - type: nauc_map_at_1000_std value: 0.9815 - type: 
nauc_map_at_1000_diff1 value: 50.1882 - type: nauc_recall_at_1_max value: 36.8055 - type: nauc_recall_at_1_std value: -1.5909 - type: nauc_recall_at_1_diff1 value: 55.9244 - type: nauc_recall_at_3_max value: 39.5304 - type: nauc_recall_at_3_std value: 1.9767 - type: nauc_recall_at_3_diff1 value: 45.8281 - type: nauc_recall_at_5_max value: 38.6851 - type: nauc_recall_at_5_std value: 3.4711 - type: nauc_recall_at_5_diff1 value: 42.8172 - type: nauc_recall_at_10_max value: 38.5524 - type: nauc_recall_at_10_std value: 6.2315000000000005 - type: nauc_recall_at_10_diff1 value: 40.801 - type: nauc_recall_at_20_max value: 38.048300000000005 - type: nauc_recall_at_20_std value: 9.3045 - type: nauc_recall_at_20_diff1 value: 38.222 - type: nauc_recall_at_100_max value: 42.054399999999994 - type: nauc_recall_at_100_std value: 20.4425 - type: nauc_recall_at_100_diff1 value: 35.0773 - type: nauc_recall_at_1000_max value: 49.2856 - type: nauc_recall_at_1000_std value: 38.4529 - type: nauc_recall_at_1000_diff1 value: 31.7647 - type: nauc_precision_at_1_max value: 36.8055 - type: nauc_precision_at_1_std value: -1.5909 - type: nauc_precision_at_1_diff1 value: 55.9244 - type: nauc_precision_at_3_max value: 39.5304 - type: nauc_precision_at_3_std value: 1.9767 - type: nauc_precision_at_3_diff1 value: 45.8281 - type: nauc_precision_at_5_max value: 38.6851 - type: nauc_precision_at_5_std value: 3.4711 - type: nauc_precision_at_5_diff1 value: 42.8172 - type: nauc_precision_at_10_max value: 38.5524 - type: nauc_precision_at_10_std value: 6.2315000000000005 - type: nauc_precision_at_10_diff1 value: 40.801 - type: nauc_precision_at_20_max value: 38.048300000000005 - type: nauc_precision_at_20_std value: 9.3045 - type: nauc_precision_at_20_diff1 value: 38.222 - type: nauc_precision_at_100_max value: 42.054399999999994 - type: nauc_precision_at_100_std value: 20.4425 - type: nauc_precision_at_100_diff1 value: 35.0773 - type: nauc_precision_at_1000_max value: 49.2856 - type: nauc_precision_at_1000_std value: 38.4529 - type: nauc_precision_at_1000_diff1 value: 31.7647 - type: nauc_mrr_at_1_max value: 36.8365 - type: nauc_mrr_at_1_std value: -1.4754 - type: nauc_mrr_at_1_diff1 value: 56.0597 - type: nauc_mrr_at_3_max value: 38.054 - type: nauc_mrr_at_3_std value: 0.09430000000000001 - type: nauc_mrr_at_3_diff1 value: 51.2016 - type: nauc_mrr_at_5_max value: 37.8431 - type: nauc_mrr_at_5_std value: 0.3829 - type: nauc_mrr_at_5_diff1 value: 50.5285 - type: nauc_mrr_at_10_max value: 37.8231 - type: nauc_mrr_at_10_std value: 0.7271 - type: nauc_mrr_at_10_diff1 value: 50.333099999999995 - type: nauc_mrr_at_20_max value: 37.7905 - type: nauc_mrr_at_20_std value: 0.8992999999999999 - type: nauc_mrr_at_20_diff1 value: 50.2181 - type: nauc_mrr_at_100_max value: 37.853500000000004 - type: nauc_mrr_at_100_std value: 1.0428 - type: nauc_mrr_at_100_diff1 value: 50.239 - type: nauc_mrr_at_1000_max value: 37.859500000000004 - type: nauc_mrr_at_1000_std value: 1.0477 - type: nauc_mrr_at_1000_diff1 value: 50.2532 - type: main_score value: 42.510999999999996 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval (default) type: mteb/cqadupstack-android config: default split: test revision: f46a197baaae43b4f621051089b82a364682dfeb metrics: - type: ndcg_at_1 value: 42.918 - type: ndcg_at_3 value: 47.992000000000004 - type: ndcg_at_5 value: 50.298 - type: ndcg_at_10 value: 53.047999999999995 - type: ndcg_at_20 value: 55.36600000000001 - type: ndcg_at_100 value: 58.18 - type: ndcg_at_1000 value: 59.992999999999995 - type: 
map_at_1 value: 35.147 - type: map_at_3 value: 42.985 - type: map_at_5 value: 44.895 - type: map_at_10 value: 46.568 - type: map_at_20 value: 47.527 - type: map_at_100 value: 48.178 - type: map_at_1000 value: 48.303000000000004 - type: recall_at_1 value: 35.147 - type: recall_at_3 value: 50.229 - type: recall_at_5 value: 56.586999999999996 - type: recall_at_10 value: 64.656 - type: recall_at_20 value: 72.875 - type: recall_at_100 value: 85.397 - type: recall_at_1000 value: 96.799 - type: precision_at_1 value: 42.918 - type: precision_at_3 value: 22.698999999999998 - type: precision_at_5 value: 16.309 - type: precision_at_10 value: 10.100000000000001 - type: precision_at_20 value: 6.0440000000000005 - type: precision_at_100 value: 1.5890000000000002 - type: precision_at_1000 value: 0.209 - type: mrr_at_1 value: 42.9185 - type: mrr_at_3 value: 50.1907 - type: mrr_at_5 value: 51.9003 - type: mrr_at_10 value: 52.824400000000004 - type: mrr_at_20 value: 53.3002 - type: mrr_at_100 value: 53.5134 - type: mrr_at_1000 value: 53.5569 - type: nauc_ndcg_at_1_max value: 45.115300000000005 - type: nauc_ndcg_at_1_std value: -5.3469999999999995 - type: nauc_ndcg_at_1_diff1 value: 50.792899999999996 - type: nauc_ndcg_at_3_max value: 44.379000000000005 - type: nauc_ndcg_at_3_std value: -2.628 - type: nauc_ndcg_at_3_diff1 value: 45.6678 - type: nauc_ndcg_at_5_max value: 44.8852 - type: nauc_ndcg_at_5_std value: -1.7051 - type: nauc_ndcg_at_5_diff1 value: 46.0814 - type: nauc_ndcg_at_10_max value: 43.969500000000004 - type: nauc_ndcg_at_10_std value: -0.4902 - type: nauc_ndcg_at_10_diff1 value: 46.2439 - type: nauc_ndcg_at_20_max value: 44.588499999999996 - type: nauc_ndcg_at_20_std value: 0.5193 - type: nauc_ndcg_at_20_diff1 value: 45.9229 - type: nauc_ndcg_at_100_max value: 45.0779 - type: nauc_ndcg_at_100_std value: 1.1967999999999999 - type: nauc_ndcg_at_100_diff1 value: 46.090199999999996 - type: nauc_ndcg_at_1000_max value: 45.082 - type: nauc_ndcg_at_1000_std value: 0.3457 - type: nauc_ndcg_at_1000_diff1 value: 46.366 - type: nauc_map_at_1_max value: 38.731 - type: nauc_map_at_1_std value: -7.1701 - type: nauc_map_at_1_diff1 value: 52.0087 - type: nauc_map_at_3_max value: 42.126799999999996 - type: nauc_map_at_3_std value: -4.8249 - type: nauc_map_at_3_diff1 value: 47.7841 - type: nauc_map_at_5_max value: 43.2155 - type: nauc_map_at_5_std value: -3.9702 - type: nauc_map_at_5_diff1 value: 47.9376 - type: nauc_map_at_10_max value: 43.4398 - type: nauc_map_at_10_std value: -2.8201 - type: nauc_map_at_10_diff1 value: 47.9726 - type: nauc_map_at_20_max value: 43.9625 - type: nauc_map_at_20_std value: -2.4088 - type: nauc_map_at_20_diff1 value: 47.7323 - type: nauc_map_at_100_max value: 44.0439 - type: nauc_map_at_100_std value: -2.1932 - type: nauc_map_at_100_diff1 value: 47.672399999999996 - type: nauc_map_at_1000_max value: 44.059599999999996 - type: nauc_map_at_1000_std value: -2.2453999999999996 - type: nauc_map_at_1000_diff1 value: 47.6659 - type: nauc_recall_at_1_max value: 38.731 - type: nauc_recall_at_1_std value: -7.1701 - type: nauc_recall_at_1_diff1 value: 52.0087 - type: nauc_recall_at_3_max value: 40.5229 - type: nauc_recall_at_3_std value: -1.3240999999999998 - type: nauc_recall_at_3_diff1 value: 41.1764 - type: nauc_recall_at_5_max value: 41.248000000000005 - type: nauc_recall_at_5_std value: 1.4647999999999999 - type: nauc_recall_at_5_diff1 value: 41.044799999999995 - type: nauc_recall_at_10_max value: 38.6375 - type: nauc_recall_at_10_std value: 5.3439 - type: nauc_recall_at_10_diff1 value: 
39.8162 - type: nauc_recall_at_20_max value: 39.6813 - type: nauc_recall_at_20_std value: 11.1138 - type: nauc_recall_at_20_diff1 value: 36.8881 - type: nauc_recall_at_100_max value: 44.9346 - type: nauc_recall_at_100_std value: 22.5203 - type: nauc_recall_at_100_diff1 value: 34.8792 - type: nauc_recall_at_1000_max value: 52.49979999999999 - type: nauc_recall_at_1000_std value: 50.954299999999996 - type: nauc_recall_at_1000_diff1 value: 36.1016 - type: nauc_precision_at_1_max value: 45.115300000000005 - type: nauc_precision_at_1_std value: -5.3469999999999995 - type: nauc_precision_at_1_diff1 value: 50.792899999999996 - type: nauc_precision_at_3_max value: 41.841 - type: nauc_precision_at_3_std value: 3.3930000000000002 - type: nauc_precision_at_3_diff1 value: 27.495399999999997 - type: nauc_precision_at_5_max value: 38.527 - type: nauc_precision_at_5_std value: 8.2496 - type: nauc_precision_at_5_diff1 value: 19.3628 - type: nauc_precision_at_10_max value: 27.5499 - type: nauc_precision_at_10_std value: 13.264100000000001 - type: nauc_precision_at_10_diff1 value: 9.9718 - type: nauc_precision_at_20_max value: 21.431 - type: nauc_precision_at_20_std value: 14.426400000000001 - type: nauc_precision_at_20_diff1 value: -0.11030000000000001 - type: nauc_precision_at_100_max value: 6.8088 - type: nauc_precision_at_100_std value: 9.8979 - type: nauc_precision_at_100_diff1 value: -10.1603 - type: nauc_precision_at_1000_max value: -6.4949 - type: nauc_precision_at_1000_std value: -3.9967999999999995 - type: nauc_precision_at_1000_diff1 value: -17.765800000000002 - type: nauc_mrr_at_1_max value: 45.115300000000005 - type: nauc_mrr_at_1_std value: -5.3469999999999995 - type: nauc_mrr_at_1_diff1 value: 50.792899999999996 - type: nauc_mrr_at_3_max value: 45.8581 - type: nauc_mrr_at_3_std value: -2.9239 - type: nauc_mrr_at_3_diff1 value: 47.079 - type: nauc_mrr_at_5_max value: 45.5453 - type: nauc_mrr_at_5_std value: -2.2778 - type: nauc_mrr_at_5_diff1 value: 47.0394 - type: nauc_mrr_at_10_max value: 45.2727 - type: nauc_mrr_at_10_std value: -2.1793 - type: nauc_mrr_at_10_diff1 value: 46.7719 - type: nauc_mrr_at_20_max value: 45.232 - type: nauc_mrr_at_20_std value: -2.0842 - type: nauc_mrr_at_20_diff1 value: 46.75 - type: nauc_mrr_at_100_max value: 45.3233 - type: nauc_mrr_at_100_std value: -2.0778000000000003 - type: nauc_mrr_at_100_diff1 value: 46.7919 - type: nauc_mrr_at_1000_max value: 45.325700000000005 - type: nauc_mrr_at_1000_std value: -2.0868 - type: nauc_mrr_at_1000_diff1 value: 46.812799999999996 - type: main_score value: 53.047999999999995 - task: type: Retrieval dataset: name: MTEB CQADupstackEnglishRetrieval (default) type: mteb/cqadupstack-english config: default split: test revision: ad9991cb51e31e31e430383c75ffb2885547b5f0 metrics: - type: ndcg_at_1 value: 35.796 - type: ndcg_at_3 value: 40.036 - type: ndcg_at_5 value: 41.778 - type: ndcg_at_10 value: 43.868 - type: ndcg_at_20 value: 45.777 - type: ndcg_at_100 value: 48.771 - type: ndcg_at_1000 value: 51.001 - type: map_at_1 value: 28.177000000000003 - type: map_at_3 value: 35.445 - type: map_at_5 value: 36.976 - type: map_at_10 value: 38.25 - type: map_at_20 value: 38.981 - type: map_at_100 value: 39.585 - type: map_at_1000 value: 39.728 - type: recall_at_1 value: 28.177000000000003 - type: recall_at_3 value: 41.782000000000004 - type: recall_at_5 value: 46.861000000000004 - type: recall_at_10 value: 53.464 - type: recall_at_20 value: 60.621 - type: recall_at_100 value: 74.628 - type: recall_at_1000 value: 88.839 - type: 
precision_at_1 value: 35.796 - type: precision_at_3 value: 19.639 - type: precision_at_5 value: 13.924 - type: precision_at_10 value: 8.439 - type: precision_at_20 value: 5.016 - type: precision_at_100 value: 1.394 - type: precision_at_1000 value: 0.189 - type: mrr_at_1 value: 35.7962 - type: mrr_at_3 value: 42.1019 - type: mrr_at_5 value: 43.4172 - type: mrr_at_10 value: 44.2407 - type: mrr_at_20 value: 44.6907 - type: mrr_at_100 value: 45.0075 - type: mrr_at_1000 value: 45.059 - type: nauc_ndcg_at_1_max value: 47.856 - type: nauc_ndcg_at_1_std value: 3.0363 - type: nauc_ndcg_at_1_diff1 value: 48.7364 - type: nauc_ndcg_at_3_max value: 49.2728 - type: nauc_ndcg_at_3_std value: 4.1776 - type: nauc_ndcg_at_3_diff1 value: 45.1449 - type: nauc_ndcg_at_5_max value: 49.5649 - type: nauc_ndcg_at_5_std value: 3.7340999999999998 - type: nauc_ndcg_at_5_diff1 value: 44.6651 - type: nauc_ndcg_at_10_max value: 50.1977 - type: nauc_ndcg_at_10_std value: 4.5302 - type: nauc_ndcg_at_10_diff1 value: 45.0403 - type: nauc_ndcg_at_20_max value: 49.9326 - type: nauc_ndcg_at_20_std value: 5.5147 - type: nauc_ndcg_at_20_diff1 value: 44.5055 - type: nauc_ndcg_at_100_max value: 50.3035 - type: nauc_ndcg_at_100_std value: 7.1086 - type: nauc_ndcg_at_100_diff1 value: 44.451 - type: nauc_ndcg_at_1000_max value: 50.1836 - type: nauc_ndcg_at_1000_std value: 7.4503 - type: nauc_ndcg_at_1000_diff1 value: 44.301899999999996 - type: nauc_map_at_1_max value: 41.2555 - type: nauc_map_at_1_std value: -5.2668 - type: nauc_map_at_1_diff1 value: 52.0284 - type: nauc_map_at_3_max value: 46.6939 - type: nauc_map_at_3_std value: -0.8533000000000001 - type: nauc_map_at_3_diff1 value: 47.9095 - type: nauc_map_at_5_max value: 47.5024 - type: nauc_map_at_5_std value: -0.05109999999999999 - type: nauc_map_at_5_diff1 value: 47.1421 - type: nauc_map_at_10_max value: 48.1632 - type: nauc_map_at_10_std value: 0.8672 - type: nauc_map_at_10_diff1 value: 46.9929 - type: nauc_map_at_20_max value: 48.2708 - type: nauc_map_at_20_std value: 1.5195 - type: nauc_map_at_20_diff1 value: 46.7349 - type: nauc_map_at_100_max value: 48.5516 - type: nauc_map_at_100_std value: 2.1593 - type: nauc_map_at_100_diff1 value: 46.6641 - type: nauc_map_at_1000_max value: 48.6017 - type: nauc_map_at_1000_std value: 2.2745 - type: nauc_map_at_1000_diff1 value: 46.649 - type: nauc_recall_at_1_max value: 41.2555 - type: nauc_recall_at_1_std value: -5.2668 - type: nauc_recall_at_1_diff1 value: 52.0284 - type: nauc_recall_at_3_max value: 47.0403 - type: nauc_recall_at_3_std value: 1.5399 - type: nauc_recall_at_3_diff1 value: 42.998599999999996 - type: nauc_recall_at_5_max value: 47.7652 - type: nauc_recall_at_5_std value: 2.5079000000000002 - type: nauc_recall_at_5_diff1 value: 40.131099999999996 - type: nauc_recall_at_10_max value: 49.215199999999996 - type: nauc_recall_at_10_std value: 5.6207 - type: nauc_recall_at_10_diff1 value: 40.0067 - type: nauc_recall_at_20_max value: 47.6907 - type: nauc_recall_at_20_std value: 10.0091 - type: nauc_recall_at_20_diff1 value: 36.548 - type: nauc_recall_at_100_max value: 49.8978 - type: nauc_recall_at_100_std value: 20.7533 - type: nauc_recall_at_100_diff1 value: 34.463100000000004 - type: nauc_recall_at_1000_max value: 49.2751 - type: nauc_recall_at_1000_std value: 33.7021 - type: nauc_recall_at_1000_diff1 value: 27.995199999999997 - type: nauc_precision_at_1_max value: 47.856 - type: nauc_precision_at_1_std value: 3.0363 - type: nauc_precision_at_1_diff1 value: 48.7364 - type: nauc_precision_at_3_max value: 48.0591 - type: 
nauc_precision_at_3_std value: 16.0079 - type: nauc_precision_at_3_diff1 value: 28.286099999999998 - type: nauc_precision_at_5_max value: 45.3901 - type: nauc_precision_at_5_std value: 18.939500000000002 - type: nauc_precision_at_5_diff1 value: 20.7183 - type: nauc_precision_at_10_max value: 40.2901 - type: nauc_precision_at_10_std value: 24.1368 - type: nauc_precision_at_10_diff1 value: 13.1708 - type: nauc_precision_at_20_max value: 34.5736 - type: nauc_precision_at_20_std value: 28.524 - type: nauc_precision_at_20_diff1 value: 6.0857 - type: nauc_precision_at_100_max value: 24.0575 - type: nauc_precision_at_100_std value: 32.7048 - type: nauc_precision_at_100_diff1 value: -4.175800000000001 - type: nauc_precision_at_1000_max value: 11.3804 - type: nauc_precision_at_1000_std value: 28.917700000000004 - type: nauc_precision_at_1000_diff1 value: -11.994100000000001 - type: nauc_mrr_at_1_max value: 47.856 - type: nauc_mrr_at_1_std value: 3.0363 - type: nauc_mrr_at_1_diff1 value: 48.7364 - type: nauc_mrr_at_3_max value: 50.048 - type: nauc_mrr_at_3_std value: 6.464300000000001 - type: nauc_mrr_at_3_diff1 value: 45.5115 - type: nauc_mrr_at_5_max value: 50.0947 - type: nauc_mrr_at_5_std value: 6.3483 - type: nauc_mrr_at_5_diff1 value: 44.8476 - type: nauc_mrr_at_10_max value: 50.244699999999995 - type: nauc_mrr_at_10_std value: 6.666900000000001 - type: nauc_mrr_at_10_diff1 value: 45.0222 - type: nauc_mrr_at_20_max value: 50.1332 - type: nauc_mrr_at_20_std value: 6.868200000000001 - type: nauc_mrr_at_20_diff1 value: 44.8895 - type: nauc_mrr_at_100_max value: 50.1173 - type: nauc_mrr_at_100_std value: 6.930600000000001 - type: nauc_mrr_at_100_diff1 value: 44.8887 - type: nauc_mrr_at_1000_max value: 50.11259999999999 - type: nauc_mrr_at_1000_std value: 6.923799999999999 - type: nauc_mrr_at_1000_diff1 value: 44.8928 - type: main_score value: 43.868 - task: type: Retrieval dataset: name: MTEB CQADupstackGamingRetrieval (default) type: mteb/cqadupstack-gaming config: default split: test revision: 4885aa143210c98657558c04aaf3dc47cfb54340 metrics: - type: ndcg_at_1 value: 43.448 - type: ndcg_at_3 value: 51.032999999999994 - type: ndcg_at_5 value: 53.73 - type: ndcg_at_10 value: 56.369 - type: ndcg_at_20 value: 58.167 - type: ndcg_at_100 value: 60.28 - type: ndcg_at_1000 value: 61.511 - type: map_at_1 value: 38.115 - type: map_at_3 value: 47.355999999999995 - type: map_at_5 value: 49.221 - type: map_at_10 value: 50.57000000000001 - type: map_at_20 value: 51.2 - type: map_at_100 value: 51.568999999999996 - type: map_at_1000 value: 51.627 - type: recall_at_1 value: 38.115 - type: recall_at_3 value: 55.733 - type: recall_at_5 value: 62.41100000000001 - type: recall_at_10 value: 70.11800000000001 - type: recall_at_20 value: 76.714 - type: recall_at_100 value: 87.071 - type: recall_at_1000 value: 95.921 - type: precision_at_1 value: 43.448 - type: precision_at_3 value: 22.947 - type: precision_at_5 value: 15.799 - type: precision_at_10 value: 9.154 - type: precision_at_20 value: 5.141 - type: precision_at_100 value: 1.196 - type: precision_at_1000 value: 0.135 - type: mrr_at_1 value: 43.4483 - type: mrr_at_3 value: 51.3689 - type: mrr_at_5 value: 52.8955 - type: mrr_at_10 value: 53.809200000000004 - type: mrr_at_20 value: 54.224700000000006 - type: mrr_at_100 value: 54.4617 - type: mrr_at_1000 value: 54.49079999999999 - type: nauc_ndcg_at_1_max value: 41.9268 - type: nauc_ndcg_at_1_std value: -6.0252 - type: nauc_ndcg_at_1_diff1 value: 55.4978 - type: nauc_ndcg_at_3_max value: 43.5492 - type: 
nauc_ndcg_at_3_std value: -4.7010000000000005 - type: nauc_ndcg_at_3_diff1 value: 51.0898 - type: nauc_ndcg_at_5_max value: 44.7544 - type: nauc_ndcg_at_5_std value: -2.9584 - type: nauc_ndcg_at_5_diff1 value: 50.6481 - type: nauc_ndcg_at_10_max value: 45.2203 - type: nauc_ndcg_at_10_std value: -1.6934 - type: nauc_ndcg_at_10_diff1 value: 49.9874 - type: nauc_ndcg_at_20_max value: 45.002199999999995 - type: nauc_ndcg_at_20_std value: -0.9383 - type: nauc_ndcg_at_20_diff1 value: 49.666700000000006 - type: nauc_ndcg_at_100_max value: 45.448699999999995 - type: nauc_ndcg_at_100_std value: -0.1934 - type: nauc_ndcg_at_100_diff1 value: 50.0483 - type: nauc_ndcg_at_1000_max value: 45.3335 - type: nauc_ndcg_at_1000_std value: -0.42389999999999994 - type: nauc_ndcg_at_1000_diff1 value: 50.5614 - type: nauc_map_at_1_max value: 35.7022 - type: nauc_map_at_1_std value: -6.6763 - type: nauc_map_at_1_diff1 value: 54.848699999999994 - type: nauc_map_at_3_max value: 41.5987 - type: nauc_map_at_3_std value: -6.3043000000000005 - type: nauc_map_at_3_diff1 value: 52.058400000000006 - type: nauc_map_at_5_max value: 42.5887 - type: nauc_map_at_5_std value: -5.0012 - type: nauc_map_at_5_diff1 value: 51.804300000000005 - type: nauc_map_at_10_max value: 43.085 - type: nauc_map_at_10_std value: -4.1721 - type: nauc_map_at_10_diff1 value: 51.524499999999996 - type: nauc_map_at_20_max value: 43.185 - type: nauc_map_at_20_std value: -3.6862 - type: nauc_map_at_20_diff1 value: 51.4297 - type: nauc_map_at_100_max value: 43.3473 - type: nauc_map_at_100_std value: -3.4286999999999996 - type: nauc_map_at_100_diff1 value: 51.497099999999996 - type: nauc_map_at_1000_max value: 43.358799999999995 - type: nauc_map_at_1000_std value: -3.3894 - type: nauc_map_at_1000_diff1 value: 51.5155 - type: nauc_recall_at_1_max value: 35.7022 - type: nauc_recall_at_1_std value: -6.6763 - type: nauc_recall_at_1_diff1 value: 54.848699999999994 - type: nauc_recall_at_3_max value: 42.9096 - type: nauc_recall_at_3_std value: -5.9907 - type: nauc_recall_at_3_diff1 value: 47.407 - type: nauc_recall_at_5_max value: 45.9891 - type: nauc_recall_at_5_std value: -0.5341 - type: nauc_recall_at_5_diff1 value: 45.336 - type: nauc_recall_at_10_max value: 47.457899999999995 - type: nauc_recall_at_10_std value: 4.2982 - type: nauc_recall_at_10_diff1 value: 41.6 - type: nauc_recall_at_20_max value: 47.3364 - type: nauc_recall_at_20_std value: 9.667100000000001 - type: nauc_recall_at_20_diff1 value: 38.4822 - type: nauc_recall_at_100_max value: 52.0554 - type: nauc_recall_at_100_std value: 21.6585 - type: nauc_recall_at_100_diff1 value: 35.2361 - type: nauc_recall_at_1000_max value: 62.38590000000001 - type: nauc_recall_at_1000_std value: 42.5442 - type: nauc_recall_at_1000_diff1 value: 37.1857 - type: nauc_precision_at_1_max value: 41.9268 - type: nauc_precision_at_1_std value: -6.0252 - type: nauc_precision_at_1_diff1 value: 55.4978 - type: nauc_precision_at_3_max value: 44.0934 - type: nauc_precision_at_3_std value: 2.4657 - type: nauc_precision_at_3_diff1 value: 33.468399999999995 - type: nauc_precision_at_5_max value: 41.8649 - type: nauc_precision_at_5_std value: 8.4992 - type: nauc_precision_at_5_diff1 value: 25.8132 - type: nauc_precision_at_10_max value: 36.8909 - type: nauc_precision_at_10_std value: 15.173200000000001 - type: nauc_precision_at_10_diff1 value: 16.0022 - type: nauc_precision_at_20_max value: 31.3774 - type: nauc_precision_at_20_std value: 21.304100000000002 - type: nauc_precision_at_20_diff1 value: 7.8406 - type: 
nauc_precision_at_100_max value: 23.828 - type: nauc_precision_at_100_std value: 27.3387 - type: nauc_precision_at_100_diff1 value: -0.5574 - type: nauc_precision_at_1000_max value: 14.3787 - type: nauc_precision_at_1000_std value: 27.8714 - type: nauc_precision_at_1000_diff1 value: -6.372400000000001 - type: nauc_mrr_at_1_max value: 41.9268 - type: nauc_mrr_at_1_std value: -6.0252 - type: nauc_mrr_at_1_diff1 value: 55.4978 - type: nauc_mrr_at_3_max value: 44.3228 - type: nauc_mrr_at_3_std value: -4.8039 - type: nauc_mrr_at_3_diff1 value: 52.6895 - type: nauc_mrr_at_5_max value: 45.0053 - type: nauc_mrr_at_5_std value: -3.5381000000000005 - type: nauc_mrr_at_5_diff1 value: 52.321 - type: nauc_mrr_at_10_max value: 44.9242 - type: nauc_mrr_at_10_std value: -3.2841 - type: nauc_mrr_at_10_diff1 value: 52.0518 - type: nauc_mrr_at_20_max value: 44.8189 - type: nauc_mrr_at_20_std value: -3.1717000000000004 - type: nauc_mrr_at_20_diff1 value: 52.0415 - type: nauc_mrr_at_100_max value: 44.8679 - type: nauc_mrr_at_100_std value: -3.1606 - type: nauc_mrr_at_100_diff1 value: 52.1083 - type: nauc_mrr_at_1000_max value: 44.864599999999996 - type: nauc_mrr_at_1000_std value: -3.167 - type: nauc_mrr_at_1000_diff1 value: 52.121399999999994 - type: main_score value: 56.369 - task: type: Retrieval dataset: name: MTEB CQADupstackGisRetrieval (default) type: mteb/cqadupstack-gis config: default split: test revision: 5003b3064772da1887988e05400cf3806fe491f2 metrics: - type: ndcg_at_1 value: 31.863999999999997 - type: ndcg_at_3 value: 38.537 - type: ndcg_at_5 value: 41.104 - type: ndcg_at_10 value: 43.503 - type: ndcg_at_20 value: 45.413 - type: ndcg_at_100 value: 48.291000000000004 - type: ndcg_at_1000 value: 50.26199999999999 - type: map_at_1 value: 29.37 - type: map_at_3 value: 35.824 - type: map_at_5 value: 37.408 - type: map_at_10 value: 38.452999999999996 - type: map_at_20 value: 39.004 - type: map_at_100 value: 39.421 - type: map_at_1000 value: 39.501 - type: recall_at_1 value: 29.37 - type: recall_at_3 value: 43.442 - type: recall_at_5 value: 49.551 - type: recall_at_10 value: 56.791000000000004 - type: recall_at_20 value: 63.93 - type: recall_at_100 value: 78.666 - type: recall_at_1000 value: 93.354 - type: precision_at_1 value: 31.863999999999997 - type: precision_at_3 value: 16.083 - type: precision_at_5 value: 11.254 - type: precision_at_10 value: 6.508 - type: precision_at_20 value: 3.712 - type: precision_at_100 value: 0.9390000000000001 - type: precision_at_1000 value: 0.11399999999999999 - type: mrr_at_1 value: 31.8644 - type: mrr_at_3 value: 38.5122 - type: mrr_at_5 value: 39.873799999999996 - type: mrr_at_10 value: 40.8308 - type: mrr_at_20 value: 41.3284 - type: mrr_at_100 value: 41.6819 - type: mrr_at_1000 value: 41.7416 - type: nauc_ndcg_at_1_max value: 33.7601 - type: nauc_ndcg_at_1_std value: -9.8717 - type: nauc_ndcg_at_1_diff1 value: 42.2537 - type: nauc_ndcg_at_3_max value: 34.409600000000005 - type: nauc_ndcg_at_3_std value: -10.6027 - type: nauc_ndcg_at_3_diff1 value: 40.0317 - type: nauc_ndcg_at_5_max value: 34.0482 - type: nauc_ndcg_at_5_std value: -9.0778 - type: nauc_ndcg_at_5_diff1 value: 39.421499999999995 - type: nauc_ndcg_at_10_max value: 34.5365 - type: nauc_ndcg_at_10_std value: -7.3511999999999995 - type: nauc_ndcg_at_10_diff1 value: 38.6886 - type: nauc_ndcg_at_20_max value: 35.335699999999996 - type: nauc_ndcg_at_20_std value: -5.9596 - type: nauc_ndcg_at_20_diff1 value: 38.6051 - type: nauc_ndcg_at_100_max value: 34.6961 - type: nauc_ndcg_at_100_std value: -6.5812 - 
type: nauc_ndcg_at_100_diff1 value: 37.8079 - type: nauc_ndcg_at_1000_max value: 34.3938 - type: nauc_ndcg_at_1000_std value: -6.9155 - type: nauc_ndcg_at_1000_diff1 value: 38.2247 - type: nauc_map_at_1_max value: 32.231500000000004 - type: nauc_map_at_1_std value: -11.4991 - type: nauc_map_at_1_diff1 value: 44.7044 - type: nauc_map_at_3_max value: 34.0411 - type: nauc_map_at_3_std value: -10.8111 - type: nauc_map_at_3_diff1 value: 41.6004 - type: nauc_map_at_5_max value: 33.9275 - type: nauc_map_at_5_std value: -9.9881 - type: nauc_map_at_5_diff1 value: 41.1704 - type: nauc_map_at_10_max value: 34.1806 - type: nauc_map_at_10_std value: -9.2606 - type: nauc_map_at_10_diff1 value: 40.9213 - type: nauc_map_at_20_max value: 34.474 - type: nauc_map_at_20_std value: -8.798599999999999 - type: nauc_map_at_20_diff1 value: 40.9088 - type: nauc_map_at_100_max value: 34.381699999999995 - type: nauc_map_at_100_std value: -8.869 - type: nauc_map_at_100_diff1 value: 40.7894 - type: nauc_map_at_1000_max value: 34.3718 - type: nauc_map_at_1000_std value: -8.8674 - type: nauc_map_at_1000_diff1 value: 40.801700000000004 - type: nauc_recall_at_1_max value: 32.231500000000004 - type: nauc_recall_at_1_std value: -11.4991 - type: nauc_recall_at_1_diff1 value: 44.7044 - type: nauc_recall_at_3_max value: 33.4997 - type: nauc_recall_at_3_std value: -10.793999999999999 - type: nauc_recall_at_3_diff1 value: 36.8971 - type: nauc_recall_at_5_max value: 33.217600000000004 - type: nauc_recall_at_5_std value: -7.4771 - type: nauc_recall_at_5_diff1 value: 35.7378 - type: nauc_recall_at_10_max value: 34.3881 - type: nauc_recall_at_10_std value: -1.9206 - type: nauc_recall_at_10_diff1 value: 33.024300000000004 - type: nauc_recall_at_20_max value: 37.1734 - type: nauc_recall_at_20_std value: 4.5757 - type: nauc_recall_at_20_diff1 value: 31.7119 - type: nauc_recall_at_100_max value: 33.3328 - type: nauc_recall_at_100_std value: 4.0235 - type: nauc_recall_at_100_diff1 value: 23.5836 - type: nauc_recall_at_1000_max value: 23.6203 - type: nauc_recall_at_1000_std value: 10.4212 - type: nauc_recall_at_1000_diff1 value: 16.5204 - type: nauc_precision_at_1_max value: 33.7601 - type: nauc_precision_at_1_std value: -9.8717 - type: nauc_precision_at_1_diff1 value: 42.2537 - type: nauc_precision_at_3_max value: 37.046099999999996 - type: nauc_precision_at_3_std value: -8.1696 - type: nauc_precision_at_3_diff1 value: 32.893699999999995 - type: nauc_precision_at_5_max value: 33.5411 - type: nauc_precision_at_5_std value: -3.8621000000000003 - type: nauc_precision_at_5_diff1 value: 28.4192 - type: nauc_precision_at_10_max value: 33.8177 - type: nauc_precision_at_10_std value: 1.4605 - type: nauc_precision_at_10_diff1 value: 23.8779 - type: nauc_precision_at_20_max value: 33.2362 - type: nauc_precision_at_20_std value: 6.8675 - type: nauc_precision_at_20_diff1 value: 19.12 - type: nauc_precision_at_100_max value: 22.0581 - type: nauc_precision_at_100_std value: 5.6537999999999995 - type: nauc_precision_at_100_diff1 value: 2.677 - type: nauc_precision_at_1000_max value: 6.4192 - type: nauc_precision_at_1000_std value: 5.2604999999999995 - type: nauc_precision_at_1000_diff1 value: -12.5191 - type: nauc_mrr_at_1_max value: 33.7601 - type: nauc_mrr_at_1_std value: -9.8717 - type: nauc_mrr_at_1_diff1 value: 42.2537 - type: nauc_mrr_at_3_max value: 34.590700000000005 - type: nauc_mrr_at_3_std value: -9.3063 - type: nauc_mrr_at_3_diff1 value: 39.157599999999995 - type: nauc_mrr_at_5_max value: 34.262 - type: nauc_mrr_at_5_std value: -8.6629 - 
type: nauc_mrr_at_5_diff1 value: 38.7425 - type: nauc_mrr_at_10_max value: 34.3456 - type: nauc_mrr_at_10_std value: -8.0433 - type: nauc_mrr_at_10_diff1 value: 38.474199999999996 - type: nauc_mrr_at_20_max value: 34.504400000000004 - type: nauc_mrr_at_20_std value: -7.7764 - type: nauc_mrr_at_20_diff1 value: 38.4646 - type: nauc_mrr_at_100_max value: 34.407700000000006 - type: nauc_mrr_at_100_std value: -7.8669 - type: nauc_mrr_at_100_diff1 value: 38.4062 - type: nauc_mrr_at_1000_max value: 34.400999999999996 - type: nauc_mrr_at_1000_std value: -7.8653 - type: nauc_mrr_at_1000_diff1 value: 38.4264 - type: main_score value: 43.503 - task: type: Retrieval dataset: name: MTEB CQADupstackMathematicaRetrieval (default) type: mteb/cqadupstack-mathematica config: default split: test revision: 90fceea13679c63fe563ded68f3b6f06e50061de metrics: - type: ndcg_at_1 value: 22.637 - type: ndcg_at_3 value: 26.865 - type: ndcg_at_5 value: 29.506 - type: ndcg_at_10 value: 32.024 - type: ndcg_at_20 value: 34.123999999999995 - type: ndcg_at_100 value: 38.013999999999996 - type: ndcg_at_1000 value: 40.681 - type: map_at_1 value: 18.354 - type: map_at_3 value: 23.777 - type: map_at_5 value: 25.380000000000003 - type: map_at_10 value: 26.588 - type: map_at_20 value: 27.227 - type: map_at_100 value: 27.851 - type: map_at_1000 value: 27.971 - type: recall_at_1 value: 18.354 - type: recall_at_3 value: 30.029 - type: recall_at_5 value: 36.716 - type: recall_at_10 value: 44.083 - type: recall_at_20 value: 51.653000000000006 - type: recall_at_100 value: 70.24000000000001 - type: recall_at_1000 value: 88.941 - type: precision_at_1 value: 22.637 - type: precision_at_3 value: 12.852 - type: precision_at_5 value: 9.652 - type: precision_at_10 value: 5.970000000000001 - type: precision_at_20 value: 3.557 - type: precision_at_100 value: 1.035 - type: precision_at_1000 value: 0.13899999999999998 - type: mrr_at_1 value: 22.6368 - type: mrr_at_3 value: 28.296 - type: mrr_at_5 value: 30.198999999999998 - type: mrr_at_10 value: 31.2411 - type: mrr_at_20 value: 31.773600000000002 - type: mrr_at_100 value: 32.230199999999996 - type: mrr_at_1000 value: 32.2949 - type: nauc_ndcg_at_1_max value: 31.0579 - type: nauc_ndcg_at_1_std value: -1.1154000000000002 - type: nauc_ndcg_at_1_diff1 value: 37.0188 - type: nauc_ndcg_at_3_max value: 30.6319 - type: nauc_ndcg_at_3_std value: 1.2079 - type: nauc_ndcg_at_3_diff1 value: 29.7055 - type: nauc_ndcg_at_5_max value: 29.2059 - type: nauc_ndcg_at_5_std value: 3.0105 - type: nauc_ndcg_at_5_diff1 value: 28.0947 - type: nauc_ndcg_at_10_max value: 29.2307 - type: nauc_ndcg_at_10_std value: 3.1515 - type: nauc_ndcg_at_10_diff1 value: 27.2115 - type: nauc_ndcg_at_20_max value: 29.1914 - type: nauc_ndcg_at_20_std value: 3.9833 - type: nauc_ndcg_at_20_diff1 value: 27.287899999999997 - type: nauc_ndcg_at_100_max value: 30.759999999999998 - type: nauc_ndcg_at_100_std value: 5.6163 - type: nauc_ndcg_at_100_diff1 value: 28.1445 - type: nauc_ndcg_at_1000_max value: 30.4012 - type: nauc_ndcg_at_1000_std value: 4.8586 - type: nauc_ndcg_at_1000_diff1 value: 27.7366 - type: nauc_map_at_1_max value: 26.9538 - type: nauc_map_at_1_std value: -0.9815 - type: nauc_map_at_1_diff1 value: 35.1964 - type: nauc_map_at_3_max value: 28.9516 - type: nauc_map_at_3_std value: 0.6373 - type: nauc_map_at_3_diff1 value: 30.476599999999998 - type: nauc_map_at_5_max value: 28.3735 - type: nauc_map_at_5_std value: 1.5893000000000002 - type: nauc_map_at_5_diff1 value: 29.4822 - type: nauc_map_at_10_max value: 28.4489 - type: 
nauc_map_at_10_std value: 1.7179 - type: nauc_map_at_10_diff1 value: 29.0721 - type: nauc_map_at_20_max value: 28.6443 - type: nauc_map_at_20_std value: 1.9567999999999999 - type: nauc_map_at_20_diff1 value: 29.2744 - type: nauc_map_at_100_max value: 28.9144 - type: nauc_map_at_100_std value: 2.2790999999999997 - type: nauc_map_at_100_diff1 value: 29.3889 - type: nauc_map_at_1000_max value: 28.8827 - type: nauc_map_at_1000_std value: 2.2127999999999997 - type: nauc_map_at_1000_diff1 value: 29.367700000000003 - type: nauc_recall_at_1_max value: 26.9538 - type: nauc_recall_at_1_std value: -0.9815 - type: nauc_recall_at_1_diff1 value: 35.1964 - type: nauc_recall_at_3_max value: 29.2823 - type: nauc_recall_at_3_std value: 2.2192 - type: nauc_recall_at_3_diff1 value: 25.174400000000002 - type: nauc_recall_at_5_max value: 26.098300000000002 - type: nauc_recall_at_5_std value: 5.870100000000001 - type: nauc_recall_at_5_diff1 value: 21.5717 - type: nauc_recall_at_10_max value: 26.3965 - type: nauc_recall_at_10_std value: 5.9524 - type: nauc_recall_at_10_diff1 value: 19.2576 - type: nauc_recall_at_20_max value: 25.014799999999997 - type: nauc_recall_at_20_std value: 8.889800000000001 - type: nauc_recall_at_20_diff1 value: 18.2048 - type: nauc_recall_at_100_max value: 32.664100000000005 - type: nauc_recall_at_100_std value: 20.66 - type: nauc_recall_at_100_diff1 value: 20.7167 - type: nauc_recall_at_1000_max value: 32.7425 - type: nauc_recall_at_1000_std value: 31.798 - type: nauc_recall_at_1000_diff1 value: 6.1744 - type: nauc_precision_at_1_max value: 31.0579 - type: nauc_precision_at_1_std value: -1.1154000000000002 - type: nauc_precision_at_1_diff1 value: 37.0188 - type: nauc_precision_at_3_max value: 34.0041 - type: nauc_precision_at_3_std value: 2.759 - type: nauc_precision_at_3_diff1 value: 26.0113 - type: nauc_precision_at_5_max value: 31.591599999999996 - type: nauc_precision_at_5_std value: 7.019499999999999 - type: nauc_precision_at_5_diff1 value: 22.5517 - type: nauc_precision_at_10_max value: 28.9779 - type: nauc_precision_at_10_std value: 6.0112 - type: nauc_precision_at_10_diff1 value: 18.4627 - type: nauc_precision_at_20_max value: 27.2677 - type: nauc_precision_at_20_std value: 7.9853 - type: nauc_precision_at_20_diff1 value: 17.6528 - type: nauc_precision_at_100_max value: 23.8248 - type: nauc_precision_at_100_std value: 9.9215 - type: nauc_precision_at_100_diff1 value: 13.5355 - type: nauc_precision_at_1000_max value: 9.9312 - type: nauc_precision_at_1000_std value: 1.8778 - type: nauc_precision_at_1000_diff1 value: 3.6692 - type: nauc_mrr_at_1_max value: 31.0579 - type: nauc_mrr_at_1_std value: -1.1154000000000002 - type: nauc_mrr_at_1_diff1 value: 37.0188 - type: nauc_mrr_at_3_max value: 32.265100000000004 - type: nauc_mrr_at_3_std value: 0.4738 - type: nauc_mrr_at_3_diff1 value: 31.6965 - type: nauc_mrr_at_5_max value: 31.610100000000003 - type: nauc_mrr_at_5_std value: 1.693 - type: nauc_mrr_at_5_diff1 value: 31.2068 - type: nauc_mrr_at_10_max value: 31.593500000000002 - type: nauc_mrr_at_10_std value: 1.6910999999999998 - type: nauc_mrr_at_10_diff1 value: 30.988300000000002 - type: nauc_mrr_at_20_max value: 31.4229 - type: nauc_mrr_at_20_std value: 1.9178000000000002 - type: nauc_mrr_at_20_diff1 value: 30.911 - type: nauc_mrr_at_100_max value: 31.510500000000004 - type: nauc_mrr_at_100_std value: 1.9404000000000001 - type: nauc_mrr_at_100_diff1 value: 30.928499999999996 - type: nauc_mrr_at_1000_max value: 31.499899999999997 - type: nauc_mrr_at_1000_std value: 
1.9026999999999998 - type: nauc_mrr_at_1000_diff1 value: 30.9234 - type: main_score value: 32.024 - task: type: Retrieval dataset: name: MTEB CQADupstackPhysicsRetrieval (default) type: mteb/cqadupstack-physics config: default split: test revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4 metrics: - type: ndcg_at_1 value: 36.477 - type: ndcg_at_3 value: 41.9 - type: ndcg_at_5 value: 44.352000000000004 - type: ndcg_at_10 value: 47.316 - type: ndcg_at_20 value: 49.262 - type: ndcg_at_100 value: 52.5 - type: ndcg_at_1000 value: 54.433 - type: map_at_1 value: 29.633 - type: map_at_3 value: 37.374 - type: map_at_5 value: 39.327 - type: map_at_10 value: 40.897 - type: map_at_20 value: 41.629 - type: map_at_100 value: 42.221 - type: map_at_1000 value: 42.337 - type: recall_at_1 value: 29.633 - type: recall_at_3 value: 45.141999999999996 - type: recall_at_5 value: 51.578 - type: recall_at_10 value: 60.465999999999994 - type: recall_at_20 value: 67.012 - type: recall_at_100 value: 82.174 - type: recall_at_1000 value: 94.65 - type: precision_at_1 value: 36.477 - type: precision_at_3 value: 20.308 - type: precision_at_5 value: 14.379 - type: precision_at_10 value: 8.816 - type: precision_at_20 value: 5.106 - type: precision_at_100 value: 1.3419999999999999 - type: precision_at_1000 value: 0.169 - type: mrr_at_1 value: 36.477399999999996 - type: mrr_at_3 value: 44.0648 - type: mrr_at_5 value: 45.4604 - type: mrr_at_10 value: 46.6132 - type: mrr_at_20 value: 47.0122 - type: mrr_at_100 value: 47.3432 - type: mrr_at_1000 value: 47.383900000000004 - type: nauc_ndcg_at_1_max value: 44.2532 - type: nauc_ndcg_at_1_std value: 0.27399999999999997 - type: nauc_ndcg_at_1_diff1 value: 56.0608 - type: nauc_ndcg_at_3_max value: 40.7243 - type: nauc_ndcg_at_3_std value: -3.0545 - type: nauc_ndcg_at_3_diff1 value: 48.4101 - type: nauc_ndcg_at_5_max value: 39.556999999999995 - type: nauc_ndcg_at_5_std value: -3.9035 - type: nauc_ndcg_at_5_diff1 value: 47.2832 - type: nauc_ndcg_at_10_max value: 39.6116 - type: nauc_ndcg_at_10_std value: -4.2111 - type: nauc_ndcg_at_10_diff1 value: 47.0266 - type: nauc_ndcg_at_20_max value: 40.1775 - type: nauc_ndcg_at_20_std value: -2.9367 - type: nauc_ndcg_at_20_diff1 value: 47.4448 - type: nauc_ndcg_at_100_max value: 41.9972 - type: nauc_ndcg_at_100_std value: 0.46740000000000004 - type: nauc_ndcg_at_100_diff1 value: 48.4355 - type: nauc_ndcg_at_1000_max value: 42.1182 - type: nauc_ndcg_at_1000_std value: 0.8456 - type: nauc_ndcg_at_1000_diff1 value: 48.1614 - type: nauc_map_at_1_max value: 37.5422 - type: nauc_map_at_1_std value: -4.2909999999999995 - type: nauc_map_at_1_diff1 value: 55.083800000000004 - type: nauc_map_at_3_max value: 39.0107 - type: nauc_map_at_3_std value: -4.3038 - type: nauc_map_at_3_diff1 value: 49.5355 - type: nauc_map_at_5_max value: 38.9933 - type: nauc_map_at_5_std value: -4.3489 - type: nauc_map_at_5_diff1 value: 48.9543 - type: nauc_map_at_10_max value: 39.2673 - type: nauc_map_at_10_std value: -4.1611 - type: nauc_map_at_10_diff1 value: 48.891400000000004 - type: nauc_map_at_20_max value: 39.533699999999996 - type: nauc_map_at_20_std value: -3.7303 - type: nauc_map_at_20_diff1 value: 49.001099999999994 - type: nauc_map_at_100_max value: 39.9274 - type: nauc_map_at_100_std value: -3.0797000000000003 - type: nauc_map_at_100_diff1 value: 49.1862 - type: nauc_map_at_1000_max value: 39.957100000000004 - type: nauc_map_at_1000_std value: -3.0084 - type: nauc_map_at_1000_diff1 value: 49.1595 - type: nauc_recall_at_1_max value: 37.5422 - type: 
nauc_recall_at_1_std value: -4.2909999999999995 - type: nauc_recall_at_1_diff1 value: 55.083800000000004 - type: nauc_recall_at_3_max value: 35.5355 - type: nauc_recall_at_3_std value: -7.140000000000001 - type: nauc_recall_at_3_diff1 value: 42.4278 - type: nauc_recall_at_5_max value: 33.9238 - type: nauc_recall_at_5_std value: -7.9919 - type: nauc_recall_at_5_diff1 value: 39.1808 - type: nauc_recall_at_10_max value: 33.4493 - type: nauc_recall_at_10_std value: -9.1861 - type: nauc_recall_at_10_diff1 value: 36.8475 - type: nauc_recall_at_20_max value: 34.9121 - type: nauc_recall_at_20_std value: -4.8026 - type: nauc_recall_at_20_diff1 value: 37.9247 - type: nauc_recall_at_100_max value: 44.1541 - type: nauc_recall_at_100_std value: 18.1134 - type: nauc_recall_at_100_diff1 value: 41.6633 - type: nauc_recall_at_1000_max value: 56.3385 - type: nauc_recall_at_1000_std value: 53.257299999999994 - type: nauc_recall_at_1000_diff1 value: 36.1232 - type: nauc_precision_at_1_max value: 44.2532 - type: nauc_precision_at_1_std value: 0.27399999999999997 - type: nauc_precision_at_1_diff1 value: 56.0608 - type: nauc_precision_at_3_max value: 41.179 - type: nauc_precision_at_3_std value: 5.588 - type: nauc_precision_at_3_diff1 value: 32.8574 - type: nauc_precision_at_5_max value: 34.808699999999995 - type: nauc_precision_at_5_std value: 6.261 - type: nauc_precision_at_5_diff1 value: 23.993100000000002 - type: nauc_precision_at_10_max value: 30.966500000000003 - type: nauc_precision_at_10_std value: 9.9887 - type: nauc_precision_at_10_diff1 value: 16.8352 - type: nauc_precision_at_20_max value: 26.977600000000002 - type: nauc_precision_at_20_std value: 14.0043 - type: nauc_precision_at_20_diff1 value: 10.9725 - type: nauc_precision_at_100_max value: 20.0541 - type: nauc_precision_at_100_std value: 24.0399 - type: nauc_precision_at_100_diff1 value: -0.46509999999999996 - type: nauc_precision_at_1000_max value: 8.1382 - type: nauc_precision_at_1000_std value: 21.7963 - type: nauc_precision_at_1000_diff1 value: -13.7289 - type: nauc_mrr_at_1_max value: 44.2532 - type: nauc_mrr_at_1_std value: 0.27399999999999997 - type: nauc_mrr_at_1_diff1 value: 56.0608 - type: nauc_mrr_at_3_max value: 43.0277 - type: nauc_mrr_at_3_std value: -0.8843 - type: nauc_mrr_at_3_diff1 value: 51.112899999999996 - type: nauc_mrr_at_5_max value: 42.852000000000004 - type: nauc_mrr_at_5_std value: -0.8572 - type: nauc_mrr_at_5_diff1 value: 50.4937 - type: nauc_mrr_at_10_max value: 43.0093 - type: nauc_mrr_at_10_std value: -0.8631 - type: nauc_mrr_at_10_diff1 value: 50.41909999999999 - type: nauc_mrr_at_20_max value: 43.0484 - type: nauc_mrr_at_20_std value: -0.6054999999999999 - type: nauc_mrr_at_20_diff1 value: 50.527100000000004 - type: nauc_mrr_at_100_max value: 43.175200000000004 - type: nauc_mrr_at_100_std value: -0.3019 - type: nauc_mrr_at_100_diff1 value: 50.5962 - type: nauc_mrr_at_1000_max value: 43.173899999999996 - type: nauc_mrr_at_1000_std value: -0.3115 - type: nauc_mrr_at_1000_diff1 value: 50.6012 - type: main_score value: 47.316 - task: type: Retrieval dataset: name: MTEB CQADupstackProgrammersRetrieval (default) type: mteb/cqadupstack-programmers config: default split: test revision: 6184bc1440d2dbc7612be22b50686b8826d22b32 metrics: - type: ndcg_at_1 value: 33.676 - type: ndcg_at_3 value: 38.7 - type: ndcg_at_5 value: 41.032999999999994 - type: ndcg_at_10 value: 43.580999999999996 - type: ndcg_at_20 value: 45.992 - type: ndcg_at_100 value: 49.192 - type: ndcg_at_1000 value: 51.473 - type: map_at_1 value: 
27.389999999999997 - type: map_at_3 value: 34.660999999999994 - type: map_at_5 value: 36.38 - type: map_at_10 value: 37.768 - type: map_at_20 value: 38.534 - type: map_at_100 value: 39.091 - type: map_at_1000 value: 39.2 - type: recall_at_1 value: 27.389999999999997 - type: recall_at_3 value: 41.876000000000005 - type: recall_at_5 value: 47.961999999999996 - type: recall_at_10 value: 55.445 - type: recall_at_20 value: 64.143 - type: recall_at_100 value: 79.327 - type: recall_at_1000 value: 94.64200000000001 - type: precision_at_1 value: 33.676 - type: precision_at_3 value: 18.455 - type: precision_at_5 value: 13.128 - type: precision_at_10 value: 7.888000000000001 - type: precision_at_20 value: 4.697 - type: precision_at_100 value: 1.234 - type: precision_at_1000 value: 0.161 - type: mrr_at_1 value: 33.6758 - type: mrr_at_3 value: 40.7725 - type: mrr_at_5 value: 42.267900000000004 - type: mrr_at_10 value: 43.1813 - type: mrr_at_20 value: 43.769200000000005 - type: mrr_at_100 value: 44.0965 - type: mrr_at_1000 value: 44.149899999999995 - type: nauc_ndcg_at_1_max value: 47.957699999999996 - type: nauc_ndcg_at_1_std value: 11.211 - type: nauc_ndcg_at_1_diff1 value: 50.975899999999996 - type: nauc_ndcg_at_3_max value: 46.7077 - type: nauc_ndcg_at_3_std value: 11.8166 - type: nauc_ndcg_at_3_diff1 value: 44.183699999999995 - type: nauc_ndcg_at_5_max value: 46.5691 - type: nauc_ndcg_at_5_std value: 12.3224 - type: nauc_ndcg_at_5_diff1 value: 43.2912 - type: nauc_ndcg_at_10_max value: 45.989200000000004 - type: nauc_ndcg_at_10_std value: 13.4501 - type: nauc_ndcg_at_10_diff1 value: 41.3206 - type: nauc_ndcg_at_20_max value: 46.400400000000005 - type: nauc_ndcg_at_20_std value: 15.004000000000001 - type: nauc_ndcg_at_20_diff1 value: 40.8932 - type: nauc_ndcg_at_100_max value: 47.3346 - type: nauc_ndcg_at_100_std value: 16.5132 - type: nauc_ndcg_at_100_diff1 value: 42.126599999999996 - type: nauc_ndcg_at_1000_max value: 47.5217 - type: nauc_ndcg_at_1000_std value: 15.4551 - type: nauc_ndcg_at_1000_diff1 value: 42.5563 - type: nauc_map_at_1_max value: 42.549 - type: nauc_map_at_1_std value: 4.9833 - type: nauc_map_at_1_diff1 value: 52.14339999999999 - type: nauc_map_at_3_max value: 44.8114 - type: nauc_map_at_3_std value: 9.440800000000001 - type: nauc_map_at_3_diff1 value: 46.1197 - type: nauc_map_at_5_max value: 45.3059 - type: nauc_map_at_5_std value: 10.286900000000001 - type: nauc_map_at_5_diff1 value: 45.6263 - type: nauc_map_at_10_max value: 45.3517 - type: nauc_map_at_10_std value: 11.1304 - type: nauc_map_at_10_diff1 value: 44.6502 - type: nauc_map_at_20_max value: 45.5319 - type: nauc_map_at_20_std value: 11.5773 - type: nauc_map_at_20_diff1 value: 44.5681 - type: nauc_map_at_100_max value: 45.8019 - type: nauc_map_at_100_std value: 11.9772 - type: nauc_map_at_100_diff1 value: 44.7825 - type: nauc_map_at_1000_max value: 45.8134 - type: nauc_map_at_1000_std value: 11.9461 - type: nauc_map_at_1000_diff1 value: 44.7905 - type: nauc_recall_at_1_max value: 42.549 - type: nauc_recall_at_1_std value: 4.9833 - type: nauc_recall_at_1_diff1 value: 52.14339999999999 - type: nauc_recall_at_3_max value: 44.0409 - type: nauc_recall_at_3_std value: 11.9146 - type: nauc_recall_at_3_diff1 value: 38.6436 - type: nauc_recall_at_5_max value: 43.3961 - type: nauc_recall_at_5_std value: 12.6675 - type: nauc_recall_at_5_diff1 value: 35.5553 - type: nauc_recall_at_10_max value: 41.4966 - type: nauc_recall_at_10_std value: 16.1644 - type: nauc_recall_at_10_diff1 value: 29.2835 - type: nauc_recall_at_20_max value: 
41.474 - type: nauc_recall_at_20_std value: 22.5684 - type: nauc_recall_at_20_diff1 value: 25.7308 - type: nauc_recall_at_100_max value: 45.1253 - type: nauc_recall_at_100_std value: 36.248799999999996 - type: nauc_recall_at_100_diff1 value: 28.799500000000002 - type: nauc_recall_at_1000_max value: 54.1747 - type: nauc_recall_at_1000_std value: 47.1501 - type: nauc_recall_at_1000_diff1 value: 23.198900000000002 - type: nauc_precision_at_1_max value: 47.957699999999996 - type: nauc_precision_at_1_std value: 11.211 - type: nauc_precision_at_1_diff1 value: 50.975899999999996 - type: nauc_precision_at_3_max value: 46.6181 - type: nauc_precision_at_3_std value: 19.475 - type: nauc_precision_at_3_diff1 value: 30.6784 - type: nauc_precision_at_5_max value: 43.5114 - type: nauc_precision_at_5_std value: 22.1293 - type: nauc_precision_at_5_diff1 value: 24.6525 - type: nauc_precision_at_10_max value: 37.47 - type: nauc_precision_at_10_std value: 23.8068 - type: nauc_precision_at_10_diff1 value: 14.9368 - type: nauc_precision_at_20_max value: 33.4529 - type: nauc_precision_at_20_std value: 25.4979 - type: nauc_precision_at_20_diff1 value: 9.4501 - type: nauc_precision_at_100_max value: 23.7406 - type: nauc_precision_at_100_std value: 22.8583 - type: nauc_precision_at_100_diff1 value: 3.6348 - type: nauc_precision_at_1000_max value: 4.5396 - type: nauc_precision_at_1000_std value: 6.0796 - type: nauc_precision_at_1000_diff1 value: -7.2498000000000005 - type: nauc_mrr_at_1_max value: 47.957699999999996 - type: nauc_mrr_at_1_std value: 11.211 - type: nauc_mrr_at_1_diff1 value: 50.975899999999996 - type: nauc_mrr_at_3_max value: 48.6226 - type: nauc_mrr_at_3_std value: 13.600000000000001 - type: nauc_mrr_at_3_diff1 value: 45.2881 - type: nauc_mrr_at_5_max value: 48.402499999999996 - type: nauc_mrr_at_5_std value: 13.616 - type: nauc_mrr_at_5_diff1 value: 44.7074 - type: nauc_mrr_at_10_max value: 48.0556 - type: nauc_mrr_at_10_std value: 13.7803 - type: nauc_mrr_at_10_diff1 value: 44.0852 - type: nauc_mrr_at_20_max value: 48.173500000000004 - type: nauc_mrr_at_20_std value: 14.1617 - type: nauc_mrr_at_20_diff1 value: 44.0396 - type: nauc_mrr_at_100_max value: 48.1841 - type: nauc_mrr_at_100_std value: 14.1827 - type: nauc_mrr_at_100_diff1 value: 44.210100000000004 - type: nauc_mrr_at_1000_max value: 48.1875 - type: nauc_mrr_at_1000_std value: 14.161000000000001 - type: nauc_mrr_at_1000_diff1 value: 44.222 - type: main_score value: 43.580999999999996 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval (default) type: CQADupstackRetrieval_is_a_combined_dataset config: default split: test revision: 160c094312a0e1facb97e55eeddb698c0abe3571 metrics: - type: ndcg_at_1 value: 32.588499999999996 - type: ndcg_at_3 value: 37.949083333333334 - type: ndcg_at_5 value: 40.258833333333335 - type: ndcg_at_10 value: 42.74341666666667 - type: ndcg_at_20 value: 44.784 - type: ndcg_at_100 value: 47.903416666666665 - type: ndcg_at_1000 value: 50.067416666666674 - type: map_at_1 value: 27.52808333333333 - type: map_at_3 value: 34.321999999999996 - type: map_at_5 value: 35.96091666666666 - type: map_at_10 value: 37.22708333333333 - type: map_at_20 value: 37.914833333333334 - type: map_at_100 value: 38.462166666666675 - type: map_at_1000 value: 38.57725 - type: recall_at_1 value: 27.52808333333333 - type: recall_at_3 value: 41.30075 - type: recall_at_5 value: 47.26408333333334 - type: recall_at_10 value: 54.663833333333336 - type: recall_at_20 value: 62.11658333333333 - type: recall_at_100 value: 77.176 - type: 
recall_at_1000 value: 92.03791666666666 - type: precision_at_1 value: 32.588499999999996 - type: precision_at_3 value: 17.485 - type: precision_at_5 value: 12.427666666666669 - type: precision_at_10 value: 7.493333333333334 - type: precision_at_20 value: 4.413499999999999 - type: precision_at_100 value: 1.18675 - type: precision_at_1000 value: 0.15691666666666665 - type: mrr_at_1 value: 32.58871666666667 - type: mrr_at_3 value: 39.09032499999999 - type: mrr_at_5 value: 40.533125 - type: mrr_at_10 value: 41.51483333333333 - type: mrr_at_20 value: 42.01036666666667 - type: mrr_at_100 value: 42.35724166666667 - type: mrr_at_1000 value: 42.41010833333333 - type: nauc_ndcg_at_1_max value: 41.86760833333334 - type: nauc_ndcg_at_1_std value: -0.022441666666666443 - type: nauc_ndcg_at_1_diff1 value: 48.604266666666675 - type: nauc_ndcg_at_3_max value: 40.649825 - type: nauc_ndcg_at_3_std value: 0.9594416666666666 - type: nauc_ndcg_at_3_diff1 value: 42.754375 - type: nauc_ndcg_at_5_max value: 40.71646666666666 - type: nauc_ndcg_at_5_std value: 1.8118249999999998 - type: nauc_ndcg_at_5_diff1 value: 42.09031666666666 - type: nauc_ndcg_at_10_max value: 40.616033333333334 - type: nauc_ndcg_at_10_std value: 2.621475 - type: nauc_ndcg_at_10_diff1 value: 41.56405833333333 - type: nauc_ndcg_at_20_max value: 41.00335 - type: nauc_ndcg_at_20_std value: 3.5835 - type: nauc_ndcg_at_20_diff1 value: 41.526025 - type: nauc_ndcg_at_100_max value: 41.626575 - type: nauc_ndcg_at_100_std value: 4.921058333333334 - type: nauc_ndcg_at_100_diff1 value: 41.785700000000006 - type: nauc_ndcg_at_1000_max value: 41.623041666666666 - type: nauc_ndcg_at_1000_std value: 4.743416666666667 - type: nauc_ndcg_at_1000_diff1 value: 41.930049999999994 - type: nauc_map_at_1_max value: 37.757374999999996 - type: nauc_map_at_1_std value: -2.7256583333333335 - type: nauc_map_at_1_diff1 value: 49.68454166666667 - type: nauc_map_at_3_max value: 39.41603333333333 - type: nauc_map_at_3_std value: -0.7485333333333334 - type: nauc_map_at_3_diff1 value: 44.64258333333333 - type: nauc_map_at_5_max value: 39.84875833333333 - type: nauc_map_at_5_std value: 0.010733333333333428 - type: nauc_map_at_5_diff1 value: 44.133975 - type: nauc_map_at_10_max value: 40.05009166666666 - type: nauc_map_at_10_std value: 0.6503083333333333 - type: nauc_map_at_10_diff1 value: 43.826724999999996 - type: nauc_map_at_20_max value: 40.287733333333335 - type: nauc_map_at_20_std value: 1.0432333333333332 - type: nauc_map_at_20_diff1 value: 43.784241666666674 - type: nauc_map_at_100_max value: 40.44630833333334 - type: nauc_map_at_100_std value: 1.3809583333333333 - type: nauc_map_at_100_diff1 value: 43.81610833333333 - type: nauc_map_at_1000_max value: 40.45624166666667 - type: nauc_map_at_1000_std value: 1.4088416666666665 - type: nauc_map_at_1000_diff1 value: 43.81260833333333 - type: nauc_recall_at_1_max value: 37.757374999999996 - type: nauc_recall_at_1_std value: -2.7256583333333335 - type: nauc_recall_at_1_diff1 value: 49.68454166666667 - type: nauc_recall_at_3_max value: 37.99286666666667 - type: nauc_recall_at_3_std value: 0.5074666666666666 - type: nauc_recall_at_3_diff1 value: 38.458816666666664 - type: nauc_recall_at_5_max value: 38.23744166666667 - type: nauc_recall_at_5_std value: 2.8538000000000006 - type: nauc_recall_at_5_diff1 value: 36.16175833333334 - type: nauc_recall_at_10_max value: 37.54170833333333 - type: nauc_recall_at_10_std value: 5.354441666666667 - type: nauc_recall_at_10_diff1 value: 33.80731666666667 - type: nauc_recall_at_20_max value: 
38.071758333333335 - type: nauc_recall_at_20_std value: 9.4403 - type: nauc_recall_at_20_diff1 value: 32.409758333333336 - type: nauc_recall_at_100_max value: 41.127158333333334 - type: nauc_recall_at_100_std value: 20.718875000000004 - type: nauc_recall_at_100_diff1 value: 30.971016666666664 - type: nauc_recall_at_1000_max value: 44.978608333333334 - type: nauc_recall_at_1000_std value: 39.36581666666667 - type: nauc_recall_at_1000_diff1 value: 27.076241666666668 - type: nauc_precision_at_1_max value: 41.86760833333334 - type: nauc_precision_at_1_std value: -0.022441666666666443 - type: nauc_precision_at_1_diff1 value: 48.604266666666675 - type: nauc_precision_at_3_max value: 40.53820000000001 - type: nauc_precision_at_3_std value: 6.682866666666667 - type: nauc_precision_at_3_diff1 value: 30.627458333333337 - type: nauc_precision_at_5_max value: 38.085708333333336 - type: nauc_precision_at_5_std value: 10.236816666666666 - type: nauc_precision_at_5_diff1 value: 24.589866666666666 - type: nauc_precision_at_10_max value: 33.795766666666665 - type: nauc_precision_at_10_std value: 13.644358333333335 - type: nauc_precision_at_10_diff1 value: 17.663875 - type: nauc_precision_at_20_max value: 30.67170833333333 - type: nauc_precision_at_20_std value: 16.899591666666666 - type: nauc_precision_at_20_diff1 value: 12.398666666666665 - type: nauc_precision_at_100_max value: 21.46699166666666 - type: nauc_precision_at_100_std value: 19.683266666666665 - type: nauc_precision_at_100_diff1 value: 2.3721666666666668 - type: nauc_precision_at_1000_max value: 6.773875 - type: nauc_precision_at_1000_std value: 13.712933333333336 - type: nauc_precision_at_1000_diff1 value: -9.302758333333333 - type: nauc_mrr_at_1_max value: 41.86760833333334 - type: nauc_mrr_at_1_std value: -0.022441666666666443 - type: nauc_mrr_at_1_diff1 value: 48.604266666666675 - type: nauc_mrr_at_3_max value: 42.065525 - type: nauc_mrr_at_3_std value: 1.6751166666666664 - type: nauc_mrr_at_3_diff1 value: 43.90220833333333 - type: nauc_mrr_at_5_max value: 42.07275833333333 - type: nauc_mrr_at_5_std value: 2.3014749999999995 - type: nauc_mrr_at_5_diff1 value: 43.440275 - type: nauc_mrr_at_10_max value: 41.955425000000005 - type: nauc_mrr_at_10_std value: 2.499491666666667 - type: nauc_mrr_at_10_diff1 value: 43.23685833333333 - type: nauc_mrr_at_20_max value: 41.98479166666666 - type: nauc_mrr_at_20_std value: 2.6983083333333333 - type: nauc_mrr_at_20_diff1 value: 43.24806666666667 - type: nauc_mrr_at_100_max value: 42.01090833333334 - type: nauc_mrr_at_100_std value: 2.7583083333333334 - type: nauc_mrr_at_100_diff1 value: 43.28899166666667 - type: nauc_mrr_at_1000_max value: 42.010841666666664 - type: nauc_mrr_at_1000_std value: 2.750433333333333 - type: nauc_mrr_at_1000_diff1 value: 43.299625 - type: main_score value: 42.74341666666667 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval (default) type: CQADupstackRetrieval_is_a_combined_dataset config: default split: test revision: CQADupstackRetrieval_is_a_combined_dataset metrics: - type: main_score value: 42.743416666666675 - type: ndcg_at_10 value: 42.743416666666675 - task: type: Retrieval dataset: name: MTEB CQADupstackStatsRetrieval (default) type: mteb/cqadupstack-stats config: default split: test revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a metrics: - type: ndcg_at_1 value: 27.607 - type: ndcg_at_3 value: 32.665 - type: ndcg_at_5 value: 34.876000000000005 - type: ndcg_at_10 value: 36.796 - type: ndcg_at_20 value: 38.405 - type: ndcg_at_100 value: 41.612 - type: 
ndcg_at_1000 value: 43.869 - type: map_at_1 value: 24.748 - type: map_at_3 value: 30.192999999999998 - type: map_at_5 value: 31.563999999999997 - type: map_at_10 value: 32.424 - type: map_at_20 value: 32.905 - type: map_at_100 value: 33.385 - type: map_at_1000 value: 33.476 - type: recall_at_1 value: 24.748 - type: recall_at_3 value: 36.14 - type: recall_at_5 value: 41.617 - type: recall_at_10 value: 47.49 - type: recall_at_20 value: 53.413 - type: recall_at_100 value: 69.461 - type: recall_at_1000 value: 86.014 - type: precision_at_1 value: 27.607 - type: precision_at_3 value: 13.957 - type: precision_at_5 value: 9.847 - type: precision_at_10 value: 5.782 - type: precision_at_20 value: 3.3360000000000003 - type: precision_at_100 value: 0.906 - type: precision_at_1000 value: 0.11800000000000001 - type: mrr_at_1 value: 27.6074 - type: mrr_at_3 value: 32.9499 - type: mrr_at_5 value: 34.2229 - type: mrr_at_10 value: 35.0668 - type: mrr_at_20 value: 35.4859 - type: mrr_at_100 value: 35.8948 - type: mrr_at_1000 value: 35.9562 - type: nauc_ndcg_at_1_max value: 49.1944 - type: nauc_ndcg_at_1_std value: 11.7093 - type: nauc_ndcg_at_1_diff1 value: 56.8806 - type: nauc_ndcg_at_3_max value: 46.7361 - type: nauc_ndcg_at_3_std value: 13.4354 - type: nauc_ndcg_at_3_diff1 value: 49.7927 - type: nauc_ndcg_at_5_max value: 47.280899999999995 - type: nauc_ndcg_at_5_std value: 14.5061 - type: nauc_ndcg_at_5_diff1 value: 48.9168 - type: nauc_ndcg_at_10_max value: 47.5137 - type: nauc_ndcg_at_10_std value: 15.4698 - type: nauc_ndcg_at_10_diff1 value: 48.4279 - type: nauc_ndcg_at_20_max value: 47.9904 - type: nauc_ndcg_at_20_std value: 15.7135 - type: nauc_ndcg_at_20_diff1 value: 48.4332 - type: nauc_ndcg_at_100_max value: 48.2942 - type: nauc_ndcg_at_100_std value: 17.502100000000002 - type: nauc_ndcg_at_100_diff1 value: 48.6035 - type: nauc_ndcg_at_1000_max value: 48.0957 - type: nauc_ndcg_at_1000_std value: 17.6368 - type: nauc_ndcg_at_1000_diff1 value: 48.7597 - type: nauc_map_at_1_max value: 45.6445 - type: nauc_map_at_1_std value: 6.9397 - type: nauc_map_at_1_diff1 value: 58.6992 - type: nauc_map_at_3_max value: 45.8449 - type: nauc_map_at_3_std value: 11.036200000000001 - type: nauc_map_at_3_diff1 value: 51.906 - type: nauc_map_at_5_max value: 46.3198 - type: nauc_map_at_5_std value: 11.921 - type: nauc_map_at_5_diff1 value: 51.2763 - type: nauc_map_at_10_max value: 46.5425 - type: nauc_map_at_10_std value: 12.5743 - type: nauc_map_at_10_diff1 value: 50.9536 - type: nauc_map_at_20_max value: 46.726 - type: nauc_map_at_20_std value: 12.6497 - type: nauc_map_at_20_diff1 value: 50.99510000000001 - type: nauc_map_at_100_max value: 46.7746 - type: nauc_map_at_100_std value: 12.881200000000002 - type: nauc_map_at_100_diff1 value: 51.011399999999995 - type: nauc_map_at_1000_max value: 46.785900000000005 - type: nauc_map_at_1000_std value: 12.898000000000001 - type: nauc_map_at_1000_diff1 value: 51.01480000000001 - type: nauc_recall_at_1_max value: 45.6445 - type: nauc_recall_at_1_std value: 6.9397 - type: nauc_recall_at_1_diff1 value: 58.6992 - type: nauc_recall_at_3_max value: 45.0182 - type: nauc_recall_at_3_std value: 14.2648 - type: nauc_recall_at_3_diff1 value: 45.3428 - type: nauc_recall_at_5_max value: 46.2258 - type: nauc_recall_at_5_std value: 17.2103 - type: nauc_recall_at_5_diff1 value: 42.5614 - type: nauc_recall_at_10_max value: 46.251799999999996 - type: nauc_recall_at_10_std value: 19.8669 - type: nauc_recall_at_10_diff1 value: 40.415 - type: nauc_recall_at_20_max value: 46.7318 - type: 
nauc_recall_at_20_std value: 20.3996 - type: nauc_recall_at_20_diff1 value: 39.0112 - type: nauc_recall_at_100_max value: 48.3756 - type: nauc_recall_at_100_std value: 33.558 - type: nauc_recall_at_100_diff1 value: 37.584 - type: nauc_recall_at_1000_max value: 46.1278 - type: nauc_recall_at_1000_std value: 50.2506 - type: nauc_recall_at_1000_diff1 value: 33.7694 - type: nauc_precision_at_1_max value: 49.1944 - type: nauc_precision_at_1_std value: 11.7093 - type: nauc_precision_at_1_diff1 value: 56.8806 - type: nauc_precision_at_3_max value: 49.9406 - type: nauc_precision_at_3_std value: 22.883200000000002 - type: nauc_precision_at_3_diff1 value: 40.5974 - type: nauc_precision_at_5_max value: 48.4187 - type: nauc_precision_at_5_std value: 25.9129 - type: nauc_precision_at_5_diff1 value: 34.863 - type: nauc_precision_at_10_max value: 46.734700000000004 - type: nauc_precision_at_10_std value: 28.5765 - type: nauc_precision_at_10_diff1 value: 30.071599999999997 - type: nauc_precision_at_20_max value: 45.2343 - type: nauc_precision_at_20_std value: 27.4324 - type: nauc_precision_at_20_diff1 value: 26.888299999999997 - type: nauc_precision_at_100_max value: 33.7511 - type: nauc_precision_at_100_std value: 30.084300000000002 - type: nauc_precision_at_100_diff1 value: 14.877099999999999 - type: nauc_precision_at_1000_max value: 15.059000000000001 - type: nauc_precision_at_1000_std value: 21.4471 - type: nauc_precision_at_1000_diff1 value: -1.2862 - type: nauc_mrr_at_1_max value: 49.1944 - type: nauc_mrr_at_1_std value: 11.7093 - type: nauc_mrr_at_1_diff1 value: 56.8806 - type: nauc_mrr_at_3_max value: 48.8173 - type: nauc_mrr_at_3_std value: 14.7023 - type: nauc_mrr_at_3_diff1 value: 50.9845 - type: nauc_mrr_at_5_max value: 49.0933 - type: nauc_mrr_at_5_std value: 15.5443 - type: nauc_mrr_at_5_diff1 value: 50.403299999999994 - type: nauc_mrr_at_10_max value: 49.058 - type: nauc_mrr_at_10_std value: 15.6592 - type: nauc_mrr_at_10_diff1 value: 50.3304 - type: nauc_mrr_at_20_max value: 49.104 - type: nauc_mrr_at_20_std value: 15.7446 - type: nauc_mrr_at_20_diff1 value: 50.2689 - type: nauc_mrr_at_100_max value: 49.071999999999996 - type: nauc_mrr_at_100_std value: 15.8584 - type: nauc_mrr_at_100_diff1 value: 50.3045 - type: nauc_mrr_at_1000_max value: 49.061 - type: nauc_mrr_at_1000_std value: 15.856700000000002 - type: nauc_mrr_at_1000_diff1 value: 50.3081 - type: main_score value: 36.796 - task: type: Retrieval dataset: name: MTEB CQADupstackTexRetrieval (default) type: mteb/cqadupstack-tex config: default split: test revision: 46989137a86843e03a6195de44b09deda022eec7 metrics: - type: ndcg_at_1 value: 23.159 - type: ndcg_at_3 value: 27.401999999999997 - type: ndcg_at_5 value: 29.354000000000003 - type: ndcg_at_10 value: 31.775 - type: ndcg_at_20 value: 33.743 - type: ndcg_at_100 value: 37.125 - type: ndcg_at_1000 value: 39.956 - type: map_at_1 value: 18.997 - type: map_at_3 value: 24.351 - type: map_at_5 value: 25.724999999999998 - type: map_at_10 value: 26.873 - type: map_at_20 value: 27.479 - type: map_at_100 value: 28.008 - type: map_at_1000 value: 28.133999999999997 - type: recall_at_1 value: 18.997 - type: recall_at_3 value: 30.14 - type: recall_at_5 value: 35.225 - type: recall_at_10 value: 42.447 - type: recall_at_20 value: 49.769000000000005 - type: recall_at_100 value: 66.39500000000001 - type: recall_at_1000 value: 86.434 - type: precision_at_1 value: 23.159 - type: precision_at_3 value: 12.995999999999999 - type: precision_at_5 value: 9.381 - type: precision_at_10 value: 5.778 - type: 
precision_at_20 value: 3.467 - type: precision_at_100 value: 0.9900000000000001 - type: precision_at_1000 value: 0.14200000000000002 - type: mrr_at_1 value: 23.159 - type: mrr_at_3 value: 28.676299999999998 - type: mrr_at_5 value: 29.9082 - type: mrr_at_10 value: 30.9286 - type: mrr_at_20 value: 31.4303 - type: mrr_at_100 value: 31.845000000000002 - type: mrr_at_1000 value: 31.9176 - type: nauc_ndcg_at_1_max value: 32.959500000000006 - type: nauc_ndcg_at_1_std value: -2.0082 - type: nauc_ndcg_at_1_diff1 value: 41.801500000000004 - type: nauc_ndcg_at_3_max value: 32.8362 - type: nauc_ndcg_at_3_std value: -0.9611 - type: nauc_ndcg_at_3_diff1 value: 36.248200000000004 - type: nauc_ndcg_at_5_max value: 32.650800000000004 - type: nauc_ndcg_at_5_std value: 0.13879999999999998 - type: nauc_ndcg_at_5_diff1 value: 35.2211 - type: nauc_ndcg_at_10_max value: 32.6256 - type: nauc_ndcg_at_10_std value: 1.0654000000000001 - type: nauc_ndcg_at_10_diff1 value: 34.6558 - type: nauc_ndcg_at_20_max value: 33.0706 - type: nauc_ndcg_at_20_std value: 2.2485 - type: nauc_ndcg_at_20_diff1 value: 34.5314 - type: nauc_ndcg_at_100_max value: 33.3131 - type: nauc_ndcg_at_100_std value: 3.4467 - type: nauc_ndcg_at_100_diff1 value: 34.4791 - type: nauc_ndcg_at_1000_max value: 33.644400000000005 - type: nauc_ndcg_at_1000_std value: 3.6159999999999997 - type: nauc_ndcg_at_1000_diff1 value: 34.9717 - type: nauc_map_at_1_max value: 30.2696 - type: nauc_map_at_1_std value: -3.3264 - type: nauc_map_at_1_diff1 value: 42.0066 - type: nauc_map_at_3_max value: 31.455899999999996 - type: nauc_map_at_3_std value: -1.8429999999999997 - type: nauc_map_at_3_diff1 value: 37.4893 - type: nauc_map_at_5_max value: 31.7755 - type: nauc_map_at_5_std value: -1.1461999999999999 - type: nauc_map_at_5_diff1 value: 36.8624 - type: nauc_map_at_10_max value: 31.9842 - type: nauc_map_at_10_std value: -0.6542 - type: nauc_map_at_10_diff1 value: 36.5911 - type: nauc_map_at_20_max value: 32.1745 - type: nauc_map_at_20_std value: -0.2191 - type: nauc_map_at_20_diff1 value: 36.552800000000005 - type: nauc_map_at_100_max value: 32.3001 - type: nauc_map_at_100_std value: 0.012199999999999999 - type: nauc_map_at_100_diff1 value: 36.5376 - type: nauc_map_at_1000_max value: 32.3571 - type: nauc_map_at_1000_std value: 0.0557 - type: nauc_map_at_1000_diff1 value: 36.5535 - type: nauc_recall_at_1_max value: 30.2696 - type: nauc_recall_at_1_std value: -3.3264 - type: nauc_recall_at_1_diff1 value: 42.0066 - type: nauc_recall_at_3_max value: 30.413600000000002 - type: nauc_recall_at_3_std value: -0.44530000000000003 - type: nauc_recall_at_3_diff1 value: 32.3805 - type: nauc_recall_at_5_max value: 30.075499999999998 - type: nauc_recall_at_5_std value: 1.8853000000000002 - type: nauc_recall_at_5_diff1 value: 29.8885 - type: nauc_recall_at_10_max value: 29.7039 - type: nauc_recall_at_10_std value: 4.1936 - type: nauc_recall_at_10_diff1 value: 27.9912 - type: nauc_recall_at_20_max value: 30.538700000000002 - type: nauc_recall_at_20_std value: 7.8352 - type: nauc_recall_at_20_diff1 value: 26.842 - type: nauc_recall_at_100_max value: 30.8116 - type: nauc_recall_at_100_std value: 15.1426 - type: nauc_recall_at_100_diff1 value: 23.9166 - type: nauc_recall_at_1000_max value: 31.9647 - type: nauc_recall_at_1000_std value: 26.5754 - type: nauc_recall_at_1000_diff1 value: 22.608 - type: nauc_precision_at_1_max value: 32.959500000000006 - type: nauc_precision_at_1_std value: -2.0082 - type: nauc_precision_at_1_diff1 value: 41.801500000000004 - type: nauc_precision_at_3_max 
value: 34.8709 - type: nauc_precision_at_3_std value: 1.5288 - type: nauc_precision_at_3_diff1 value: 30.6782 - type: nauc_precision_at_5_max value: 34.163700000000006 - type: nauc_precision_at_5_std value: 4.3446 - type: nauc_precision_at_5_diff1 value: 26.2964 - type: nauc_precision_at_10_max value: 33.1747 - type: nauc_precision_at_10_std value: 7.2109000000000005 - type: nauc_precision_at_10_diff1 value: 22.6126 - type: nauc_precision_at_20_max value: 32.8185 - type: nauc_precision_at_20_std value: 11.296100000000001 - type: nauc_precision_at_20_diff1 value: 19.4086 - type: nauc_precision_at_100_max value: 30.4363 - type: nauc_precision_at_100_std value: 14.23 - type: nauc_precision_at_100_diff1 value: 13.1689 - type: nauc_precision_at_1000_max value: 24.6263 - type: nauc_precision_at_1000_std value: 11.190999999999999 - type: nauc_precision_at_1000_diff1 value: 4.5375 - type: nauc_mrr_at_1_max value: 32.959500000000006 - type: nauc_mrr_at_1_std value: -2.0082 - type: nauc_mrr_at_1_diff1 value: 41.801500000000004 - type: nauc_mrr_at_3_max value: 33.949400000000004 - type: nauc_mrr_at_3_std value: -0.5342 - type: nauc_mrr_at_3_diff1 value: 37.3148 - type: nauc_mrr_at_5_max value: 33.7685 - type: nauc_mrr_at_5_std value: 0.2542 - type: nauc_mrr_at_5_diff1 value: 36.5632 - type: nauc_mrr_at_10_max value: 33.849000000000004 - type: nauc_mrr_at_10_std value: 0.6677 - type: nauc_mrr_at_10_diff1 value: 36.4741 - type: nauc_mrr_at_20_max value: 33.9586 - type: nauc_mrr_at_20_std value: 0.897 - type: nauc_mrr_at_20_diff1 value: 36.478899999999996 - type: nauc_mrr_at_100_max value: 33.9441 - type: nauc_mrr_at_100_std value: 0.9808000000000001 - type: nauc_mrr_at_100_diff1 value: 36.5049 - type: nauc_mrr_at_1000_max value: 33.9546 - type: nauc_mrr_at_1000_std value: 0.9831 - type: nauc_mrr_at_1000_diff1 value: 36.5259 - type: main_score value: 31.775 - task: type: Retrieval dataset: name: MTEB CQADupstackUnixRetrieval (default) type: mteb/cqadupstack-unix config: default split: test revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53 metrics: - type: ndcg_at_1 value: 34.981 - type: ndcg_at_3 value: 40.107 - type: ndcg_at_5 value: 42.842999999999996 - type: ndcg_at_10 value: 45.275 - type: ndcg_at_20 value: 47.455999999999996 - type: ndcg_at_100 value: 50.321000000000005 - type: ndcg_at_1000 value: 52.406 - type: map_at_1 value: 29.504 - type: map_at_3 value: 36.622 - type: map_at_5 value: 38.541 - type: map_at_10 value: 39.675 - type: map_at_20 value: 40.409 - type: map_at_100 value: 40.914 - type: map_at_1000 value: 41.012 - type: recall_at_1 value: 29.504 - type: recall_at_3 value: 43.807 - type: recall_at_5 value: 50.77700000000001 - type: recall_at_10 value: 57.898 - type: recall_at_20 value: 65.59899999999999 - type: recall_at_100 value: 78.974 - type: recall_at_1000 value: 93.33399999999999 - type: precision_at_1 value: 34.981 - type: precision_at_3 value: 18.315 - type: precision_at_5 value: 13.097 - type: precision_at_10 value: 7.631 - type: precision_at_20 value: 4.431 - type: precision_at_100 value: 1.13 - type: precision_at_1000 value: 0.14100000000000001 - type: mrr_at_1 value: 34.9813 - type: mrr_at_3 value: 41.3557 - type: mrr_at_5 value: 42.9602 - type: mrr_at_10 value: 43.9816 - type: mrr_at_20 value: 44.5 - type: mrr_at_100 value: 44.8076 - type: mrr_at_1000 value: 44.865 - type: nauc_ndcg_at_1_max value: 48.6102 - type: nauc_ndcg_at_1_std value: -5.6691 - type: nauc_ndcg_at_1_diff1 value: 56.008599999999994 - type: nauc_ndcg_at_3_max value: 46.388400000000004 - type: 
nauc_ndcg_at_3_std value: -4.877800000000001 - type: nauc_ndcg_at_3_diff1 value: 49.1768 - type: nauc_ndcg_at_5_max value: 46.3438 - type: nauc_ndcg_at_5_std value: -4.1069 - type: nauc_ndcg_at_5_diff1 value: 48.209999999999994 - type: nauc_ndcg_at_10_max value: 46.147 - type: nauc_ndcg_at_10_std value: -3.7115 - type: nauc_ndcg_at_10_diff1 value: 47.9846 - type: nauc_ndcg_at_20_max value: 46.2731 - type: nauc_ndcg_at_20_std value: -3.5068 - type: nauc_ndcg_at_20_diff1 value: 48.1901 - type: nauc_ndcg_at_100_max value: 46.886 - type: nauc_ndcg_at_100_std value: -1.8507 - type: nauc_ndcg_at_100_diff1 value: 49.058 - type: nauc_ndcg_at_1000_max value: 46.5984 - type: nauc_ndcg_at_1000_std value: -2.1614999999999998 - type: nauc_ndcg_at_1000_diff1 value: 49.1318 - type: nauc_map_at_1_max value: 45.5569 - type: nauc_map_at_1_std value: -7.604900000000001 - type: nauc_map_at_1_diff1 value: 56.3936 - type: nauc_map_at_3_max value: 46.0028 - type: nauc_map_at_3_std value: -6.334 - type: nauc_map_at_3_diff1 value: 51.3472 - type: nauc_map_at_5_max value: 46.2903 - type: nauc_map_at_5_std value: -5.475300000000001 - type: nauc_map_at_5_diff1 value: 50.5945 - type: nauc_map_at_10_max value: 46.3277 - type: nauc_map_at_10_std value: -5.1829 - type: nauc_map_at_10_diff1 value: 50.4714 - type: nauc_map_at_20_max value: 46.5326 - type: nauc_map_at_20_std value: -5.0456 - type: nauc_map_at_20_diff1 value: 50.5729 - type: nauc_map_at_100_max value: 46.6537 - type: nauc_map_at_100_std value: -4.7367 - type: nauc_map_at_100_diff1 value: 50.711 - type: nauc_map_at_1000_max value: 46.6406 - type: nauc_map_at_1000_std value: -4.7269 - type: nauc_map_at_1000_diff1 value: 50.6985 - type: nauc_recall_at_1_max value: 45.5569 - type: nauc_recall_at_1_std value: -7.604900000000001 - type: nauc_recall_at_1_diff1 value: 56.3936 - type: nauc_recall_at_3_max value: 43.1624 - type: nauc_recall_at_3_std value: -5.0664 - type: nauc_recall_at_3_diff1 value: 44.016 - type: nauc_recall_at_5_max value: 42.893 - type: nauc_recall_at_5_std value: -2.0581 - type: nauc_recall_at_5_diff1 value: 40.6813 - type: nauc_recall_at_10_max value: 41.3464 - type: nauc_recall_at_10_std value: -0.9026 - type: nauc_recall_at_10_diff1 value: 38.8716 - type: nauc_recall_at_20_max value: 40.7766 - type: nauc_recall_at_20_std value: -0.4664 - type: nauc_recall_at_20_diff1 value: 38.6801 - type: nauc_recall_at_100_max value: 43.856 - type: nauc_recall_at_100_std value: 12.148200000000001 - type: nauc_recall_at_100_diff1 value: 43.189899999999994 - type: nauc_recall_at_1000_max value: 36.6555 - type: nauc_recall_at_1000_std value: 25.7409 - type: nauc_recall_at_1000_diff1 value: 44.9133 - type: nauc_precision_at_1_max value: 48.6102 - type: nauc_precision_at_1_std value: -5.6691 - type: nauc_precision_at_1_diff1 value: 56.008599999999994 - type: nauc_precision_at_3_max value: 43.2148 - type: nauc_precision_at_3_std value: 0.0292 - type: nauc_precision_at_3_diff1 value: 35.75 - type: nauc_precision_at_5_max value: 39.8562 - type: nauc_precision_at_5_std value: 4.105 - type: nauc_precision_at_5_diff1 value: 28.4213 - type: nauc_precision_at_10_max value: 34.901199999999996 - type: nauc_precision_at_10_std value: 6.4718 - type: nauc_precision_at_10_diff1 value: 22.785 - type: nauc_precision_at_20_max value: 29.151 - type: nauc_precision_at_20_std value: 8.213 - type: nauc_precision_at_20_diff1 value: 16.6992 - type: nauc_precision_at_100_max value: 17.1377 - type: nauc_precision_at_100_std value: 16.1652 - type: nauc_precision_at_100_diff1 value: 
4.4657 - type: nauc_precision_at_1000_max value: -2.6889 - type: nauc_precision_at_1000_std value: 11.010499999999999 - type: nauc_precision_at_1000_diff1 value: -11.0026 - type: nauc_mrr_at_1_max value: 48.6102 - type: nauc_mrr_at_1_std value: -5.6691 - type: nauc_mrr_at_1_diff1 value: 56.008599999999994 - type: nauc_mrr_at_3_max value: 47.6571 - type: nauc_mrr_at_3_std value: -4.1072999999999995 - type: nauc_mrr_at_3_diff1 value: 50.18470000000001 - type: nauc_mrr_at_5_max value: 47.6268 - type: nauc_mrr_at_5_std value: -3.6222 - type: nauc_mrr_at_5_diff1 value: 49.5854 - type: nauc_mrr_at_10_max value: 47.454499999999996 - type: nauc_mrr_at_10_std value: -3.4977 - type: nauc_mrr_at_10_diff1 value: 49.5833 - type: nauc_mrr_at_20_max value: 47.3316 - type: nauc_mrr_at_20_std value: -3.5721000000000003 - type: nauc_mrr_at_20_diff1 value: 49.6713 - type: nauc_mrr_at_100_max value: 47.387299999999996 - type: nauc_mrr_at_100_std value: -3.4835 - type: nauc_mrr_at_100_diff1 value: 49.8135 - type: nauc_mrr_at_1000_max value: 47.4002 - type: nauc_mrr_at_1000_std value: -3.4842999999999997 - type: nauc_mrr_at_1000_diff1 value: 49.8286 - type: main_score value: 45.275 - task: type: Retrieval dataset: name: MTEB CQADupstackWebmastersRetrieval (default) type: mteb/cqadupstack-webmasters config: default split: test revision: 160c094312a0e1facb97e55eeddb698c0abe3571 metrics: - type: ndcg_at_1 value: 32.806000000000004 - type: ndcg_at_3 value: 38.775999999999996 - type: ndcg_at_5 value: 40.614 - type: ndcg_at_10 value: 42.957 - type: ndcg_at_20 value: 45.202999999999996 - type: ndcg_at_100 value: 48.941 - type: ndcg_at_1000 value: 51.105000000000004 - type: map_at_1 value: 27.236 - type: map_at_3 value: 34.204 - type: map_at_5 value: 35.66 - type: map_at_10 value: 36.986000000000004 - type: map_at_20 value: 37.827 - type: map_at_100 value: 38.602 - type: map_at_1000 value: 38.818000000000005 - type: recall_at_1 value: 27.236 - type: recall_at_3 value: 41.596 - type: recall_at_5 value: 46.947 - type: recall_at_10 value: 54.129000000000005 - type: recall_at_20 value: 62.641000000000005 - type: recall_at_100 value: 80.971 - type: recall_at_1000 value: 93.98100000000001 - type: precision_at_1 value: 32.806000000000004 - type: precision_at_3 value: 18.445 - type: precision_at_5 value: 13.083 - type: precision_at_10 value: 8.142000000000001 - type: precision_at_20 value: 5.119 - type: precision_at_100 value: 1.599 - type: precision_at_1000 value: 0.244 - type: mrr_at_1 value: 32.8063 - type: mrr_at_3 value: 39.5257 - type: mrr_at_5 value: 40.8399 - type: mrr_at_10 value: 41.8107 - type: mrr_at_20 value: 42.4012 - type: mrr_at_100 value: 42.7919 - type: mrr_at_1000 value: 42.8261 - type: nauc_ndcg_at_1_max value: 49.2838 - type: nauc_ndcg_at_1_std value: 8.713799999999999 - type: nauc_ndcg_at_1_diff1 value: 48.2777 - type: nauc_ndcg_at_3_max value: 44.4031 - type: nauc_ndcg_at_3_std value: 11.4725 - type: nauc_ndcg_at_3_diff1 value: 41.5639 - type: nauc_ndcg_at_5_max value: 44.452999999999996 - type: nauc_ndcg_at_5_std value: 11.9373 - type: nauc_ndcg_at_5_diff1 value: 41.977199999999996 - type: nauc_ndcg_at_10_max value: 44.8695 - type: nauc_ndcg_at_10_std value: 13.6193 - type: nauc_ndcg_at_10_diff1 value: 41.665 - type: nauc_ndcg_at_20_max value: 45.691900000000004 - type: nauc_ndcg_at_20_std value: 14.0959 - type: nauc_ndcg_at_20_diff1 value: 42.2414 - type: nauc_ndcg_at_100_max value: 45.7442 - type: nauc_ndcg_at_100_std value: 15.218699999999998 - type: nauc_ndcg_at_100_diff1 value: 41.7288 - type: 
nauc_ndcg_at_1000_max value: 46.788000000000004 - type: nauc_ndcg_at_1000_std value: 15.409900000000002 - type: nauc_ndcg_at_1000_diff1 value: 41.9824 - type: nauc_map_at_1_max value: 48.0334 - type: nauc_map_at_1_std value: 8.0125 - type: nauc_map_at_1_diff1 value: 53.4579 - type: nauc_map_at_3_max value: 45.1289 - type: nauc_map_at_3_std value: 10.013 - type: nauc_map_at_3_diff1 value: 45.51 - type: nauc_map_at_5_max value: 45.3494 - type: nauc_map_at_5_std value: 10.0348 - type: nauc_map_at_5_diff1 value: 45.3972 - type: nauc_map_at_10_max value: 45.8378 - type: nauc_map_at_10_std value: 11.3299 - type: nauc_map_at_10_diff1 value: 44.8933 - type: nauc_map_at_20_max value: 46.156000000000006 - type: nauc_map_at_20_std value: 11.8154 - type: nauc_map_at_20_diff1 value: 44.6615 - type: nauc_map_at_100_max value: 46.1188 - type: nauc_map_at_100_std value: 12.3635 - type: nauc_map_at_100_diff1 value: 44.5946 - type: nauc_map_at_1000_max value: 46.1113 - type: nauc_map_at_1000_std value: 12.526599999999998 - type: nauc_map_at_1000_diff1 value: 44.595400000000005 - type: nauc_recall_at_1_max value: 48.0334 - type: nauc_recall_at_1_std value: 8.0125 - type: nauc_recall_at_1_diff1 value: 53.4579 - type: nauc_recall_at_3_max value: 39.3688 - type: nauc_recall_at_3_std value: 10.3834 - type: nauc_recall_at_3_diff1 value: 37.8084 - type: nauc_recall_at_5_max value: 39.3184 - type: nauc_recall_at_5_std value: 10.509400000000001 - type: nauc_recall_at_5_diff1 value: 36.7191 - type: nauc_recall_at_10_max value: 38.785599999999995 - type: nauc_recall_at_10_std value: 15.781300000000002 - type: nauc_recall_at_10_diff1 value: 34.7564 - type: nauc_recall_at_20_max value: 39.6075 - type: nauc_recall_at_20_std value: 18.0278 - type: nauc_recall_at_20_diff1 value: 35.483399999999996 - type: nauc_recall_at_100_max value: 36.1361 - type: nauc_recall_at_100_std value: 29.1037 - type: nauc_recall_at_100_diff1 value: 26.9486 - type: nauc_recall_at_1000_max value: 62.4461 - type: nauc_recall_at_1000_std value: 57.465599999999995 - type: nauc_recall_at_1000_diff1 value: 29.5554 - type: nauc_precision_at_1_max value: 49.2838 - type: nauc_precision_at_1_std value: 8.713799999999999 - type: nauc_precision_at_1_diff1 value: 48.2777 - type: nauc_precision_at_3_max value: 36.4572 - type: nauc_precision_at_3_std value: 14.3924 - type: nauc_precision_at_3_diff1 value: 22.9406 - type: nauc_precision_at_5_max value: 32.5803 - type: nauc_precision_at_5_std value: 16.4452 - type: nauc_precision_at_5_diff1 value: 18.2745 - type: nauc_precision_at_10_max value: 27.3789 - type: nauc_precision_at_10_std value: 21.0131 - type: nauc_precision_at_10_diff1 value: 6.947399999999999 - type: nauc_precision_at_20_max value: 22.8404 - type: nauc_precision_at_20_std value: 24.6328 - type: nauc_precision_at_20_diff1 value: 0.1601 - type: nauc_precision_at_100_max value: 2.6098 - type: nauc_precision_at_100_std value: 22.3326 - type: nauc_precision_at_100_diff1 value: -10.1755 - type: nauc_precision_at_1000_max value: -6.730899999999999 - type: nauc_precision_at_1000_std value: 18.262900000000002 - type: nauc_precision_at_1000_diff1 value: -16.3364 - type: nauc_mrr_at_1_max value: 49.2838 - type: nauc_mrr_at_1_std value: 8.713799999999999 - type: nauc_mrr_at_1_diff1 value: 48.2777 - type: nauc_mrr_at_3_max value: 45.8613 - type: nauc_mrr_at_3_std value: 10.4584 - type: nauc_mrr_at_3_diff1 value: 42.2388 - type: nauc_mrr_at_5_max value: 46.1544 - type: nauc_mrr_at_5_std value: 11.1434 - type: nauc_mrr_at_5_diff1 value: 42.2252 - type: 
nauc_mrr_at_10_max value: 46.2703 - type: nauc_mrr_at_10_std value: 11.7714 - type: nauc_mrr_at_10_diff1 value: 42.0821 - type: nauc_mrr_at_20_max value: 46.4586 - type: nauc_mrr_at_20_std value: 11.9329 - type: nauc_mrr_at_20_diff1 value: 42.3199 - type: nauc_mrr_at_100_max value: 46.4309 - type: nauc_mrr_at_100_std value: 11.9458 - type: nauc_mrr_at_100_diff1 value: 42.2902 - type: nauc_mrr_at_1000_max value: 46.4392 - type: nauc_mrr_at_1000_std value: 11.9269 - type: nauc_mrr_at_1000_diff1 value: 42.3078 - type: main_score value: 42.957 - task: type: Retrieval dataset: name: MTEB CQADupstackWordpressRetrieval (default) type: mteb/cqadupstack-wordpress config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: ndcg_at_1 value: 25.692999999999998 - type: ndcg_at_3 value: 31.375999999999998 - type: ndcg_at_5 value: 33.617999999999995 - type: ndcg_at_10 value: 36.409000000000006 - type: ndcg_at_20 value: 38.5 - type: ndcg_at_100 value: 41.614000000000004 - type: ndcg_at_1000 value: 44.119 - type: map_at_1 value: 23.666 - type: map_at_3 value: 29.072 - type: map_at_5 value: 30.453999999999997 - type: map_at_10 value: 31.673000000000002 - type: map_at_20 value: 32.256 - type: map_at_100 value: 32.721000000000004 - type: map_at_1000 value: 32.82 - type: recall_at_1 value: 23.666 - type: recall_at_3 value: 35.693000000000005 - type: recall_at_5 value: 40.937 - type: recall_at_10 value: 48.979 - type: recall_at_20 value: 57.028999999999996 - type: recall_at_100 value: 72.80799999999999 - type: recall_at_1000 value: 91.546 - type: precision_at_1 value: 25.692999999999998 - type: precision_at_3 value: 13.123999999999999 - type: precision_at_5 value: 9.279 - type: precision_at_10 value: 5.712 - type: precision_at_20 value: 3.3360000000000003 - type: precision_at_100 value: 0.8869999999999999 - type: precision_at_1000 value: 0.122 - type: mrr_at_1 value: 25.6932 - type: mrr_at_3 value: 31.2693 - type: mrr_at_5 value: 32.4522 - type: mrr_at_10 value: 33.6496 - type: mrr_at_20 value: 34.208 - type: mrr_at_100 value: 34.6132 - type: mrr_at_1000 value: 34.6794 - type: nauc_ndcg_at_1_max value: 30.436400000000003 - type: nauc_ndcg_at_1_std value: -5.177099999999999 - type: nauc_ndcg_at_1_diff1 value: 38.9465 - type: nauc_ndcg_at_3_max value: 27.759600000000002 - type: nauc_ndcg_at_3_std value: -3.7716 - type: nauc_ndcg_at_3_diff1 value: 32.0374 - type: nauc_ndcg_at_5_max value: 29.284399999999998 - type: nauc_ndcg_at_5_std value: -2.1555999999999997 - type: nauc_ndcg_at_5_diff1 value: 31.2735 - type: nauc_ndcg_at_10_max value: 27.4811 - type: nauc_ndcg_at_10_std value: -2.3712 - type: nauc_ndcg_at_10_diff1 value: 30.5165 - type: nauc_ndcg_at_20_max value: 28.385899999999996 - type: nauc_ndcg_at_20_std value: -0.7358 - type: nauc_ndcg_at_20_diff1 value: 30.5901 - type: nauc_ndcg_at_100_max value: 29.6634 - type: nauc_ndcg_at_100_std value: 0.6082 - type: nauc_ndcg_at_100_diff1 value: 30.455 - type: nauc_ndcg_at_1000_max value: 29.316 - type: nauc_ndcg_at_1000_std value: 0.8039 - type: nauc_ndcg_at_1000_diff1 value: 30.406699999999997 - type: nauc_map_at_1_max value: 28.618900000000004 - type: nauc_map_at_1_std value: -5.8273 - type: nauc_map_at_1_diff1 value: 39.6434 - type: nauc_map_at_3_max value: 27.3257 - type: nauc_map_at_3_std value: -4.8353 - type: nauc_map_at_3_diff1 value: 33.9743 - type: nauc_map_at_5_max value: 28.5433 - type: nauc_map_at_5_std value: -3.7222 - type: nauc_map_at_5_diff1 value: 33.360099999999996 - type: nauc_map_at_10_max value: 
27.972399999999997 - type: nauc_map_at_10_std value: -3.565 - type: nauc_map_at_10_diff1 value: 32.9863 - type: nauc_map_at_20_max value: 28.2615 - type: nauc_map_at_20_std value: -3.1113 - type: nauc_map_at_20_diff1 value: 32.9793 - type: nauc_map_at_100_max value: 28.540300000000002 - type: nauc_map_at_100_std value: -2.7937 - type: nauc_map_at_100_diff1 value: 32.9581 - type: nauc_map_at_1000_max value: 28.5349 - type: nauc_map_at_1000_std value: -2.7701 - type: nauc_map_at_1000_diff1 value: 32.939299999999996 - type: nauc_recall_at_1_max value: 28.618900000000004 - type: nauc_recall_at_1_std value: -5.8273 - type: nauc_recall_at_1_diff1 value: 39.6434 - type: nauc_recall_at_3_max value: 25.120199999999997 - type: nauc_recall_at_3_std value: -3.4718 - type: nauc_recall_at_3_diff1 value: 27.233200000000004 - type: nauc_recall_at_5_max value: 28.6985 - type: nauc_recall_at_5_std value: 0.1915 - type: nauc_recall_at_5_diff1 value: 25.533299999999997 - type: nauc_recall_at_10_max value: 23.3717 - type: nauc_recall_at_10_std value: -0.9587999999999999 - type: nauc_recall_at_10_diff1 value: 23.8178 - type: nauc_recall_at_20_max value: 25.923800000000004 - type: nauc_recall_at_20_std value: 5.4661 - type: nauc_recall_at_20_diff1 value: 23.4099 - type: nauc_recall_at_100_max value: 32.182500000000005 - type: nauc_recall_at_100_std value: 14.696200000000001 - type: nauc_recall_at_100_diff1 value: 20.6716 - type: nauc_recall_at_1000_max value: 31.512400000000003 - type: nauc_recall_at_1000_std value: 42.5301 - type: nauc_recall_at_1000_diff1 value: 10.7694 - type: nauc_precision_at_1_max value: 30.436400000000003 - type: nauc_precision_at_1_std value: -5.177099999999999 - type: nauc_precision_at_1_diff1 value: 38.9465 - type: nauc_precision_at_3_max value: 29.1341 - type: nauc_precision_at_3_std value: -0.1582 - type: nauc_precision_at_3_diff1 value: 25.872600000000002 - type: nauc_precision_at_5_max value: 32.7748 - type: nauc_precision_at_5_std value: 4.798100000000001 - type: nauc_precision_at_5_diff1 value: 21.712400000000002 - type: nauc_precision_at_10_max value: 27.396700000000003 - type: nauc_precision_at_10_std value: 6.6187 - type: nauc_precision_at_10_diff1 value: 16.292499999999997 - type: nauc_precision_at_20_max value: 29.6999 - type: nauc_precision_at_20_std value: 12.6113 - type: nauc_precision_at_20_diff1 value: 14.616399999999999 - type: nauc_precision_at_100_max value: 29.297099999999997 - type: nauc_precision_at_100_std value: 20.9722 - type: nauc_precision_at_100_diff1 value: 1.6410999999999998 - type: nauc_precision_at_1000_max value: 2.7286 - type: nauc_precision_at_1000_std value: 14.837200000000001 - type: nauc_precision_at_1000_diff1 value: -21.584500000000002 - type: nauc_mrr_at_1_max value: 30.436400000000003 - type: nauc_mrr_at_1_std value: -5.177099999999999 - type: nauc_mrr_at_1_diff1 value: 38.9465 - type: nauc_mrr_at_3_max value: 29.766199999999998 - type: nauc_mrr_at_3_std value: -3.0375 - type: nauc_mrr_at_3_diff1 value: 33.568599999999996 - type: nauc_mrr_at_5_max value: 30.4582 - type: nauc_mrr_at_5_std value: -2.0233 - type: nauc_mrr_at_5_diff1 value: 33.1478 - type: nauc_mrr_at_10_max value: 29.3877 - type: nauc_mrr_at_10_std value: -2.3752 - type: nauc_mrr_at_10_diff1 value: 32.5597 - type: nauc_mrr_at_20_max value: 29.631400000000003 - type: nauc_mrr_at_20_std value: -1.9325999999999999 - type: nauc_mrr_at_20_diff1 value: 32.6145 - type: nauc_mrr_at_100_max value: 29.7106 - type: nauc_mrr_at_100_std value: -1.8483 - type: nauc_mrr_at_100_diff1 value: 
32.624900000000004 - type: nauc_mrr_at_1000_max value: 29.7099 - type: nauc_mrr_at_1000_std value: -1.8341 - type: nauc_mrr_at_1000_diff1 value: 32.6251 - type: main_score value: 36.409000000000006 - task: type: Retrieval dataset: name: MTEB ClimateFEVER (default) type: mteb/climate-fever config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: ndcg_at_1 value: 26.971 - type: ndcg_at_3 value: 24.196 - type: ndcg_at_5 value: 25.811 - type: ndcg_at_10 value: 29.494 - type: ndcg_at_20 value: 32.013999999999996 - type: ndcg_at_100 value: 35.989 - type: ndcg_at_1000 value: 39.326 - type: map_at_1 value: 12.107 - type: map_at_3 value: 17.538 - type: map_at_5 value: 19.124 - type: map_at_10 value: 20.896 - type: map_at_20 value: 21.798000000000002 - type: map_at_100 value: 22.567 - type: map_at_1000 value: 22.746 - type: recall_at_1 value: 12.107 - type: recall_at_3 value: 22.425 - type: recall_at_5 value: 27.394000000000002 - type: recall_at_10 value: 35.57 - type: recall_at_20 value: 42.565 - type: recall_at_100 value: 57.708000000000006 - type: recall_at_1000 value: 76.673 - type: precision_at_1 value: 26.971 - type: precision_at_3 value: 18.111 - type: precision_at_5 value: 13.694 - type: precision_at_10 value: 9.303 - type: precision_at_20 value: 5.769 - type: precision_at_100 value: 1.6320000000000001 - type: precision_at_1000 value: 0.22499999999999998 - type: mrr_at_1 value: 26.970699999999997 - type: mrr_at_3 value: 36.0478 - type: mrr_at_5 value: 37.9598 - type: mrr_at_10 value: 39.4286 - type: mrr_at_20 value: 39.9242 - type: mrr_at_100 value: 40.232600000000005 - type: mrr_at_1000 value: 40.2711 - type: nauc_ndcg_at_1_max value: 30.1498 - type: nauc_ndcg_at_1_std value: 9.795 - type: nauc_ndcg_at_1_diff1 value: 28.3202 - type: nauc_ndcg_at_3_max value: 36.1507 - type: nauc_ndcg_at_3_std value: 16.6918 - type: nauc_ndcg_at_3_diff1 value: 25.9179 - type: nauc_ndcg_at_5_max value: 38.4314 - type: nauc_ndcg_at_5_std value: 19.1236 - type: nauc_ndcg_at_5_diff1 value: 25.7315 - type: nauc_ndcg_at_10_max value: 39.734 - type: nauc_ndcg_at_10_std value: 22.795199999999998 - type: nauc_ndcg_at_10_diff1 value: 24.5446 - type: nauc_ndcg_at_20_max value: 40.0306 - type: nauc_ndcg_at_20_std value: 25.0242 - type: nauc_ndcg_at_20_diff1 value: 23.7608 - type: nauc_ndcg_at_100_max value: 39.881 - type: nauc_ndcg_at_100_std value: 26.8935 - type: nauc_ndcg_at_100_diff1 value: 23.366600000000002 - type: nauc_ndcg_at_1000_max value: 39.6299 - type: nauc_ndcg_at_1000_std value: 27.556000000000004 - type: nauc_ndcg_at_1000_diff1 value: 23.4406 - type: nauc_map_at_1_max value: 36.033500000000004 - type: nauc_map_at_1_std value: 9.3902 - type: nauc_map_at_1_diff1 value: 33.3389 - type: nauc_map_at_3_max value: 38.2772 - type: nauc_map_at_3_std value: 14.862 - type: nauc_map_at_3_diff1 value: 29.121799999999997 - type: nauc_map_at_5_max value: 38.8901 - type: nauc_map_at_5_std value: 16.4551 - type: nauc_map_at_5_diff1 value: 28.258499999999998 - type: nauc_map_at_10_max value: 39.689099999999996 - type: nauc_map_at_10_std value: 19.0082 - type: nauc_map_at_10_diff1 value: 27.5292 - type: nauc_map_at_20_max value: 39.8114 - type: nauc_map_at_20_std value: 20.099700000000002 - type: nauc_map_at_20_diff1 value: 27.1249 - type: nauc_map_at_100_max value: 39.7759 - type: nauc_map_at_100_std value: 20.671400000000002 - type: nauc_map_at_100_diff1 value: 26.9515 - type: nauc_map_at_1000_max value: 39.7635 - type: nauc_map_at_1000_std value: 20.7381 - type: 
nauc_map_at_1000_diff1 value: 26.9318 - type: nauc_recall_at_1_max value: 36.033500000000004 - type: nauc_recall_at_1_std value: 9.3902 - type: nauc_recall_at_1_diff1 value: 33.3389 - type: nauc_recall_at_3_max value: 37.040099999999995 - type: nauc_recall_at_3_std value: 18.421000000000003 - type: nauc_recall_at_3_diff1 value: 23.591 - type: nauc_recall_at_5_max value: 38.2483 - type: nauc_recall_at_5_std value: 21.9791 - type: nauc_recall_at_5_diff1 value: 20.9432 - type: nauc_recall_at_10_max value: 38.684400000000004 - type: nauc_recall_at_10_std value: 27.528000000000002 - type: nauc_recall_at_10_diff1 value: 17.874599999999997 - type: nauc_recall_at_20_max value: 37.7408 - type: nauc_recall_at_20_std value: 31.178800000000003 - type: nauc_recall_at_20_diff1 value: 15.3021 - type: nauc_recall_at_100_max value: 35.0668 - type: nauc_recall_at_100_std value: 35.8934 - type: nauc_recall_at_100_diff1 value: 12.0978 - type: nauc_recall_at_1000_max value: 33.2113 - type: nauc_recall_at_1000_std value: 44.3165 - type: nauc_recall_at_1000_diff1 value: 9.6011 - type: nauc_precision_at_1_max value: 30.1498 - type: nauc_precision_at_1_std value: 9.795 - type: nauc_precision_at_1_diff1 value: 28.3202 - type: nauc_precision_at_3_max value: 32.1047 - type: nauc_precision_at_3_std value: 20.7027 - type: nauc_precision_at_3_diff1 value: 18.3366 - type: nauc_precision_at_5_max value: 32.9484 - type: nauc_precision_at_5_std value: 24.439700000000002 - type: nauc_precision_at_5_diff1 value: 16.3709 - type: nauc_precision_at_10_max value: 30.626900000000003 - type: nauc_precision_at_10_std value: 30.3335 - type: nauc_precision_at_10_diff1 value: 10.4378 - type: nauc_precision_at_20_max value: 26.875100000000003 - type: nauc_precision_at_20_std value: 33.1578 - type: nauc_precision_at_20_diff1 value: 6.3161 - type: nauc_precision_at_100_max value: 18.5691 - type: nauc_precision_at_100_std value: 32.4294 - type: nauc_precision_at_100_diff1 value: 1.9001000000000001 - type: nauc_precision_at_1000_max value: 5.2522 - type: nauc_precision_at_1000_std value: 26.337899999999998 - type: nauc_precision_at_1000_diff1 value: -4.2309 - type: nauc_mrr_at_1_max value: 30.1498 - type: nauc_mrr_at_1_std value: 9.795 - type: nauc_mrr_at_1_diff1 value: 28.3202 - type: nauc_mrr_at_3_max value: 32.2466 - type: nauc_mrr_at_3_std value: 15.6475 - type: nauc_mrr_at_3_diff1 value: 24.160899999999998 - type: nauc_mrr_at_5_max value: 33.1837 - type: nauc_mrr_at_5_std value: 16.8917 - type: nauc_mrr_at_5_diff1 value: 24.072499999999998 - type: nauc_mrr_at_10_max value: 33.576 - type: nauc_mrr_at_10_std value: 17.4501 - type: nauc_mrr_at_10_diff1 value: 23.9826 - type: nauc_mrr_at_20_max value: 33.5003 - type: nauc_mrr_at_20_std value: 17.5104 - type: nauc_mrr_at_20_diff1 value: 23.9237 - type: nauc_mrr_at_100_max value: 33.455200000000005 - type: nauc_mrr_at_100_std value: 17.5181 - type: nauc_mrr_at_100_diff1 value: 23.9598 - type: nauc_mrr_at_1000_max value: 33.4473 - type: nauc_mrr_at_1000_std value: 17.4969 - type: nauc_mrr_at_1000_diff1 value: 23.974899999999998 - type: main_score value: 29.494 - task: type: Retrieval dataset: name: MTEB CodeFeedbackMT (default) type: CoIR-Retrieval/codefeedback-mt config: default split: test revision: b0f12fa0c0dd67f59c95a5c33d02aeeb4c398c5f metrics: - type: ndcg_at_1 value: 21.044 - type: ndcg_at_3 value: 27.134999999999998 - type: ndcg_at_5 value: 29.205 - type: ndcg_at_10 value: 31.391999999999996 - type: ndcg_at_20 value: 33.031 - type: ndcg_at_100 value: 35.852000000000004 - type: 
ndcg_at_1000 value: 38.076 - type: map_at_1 value: 21.044 - type: map_at_3 value: 25.637 - type: map_at_5 value: 26.779999999999998 - type: map_at_10 value: 27.683000000000003 - type: map_at_20 value: 28.133999999999997 - type: map_at_100 value: 28.510999999999996 - type: map_at_1000 value: 28.588 - type: recall_at_1 value: 21.044 - type: recall_at_3 value: 31.468 - type: recall_at_5 value: 36.522 - type: recall_at_10 value: 43.278 - type: recall_at_20 value: 49.748 - type: recall_at_100 value: 65.16499999999999 - type: recall_at_1000 value: 83.031 - type: precision_at_1 value: 21.044 - type: precision_at_3 value: 10.488999999999999 - type: precision_at_5 value: 7.303999999999999 - type: precision_at_10 value: 4.328 - type: precision_at_20 value: 2.487 - type: precision_at_100 value: 0.652 - type: precision_at_1000 value: 0.083 - type: mrr_at_1 value: 21.043899999999997 - type: mrr_at_3 value: 25.6371 - type: mrr_at_5 value: 26.7796 - type: mrr_at_10 value: 27.6831 - type: mrr_at_20 value: 28.1344 - type: mrr_at_100 value: 28.510999999999996 - type: mrr_at_1000 value: 28.588400000000004 - type: nauc_ndcg_at_1_max value: 11.8658 - type: nauc_ndcg_at_1_std value: -18.4852 - type: nauc_ndcg_at_1_diff1 value: 47.3429 - type: nauc_ndcg_at_3_max value: 11.608400000000001 - type: nauc_ndcg_at_3_std value: -19.0804 - type: nauc_ndcg_at_3_diff1 value: 41.7031 - type: nauc_ndcg_at_5_max value: 11.289299999999999 - type: nauc_ndcg_at_5_std value: -19.3124 - type: nauc_ndcg_at_5_diff1 value: 40.5381 - type: nauc_ndcg_at_10_max value: 11.6701 - type: nauc_ndcg_at_10_std value: -18.7838 - type: nauc_ndcg_at_10_diff1 value: 39.8088 - type: nauc_ndcg_at_20_max value: 11.942400000000001 - type: nauc_ndcg_at_20_std value: -18.123900000000003 - type: nauc_ndcg_at_20_diff1 value: 38.967800000000004 - type: nauc_ndcg_at_100_max value: 13.114999999999998 - type: nauc_ndcg_at_100_std value: -16.1964 - type: nauc_ndcg_at_100_diff1 value: 39.0077 - type: nauc_ndcg_at_1000_max value: 13.5244 - type: nauc_ndcg_at_1000_std value: -15.2702 - type: nauc_ndcg_at_1000_diff1 value: 39.1235 - type: nauc_map_at_1_max value: 11.8658 - type: nauc_map_at_1_std value: -18.4852 - type: nauc_map_at_1_diff1 value: 47.3429 - type: nauc_map_at_3_max value: 11.6937 - type: nauc_map_at_3_std value: -18.9625 - type: nauc_map_at_3_diff1 value: 42.993900000000004 - type: nauc_map_at_5_max value: 11.5064 - type: nauc_map_at_5_std value: -19.0958 - type: nauc_map_at_5_diff1 value: 42.3108 - type: nauc_map_at_10_max value: 11.6615 - type: nauc_map_at_10_std value: -18.885199999999998 - type: nauc_map_at_10_diff1 value: 41.993399999999994 - type: nauc_map_at_20_max value: 11.7419 - type: nauc_map_at_20_std value: -18.7005 - type: nauc_map_at_20_diff1 value: 41.7643 - type: nauc_map_at_100_max value: 11.902600000000001 - type: nauc_map_at_100_std value: -18.4376 - type: nauc_map_at_100_diff1 value: 41.7771 - type: nauc_map_at_1000_max value: 11.9208 - type: nauc_map_at_1000_std value: -18.395500000000002 - type: nauc_map_at_1000_diff1 value: 41.7802 - type: nauc_recall_at_1_max value: 11.8658 - type: nauc_recall_at_1_std value: -18.4852 - type: nauc_recall_at_1_diff1 value: 47.3429 - type: nauc_recall_at_3_max value: 11.3724 - type: nauc_recall_at_3_std value: -19.3869 - type: nauc_recall_at_3_diff1 value: 38.2763 - type: nauc_recall_at_5_max value: 10.678600000000001 - type: nauc_recall_at_5_std value: -19.8995 - type: nauc_recall_at_5_diff1 value: 35.781400000000005 - type: nauc_recall_at_10_max value: 11.7997 - type: nauc_recall_at_10_std 
value: -18.3219 - type: nauc_recall_at_10_diff1 value: 33.7507 - type: nauc_recall_at_20_max value: 12.7832 - type: nauc_recall_at_20_std value: -15.8611 - type: nauc_recall_at_20_diff1 value: 30.4676 - type: nauc_recall_at_100_max value: 20.0012 - type: nauc_recall_at_100_std value: -3.8268000000000004 - type: nauc_recall_at_100_diff1 value: 28.8928 - type: nauc_recall_at_1000_max value: 30.812099999999997 - type: nauc_recall_at_1000_std value: 18.1771 - type: nauc_recall_at_1000_diff1 value: 23.3851 - type: nauc_precision_at_1_max value: 11.8658 - type: nauc_precision_at_1_std value: -18.4852 - type: nauc_precision_at_1_diff1 value: 47.3429 - type: nauc_precision_at_3_max value: 11.3724 - type: nauc_precision_at_3_std value: -19.3869 - type: nauc_precision_at_3_diff1 value: 38.2763 - type: nauc_precision_at_5_max value: 10.678600000000001 - type: nauc_precision_at_5_std value: -19.8995 - type: nauc_precision_at_5_diff1 value: 35.781400000000005 - type: nauc_precision_at_10_max value: 11.7997 - type: nauc_precision_at_10_std value: -18.3219 - type: nauc_precision_at_10_diff1 value: 33.7507 - type: nauc_precision_at_20_max value: 12.7832 - type: nauc_precision_at_20_std value: -15.8611 - type: nauc_precision_at_20_diff1 value: 30.4676 - type: nauc_precision_at_100_max value: 20.0012 - type: nauc_precision_at_100_std value: -3.8268000000000004 - type: nauc_precision_at_100_diff1 value: 28.8928 - type: nauc_precision_at_1000_max value: 30.812099999999997 - type: nauc_precision_at_1000_std value: 18.1771 - type: nauc_precision_at_1000_diff1 value: 23.3851 - type: nauc_mrr_at_1_max value: 11.8658 - type: nauc_mrr_at_1_std value: -18.4852 - type: nauc_mrr_at_1_diff1 value: 47.3429 - type: nauc_mrr_at_3_max value: 11.6937 - type: nauc_mrr_at_3_std value: -18.9625 - type: nauc_mrr_at_3_diff1 value: 42.993900000000004 - type: nauc_mrr_at_5_max value: 11.5064 - type: nauc_mrr_at_5_std value: -19.0958 - type: nauc_mrr_at_5_diff1 value: 42.3108 - type: nauc_mrr_at_10_max value: 11.6615 - type: nauc_mrr_at_10_std value: -18.885199999999998 - type: nauc_mrr_at_10_diff1 value: 41.993399999999994 - type: nauc_mrr_at_20_max value: 11.7419 - type: nauc_mrr_at_20_std value: -18.7005 - type: nauc_mrr_at_20_diff1 value: 41.7643 - type: nauc_mrr_at_100_max value: 11.902600000000001 - type: nauc_mrr_at_100_std value: -18.4376 - type: nauc_mrr_at_100_diff1 value: 41.7771 - type: nauc_mrr_at_1000_max value: 11.9208 - type: nauc_mrr_at_1000_std value: -18.395500000000002 - type: nauc_mrr_at_1000_diff1 value: 41.7802 - type: main_score value: 31.391999999999996 - task: type: Retrieval dataset: name: MTEB CodeFeedbackST (default) type: CoIR-Retrieval/codefeedback-st config: default split: test revision: d213819e87aab9010628da8b73ab4eb337c89340 metrics: - type: ndcg_at_1 value: 51.227000000000004 - type: ndcg_at_3 value: 62.971999999999994 - type: ndcg_at_5 value: 65.649 - type: ndcg_at_10 value: 67.72200000000001 - type: ndcg_at_20 value: 68.919 - type: ndcg_at_100 value: 70.15299999999999 - type: ndcg_at_1000 value: 70.658 - type: map_at_1 value: 51.227000000000004 - type: map_at_3 value: 60.114000000000004 - type: map_at_5 value: 61.607 - type: map_at_10 value: 62.475 - type: map_at_20 value: 62.806 - type: map_at_100 value: 62.979 - type: map_at_1000 value: 62.999 - type: recall_at_1 value: 51.227000000000004 - type: recall_at_3 value: 71.232 - type: recall_at_5 value: 77.69800000000001 - type: recall_at_10 value: 84.041 - type: recall_at_20 value: 88.756 - type: recall_at_100 value: 95.371 - type: recall_at_1000 
value: 99.278 - type: precision_at_1 value: 51.227000000000004 - type: precision_at_3 value: 23.744 - type: precision_at_5 value: 15.540000000000001 - type: precision_at_10 value: 8.404 - type: precision_at_20 value: 4.438000000000001 - type: precision_at_100 value: 0.954 - type: precision_at_1000 value: 0.099 - type: mrr_at_1 value: 51.0062 - type: mrr_at_3 value: 60.0023 - type: mrr_at_5 value: 61.492999999999995 - type: mrr_at_10 value: 62.362899999999996 - type: mrr_at_20 value: 62.693200000000004 - type: mrr_at_100 value: 62.8664 - type: mrr_at_1000 value: 62.8866 - type: nauc_ndcg_at_1_max value: 5.5119 - type: nauc_ndcg_at_1_std value: -27.434599999999996 - type: nauc_ndcg_at_1_diff1 value: 67.3476 - type: nauc_ndcg_at_3_max value: 11.8474 - type: nauc_ndcg_at_3_std value: -30.5305 - type: nauc_ndcg_at_3_diff1 value: 61.4515 - type: nauc_ndcg_at_5_max value: 12.692700000000002 - type: nauc_ndcg_at_5_std value: -30.938 - type: nauc_ndcg_at_5_diff1 value: 61.0505 - type: nauc_ndcg_at_10_max value: 12.354800000000001 - type: nauc_ndcg_at_10_std value: -30.6409 - type: nauc_ndcg_at_10_diff1 value: 61.205600000000004 - type: nauc_ndcg_at_20_max value: 11.9146 - type: nauc_ndcg_at_20_std value: -30.247 - type: nauc_ndcg_at_20_diff1 value: 61.5428 - type: nauc_ndcg_at_100_max value: 11.5407 - type: nauc_ndcg_at_100_std value: -29.561700000000002 - type: nauc_ndcg_at_100_diff1 value: 62.06270000000001 - type: nauc_ndcg_at_1000_max value: 11.2459 - type: nauc_ndcg_at_1000_std value: -29.5751 - type: nauc_ndcg_at_1000_diff1 value: 62.28 - type: nauc_map_at_1_max value: 5.5119 - type: nauc_map_at_1_std value: -27.434599999999996 - type: nauc_map_at_1_diff1 value: 67.3476 - type: nauc_map_at_3_max value: 10.1298 - type: nauc_map_at_3_std value: -29.674899999999997 - type: nauc_map_at_3_diff1 value: 62.982000000000006 - type: nauc_map_at_5_max value: 10.5075 - type: nauc_map_at_5_std value: -29.858600000000003 - type: nauc_map_at_5_diff1 value: 62.829299999999996 - type: nauc_map_at_10_max value: 10.3459 - type: nauc_map_at_10_std value: -29.7338 - type: nauc_map_at_10_diff1 value: 62.917699999999996 - type: nauc_map_at_20_max value: 10.2198 - type: nauc_map_at_20_std value: -29.6284 - type: nauc_map_at_20_diff1 value: 63.01409999999999 - type: nauc_map_at_100_max value: 10.1683 - type: nauc_map_at_100_std value: -29.5448 - type: nauc_map_at_100_diff1 value: 63.0794 - type: nauc_map_at_1000_max value: 10.1602 - type: nauc_map_at_1000_std value: -29.5412 - type: nauc_map_at_1000_diff1 value: 63.0874 - type: nauc_recall_at_1_max value: 5.5119 - type: nauc_recall_at_1_std value: -27.434599999999996 - type: nauc_recall_at_1_diff1 value: 67.3476 - type: nauc_recall_at_3_max value: 17.8724 - type: nauc_recall_at_3_std value: -33.5404 - type: nauc_recall_at_3_diff1 value: 56.1172 - type: nauc_recall_at_5_max value: 21.945700000000002 - type: nauc_recall_at_5_std value: -35.5124 - type: nauc_recall_at_5_diff1 value: 53.6154 - type: nauc_recall_at_10_max value: 23.1968 - type: nauc_recall_at_10_std value: -35.4292 - type: nauc_recall_at_10_diff1 value: 51.998900000000006 - type: nauc_recall_at_20_max value: 23.4056 - type: nauc_recall_at_20_std value: -33.825300000000006 - type: nauc_recall_at_20_diff1 value: 51.544900000000005 - type: nauc_recall_at_100_max value: 29.2331 - type: nauc_recall_at_100_std value: -20.444499999999998 - type: nauc_recall_at_100_diff1 value: 51.8606 - type: nauc_recall_at_1000_max value: 47.943000000000005 - type: nauc_recall_at_1000_std value: 16.1139 - type: 
nauc_recall_at_1000_diff1 value: 49.2407 - type: nauc_precision_at_1_max value: 5.5119 - type: nauc_precision_at_1_std value: -27.434599999999996 - type: nauc_precision_at_1_diff1 value: 67.3476 - type: nauc_precision_at_3_max value: 17.8724 - type: nauc_precision_at_3_std value: -33.5404 - type: nauc_precision_at_3_diff1 value: 56.1172 - type: nauc_precision_at_5_max value: 21.945700000000002 - type: nauc_precision_at_5_std value: -35.5124 - type: nauc_precision_at_5_diff1 value: 53.6154 - type: nauc_precision_at_10_max value: 23.1968 - type: nauc_precision_at_10_std value: -35.4292 - type: nauc_precision_at_10_diff1 value: 51.998900000000006 - type: nauc_precision_at_20_max value: 23.4056 - type: nauc_precision_at_20_std value: -33.825300000000006 - type: nauc_precision_at_20_diff1 value: 51.544900000000005 - type: nauc_precision_at_100_max value: 29.2331 - type: nauc_precision_at_100_std value: -20.444499999999998 - type: nauc_precision_at_100_diff1 value: 51.8606 - type: nauc_precision_at_1000_max value: 47.943000000000005 - type: nauc_precision_at_1000_std value: 16.1139 - type: nauc_precision_at_1000_diff1 value: 49.2407 - type: nauc_mrr_at_1_max value: 4.9502 - type: nauc_mrr_at_1_std value: -27.426099999999998 - type: nauc_mrr_at_1_diff1 value: 67.8214 - type: nauc_mrr_at_3_max value: 9.7423 - type: nauc_mrr_at_3_std value: -29.674699999999998 - type: nauc_mrr_at_3_diff1 value: 63.24340000000001 - type: nauc_mrr_at_5_max value: 10.1129 - type: nauc_mrr_at_5_std value: -29.871100000000002 - type: nauc_mrr_at_5_diff1 value: 63.1148 - type: nauc_mrr_at_10_max value: 9.9493 - type: nauc_mrr_at_10_std value: -29.7413 - type: nauc_mrr_at_10_diff1 value: 63.2057 - type: nauc_mrr_at_20_max value: 9.8157 - type: nauc_mrr_at_20_std value: -29.644 - type: nauc_mrr_at_20_diff1 value: 63.307100000000005 - type: nauc_mrr_at_100_max value: 9.7639 - type: nauc_mrr_at_100_std value: -29.5582 - type: nauc_mrr_at_100_diff1 value: 63.3738 - type: nauc_mrr_at_1000_max value: 9.7555 - type: nauc_mrr_at_1000_std value: -29.554599999999997 - type: nauc_mrr_at_1000_diff1 value: 63.382000000000005 - type: main_score value: 67.72200000000001 - task: type: Retrieval dataset: name: MTEB CodeSearchNetCCRetrieval (python) type: CoIR-Retrieval/CodeSearchNet-ccr config: python split: test revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8 metrics: - type: ndcg_at_1 value: 32.417 - type: ndcg_at_3 value: 40.904 - type: ndcg_at_5 value: 43.321 - type: ndcg_at_10 value: 45.532000000000004 - type: ndcg_at_20 value: 47.071000000000005 - type: ndcg_at_100 value: 49.297999999999995 - type: ndcg_at_1000 value: 50.859 - type: map_at_1 value: 32.417 - type: map_at_3 value: 38.829 - type: map_at_5 value: 40.166000000000004 - type: map_at_10 value: 41.087 - type: map_at_20 value: 41.510999999999996 - type: map_at_100 value: 41.815000000000005 - type: map_at_1000 value: 41.869 - type: recall_at_1 value: 32.417 - type: recall_at_3 value: 46.903 - type: recall_at_5 value: 52.788999999999994 - type: recall_at_10 value: 59.57900000000001 - type: recall_at_20 value: 65.652 - type: recall_at_100 value: 77.718 - type: recall_at_1000 value: 90.294 - type: precision_at_1 value: 32.417 - type: precision_at_3 value: 15.634 - type: precision_at_5 value: 10.558 - type: precision_at_10 value: 5.958 - type: precision_at_20 value: 3.283 - type: precision_at_100 value: 0.777 - type: precision_at_1000 value: 0.09 - type: mrr_at_1 value: 32.4239 - type: mrr_at_3 value: 38.8323 - type: mrr_at_5 value: 40.1696 - type: mrr_at_10 value: 41.0908 - 
type: mrr_at_20 value: 41.5149 - type: mrr_at_100 value: 41.8188 - type: mrr_at_1000 value: 41.8726 - type: nauc_ndcg_at_1_max value: 32.4803 - type: nauc_ndcg_at_1_std value: -1.1774 - type: nauc_ndcg_at_1_diff1 value: 54.68730000000001 - type: nauc_ndcg_at_3_max value: 33.5662 - type: nauc_ndcg_at_3_std value: 0.361 - type: nauc_ndcg_at_3_diff1 value: 49.522 - type: nauc_ndcg_at_5_max value: 33.0861 - type: nauc_ndcg_at_5_std value: 0.5551999999999999 - type: nauc_ndcg_at_5_diff1 value: 48.9052 - type: nauc_ndcg_at_10_max value: 33.0427 - type: nauc_ndcg_at_10_std value: 1.466 - type: nauc_ndcg_at_10_diff1 value: 48.3256 - type: nauc_ndcg_at_20_max value: 33.059 - type: nauc_ndcg_at_20_std value: 2.2277 - type: nauc_ndcg_at_20_diff1 value: 48.2916 - type: nauc_ndcg_at_100_max value: 33.0797 - type: nauc_ndcg_at_100_std value: 2.9991 - type: nauc_ndcg_at_100_diff1 value: 48.266999999999996 - type: nauc_ndcg_at_1000_max value: 33.1052 - type: nauc_ndcg_at_1000_std value: 2.8583000000000003 - type: nauc_ndcg_at_1000_diff1 value: 48.5209 - type: nauc_map_at_1_max value: 32.4803 - type: nauc_map_at_1_std value: -1.1774 - type: nauc_map_at_1_diff1 value: 54.68730000000001 - type: nauc_map_at_3_max value: 33.3014 - type: nauc_map_at_3_std value: -0.06409999999999999 - type: nauc_map_at_3_diff1 value: 50.6726 - type: nauc_map_at_5_max value: 33.0327 - type: nauc_map_at_5_std value: 0.0325 - type: nauc_map_at_5_diff1 value: 50.3363 - type: nauc_map_at_10_max value: 33.0181 - type: nauc_map_at_10_std value: 0.3939 - type: nauc_map_at_10_diff1 value: 50.1109 - type: nauc_map_at_20_max value: 33.0183 - type: nauc_map_at_20_std value: 0.5951 - type: nauc_map_at_20_diff1 value: 50.108 - type: nauc_map_at_100_max value: 33.022 - type: nauc_map_at_100_std value: 0.6973 - type: nauc_map_at_100_diff1 value: 50.10790000000001 - type: nauc_map_at_1000_max value: 33.022 - type: nauc_map_at_1000_std value: 0.6931999999999999 - type: nauc_map_at_1000_diff1 value: 50.1174 - type: nauc_recall_at_1_max value: 32.4803 - type: nauc_recall_at_1_std value: -1.1774 - type: nauc_recall_at_1_diff1 value: 54.68730000000001 - type: nauc_recall_at_3_max value: 34.3301 - type: nauc_recall_at_3_std value: 1.6075 - type: nauc_recall_at_3_diff1 value: 46.2477 - type: nauc_recall_at_5_max value: 33.177299999999995 - type: nauc_recall_at_5_std value: 2.1687000000000003 - type: nauc_recall_at_5_diff1 value: 44.61 - type: nauc_recall_at_10_max value: 33.020500000000006 - type: nauc_recall_at_10_std value: 5.3331 - type: nauc_recall_at_10_diff1 value: 42.3796 - type: nauc_recall_at_20_max value: 33.1279 - type: nauc_recall_at_20_std value: 9.2437 - type: nauc_recall_at_20_diff1 value: 41.584199999999996 - type: nauc_recall_at_100_max value: 33.2882 - type: nauc_recall_at_100_std value: 18.1866 - type: nauc_recall_at_100_diff1 value: 38.9221 - type: nauc_recall_at_1000_max value: 34.2607 - type: nauc_recall_at_1000_std value: 30.5699 - type: nauc_recall_at_1000_diff1 value: 35.204800000000006 - type: nauc_precision_at_1_max value: 32.4803 - type: nauc_precision_at_1_std value: -1.1774 - type: nauc_precision_at_1_diff1 value: 54.68730000000001 - type: nauc_precision_at_3_max value: 34.3301 - type: nauc_precision_at_3_std value: 1.6075 - type: nauc_precision_at_3_diff1 value: 46.2477 - type: nauc_precision_at_5_max value: 33.177299999999995 - type: nauc_precision_at_5_std value: 2.1687000000000003 - type: nauc_precision_at_5_diff1 value: 44.61 - type: nauc_precision_at_10_max value: 33.020500000000006 - type: nauc_precision_at_10_std 
value: 5.3331 - type: nauc_precision_at_10_diff1 value: 42.3796 - type: nauc_precision_at_20_max value: 33.1279 - type: nauc_precision_at_20_std value: 9.2437 - type: nauc_precision_at_20_diff1 value: 41.584199999999996 - type: nauc_precision_at_100_max value: 33.2882 - type: nauc_precision_at_100_std value: 18.1866 - type: nauc_precision_at_100_diff1 value: 38.9221 - type: nauc_precision_at_1000_max value: 34.2607 - type: nauc_precision_at_1000_std value: 30.5699 - type: nauc_precision_at_1000_diff1 value: 35.204800000000006 - type: nauc_mrr_at_1_max value: 32.5013 - type: nauc_mrr_at_1_std value: -1.1843 - type: nauc_mrr_at_1_diff1 value: 54.6663 - type: nauc_mrr_at_3_max value: 33.315 - type: nauc_mrr_at_3_std value: -0.06849999999999999 - type: nauc_mrr_at_3_diff1 value: 50.66460000000001 - type: nauc_mrr_at_5_max value: 33.0452 - type: nauc_mrr_at_5_std value: 0.0305 - type: nauc_mrr_at_5_diff1 value: 50.326499999999996 - type: nauc_mrr_at_10_max value: 33.0308 - type: nauc_mrr_at_10_std value: 0.39189999999999997 - type: nauc_mrr_at_10_diff1 value: 50.101 - type: nauc_mrr_at_20_max value: 33.031 - type: nauc_mrr_at_20_std value: 0.5930000000000001 - type: nauc_mrr_at_20_diff1 value: 50.0981 - type: nauc_mrr_at_100_max value: 33.0348 - type: nauc_mrr_at_100_std value: 0.6952 - type: nauc_mrr_at_100_diff1 value: 50.097899999999996 - type: nauc_mrr_at_1000_max value: 33.0348 - type: nauc_mrr_at_1000_std value: 0.6910999999999999 - type: nauc_mrr_at_1000_diff1 value: 50.1074 - type: main_score value: 45.532000000000004 - task: type: Retrieval dataset: name: MTEB CodeSearchNetCCRetrieval (javascript) type: CoIR-Retrieval/CodeSearchNet-ccr config: javascript split: test revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8 metrics: - type: ndcg_at_1 value: 33.364 - type: ndcg_at_3 value: 41.943999999999996 - type: ndcg_at_5 value: 44.167 - type: ndcg_at_10 value: 46.024 - type: ndcg_at_20 value: 47.508 - type: ndcg_at_100 value: 49.668 - type: ndcg_at_1000 value: 51.336999999999996 - type: map_at_1 value: 33.364 - type: map_at_3 value: 39.846 - type: map_at_5 value: 41.083999999999996 - type: map_at_10 value: 41.85 - type: map_at_20 value: 42.254000000000005 - type: map_at_100 value: 42.547000000000004 - type: map_at_1000 value: 42.601 - type: recall_at_1 value: 33.364 - type: recall_at_3 value: 48.010000000000005 - type: recall_at_5 value: 53.388000000000005 - type: recall_at_10 value: 59.131 - type: recall_at_20 value: 65.026 - type: recall_at_100 value: 76.755 - type: recall_at_1000 value: 90.398 - type: precision_at_1 value: 33.364 - type: precision_at_3 value: 16.003 - type: precision_at_5 value: 10.678 - type: precision_at_10 value: 5.913 - type: precision_at_20 value: 3.251 - type: precision_at_100 value: 0.768 - type: precision_at_1000 value: 0.09 - type: mrr_at_1 value: 33.272600000000004 - type: mrr_at_3 value: 39.7954 - type: mrr_at_5 value: 41.0412 - type: mrr_at_10 value: 41.8073 - type: mrr_at_20 value: 42.2109 - type: mrr_at_100 value: 42.5037 - type: mrr_at_1000 value: 42.5577 - type: nauc_ndcg_at_1_max value: 26.6036 - type: nauc_ndcg_at_1_std value: -8.3972 - type: nauc_ndcg_at_1_diff1 value: 52.43560000000001 - type: nauc_ndcg_at_3_max value: 28.5119 - type: nauc_ndcg_at_3_std value: -5.6812000000000005 - type: nauc_ndcg_at_3_diff1 value: 47.1671 - type: nauc_ndcg_at_5_max value: 28.1875 - type: nauc_ndcg_at_5_std value: -5.6434999999999995 - type: nauc_ndcg_at_5_diff1 value: 46.1849 - type: nauc_ndcg_at_10_max value: 27.5534 - type: nauc_ndcg_at_10_std value: 
-5.6785000000000005 - type: nauc_ndcg_at_10_diff1 value: 45.6927 - type: nauc_ndcg_at_20_max value: 27.4338 - type: nauc_ndcg_at_20_std value: -5.5037 - type: nauc_ndcg_at_20_diff1 value: 45.872800000000005 - type: nauc_ndcg_at_100_max value: 27.386100000000003 - type: nauc_ndcg_at_100_std value: -5.2795000000000005 - type: nauc_ndcg_at_100_diff1 value: 46.1008 - type: nauc_ndcg_at_1000_max value: 27.5195 - type: nauc_ndcg_at_1000_std value: -5.0668999999999995 - type: nauc_ndcg_at_1000_diff1 value: 46.381499999999996 - type: nauc_map_at_1_max value: 26.6036 - type: nauc_map_at_1_std value: -8.3972 - type: nauc_map_at_1_diff1 value: 52.43560000000001 - type: nauc_map_at_3_max value: 28.098699999999997 - type: nauc_map_at_3_std value: -6.357500000000001 - type: nauc_map_at_3_diff1 value: 48.4799 - type: nauc_map_at_5_max value: 27.938000000000002 - type: nauc_map_at_5_std value: -6.3283000000000005 - type: nauc_map_at_5_diff1 value: 47.955799999999996 - type: nauc_map_at_10_max value: 27.6989 - type: nauc_map_at_10_std value: -6.3546000000000005 - type: nauc_map_at_10_diff1 value: 47.7813 - type: nauc_map_at_20_max value: 27.637099999999997 - type: nauc_map_at_20_std value: -6.3278 - type: nauc_map_at_20_diff1 value: 47.8258 - type: nauc_map_at_100_max value: 27.6654 - type: nauc_map_at_100_std value: -6.284199999999999 - type: nauc_map_at_100_diff1 value: 47.8675 - type: nauc_map_at_1000_max value: 27.668599999999998 - type: nauc_map_at_1000_std value: -6.2727 - type: nauc_map_at_1000_diff1 value: 47.8793 - type: nauc_recall_at_1_max value: 26.6036 - type: nauc_recall_at_1_std value: -8.3972 - type: nauc_recall_at_1_diff1 value: 52.43560000000001 - type: nauc_recall_at_3_max value: 29.686600000000002 - type: nauc_recall_at_3_std value: -3.7178999999999998 - type: nauc_recall_at_3_diff1 value: 43.3556 - type: nauc_recall_at_5_max value: 28.835499999999996 - type: nauc_recall_at_5_std value: -3.6023 - type: nauc_recall_at_5_diff1 value: 40.7246 - type: nauc_recall_at_10_max value: 26.6593 - type: nauc_recall_at_10_std value: -3.5498000000000003 - type: nauc_recall_at_10_diff1 value: 38.6728 - type: nauc_recall_at_20_max value: 26.293499999999998 - type: nauc_recall_at_20_std value: -2.3813 - type: nauc_recall_at_20_diff1 value: 38.8857 - type: nauc_recall_at_100_max value: 24.7411 - type: nauc_recall_at_100_std value: 0.1296 - type: nauc_recall_at_100_diff1 value: 38.1683 - type: nauc_recall_at_1000_max value: 25.1934 - type: nauc_recall_at_1000_std value: 10.7766 - type: nauc_recall_at_1000_diff1 value: 35.856300000000005 - type: nauc_precision_at_1_max value: 26.6036 - type: nauc_precision_at_1_std value: -8.3972 - type: nauc_precision_at_1_diff1 value: 52.43560000000001 - type: nauc_precision_at_3_max value: 29.686600000000002 - type: nauc_precision_at_3_std value: -3.7178999999999998 - type: nauc_precision_at_3_diff1 value: 43.3556 - type: nauc_precision_at_5_max value: 28.835499999999996 - type: nauc_precision_at_5_std value: -3.6023 - type: nauc_precision_at_5_diff1 value: 40.7246 - type: nauc_precision_at_10_max value: 26.6593 - type: nauc_precision_at_10_std value: -3.5498000000000003 - type: nauc_precision_at_10_diff1 value: 38.6728 - type: nauc_precision_at_20_max value: 26.293499999999998 - type: nauc_precision_at_20_std value: -2.3813 - type: nauc_precision_at_20_diff1 value: 38.8857 - type: nauc_precision_at_100_max value: 24.7411 - type: nauc_precision_at_100_std value: 0.1296 - type: nauc_precision_at_100_diff1 value: 38.1683 - type: nauc_precision_at_1000_max value: 25.1934 - 
- main_score: 46.024

**MTEB CodeSearchNetCCRetrieval (go)** (CoIR-Retrieval/CodeSearchNet-ccr, config: go, split: test, revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8)
- ndcg@1/3/5/10/20/100/1000: 26.471 / 33.49 / 35.55 / 37.555 / 39.029 / 41.478 / 43.457
- map@1/3/5/10/20/100/1000: 26.471 / 31.774 / 32.915 / 33.746 / 34.15 / 34.478 / 34.544
- recall@1/3/5/10/20/100/1000: 26.471 / 38.451 / 43.462 / 49.643 / 55.479 / 68.825 / 84.93
- precision@1/3/5/10/20/100/1000: 26.471 / 12.817 / 8.692 / 4.964 / 2.774 / 0.688 / 0.085
- mrr@1/3/5/10/20/100/1000: 26.459 / 31.7574 / 32.9092 / 33.7387 / 34.1428 / 34.471 / 34.5364
- main_score: 37.555

**MTEB CodeSearchNetCCRetrieval (ruby)** (CoIR-Retrieval/CodeSearchNet-ccr, config: ruby, split: test, revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8)
- ndcg@1/3/5/10/20/100/1000: 36.003 / 43.306 / 45.443 / 47.549 / 48.872 / 50.651 / 52.406
- map@1/3/5/10/20/100/1000: 36.003 / 41.501 / 42.695 / 43.581 / 43.954 / 44.195 / 44.255
- recall@1/3/5/10/20/100/1000: 36.003 / 48.533 / 53.688 / 60.111 / 65.266 / 74.941 / 89.056
- precision@1/3/5/10/20/100/1000: 36.003 / 16.178 / 10.738 / 6.011 / 3.263 / 0.749 / 0.089
- mrr@1/3/5/10/20/100/1000: 36.0032 / 41.5015 / 42.695 / 43.5806 / 43.9543 / 44.1953 / 44.255
- main_score: 47.549

**MTEB CodeSearchNetCCRetrieval (java)** (CoIR-Retrieval/CodeSearchNet-ccr, config: java, split: test, revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8)
- ndcg@1/3/5/10/20/100/1000: 33.355 / 41.551 / 43.592 / 45.539 / 46.923 / 49.01 / 50.592
- map@1/3/5/10/20/100/1000: 33.355 / 39.582 / 40.716 / 41.524 / 41.905 / 42.185 / 42.239
- recall@1/3/5/10/20/100/1000: 33.355 / 47.23 / 52.177 / 58.174 / 63.642 / 75.034 / 87.85
- precision@1/3/5/10/20/100/1000: 33.355 / 15.743 / 10.435 / 5.817 / 3.182 / 0.75 / 0.088
- mrr@1/3/5/10/20/100/1000: 33.3455 / 39.5695 / 40.7055 / 41.5123 / 41.8948 / 42.1752 / 42.2285
- main_score: 45.539

**MTEB CodeSearchNetCCRetrieval (php)** (CoIR-Retrieval/CodeSearchNet-ccr, config: php, split: test, revision: 6e1effa2c03723c5fde48ee912b5ee08d4f211e8)
- ndcg@1/3/5/10/20/100/1000: 25.139 / 31.922 / 33.99 / 35.942 / 37.506 / 39.971 / 42.074
- map@1/3/5/10/20/100/1000: 25.139 / 30.263 / 31.411 / 32.218 / 32.65 / 32.979 / 33.05
- recall@1/3/5/10/20/100/1000: 25.139 / 36.72 / 41.737 / 47.767 / 53.932 / 67.383 / 84.416
- precision@1/3/5/10/20/100/1000: 25.139 / 12.24 / 8.347 / 4.777 / 2.697 / 0.674 / 0.084
- mrr@1/3/5/10/20/100/1000: 25.1463 / 30.2709 / 31.4126 / 32.2202 / 32.6527 / 32.9822 / 33.0527
- main_score: 35.942

**MTEB CodeSearchNetRetrieval (python)** (code-search-net/code_search_net, config: python, split: test, revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759)
- ndcg@1/3/5/10/20/100/1000: 70.9 / 80.064 / 81.703 / 83.12 / 83.68 / 84.11 / 84.195
- map@1/3/5/10/20/100/1000: 70.9 / 77.867 / 78.772 / 79.353 / 79.508 / 79.569 / 79.571
- recall@1/3/5/10/20/100/1000: 70.9 / 86.4 / 90.4 / 94.8 / 97.0 / 99.3 / 100.0
- precision@1/3/5/10/20/100/1000: 70.9 / 28.8 / 18.08 / 9.48 / 4.85 / 0.993 / 0.1
- mrr@1/3/5/10/20/100/1000: 70.9 / 77.8667 / 78.7717 / 79.3526 / 79.5084 / 79.5687 / 79.5713
- main_score: 83.12

**MTEB CodeSearchNetRetrieval (javascript)** (code-search-net/code_search_net, config: javascript, split: test, revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759)
- ndcg@1/3/5/10/20/100/1000: 58.0 / 67.249 / 68.781 / 70.34 / 71.24 / 72.617 / 73.436
- map@1/3/5/10/20/100/1000: 58.0 / 64.983 / 65.838 / 66.505 / 66.746 / 66.933 / 66.959
- recall@1/3/5/10/20/100/1000: 58.0 / 73.8 / 77.5 / 82.2 / 85.8 / 93.3 / 100.0
- precision@1/3/5/10/20/100/1000: 58.0 / 24.6 / 15.5 / 8.22 / 4.29 / 0.933 / 0.1
- mrr@1/3/5/10/20/100/1000: 58.0 / 64.9833 / 65.8383 / 66.505 / 66.7464 / 66.9326 / 66.9593
- main_score: 70.34

**MTEB CodeSearchNetRetrieval (go)** (code-search-net/code_search_net, config: go, split: test, revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759)
- ndcg@1/3/5/10/20/100/1000: 75.6 / 84.112 / 85.351 / 86.139 / 86.599 / 86.971 / 87.086
- map@1/3/5/10/20/100/1000: 75.6 / 82.1 / 82.79 / 83.122 / 83.251 / 83.303 / 83.307
- recall@1/3/5/10/20/100/1000: 75.6 / 89.9 / 92.9 / 95.3 / 97.1 / 99.1 / 100.0
- precision@1/3/5/10/20/100/1000: 75.6 / 29.967 / 18.58 / 9.53 / 4.855 / 0.991 / 0.1
- mrr@1/3/5/10/20/100/1000: 75.6 / 82.1 / 82.79 / 83.1223 / 83.2511 / 83.3027 / 83.307
- main_score: 86.139

**MTEB CodeSearchNetRetrieval (ruby)** (code-search-net/code_search_net, config: ruby, split: test, revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759)
- ndcg@1/3/5/10/20/100/1000: 61.3 / 71.232 / 73.1 / 74.736 / 75.511 / 76.416 / 76.996
- map@1/3/5/10/20/100/1000: 61.3 / 68.85 / 69.895 / 70.581 / 70.802 / 70.942 / 70.961
- recall@1/3/5/10/20/100/1000: 61.3 / 78.1 / 82.6 / 87.6 / 90.6 / 95.3 / 100.0
- precision@1/3/5/10/20/100/1000: 61.3 / 26.033 / 16.52 / 8.76 / 4.53 / 0.953 / 0.1
- mrr@1/3/5/10/20/100/1000: 61.3 / 68.85 / 69.895 / 70.5811 / 70.8024 / 70.9416 / 70.9609
- main_score: 74.736

**MTEB CodeSearchNetRetrieval (java)** (code-search-net/code_search_net, config: java, split: test, revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759)
- ndcg@1/3/5/10/20/100/1000: 55.1 / 66.894 / 68.9 / 70.89 / 72.016 / 73.047 / 73.553
- map@1/3/5/10/20/100/1000: 55.1 / 64.05 / 65.18 / 66.012 / 66.328 / 66.483 / 66.498
- recall@1/3/5/10/20/100/1000: 55.1 / 75.1 / 79.9 / 86.0 / 90.4 / 95.8 / 100.0
- precision@1/3/5/10/20/100/1000: 55.1 / 25.033 / 15.98 / 8.6 / 4.52 / 0.958 / 0.1
- mrr@1/3/5/10/20/100/1000: 55.1 / 64.05 / 65.18 / 66.0123 / 66.3282 / 66.4827 / 66.4981
- main_score: 70.89

**MTEB CodeSearchNetRetrieval (php)** (code-search-net/code_search_net, config: php, split: test, revision: fdc6a9e39575768c27eb8a2a5f702bf846eb4759)
- ndcg@1/3/5/10/20/100/1000: 56.9 / 69.128 / 71.495 / 72.93 / 73.775 / 74.476 / 75.075
- map@1/3/5/10/20/100/1000: 56.9 / 66.1 / 67.425 / 68.024 / 68.261 / 68.357 / 68.376
- recall@1/3/5/10/20/100/1000: 56.9 / 77.9 / 83.6 / 88.0 / 91.3 / 95.1 / 100.0
- precision@1/3/5/10/20/100/1000: 56.9 / 25.967 / 16.72 / 8.8 / 4.565 / 0.951 / 0.1
- mrr@1/3/5/10/20/100/1000: 56.9 / 66.1 / 67.425 / 68.0238 / 68.2613 / 68.3572 / 68.3763
- main_score: 72.93

**MTEB CodeTransOceanContest (default)** (CoIR-Retrieval/codetrans-contest, config: default, split: test, revision: 20da4eb20a4b17300c0986ee148c90867a7f2a4d)
- ndcg@1/3/5/10/20/100/1000: 50.226 / 55.748 / 58.007 / 60.831 / 62.793 / 64.433 / 65.6
- map@1/3/5/10/20/100/1000: 50.226 / 54.374 / 55.641 / 56.832 / 57.38 / 57.594 / 57.633
- recall@1/3/5/10/20/100/1000: 50.226 / 59.729 / 65.158 / 73.756 / 81.448 / 90.498 / 100.0
- precision@1/3/5/10/20/100/1000: 50.226 / 19.91 / 13.032 / 7.376 / 4.072 / 0.905 / 0.1
- mrr@1/3/5/10/20/100/1000: 50.2262 / 54.3741 / 55.641 / 56.8322 / 57.3798 / 57.5945 / 57.6333
- main_score: 60.831

**MTEB CodeTransOceanDL (default)** (CoIR-Retrieval/codetrans-dl, config: default, split: test, revision: 281562cb8a1265ab5c0824bfa6ddcd9b0a15618f)
- ndcg@1/3/5/10/20/100/1000: 8.889 / 12.09 / 18.355 / 32.138 / 38.437 / 39.031 / 39.031
- map@1/3/5/10/20/100/1000: 8.889 / 11.111 / 14.639 / 20.193 / 22.137 / 22.21 / 22.21
- recall@1/3/5/10/20/100/1000: 8.889 / 15.0 / 30.0 / 73.333 / 96.667 / 100.0 / 100.0
- precision@1/3/5/10/20/100/1000: 8.889 / 5.0 / 6.0 / 7.333 / 4.833 / 1.0 / 0.1
- mrr@1/3/5/10/20/100/1000: 6.1111 / 10.0 / 12.8056 / 19.1649 / 20.8374 / 20.9115 / 20.9115
type: nauc_map_at_3_max value: -43.1058 - type: nauc_map_at_3_std value: -31.071900000000003 - type: nauc_map_at_3_diff1 value: -12.875900000000001 - type: nauc_map_at_5_max value: -36.4737 - type: nauc_map_at_5_std value: -23.8979 - type: nauc_map_at_5_diff1 value: -16.206400000000002 - type: nauc_map_at_10_max value: -34.2318 - type: nauc_map_at_10_std value: -22.0811 - type: nauc_map_at_10_diff1 value: -18.5454 - type: nauc_map_at_20_max value: -37.9204 - type: nauc_map_at_20_std value: -23.3876 - type: nauc_map_at_20_diff1 value: -16.8628 - type: nauc_map_at_100_max value: -37.401 - type: nauc_map_at_100_std value: -23.595299999999998 - type: nauc_map_at_100_diff1 value: -16.8443 - type: nauc_map_at_1000_max value: -37.401 - type: nauc_map_at_1000_std value: -23.595299999999998 - type: nauc_map_at_1000_diff1 value: -16.8443 - type: nauc_recall_at_1_max value: -40.8791 - type: nauc_recall_at_1_std value: -29.137 - type: nauc_recall_at_1_diff1 value: -25.7462 - type: nauc_recall_at_3_max value: -45.6372 - type: nauc_recall_at_3_std value: -32.8876 - type: nauc_recall_at_3_diff1 value: 2.1906 - type: nauc_recall_at_5_max value: -29.531299999999998 - type: nauc_recall_at_5_std value: -15.2907 - type: nauc_recall_at_5_diff1 value: -12.279900000000001 - type: nauc_recall_at_10_max value: -17.0981 - type: nauc_recall_at_10_std value: -5.6821 - type: nauc_recall_at_10_diff1 value: -31.382700000000003 - type: nauc_recall_at_20_max value: -164.1923 - type: nauc_recall_at_20_std value: 14.6592 - type: nauc_recall_at_20_diff1 value: -1.6729 - type: nauc_recall_at_100_max value: .nan - type: nauc_recall_at_100_std value: .nan - type: nauc_recall_at_100_diff1 value: .nan - type: nauc_recall_at_1000_max value: .nan - type: nauc_recall_at_1000_std value: .nan - type: nauc_recall_at_1000_diff1 value: .nan - type: nauc_precision_at_1_max value: -40.8791 - type: nauc_precision_at_1_std value: -29.137 - type: nauc_precision_at_1_diff1 value: -25.7462 - type: nauc_precision_at_3_max value: -45.6372 - type: nauc_precision_at_3_std value: -32.8876 - type: nauc_precision_at_3_diff1 value: 2.1906 - type: nauc_precision_at_5_max value: -29.531299999999998 - type: nauc_precision_at_5_std value: -15.2907 - type: nauc_precision_at_5_diff1 value: -12.279900000000001 - type: nauc_precision_at_10_max value: -17.0981 - type: nauc_precision_at_10_std value: -5.6821 - type: nauc_precision_at_10_diff1 value: -31.382700000000003 - type: nauc_precision_at_20_max value: -164.1923 - type: nauc_precision_at_20_std value: 14.6592 - type: nauc_precision_at_20_diff1 value: -1.6729 - type: nauc_precision_at_100_max value: 100.0 - type: nauc_precision_at_100_std value: 100.0 - type: nauc_precision_at_100_diff1 value: 100.0 - type: nauc_precision_at_1000_max value: 100.0 - type: nauc_precision_at_1000_std value: 100.0 - type: nauc_precision_at_1000_diff1 value: 100.0 - type: nauc_mrr_at_1_max value: -38.4833 - type: nauc_mrr_at_1_std value: -27.4288 - type: nauc_mrr_at_1_diff1 value: -2.3441 - type: nauc_mrr_at_3_max value: -40.2427 - type: nauc_mrr_at_3_std value: -28.479 - type: nauc_mrr_at_3_diff1 value: 14.5837 - type: nauc_mrr_at_5_max value: -32.784400000000005 - type: nauc_mrr_at_5_std value: -19.3984 - type: nauc_mrr_at_5_diff1 value: 8.2762 - type: nauc_mrr_at_10_max value: -31.999499999999998 - type: nauc_mrr_at_10_std value: -20.9878 - type: nauc_mrr_at_10_diff1 value: 9.2346 - type: nauc_mrr_at_20_max value: -36.2588 - type: nauc_mrr_at_20_std value: -21.057699999999997 - type: nauc_mrr_at_20_diff1 value: 9.4499 - type: 
nauc_mrr_at_100_max value: -35.6528 - type: nauc_mrr_at_100_std value: -21.288 - type: nauc_mrr_at_100_diff1 value: 9.591 - type: nauc_mrr_at_1000_max value: -35.6528 - type: nauc_mrr_at_1000_std value: -21.288 - type: nauc_mrr_at_1000_diff1 value: 9.591 - type: main_score value: 32.138 - task: type: Retrieval dataset: name: MTEB CosQA (default) type: CoIR-Retrieval/cosqa config: default split: test revision: bc5efb7e9d437246ce393ed19d772e08e4a79535 metrics: - type: ndcg_at_1 value: 14.6 - type: ndcg_at_3 value: 23.043 - type: ndcg_at_5 value: 28.551 - type: ndcg_at_10 value: 33.452 - type: ndcg_at_20 value: 37.094 - type: ndcg_at_100 value: 40.416999999999994 - type: ndcg_at_1000 value: 41.684 - type: map_at_1 value: 14.6 - type: map_at_3 value: 20.8 - type: map_at_5 value: 23.849999999999998 - type: map_at_10 value: 25.941 - type: map_at_20 value: 26.941 - type: map_at_100 value: 27.418 - type: map_at_1000 value: 27.473999999999997 - type: recall_at_1 value: 14.6 - type: recall_at_3 value: 29.599999999999998 - type: recall_at_5 value: 43.0 - type: recall_at_10 value: 57.8 - type: recall_at_20 value: 72.2 - type: recall_at_100 value: 89.8 - type: recall_at_1000 value: 99.4 - type: precision_at_1 value: 14.6 - type: precision_at_3 value: 9.866999999999999 - type: precision_at_5 value: 8.6 - type: precision_at_10 value: 5.779999999999999 - type: precision_at_20 value: 3.61 - type: precision_at_100 value: 0.898 - type: precision_at_1000 value: 0.099 - type: mrr_at_1 value: 15.4 - type: mrr_at_3 value: 21.099999999999998 - type: mrr_at_5 value: 23.380000000000003 - type: mrr_at_10 value: 25.5087 - type: mrr_at_20 value: 26.5332 - type: mrr_at_100 value: 27.0822 - type: mrr_at_1000 value: 27.1358 - type: nauc_ndcg_at_1_max value: 15.7645 - type: nauc_ndcg_at_1_std value: -8.4668 - type: nauc_ndcg_at_1_diff1 value: 38.0187 - type: nauc_ndcg_at_3_max value: 14.791799999999999 - type: nauc_ndcg_at_3_std value: -11.6736 - type: nauc_ndcg_at_3_diff1 value: 24.288899999999998 - type: nauc_ndcg_at_5_max value: 17.9426 - type: nauc_ndcg_at_5_std value: -11.1099 - type: nauc_ndcg_at_5_diff1 value: 18.8892 - type: nauc_ndcg_at_10_max value: 18.3537 - type: nauc_ndcg_at_10_std value: -9.0621 - type: nauc_ndcg_at_10_diff1 value: 17.6054 - type: nauc_ndcg_at_20_max value: 19.9156 - type: nauc_ndcg_at_20_std value: -6.926699999999999 - type: nauc_ndcg_at_20_diff1 value: 16.125 - type: nauc_ndcg_at_100_max value: 19.527900000000002 - type: nauc_ndcg_at_100_std value: -5.9748 - type: nauc_ndcg_at_100_diff1 value: 18.8697 - type: nauc_ndcg_at_1000_max value: 18.6624 - type: nauc_ndcg_at_1000_std value: -7.6636999999999995 - type: nauc_ndcg_at_1000_diff1 value: 20.2624 - type: nauc_map_at_1_max value: 15.7645 - type: nauc_map_at_1_std value: -8.4668 - type: nauc_map_at_1_diff1 value: 38.0187 - type: nauc_map_at_3_max value: 14.932200000000002 - type: nauc_map_at_3_std value: -11.2233 - type: nauc_map_at_3_diff1 value: 27.254800000000003 - type: nauc_map_at_5_max value: 16.700599999999998 - type: nauc_map_at_5_std value: -10.9701 - type: nauc_map_at_5_diff1 value: 23.9832 - type: nauc_map_at_10_max value: 16.947200000000002 - type: nauc_map_at_10_std value: -9.896099999999999 - type: nauc_map_at_10_diff1 value: 23.4428 - type: nauc_map_at_20_max value: 17.3857 - type: nauc_map_at_20_std value: -9.2728 - type: nauc_map_at_20_diff1 value: 23.1321 - type: nauc_map_at_100_max value: 17.3462 - type: nauc_map_at_100_std value: -9.2043 - type: nauc_map_at_100_diff1 value: 23.5583 - type: nauc_map_at_1000_max value: 
17.3214 - type: nauc_map_at_1000_std value: -9.2627 - type: nauc_map_at_1000_diff1 value: 23.6455 - type: nauc_recall_at_1_max value: 15.7645 - type: nauc_recall_at_1_std value: -8.4668 - type: nauc_recall_at_1_diff1 value: 38.0187 - type: nauc_recall_at_3_max value: 14.4809 - type: nauc_recall_at_3_std value: -12.664700000000002 - type: nauc_recall_at_3_diff1 value: 17.275199999999998 - type: nauc_recall_at_5_max value: 21.2405 - type: nauc_recall_at_5_std value: -11.2278 - type: nauc_recall_at_5_diff1 value: 6.6622 - type: nauc_recall_at_10_max value: 22.3474 - type: nauc_recall_at_10_std value: -6.399299999999999 - type: nauc_recall_at_10_diff1 value: 2.0452000000000004 - type: nauc_recall_at_20_max value: 30.1398 - type: nauc_recall_at_20_std value: 3.3263000000000003 - type: nauc_recall_at_20_diff1 value: -9.3067 - type: nauc_recall_at_100_max value: 37.6654 - type: nauc_recall_at_100_std value: 30.699700000000004 - type: nauc_recall_at_100_diff1 value: -8.959999999999999 - type: nauc_recall_at_1000_max value: 47.3389 - type: nauc_recall_at_1000_std value: 95.6427 - type: nauc_recall_at_1000_diff1 value: -102.10079999999999 - type: nauc_precision_at_1_max value: 15.7645 - type: nauc_precision_at_1_std value: -8.4668 - type: nauc_precision_at_1_diff1 value: 38.0187 - type: nauc_precision_at_3_max value: 14.4809 - type: nauc_precision_at_3_std value: -12.664700000000002 - type: nauc_precision_at_3_diff1 value: 17.275199999999998 - type: nauc_precision_at_5_max value: 21.2405 - type: nauc_precision_at_5_std value: -11.2278 - type: nauc_precision_at_5_diff1 value: 6.6622 - type: nauc_precision_at_10_max value: 22.3474 - type: nauc_precision_at_10_std value: -6.399299999999999 - type: nauc_precision_at_10_diff1 value: 2.0452000000000004 - type: nauc_precision_at_20_max value: 30.1398 - type: nauc_precision_at_20_std value: 3.3263000000000003 - type: nauc_precision_at_20_diff1 value: -9.3067 - type: nauc_precision_at_100_max value: 37.6654 - type: nauc_precision_at_100_std value: 30.699700000000004 - type: nauc_precision_at_100_diff1 value: -8.959999999999999 - type: nauc_precision_at_1000_max value: 47.3389 - type: nauc_precision_at_1000_std value: 95.6427 - type: nauc_precision_at_1000_diff1 value: -102.10079999999999 - type: nauc_mrr_at_1_max value: 15.059800000000001 - type: nauc_mrr_at_1_std value: -17.3443 - type: nauc_mrr_at_1_diff1 value: 34.5918 - type: nauc_mrr_at_3_max value: 15.5076 - type: nauc_mrr_at_3_std value: -16.3353 - type: nauc_mrr_at_3_diff1 value: 27.414899999999996 - type: nauc_mrr_at_5_max value: 15.033299999999999 - type: nauc_mrr_at_5_std value: -16.0288 - type: nauc_mrr_at_5_diff1 value: 25.4198 - type: nauc_mrr_at_10_max value: 15.7434 - type: nauc_mrr_at_10_std value: -14.8923 - type: nauc_mrr_at_10_diff1 value: 23.6099 - type: nauc_mrr_at_20_max value: 16.2588 - type: nauc_mrr_at_20_std value: -14.5306 - type: nauc_mrr_at_20_diff1 value: 23.718700000000002 - type: nauc_mrr_at_100_max value: 16.2196 - type: nauc_mrr_at_100_std value: -14.4928 - type: nauc_mrr_at_100_diff1 value: 24.017 - type: nauc_mrr_at_1000_max value: 16.1885 - type: nauc_mrr_at_1000_std value: -14.5629 - type: nauc_mrr_at_1000_diff1 value: 24.0998 - type: main_score value: 33.452 - task: type: Retrieval dataset: name: MTEB DBPedia (default) type: mteb/dbpedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: ndcg_at_1 value: 48.75 - type: ndcg_at_3 value: 40.266000000000005 - type: ndcg_at_5 value: 37.034 - type: ndcg_at_10 value: 34.565 - type: 
ndcg_at_20 value: 34.013 - type: ndcg_at_100 value: 39.006 - type: ndcg_at_1000 value: 46.64 - type: map_at_1 value: 7.866 - type: map_at_3 value: 12.145999999999999 - type: map_at_5 value: 13.874 - type: map_at_10 value: 16.02 - type: map_at_20 value: 18.183 - type: map_at_100 value: 21.775 - type: map_at_1000 value: 23.203 - type: recall_at_1 value: 7.866 - type: recall_at_3 value: 13.700000000000001 - type: recall_at_5 value: 16.683 - type: recall_at_10 value: 21.059 - type: recall_at_20 value: 27.045 - type: recall_at_100 value: 45.236 - type: recall_at_1000 value: 69.867 - type: precision_at_1 value: 60.5 - type: precision_at_3 value: 44.083 - type: precision_at_5 value: 35.449999999999996 - type: precision_at_10 value: 26.400000000000002 - type: precision_at_20 value: 19.75 - type: precision_at_100 value: 8.472 - type: precision_at_1000 value: 1.822 - type: mrr_at_1 value: 60.5 - type: mrr_at_3 value: 67.625 - type: mrr_at_5 value: 68.4625 - type: mrr_at_10 value: 69.4092 - type: mrr_at_20 value: 69.6644 - type: mrr_at_100 value: 69.8187 - type: mrr_at_1000 value: 69.8284 - type: nauc_ndcg_at_1_max value: 27.385199999999998 - type: nauc_ndcg_at_1_std value: 15.502199999999998 - type: nauc_ndcg_at_1_diff1 value: 40.3474 - type: nauc_ndcg_at_3_max value: 23.691100000000002 - type: nauc_ndcg_at_3_std value: 17.8766 - type: nauc_ndcg_at_3_diff1 value: 26.1322 - type: nauc_ndcg_at_5_max value: 21.908 - type: nauc_ndcg_at_5_std value: 16.5012 - type: nauc_ndcg_at_5_diff1 value: 24.9377 - type: nauc_ndcg_at_10_max value: 21.5239 - type: nauc_ndcg_at_10_std value: 15.327399999999999 - type: nauc_ndcg_at_10_diff1 value: 25.0379 - type: nauc_ndcg_at_20_max value: 18.6445 - type: nauc_ndcg_at_20_std value: 10.4816 - type: nauc_ndcg_at_20_diff1 value: 24.5885 - type: nauc_ndcg_at_100_max value: 21.7258 - type: nauc_ndcg_at_100_std value: 14.514199999999999 - type: nauc_ndcg_at_100_diff1 value: 21.6285 - type: nauc_ndcg_at_1000_max value: 25.515 - type: nauc_ndcg_at_1000_std value: 23.278499999999998 - type: nauc_ndcg_at_1000_diff1 value: 21.3373 - type: nauc_map_at_1_max value: 2.911 - type: nauc_map_at_1_std value: -23.3734 - type: nauc_map_at_1_diff1 value: 31.251099999999997 - type: nauc_map_at_3_max value: 6.7765 - type: nauc_map_at_3_std value: -21.1466 - type: nauc_map_at_3_diff1 value: 26.6096 - type: nauc_map_at_5_max value: 7.2574 - type: nauc_map_at_5_std value: -18.0369 - type: nauc_map_at_5_diff1 value: 24.0648 - type: nauc_map_at_10_max value: 11.669699999999999 - type: nauc_map_at_10_std value: -10.5142 - type: nauc_map_at_10_diff1 value: 23.289099999999998 - type: nauc_map_at_20_max value: 13.9376 - type: nauc_map_at_20_std value: -4.1179 - type: nauc_map_at_20_diff1 value: 22.9493 - type: nauc_map_at_100_max value: 18.756600000000002 - type: nauc_map_at_100_std value: 7.5601 - type: nauc_map_at_100_diff1 value: 21.1962 - type: nauc_map_at_1000_max value: 20.4084 - type: nauc_map_at_1000_std value: 10.7807 - type: nauc_map_at_1000_diff1 value: 21.6074 - type: nauc_recall_at_1_max value: 2.911 - type: nauc_recall_at_1_std value: -23.3734 - type: nauc_recall_at_1_diff1 value: 31.251099999999997 - type: nauc_recall_at_3_max value: 5.9628 - type: nauc_recall_at_3_std value: -21.7657 - type: nauc_recall_at_3_diff1 value: 22.1779 - type: nauc_recall_at_5_max value: 4.2336 - type: nauc_recall_at_5_std value: -19.872 - type: nauc_recall_at_5_diff1 value: 17.4799 - type: nauc_recall_at_10_max value: 9.376900000000001 - type: nauc_recall_at_10_std value: -12.3596 - type: 
nauc_recall_at_10_diff1 value: 15.801100000000002 - type: nauc_recall_at_20_max value: 11.2098 - type: nauc_recall_at_20_std value: -6.471699999999999 - type: nauc_recall_at_20_diff1 value: 15.1155 - type: nauc_recall_at_100_max value: 16.7433 - type: nauc_recall_at_100_std value: 12.2849 - type: nauc_recall_at_100_diff1 value: 6.908499999999999 - type: nauc_recall_at_1000_max value: 18.6941 - type: nauc_recall_at_1000_std value: 25.2521 - type: nauc_recall_at_1000_diff1 value: 1.0488000000000002 - type: nauc_precision_at_1_max value: 39.5387 - type: nauc_precision_at_1_std value: 23.244600000000002 - type: nauc_precision_at_1_diff1 value: 50.275499999999994 - type: nauc_precision_at_3_max value: 32.3641 - type: nauc_precision_at_3_std value: 34.4136 - type: nauc_precision_at_3_diff1 value: 17.316200000000002 - type: nauc_precision_at_5_max value: 29.9613 - type: nauc_precision_at_5_std value: 39.3271 - type: nauc_precision_at_5_diff1 value: 13.352 - type: nauc_precision_at_10_max value: 29.5821 - type: nauc_precision_at_10_std value: 48.0976 - type: nauc_precision_at_10_diff1 value: 9.610000000000001 - type: nauc_precision_at_20_max value: 25.5555 - type: nauc_precision_at_20_std value: 49.3622 - type: nauc_precision_at_20_diff1 value: 8.0656 - type: nauc_precision_at_100_max value: 24.3874 - type: nauc_precision_at_100_std value: 49.613600000000005 - type: nauc_precision_at_100_diff1 value: 4.1512 - type: nauc_precision_at_1000_max value: 16.0014 - type: nauc_precision_at_1000_std value: 28.3243 - type: nauc_precision_at_1000_diff1 value: 11.5068 - type: nauc_mrr_at_1_max value: 39.5387 - type: nauc_mrr_at_1_std value: 23.244600000000002 - type: nauc_mrr_at_1_diff1 value: 50.275499999999994 - type: nauc_mrr_at_3_max value: 44.3328 - type: nauc_mrr_at_3_std value: 29.595900000000004 - type: nauc_mrr_at_3_diff1 value: 47.0929 - type: nauc_mrr_at_5_max value: 43.6678 - type: nauc_mrr_at_5_std value: 29.219299999999997 - type: nauc_mrr_at_5_diff1 value: 47.7731 - type: nauc_mrr_at_10_max value: 43.1409 - type: nauc_mrr_at_10_std value: 29.5283 - type: nauc_mrr_at_10_diff1 value: 47.7777 - type: nauc_mrr_at_20_max value: 43.2155 - type: nauc_mrr_at_20_std value: 29.378999999999998 - type: nauc_mrr_at_20_diff1 value: 47.826800000000006 - type: nauc_mrr_at_100_max value: 43.2448 - type: nauc_mrr_at_100_std value: 29.385 - type: nauc_mrr_at_100_diff1 value: 47.7931 - type: nauc_mrr_at_1000_max value: 43.2316 - type: nauc_mrr_at_1000_std value: 29.3645 - type: nauc_mrr_at_1000_diff1 value: 47.7958 - type: main_score value: 34.565 - task: type: Classification dataset: name: MTEB EmotionClassification (default) type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 36.449999999999996 - type: f1 value: 32.3042 - type: f1_weighted value: 38.7818 - type: main_score value: 36.449999999999996 - task: type: Retrieval dataset: name: MTEB FEVER (default) type: mteb/fever config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: ndcg_at_1 value: 77.93299999999999 - type: ndcg_at_3 value: 83.146 - type: ndcg_at_5 value: 84.188 - type: ndcg_at_10 value: 84.932 - type: ndcg_at_20 value: 85.187 - type: ndcg_at_100 value: 85.452 - type: ndcg_at_1000 value: 85.68599999999999 - type: map_at_1 value: 72.173 - type: map_at_3 value: 79.618 - type: map_at_5 value: 80.32000000000001 - type: map_at_10 value: 80.674 - type: map_at_20 value: 80.762 - type: map_at_100 value: 80.81 - type: map_at_1000 value: 
80.822 - type: recall_at_1 value: 72.173 - type: recall_at_3 value: 87.804 - type: recall_at_5 value: 90.556 - type: recall_at_10 value: 92.869 - type: recall_at_20 value: 93.768 - type: recall_at_100 value: 95.00699999999999 - type: recall_at_1000 value: 96.504 - type: precision_at_1 value: 77.93299999999999 - type: precision_at_3 value: 31.828 - type: precision_at_5 value: 19.727 - type: precision_at_10 value: 10.135 - type: precision_at_20 value: 5.136 - type: precision_at_100 value: 1.049 - type: precision_at_1000 value: 0.109 - type: mrr_at_1 value: 77.9328 - type: mrr_at_3 value: 85.221 - type: mrr_at_5 value: 85.8076 - type: mrr_at_10 value: 86.0963 - type: mrr_at_20 value: 86.1448 - type: mrr_at_100 value: 86.1622 - type: mrr_at_1000 value: 86.1631 - type: nauc_ndcg_at_1_max value: 27.804499999999997 - type: nauc_ndcg_at_1_std value: -31.1045 - type: nauc_ndcg_at_1_diff1 value: 66.6633 - type: nauc_ndcg_at_3_max value: 21.6576 - type: nauc_ndcg_at_3_std value: -24.3372 - type: nauc_ndcg_at_3_diff1 value: 48.9088 - type: nauc_ndcg_at_5_max value: 20.612 - type: nauc_ndcg_at_5_std value: -23.8007 - type: nauc_ndcg_at_5_diff1 value: 48.0635 - type: nauc_ndcg_at_10_max value: 19.6463 - type: nauc_ndcg_at_10_std value: -22.5941 - type: nauc_ndcg_at_10_diff1 value: 47.5561 - type: nauc_ndcg_at_20_max value: 19.5443 - type: nauc_ndcg_at_20_std value: -21.998 - type: nauc_ndcg_at_20_diff1 value: 47.664699999999996 - type: nauc_ndcg_at_100_max value: 19.2285 - type: nauc_ndcg_at_100_std value: -21.6826 - type: nauc_ndcg_at_100_diff1 value: 47.897099999999995 - type: nauc_ndcg_at_1000_max value: 19.5578 - type: nauc_ndcg_at_1000_std value: -21.9412 - type: nauc_ndcg_at_1000_diff1 value: 48.361 - type: nauc_map_at_1_max value: 20.3735 - type: nauc_map_at_1_std value: -24.7274 - type: nauc_map_at_1_diff1 value: 54.148399999999995 - type: nauc_map_at_3_max value: 19.3166 - type: nauc_map_at_3_std value: -23.171 - type: nauc_map_at_3_diff1 value: 48.254000000000005 - type: nauc_map_at_5_max value: 19.158900000000003 - type: nauc_map_at_5_std value: -22.966900000000003 - type: nauc_map_at_5_diff1 value: 48.0877 - type: nauc_map_at_10_max value: 18.8745 - type: nauc_map_at_10_std value: -22.5913 - type: nauc_map_at_10_diff1 value: 47.957899999999995 - type: nauc_map_at_20_max value: 18.895200000000003 - type: nauc_map_at_20_std value: -22.4542 - type: nauc_map_at_20_diff1 value: 48.0047 - type: nauc_map_at_100_max value: 18.8722 - type: nauc_map_at_100_std value: -22.3984 - type: nauc_map_at_100_diff1 value: 48.0394 - type: nauc_map_at_1000_max value: 18.8824 - type: nauc_map_at_1000_std value: -22.4034 - type: nauc_map_at_1000_diff1 value: 48.0533 - type: nauc_recall_at_1_max value: 20.3735 - type: nauc_recall_at_1_std value: -24.7274 - type: nauc_recall_at_1_diff1 value: 54.148399999999995 - type: nauc_recall_at_3_max value: 15.2387 - type: nauc_recall_at_3_std value: -17.3947 - type: nauc_recall_at_3_diff1 value: 30.6589 - type: nauc_recall_at_5_max value: 11.4037 - type: nauc_recall_at_5_std value: -14.3603 - type: nauc_recall_at_5_diff1 value: 23.7356 - type: nauc_recall_at_10_max value: 3.8233 - type: nauc_recall_at_10_std value: -4.6399 - type: nauc_recall_at_10_diff1 value: 13.8514 - type: nauc_recall_at_20_max value: 0.3939 - type: nauc_recall_at_20_std value: 2.4212000000000002 - type: nauc_recall_at_20_diff1 value: 10.110800000000001 - type: nauc_recall_at_100_max value: -8.9768 - type: nauc_recall_at_100_std value: 11.2598 - type: nauc_recall_at_100_diff1 value: 4.6753 - type: 
nauc_recall_at_1000_max value: -13.494800000000001 - type: nauc_recall_at_1000_std value: 17.2306 - type: nauc_recall_at_1000_diff1 value: 0.0856 - type: nauc_precision_at_1_max value: 27.804499999999997 - type: nauc_precision_at_1_std value: -31.1045 - type: nauc_precision_at_1_diff1 value: 66.6633 - type: nauc_precision_at_3_max value: 25.660899999999998 - type: nauc_precision_at_3_std value: -22.0243 - type: nauc_precision_at_3_diff1 value: 34.5966 - type: nauc_precision_at_5_max value: 22.4777 - type: nauc_precision_at_5_std value: -14.9469 - type: nauc_precision_at_5_diff1 value: 20.9233 - type: nauc_precision_at_10_max value: 13.7882 - type: nauc_precision_at_10_std value: -0.1941 - type: nauc_precision_at_10_diff1 value: 2.5737 - type: nauc_precision_at_20_max value: 10.422099999999999 - type: nauc_precision_at_20_std value: 8.518 - type: nauc_precision_at_20_diff1 value: -4.2715000000000005 - type: nauc_precision_at_100_max value: 3.8884000000000003 - type: nauc_precision_at_100_std value: 14.529800000000002 - type: nauc_precision_at_100_diff1 value: -10.066 - type: nauc_precision_at_1000_max value: 5.5056 - type: nauc_precision_at_1000_std value: 10.3948 - type: nauc_precision_at_1000_diff1 value: -9.5234 - type: nauc_mrr_at_1_max value: 27.804499999999997 - type: nauc_mrr_at_1_std value: -31.1045 - type: nauc_mrr_at_1_diff1 value: 66.6633 - type: nauc_mrr_at_3_max value: 30.593500000000002 - type: nauc_mrr_at_3_std value: -31.844499999999996 - type: nauc_mrr_at_3_diff1 value: 63.571 - type: nauc_mrr_at_5_max value: 30.544700000000002 - type: nauc_mrr_at_5_std value: -32.0369 - type: nauc_mrr_at_5_diff1 value: 63.8464 - type: nauc_mrr_at_10_max value: 30.459000000000003 - type: nauc_mrr_at_10_std value: -31.799500000000002 - type: nauc_mrr_at_10_diff1 value: 64.0984 - type: nauc_mrr_at_20_max value: 30.3871 - type: nauc_mrr_at_20_std value: -31.6429 - type: nauc_mrr_at_20_diff1 value: 64.1444 - type: nauc_mrr_at_100_max value: 30.324099999999998 - type: nauc_mrr_at_100_std value: -31.629800000000003 - type: nauc_mrr_at_100_diff1 value: 64.163 - type: nauc_mrr_at_1000_max value: 30.3201 - type: nauc_mrr_at_1000_std value: -31.6352 - type: nauc_mrr_at_1000_diff1 value: 64.1637 - type: main_score value: 84.932 - task: type: Retrieval dataset: name: MTEB FiQA2018 (default) type: mteb/fiqa config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: ndcg_at_1 value: 34.259 - type: ndcg_at_3 value: 32.14 - type: ndcg_at_5 value: 33.391 - type: ndcg_at_10 value: 35.663 - type: ndcg_at_20 value: 38.193 - type: ndcg_at_100 value: 42.232 - type: ndcg_at_1000 value: 45.595 - type: map_at_1 value: 17.124 - type: map_at_3 value: 24.359 - type: map_at_5 value: 26.532 - type: map_at_10 value: 28.183000000000003 - type: map_at_20 value: 29.119 - type: map_at_100 value: 29.881 - type: map_at_1000 value: 30.070000000000004 - type: recall_at_1 value: 17.124 - type: recall_at_3 value: 29.488999999999997 - type: recall_at_5 value: 35.436 - type: recall_at_10 value: 42.665 - type: recall_at_20 value: 50.381 - type: recall_at_100 value: 67.364 - type: recall_at_1000 value: 87.315 - type: precision_at_1 value: 34.259 - type: precision_at_3 value: 21.399 - type: precision_at_5 value: 15.926000000000002 - type: precision_at_10 value: 9.907 - type: precision_at_20 value: 6.026 - type: precision_at_100 value: 1.637 - type: precision_at_1000 value: 0.22599999999999998 - type: mrr_at_1 value: 34.259299999999996 - type: mrr_at_3 value: 40.7922 - type: mrr_at_5 value: 42.1811 - 
type: mrr_at_10 value: 43.1663 - type: mrr_at_20 value: 43.684400000000004 - type: mrr_at_100 value: 44.079 - type: mrr_at_1000 value: 44.1277 - type: nauc_ndcg_at_1_max value: 45.5993 - type: nauc_ndcg_at_1_std value: 4.2730999999999995 - type: nauc_ndcg_at_1_diff1 value: 51.0941 - type: nauc_ndcg_at_3_max value: 38.6082 - type: nauc_ndcg_at_3_std value: 1.7973 - type: nauc_ndcg_at_3_diff1 value: 41.556599999999996 - type: nauc_ndcg_at_5_max value: 37.0326 - type: nauc_ndcg_at_5_std value: 3.5555000000000003 - type: nauc_ndcg_at_5_diff1 value: 41.166599999999995 - type: nauc_ndcg_at_10_max value: 36.8257 - type: nauc_ndcg_at_10_std value: 4.6765 - type: nauc_ndcg_at_10_diff1 value: 40.7039 - type: nauc_ndcg_at_20_max value: 37.9542 - type: nauc_ndcg_at_20_std value: 6.2273000000000005 - type: nauc_ndcg_at_20_diff1 value: 40.7126 - type: nauc_ndcg_at_100_max value: 40.029399999999995 - type: nauc_ndcg_at_100_std value: 8.8925 - type: nauc_ndcg_at_100_diff1 value: 40.8749 - type: nauc_ndcg_at_1000_max value: 41.0995 - type: nauc_ndcg_at_1000_std value: 9.055399999999999 - type: nauc_ndcg_at_1000_diff1 value: 42.0999 - type: nauc_map_at_1_max value: 29.1034 - type: nauc_map_at_1_std value: -1.3329 - type: nauc_map_at_1_diff1 value: 49.6713 - type: nauc_map_at_3_max value: 31.2555 - type: nauc_map_at_3_std value: -1.2727 - type: nauc_map_at_3_diff1 value: 42.8671 - type: nauc_map_at_5_max value: 32.7495 - type: nauc_map_at_5_std value: 0.4463 - type: nauc_map_at_5_diff1 value: 42.3138 - type: nauc_map_at_10_max value: 34.0564 - type: nauc_map_at_10_std value: 1.8785 - type: nauc_map_at_10_diff1 value: 41.9711 - type: nauc_map_at_20_max value: 34.7449 - type: nauc_map_at_20_std value: 2.6273 - type: nauc_map_at_20_diff1 value: 41.9563 - type: nauc_map_at_100_max value: 35.3724 - type: nauc_map_at_100_std value: 3.1910000000000003 - type: nauc_map_at_100_diff1 value: 41.990899999999996 - type: nauc_map_at_1000_max value: 35.4782 - type: nauc_map_at_1000_std value: 3.2302999999999997 - type: nauc_map_at_1000_diff1 value: 42.0484 - type: nauc_recall_at_1_max value: 29.1034 - type: nauc_recall_at_1_std value: -1.3329 - type: nauc_recall_at_1_diff1 value: 49.6713 - type: nauc_recall_at_3_max value: 28.3729 - type: nauc_recall_at_3_std value: 0.0225 - type: nauc_recall_at_3_diff1 value: 35.2655 - type: nauc_recall_at_5_max value: 28.0157 - type: nauc_recall_at_5_std value: 3.5967 - type: nauc_recall_at_5_diff1 value: 31.5507 - type: nauc_recall_at_10_max value: 28.0271 - type: nauc_recall_at_10_std value: 6.7875000000000005 - type: nauc_recall_at_10_diff1 value: 28.3267 - type: nauc_recall_at_20_max value: 30.2764 - type: nauc_recall_at_20_std value: 11.2697 - type: nauc_recall_at_20_diff1 value: 27.5277 - type: nauc_recall_at_100_max value: 33.2215 - type: nauc_recall_at_100_std value: 23.6362 - type: nauc_recall_at_100_diff1 value: 23.1851 - type: nauc_recall_at_1000_max value: 41.8199 - type: nauc_recall_at_1000_std value: 42.2866 - type: nauc_recall_at_1000_diff1 value: 29.341099999999997 - type: nauc_precision_at_1_max value: 45.5993 - type: nauc_precision_at_1_std value: 4.2730999999999995 - type: nauc_precision_at_1_diff1 value: 51.0941 - type: nauc_precision_at_3_max value: 40.541 - type: nauc_precision_at_3_std value: 3.6046 - type: nauc_precision_at_3_diff1 value: 29.2879 - type: nauc_precision_at_5_max value: 40.4116 - type: nauc_precision_at_5_std value: 9.523 - type: nauc_precision_at_5_diff1 value: 24.9572 - type: nauc_precision_at_10_max value: 39.7377 - type: nauc_precision_at_10_std 
value: 11.8076 - type: nauc_precision_at_10_diff1 value: 21.1979 - type: nauc_precision_at_20_max value: 40.1851 - type: nauc_precision_at_20_std value: 14.967 - type: nauc_precision_at_20_diff1 value: 19.0881 - type: nauc_precision_at_100_max value: 39.4474 - type: nauc_precision_at_100_std value: 19.6785 - type: nauc_precision_at_100_diff1 value: 12.6951 - type: nauc_precision_at_1000_max value: 32.071600000000004 - type: nauc_precision_at_1000_std value: 14.7899 - type: nauc_precision_at_1000_diff1 value: 7.456599999999999 - type: nauc_mrr_at_1_max value: 45.5993 - type: nauc_mrr_at_1_std value: 4.2730999999999995 - type: nauc_mrr_at_1_diff1 value: 51.0941 - type: nauc_mrr_at_3_max value: 45.5586 - type: nauc_mrr_at_3_std value: 5.6932 - type: nauc_mrr_at_3_diff1 value: 47.1359 - type: nauc_mrr_at_5_max value: 45.0408 - type: nauc_mrr_at_5_std value: 6.4838000000000005 - type: nauc_mrr_at_5_diff1 value: 46.4912 - type: nauc_mrr_at_10_max value: 44.9499 - type: nauc_mrr_at_10_std value: 6.6139 - type: nauc_mrr_at_10_diff1 value: 46.332699999999996 - type: nauc_mrr_at_20_max value: 45.063900000000004 - type: nauc_mrr_at_20_std value: 6.6114999999999995 - type: nauc_mrr_at_20_diff1 value: 46.3181 - type: nauc_mrr_at_100_max value: 45.2249 - type: nauc_mrr_at_100_std value: 6.8897 - type: nauc_mrr_at_100_diff1 value: 46.373799999999996 - type: nauc_mrr_at_1000_max value: 45.2235 - type: nauc_mrr_at_1000_std value: 6.8732 - type: nauc_mrr_at_1000_diff1 value: 46.399699999999996 - type: main_score value: 35.663 - task: type: Retrieval dataset: name: MTEB HotpotQA (default) type: mteb/hotpotqa config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: ndcg_at_1 value: 75.908 - type: ndcg_at_3 value: 57.643 - type: ndcg_at_5 value: 59.689 - type: ndcg_at_10 value: 61.513 - type: ndcg_at_20 value: 62.721000000000004 - type: ndcg_at_100 value: 64.57000000000001 - type: ndcg_at_1000 value: 65.981 - type: map_at_1 value: 37.954 - type: map_at_3 value: 49.424 - type: map_at_5 value: 50.99399999999999 - type: map_at_10 value: 52.066 - type: map_at_20 value: 52.54600000000001 - type: map_at_100 value: 52.910000000000004 - type: map_at_1000 value: 52.981 - type: recall_at_1 value: 37.954 - type: recall_at_3 value: 53.201 - type: recall_at_5 value: 57.232000000000006 - type: recall_at_10 value: 61.82299999999999 - type: recall_at_20 value: 65.692 - type: recall_at_100 value: 73.896 - type: recall_at_1000 value: 83.255 - type: precision_at_1 value: 75.908 - type: precision_at_3 value: 35.467 - type: precision_at_5 value: 22.893 - type: precision_at_10 value: 12.365 - type: precision_at_20 value: 6.569 - type: precision_at_100 value: 1.478 - type: precision_at_1000 value: 0.167 - type: mrr_at_1 value: 75.90820000000001 - type: mrr_at_3 value: 80.5717 - type: mrr_at_5 value: 81.15299999999999 - type: mrr_at_10 value: 81.4709 - type: mrr_at_20 value: 81.6082 - type: mrr_at_100 value: 81.69239999999999 - type: mrr_at_1000 value: 81.7034 - type: nauc_ndcg_at_1_max value: 53.456199999999995 - type: nauc_ndcg_at_1_std value: -7.1338 - type: nauc_ndcg_at_1_diff1 value: 72.2296 - type: nauc_ndcg_at_3_max value: 30.760199999999998 - type: nauc_ndcg_at_3_std value: -3.1088999999999998 - type: nauc_ndcg_at_3_diff1 value: 29.957099999999997 - type: nauc_ndcg_at_5_max value: 29.404000000000003 - type: nauc_ndcg_at_5_std value: -1.8713 - type: nauc_ndcg_at_5_diff1 value: 27.3461 - type: nauc_ndcg_at_10_max value: 28.0841 - type: nauc_ndcg_at_10_std value: -0.8572 - type: 
nauc_ndcg_at_10_diff1 value: 25.1934 - type: nauc_ndcg_at_20_max value: 27.581099999999996 - type: nauc_ndcg_at_20_std value: -0.1989 - type: nauc_ndcg_at_20_diff1 value: 24.3724 - type: nauc_ndcg_at_100_max value: 27.0287 - type: nauc_ndcg_at_100_std value: 0.7972 - type: nauc_ndcg_at_100_diff1 value: 23.6936 - type: nauc_ndcg_at_1000_max value: 27.070800000000002 - type: nauc_ndcg_at_1000_std value: 0.8108000000000001 - type: nauc_ndcg_at_1000_diff1 value: 24.0546 - type: nauc_map_at_1_max value: 53.456199999999995 - type: nauc_map_at_1_std value: -7.1338 - type: nauc_map_at_1_diff1 value: 72.2296 - type: nauc_map_at_3_max value: 26.085199999999997 - type: nauc_map_at_3_std value: -3.3792999999999997 - type: nauc_map_at_3_diff1 value: 23.335900000000002 - type: nauc_map_at_5_max value: 25.2911 - type: nauc_map_at_5_std value: -2.6356 - type: nauc_map_at_5_diff1 value: 21.7569 - type: nauc_map_at_10_max value: 24.5926 - type: nauc_map_at_10_std value: -2.1178 - type: nauc_map_at_10_diff1 value: 20.6735 - type: nauc_map_at_20_max value: 24.479400000000002 - type: nauc_map_at_20_std value: -1.8454000000000002 - type: nauc_map_at_20_diff1 value: 20.4617 - type: nauc_map_at_100_max value: 24.390600000000003 - type: nauc_map_at_100_std value: -1.6625999999999999 - type: nauc_map_at_100_diff1 value: 20.3774 - type: nauc_map_at_1000_max value: 24.387900000000002 - type: nauc_map_at_1000_std value: -1.6534 - type: nauc_map_at_1000_diff1 value: 20.3887 - type: nauc_recall_at_1_max value: 53.456199999999995 - type: nauc_recall_at_1_std value: -7.1338 - type: nauc_recall_at_1_diff1 value: 72.2296 - type: nauc_recall_at_3_max value: 22.2324 - type: nauc_recall_at_3_std value: -1.4433 - type: nauc_recall_at_3_diff1 value: 14.944799999999999 - type: nauc_recall_at_5_max value: 19.1126 - type: nauc_recall_at_5_std value: 0.9252 - type: nauc_recall_at_5_diff1 value: 9.6723 - type: nauc_recall_at_10_max value: 15.4048 - type: nauc_recall_at_10_std value: 3.3196000000000003 - type: nauc_recall_at_10_diff1 value: 4.2059 - type: nauc_recall_at_20_max value: 12.7643 - type: nauc_recall_at_20_std value: 5.431699999999999 - type: nauc_recall_at_20_diff1 value: 0.46880000000000005 - type: nauc_recall_at_100_max value: 7.538 - type: nauc_recall_at_100_std value: 10.5696 - type: nauc_recall_at_100_diff1 value: -6.472300000000001 - type: nauc_recall_at_1000_max value: 1.7873 - type: nauc_recall_at_1000_std value: 13.6112 - type: nauc_recall_at_1000_diff1 value: -13.081000000000001 - type: nauc_precision_at_1_max value: 53.456199999999995 - type: nauc_precision_at_1_std value: -7.1338 - type: nauc_precision_at_1_diff1 value: 72.2296 - type: nauc_precision_at_3_max value: 22.2324 - type: nauc_precision_at_3_std value: -1.4433 - type: nauc_precision_at_3_diff1 value: 14.944799999999999 - type: nauc_precision_at_5_max value: 19.1126 - type: nauc_precision_at_5_std value: 0.9252 - type: nauc_precision_at_5_diff1 value: 9.6723 - type: nauc_precision_at_10_max value: 15.4048 - type: nauc_precision_at_10_std value: 3.3196000000000003 - type: nauc_precision_at_10_diff1 value: 4.2059 - type: nauc_precision_at_20_max value: 12.7643 - type: nauc_precision_at_20_std value: 5.431699999999999 - type: nauc_precision_at_20_diff1 value: 0.46880000000000005 - type: nauc_precision_at_100_max value: 7.538 - type: nauc_precision_at_100_std value: 10.5696 - type: nauc_precision_at_100_diff1 value: -6.472300000000001 - type: nauc_precision_at_1000_max value: 1.7873 - type: nauc_precision_at_1000_std value: 13.6112 - type: 
nauc_precision_at_1000_diff1 value: -13.081000000000001 - type: nauc_mrr_at_1_max value: 53.456199999999995 - type: nauc_mrr_at_1_std value: -7.1338 - type: nauc_mrr_at_1_diff1 value: 72.2296 - type: nauc_mrr_at_3_max value: 54.94369999999999 - type: nauc_mrr_at_3_std value: -5.0057 - type: nauc_mrr_at_3_diff1 value: 69.6774 - type: nauc_mrr_at_5_max value: 54.970699999999994 - type: nauc_mrr_at_5_std value: -4.3104000000000005 - type: nauc_mrr_at_5_diff1 value: 69.4618 - type: nauc_mrr_at_10_max value: 55.01970000000001 - type: nauc_mrr_at_10_std value: -4.0596 - type: nauc_mrr_at_10_diff1 value: 69.435 - type: nauc_mrr_at_20_max value: 54.9824 - type: nauc_mrr_at_20_std value: -4.1227 - type: nauc_mrr_at_20_diff1 value: 69.4712 - type: nauc_mrr_at_100_max value: 54.9588 - type: nauc_mrr_at_100_std value: -4.1325 - type: nauc_mrr_at_100_diff1 value: 69.498 - type: nauc_mrr_at_1000_max value: 54.95179999999999 - type: nauc_mrr_at_1000_std value: -4.1442 - type: nauc_mrr_at_1000_diff1 value: 69.503 - type: main_score value: 61.513 - task: type: Classification dataset: name: MTEB ImdbClassification (default) type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 63.0232 - type: f1 value: 62.8137 - type: f1_weighted value: 62.8137 - type: ap value: 58.377199999999995 - type: ap_weighted value: 58.377199999999995 - type: main_score value: 63.0232 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (ar) type: miracl/mmteb-miracl config: ar split: dev revision: main metrics: - type: ndcg_at_1 value: 57.459 - type: ndcg_at_3 value: 58.162000000000006 - type: ndcg_at_5 value: 60.831 - type: ndcg_at_10 value: 64.238 - type: ndcg_at_20 value: 66.455 - type: ndcg_at_100 value: 68.67 - type: ndcg_at_1000 value: 69.51 - type: map_at_1 value: 38.064 - type: map_at_3 value: 51.217999999999996 - type: map_at_5 value: 54.364999999999995 - type: map_at_10 value: 56.589999999999996 - type: map_at_20 value: 57.545 - type: map_at_100 value: 58.06400000000001 - type: map_at_1000 value: 58.111999999999995 - type: recall_at_1 value: 38.064 - type: recall_at_3 value: 58.618 - type: recall_at_5 value: 66.353 - type: recall_at_10 value: 75.098 - type: recall_at_20 value: 81.978 - type: recall_at_100 value: 91.203 - type: recall_at_1000 value: 96.706 - type: precision_at_1 value: 57.459 - type: precision_at_3 value: 32.965 - type: precision_at_5 value: 23.405 - type: precision_at_10 value: 13.816 - type: precision_at_20 value: 7.742 - type: precision_at_100 value: 1.7739999999999998 - type: precision_at_1000 value: 0.189 - type: mrr_at_1 value: 57.458600000000004 - type: mrr_at_3 value: 65.4523 - type: mrr_at_5 value: 66.6506 - type: mrr_at_10 value: 67.48100000000001 - type: mrr_at_20 value: 67.7522 - type: mrr_at_100 value: 67.88419999999999 - type: mrr_at_1000 value: 67.8972 - type: nauc_ndcg_at_1_max value: 38.2614 - type: nauc_ndcg_at_1_std value: 1.0798999999999999 - type: nauc_ndcg_at_1_diff1 value: 44.3159 - type: nauc_ndcg_at_3_max value: 35.7658 - type: nauc_ndcg_at_3_std value: -3.9097 - type: nauc_ndcg_at_3_diff1 value: 36.8009 - type: nauc_ndcg_at_5_max value: 37.7543 - type: nauc_ndcg_at_5_std value: -2.7727999999999997 - type: nauc_ndcg_at_5_diff1 value: 36.8992 - type: nauc_ndcg_at_10_max value: 39.9339 - type: nauc_ndcg_at_10_std value: -0.2843 - type: nauc_ndcg_at_10_diff1 value: 36.7359 - type: nauc_ndcg_at_20_max value: 40.9231 - type: nauc_ndcg_at_20_std value: 1.5467 - type: nauc_ndcg_at_20_diff1 value: 36.5693 - 
type: nauc_ndcg_at_100_max value: 41.554 - type: nauc_ndcg_at_100_std value: 3.7470999999999997 - type: nauc_ndcg_at_100_diff1 value: 36.6323 - type: nauc_ndcg_at_1000_max value: 41.1969 - type: nauc_ndcg_at_1000_std value: 2.9972 - type: nauc_ndcg_at_1000_diff1 value: 37.1419 - type: nauc_map_at_1_max value: 21.1612 - type: nauc_map_at_1_std value: -11.2901 - type: nauc_map_at_1_diff1 value: 43.8572 - type: nauc_map_at_3_max value: 31.0197 - type: nauc_map_at_3_std value: -7.5985 - type: nauc_map_at_3_diff1 value: 38.0396 - type: nauc_map_at_5_max value: 33.8261 - type: nauc_map_at_5_std value: -5.501 - type: nauc_map_at_5_diff1 value: 37.2243 - type: nauc_map_at_10_max value: 35.5222 - type: nauc_map_at_10_std value: -3.7351 - type: nauc_map_at_10_diff1 value: 36.8849 - type: nauc_map_at_20_max value: 36.0478 - type: nauc_map_at_20_std value: -2.9566 - type: nauc_map_at_20_diff1 value: 36.7755 - type: nauc_map_at_100_max value: 36.256 - type: nauc_map_at_100_std value: -2.455 - type: nauc_map_at_100_diff1 value: 36.778800000000004 - type: nauc_map_at_1000_max value: 36.249900000000004 - type: nauc_map_at_1000_std value: -2.4678999999999998 - type: nauc_map_at_1000_diff1 value: 36.7962 - type: nauc_recall_at_1_max value: 21.1612 - type: nauc_recall_at_1_std value: -11.2901 - type: nauc_recall_at_1_diff1 value: 43.8572 - type: nauc_recall_at_3_max value: 30.1126 - type: nauc_recall_at_3_std value: -8.705499999999999 - type: nauc_recall_at_3_diff1 value: 33.0274 - type: nauc_recall_at_5_max value: 35.5301 - type: nauc_recall_at_5_std value: -4.1692 - type: nauc_recall_at_5_diff1 value: 30.693900000000003 - type: nauc_recall_at_10_max value: 41.431200000000004 - type: nauc_recall_at_10_std value: 3.1441999999999997 - type: nauc_recall_at_10_diff1 value: 28.5864 - type: nauc_recall_at_20_max value: 46.097100000000005 - type: nauc_recall_at_20_std value: 10.93 - type: nauc_recall_at_20_diff1 value: 26.930100000000003 - type: nauc_recall_at_100_max value: 58.3395 - type: nauc_recall_at_100_std value: 40.328599999999994 - type: nauc_recall_at_100_diff1 value: 21.9273 - type: nauc_recall_at_1000_max value: 72.4689 - type: nauc_recall_at_1000_std value: 59.1972 - type: nauc_recall_at_1000_diff1 value: 27.697899999999997 - type: nauc_precision_at_1_max value: 38.2614 - type: nauc_precision_at_1_std value: 1.0798999999999999 - type: nauc_precision_at_1_diff1 value: 44.3159 - type: nauc_precision_at_3_max value: 35.755700000000004 - type: nauc_precision_at_3_std value: 11.9015 - type: nauc_precision_at_3_diff1 value: 8.3107 - type: nauc_precision_at_5_max value: 33.9849 - type: nauc_precision_at_5_std value: 16.7448 - type: nauc_precision_at_5_diff1 value: 0.6217999999999999 - type: nauc_precision_at_10_max value: 29.9323 - type: nauc_precision_at_10_std value: 21.601100000000002 - type: nauc_precision_at_10_diff1 value: -5.758900000000001 - type: nauc_precision_at_20_max value: 26.142100000000003 - type: nauc_precision_at_20_std value: 25.1079 - type: nauc_precision_at_20_diff1 value: -9.9798 - type: nauc_precision_at_100_max value: 19.456100000000003 - type: nauc_precision_at_100_std value: 28.674899999999997 - type: nauc_precision_at_100_diff1 value: -14.6005 - type: nauc_precision_at_1000_max value: 14.49 - type: nauc_precision_at_1000_std value: 25.480399999999996 - type: nauc_precision_at_1000_diff1 value: -15.570899999999998 - type: nauc_mrr_at_1_max value: 38.2614 - type: nauc_mrr_at_1_std value: 1.0798999999999999 - type: nauc_mrr_at_1_diff1 value: 44.3159 - type: nauc_mrr_at_3_max value: 
42.2344 - type: nauc_mrr_at_3_std value: 1.9994 - type: nauc_mrr_at_3_diff1 value: 41.5794 - type: nauc_mrr_at_5_max value: 42.9754 - type: nauc_mrr_at_5_std value: 2.8443 - type: nauc_mrr_at_5_diff1 value: 41.5702 - type: nauc_mrr_at_10_max value: 43.0856 - type: nauc_mrr_at_10_std value: 3.1882 - type: nauc_mrr_at_10_diff1 value: 41.6792 - type: nauc_mrr_at_20_max value: 42.972300000000004 - type: nauc_mrr_at_20_std value: 3.2651 - type: nauc_mrr_at_20_diff1 value: 41.6405 - type: nauc_mrr_at_100_max value: 42.945499999999996 - type: nauc_mrr_at_100_std value: 3.3168 - type: nauc_mrr_at_100_diff1 value: 41.6818 - type: nauc_mrr_at_1000_max value: 42.9332 - type: nauc_mrr_at_1000_std value: 3.3009999999999997 - type: nauc_mrr_at_1000_diff1 value: 41.6879 - type: main_score value: 64.238 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (bn) type: miracl/mmteb-miracl config: bn split: dev revision: main metrics: - type: ndcg_at_1 value: 60.341 - type: ndcg_at_3 value: 60.805 - type: ndcg_at_5 value: 64.486 - type: ndcg_at_10 value: 68.05499999999999 - type: ndcg_at_20 value: 69.914 - type: ndcg_at_100 value: 72.00800000000001 - type: ndcg_at_1000 value: 72.71600000000001 - type: map_at_1 value: 37.948 - type: map_at_3 value: 52.89 - type: map_at_5 value: 56.845 - type: map_at_10 value: 59.329 - type: map_at_20 value: 60.158 - type: map_at_100 value: 60.73 - type: map_at_1000 value: 60.778 - type: recall_at_1 value: 37.948 - type: recall_at_3 value: 61.095 - type: recall_at_5 value: 71.316 - type: recall_at_10 value: 80.609 - type: recall_at_20 value: 86.141 - type: recall_at_100 value: 94.305 - type: recall_at_1000 value: 98.625 - type: precision_at_1 value: 60.341 - type: precision_at_3 value: 36.172 - type: precision_at_5 value: 26.277 - type: precision_at_10 value: 15.595999999999998 - type: precision_at_20 value: 8.552 - type: precision_at_100 value: 1.9539999999999997 - type: precision_at_1000 value: 0.207 - type: mrr_at_1 value: 60.3406 - type: mrr_at_3 value: 68.8564 - type: mrr_at_5 value: 70.51089999999999 - type: mrr_at_10 value: 71.3043 - type: mrr_at_20 value: 71.5148 - type: mrr_at_100 value: 71.5779 - type: mrr_at_1000 value: 71.5857 - type: nauc_ndcg_at_1_max value: 39.480900000000005 - type: nauc_ndcg_at_1_std value: 4.66 - type: nauc_ndcg_at_1_diff1 value: 43.4568 - type: nauc_ndcg_at_3_max value: 34.6544 - type: nauc_ndcg_at_3_std value: -1.7936 - type: nauc_ndcg_at_3_diff1 value: 39.1951 - type: nauc_ndcg_at_5_max value: 36.9934 - type: nauc_ndcg_at_5_std value: -1.427 - type: nauc_ndcg_at_5_diff1 value: 39.6396 - type: nauc_ndcg_at_10_max value: 38.9518 - type: nauc_ndcg_at_10_std value: 0.1574 - type: nauc_ndcg_at_10_diff1 value: 37.6783 - type: nauc_ndcg_at_20_max value: 38.5914 - type: nauc_ndcg_at_20_std value: 1.8135999999999999 - type: nauc_ndcg_at_20_diff1 value: 38.063 - type: nauc_ndcg_at_100_max value: 40.2409 - type: nauc_ndcg_at_100_std value: 5.0953 - type: nauc_ndcg_at_100_diff1 value: 38.5175 - type: nauc_ndcg_at_1000_max value: 39.9212 - type: nauc_ndcg_at_1000_std value: 4.5499 - type: nauc_ndcg_at_1000_diff1 value: 38.6193 - type: nauc_map_at_1_max value: 17.9005 - type: nauc_map_at_1_std value: -15.587699999999998 - type: nauc_map_at_1_diff1 value: 48.1378 - type: nauc_map_at_3_max value: 28.119300000000003 - type: nauc_map_at_3_std value: -11.3599 - type: nauc_map_at_3_diff1 value: 41.3327 - type: nauc_map_at_5_max value: 32.3026 - type: nauc_map_at_5_std value: -7.741499999999999 - type: nauc_map_at_5_diff1 value: 40.5989 - type: 
nauc_map_at_10_max value: 33.8864 - type: nauc_map_at_10_std value: -5.6699 - type: nauc_map_at_10_diff1 value: 39.586 - type: nauc_map_at_20_max value: 34.0193 - type: nauc_map_at_20_std value: -4.6238 - type: nauc_map_at_20_diff1 value: 39.7785 - type: nauc_map_at_100_max value: 34.475699999999996 - type: nauc_map_at_100_std value: -3.6669 - type: nauc_map_at_100_diff1 value: 39.8911 - type: nauc_map_at_1000_max value: 34.4983 - type: nauc_map_at_1000_std value: -3.6664000000000003 - type: nauc_map_at_1000_diff1 value: 39.9015 - type: nauc_recall_at_1_max value: 17.9005 - type: nauc_recall_at_1_std value: -15.587699999999998 - type: nauc_recall_at_1_diff1 value: 48.1378 - type: nauc_recall_at_3_max value: 27.0807 - type: nauc_recall_at_3_std value: -10.071 - type: nauc_recall_at_3_diff1 value: 35.7245 - type: nauc_recall_at_5_max value: 32.561499999999995 - type: nauc_recall_at_5_std value: -7.4364 - type: nauc_recall_at_5_diff1 value: 32.2967 - type: nauc_recall_at_10_max value: 36.9998 - type: nauc_recall_at_10_std value: -1.9453000000000003 - type: nauc_recall_at_10_diff1 value: 23.9665 - type: nauc_recall_at_20_max value: 34.0415 - type: nauc_recall_at_20_std value: 3.2483999999999997 - type: nauc_recall_at_20_diff1 value: 22.3991 - type: nauc_recall_at_100_max value: 52.1359 - type: nauc_recall_at_100_std value: 39.305299999999995 - type: nauc_recall_at_100_diff1 value: 17.8559 - type: nauc_recall_at_1000_max value: 53.5217 - type: nauc_recall_at_1000_std value: 78.536 - type: nauc_recall_at_1000_diff1 value: -24.390600000000003 - type: nauc_precision_at_1_max value: 39.480900000000005 - type: nauc_precision_at_1_std value: 4.66 - type: nauc_precision_at_1_diff1 value: 43.4568 - type: nauc_precision_at_3_max value: 38.954499999999996 - type: nauc_precision_at_3_std value: 21.0387 - type: nauc_precision_at_3_diff1 value: 4.625900000000001 - type: nauc_precision_at_5_max value: 38.8673 - type: nauc_precision_at_5_std value: 31.512800000000002 - type: nauc_precision_at_5_diff1 value: -4.147399999999999 - type: nauc_precision_at_10_max value: 32.7684 - type: nauc_precision_at_10_std value: 36.237700000000004 - type: nauc_precision_at_10_diff1 value: -13.6404 - type: nauc_precision_at_20_max value: 26.0982 - type: nauc_precision_at_20_std value: 38.5385 - type: nauc_precision_at_20_diff1 value: -16.3735 - type: nauc_precision_at_100_max value: 20.8957 - type: nauc_precision_at_100_std value: 42.1707 - type: nauc_precision_at_100_diff1 value: -18.7092 - type: nauc_precision_at_1000_max value: 17.1788 - type: nauc_precision_at_1000_std value: 39.5064 - type: nauc_precision_at_1000_diff1 value: -20.671400000000002 - type: nauc_mrr_at_1_max value: 39.480900000000005 - type: nauc_mrr_at_1_std value: 4.66 - type: nauc_mrr_at_1_diff1 value: 43.4568 - type: nauc_mrr_at_3_max value: 44.2708 - type: nauc_mrr_at_3_std value: 11.021799999999999 - type: nauc_mrr_at_3_diff1 value: 41.6187 - type: nauc_mrr_at_5_max value: 44.9277 - type: nauc_mrr_at_5_std value: 11.3479 - type: nauc_mrr_at_5_diff1 value: 41.14 - type: nauc_mrr_at_10_max value: 44.6467 - type: nauc_mrr_at_10_std value: 11.3277 - type: nauc_mrr_at_10_diff1 value: 40.5017 - type: nauc_mrr_at_20_max value: 44.298 - type: nauc_mrr_at_20_std value: 11.0061 - type: nauc_mrr_at_20_diff1 value: 40.6235 - type: nauc_mrr_at_100_max value: 44.2517 - type: nauc_mrr_at_100_std value: 10.9246 - type: nauc_mrr_at_100_diff1 value: 40.7234 - type: nauc_mrr_at_1000_max value: 44.241 - type: nauc_mrr_at_1000_std value: 10.9113 - type: 
nauc_mrr_at_1000_diff1 value: 40.7358 - type: main_score value: 68.05499999999999 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (de) type: miracl/mmteb-miracl config: de split: dev revision: main metrics: - type: ndcg_at_1 value: 45.574 - type: ndcg_at_3 value: 41.243 - type: ndcg_at_5 value: 43.86 - type: ndcg_at_10 value: 48.123 - type: ndcg_at_20 value: 51.785000000000004 - type: ndcg_at_100 value: 56.04900000000001 - type: ndcg_at_1000 value: 57.979 - type: map_at_1 value: 20.401 - type: map_at_3 value: 31.308000000000003 - type: map_at_5 value: 35.356 - type: map_at_10 value: 38.24 - type: map_at_20 value: 39.879 - type: map_at_100 value: 40.979 - type: map_at_1000 value: 41.103 - type: recall_at_1 value: 20.401 - type: recall_at_3 value: 36.573 - type: recall_at_5 value: 47.495 - type: recall_at_10 value: 58.779 - type: recall_at_20 value: 69.06099999999999 - type: recall_at_100 value: 85.84 - type: recall_at_1000 value: 97.36399999999999 - type: precision_at_1 value: 45.574 - type: precision_at_3 value: 30.055 - type: precision_at_5 value: 23.344 - type: precision_at_10 value: 14.754000000000001 - type: precision_at_20 value: 9.033 - type: precision_at_100 value: 2.275 - type: precision_at_1000 value: 0.258 - type: mrr_at_1 value: 45.5738 - type: mrr_at_3 value: 52.18580000000001 - type: mrr_at_5 value: 54.5628 - type: mrr_at_10 value: 55.604699999999994 - type: mrr_at_20 value: 55.9833 - type: mrr_at_100 value: 56.2015 - type: mrr_at_1000 value: 56.2431 - type: nauc_ndcg_at_1_max value: 48.355 - type: nauc_ndcg_at_1_std value: 15.508 - type: nauc_ndcg_at_1_diff1 value: 42.6569 - type: nauc_ndcg_at_3_max value: 45.5945 - type: nauc_ndcg_at_3_std value: 16.6953 - type: nauc_ndcg_at_3_diff1 value: 38.6081 - type: nauc_ndcg_at_5_max value: 43.3231 - type: nauc_ndcg_at_5_std value: 14.394100000000002 - type: nauc_ndcg_at_5_diff1 value: 38.846799999999995 - type: nauc_ndcg_at_10_max value: 44.0599 - type: nauc_ndcg_at_10_std value: 16.0584 - type: nauc_ndcg_at_10_diff1 value: 38.2432 - type: nauc_ndcg_at_20_max value: 45.8588 - type: nauc_ndcg_at_20_std value: 17.531 - type: nauc_ndcg_at_20_diff1 value: 38.982099999999996 - type: nauc_ndcg_at_100_max value: 48.7095 - type: nauc_ndcg_at_100_std value: 20.7655 - type: nauc_ndcg_at_100_diff1 value: 39.7349 - type: nauc_ndcg_at_1000_max value: 48.024499999999996 - type: nauc_ndcg_at_1000_std value: 20.1299 - type: nauc_ndcg_at_1000_diff1 value: 39.8087 - type: nauc_map_at_1_max value: 30.0998 - type: nauc_map_at_1_std value: 4.7429 - type: nauc_map_at_1_diff1 value: 45.4045 - type: nauc_map_at_3_max value: 39.053399999999996 - type: nauc_map_at_3_std value: 10.807 - type: nauc_map_at_3_diff1 value: 40.8294 - type: nauc_map_at_5_max value: 39.204499999999996 - type: nauc_map_at_5_std value: 11.5165 - type: nauc_map_at_5_diff1 value: 38.9168 - type: nauc_map_at_10_max value: 41.099799999999995 - type: nauc_map_at_10_std value: 13.758899999999999 - type: nauc_map_at_10_diff1 value: 38.2256 - type: nauc_map_at_20_max value: 42.2131 - type: nauc_map_at_20_std value: 14.366000000000001 - type: nauc_map_at_20_diff1 value: 38.572 - type: nauc_map_at_100_max value: 43.0508 - type: nauc_map_at_100_std value: 15.060100000000002 - type: nauc_map_at_100_diff1 value: 38.9831 - type: nauc_map_at_1000_max value: 43.048700000000004 - type: nauc_map_at_1000_std value: 15.085999999999999 - type: nauc_map_at_1000_diff1 value: 38.9957 - type: nauc_recall_at_1_max value: 30.0998 - type: nauc_recall_at_1_std value: 4.7429 - type: 
nauc_recall_at_1_diff1 value: 45.4045 - type: nauc_recall_at_3_max value: 36.9204 - type: nauc_recall_at_3_std value: 11.2734 - type: nauc_recall_at_3_diff1 value: 37.431 - type: nauc_recall_at_5_max value: 33.4392 - type: nauc_recall_at_5_std value: 9.4283 - type: nauc_recall_at_5_diff1 value: 32.7815 - type: nauc_recall_at_10_max value: 34.427099999999996 - type: nauc_recall_at_10_std value: 13.147400000000001 - type: nauc_recall_at_10_diff1 value: 29.394199999999998 - type: nauc_recall_at_20_max value: 36.8459 - type: nauc_recall_at_20_std value: 16.1323 - type: nauc_recall_at_20_diff1 value: 29.9502 - type: nauc_recall_at_100_max value: 56.360600000000005 - type: nauc_recall_at_100_std value: 40.8465 - type: nauc_recall_at_100_diff1 value: 33.2542 - type: nauc_recall_at_1000_max value: 62.121 - type: nauc_recall_at_1000_std value: 65.4518 - type: nauc_recall_at_1000_diff1 value: 23.9221 - type: nauc_precision_at_1_max value: 48.355 - type: nauc_precision_at_1_std value: 15.508 - type: nauc_precision_at_1_diff1 value: 42.6569 - type: nauc_precision_at_3_max value: 46.72 - type: nauc_precision_at_3_std value: 21.5057 - type: nauc_precision_at_3_diff1 value: 23.3313 - type: nauc_precision_at_5_max value: 39.5888 - type: nauc_precision_at_5_std value: 20.930699999999998 - type: nauc_precision_at_5_diff1 value: 15.661900000000001 - type: nauc_precision_at_10_max value: 37.8371 - type: nauc_precision_at_10_std value: 25.2882 - type: nauc_precision_at_10_diff1 value: 8.7263 - type: nauc_precision_at_20_max value: 34.7638 - type: nauc_precision_at_20_std value: 25.795800000000003 - type: nauc_precision_at_20_diff1 value: 5.5533 - type: nauc_precision_at_100_max value: 31.1513 - type: nauc_precision_at_100_std value: 28.7441 - type: nauc_precision_at_100_diff1 value: -0.2107 - type: nauc_precision_at_1000_max value: 24.329700000000003 - type: nauc_precision_at_1000_std value: 27.4593 - type: nauc_precision_at_1000_diff1 value: -5.1174 - type: nauc_mrr_at_1_max value: 48.355 - type: nauc_mrr_at_1_std value: 15.508 - type: nauc_mrr_at_1_diff1 value: 42.6569 - type: nauc_mrr_at_3_max value: 50.1901 - type: nauc_mrr_at_3_std value: 17.6811 - type: nauc_mrr_at_3_diff1 value: 42.7492 - type: nauc_mrr_at_5_max value: 50.210699999999996 - type: nauc_mrr_at_5_std value: 17.4661 - type: nauc_mrr_at_5_diff1 value: 42.9336 - type: nauc_mrr_at_10_max value: 49.9472 - type: nauc_mrr_at_10_std value: 17.3815 - type: nauc_mrr_at_10_diff1 value: 42.4177 - type: nauc_mrr_at_20_max value: 49.9918 - type: nauc_mrr_at_20_std value: 17.7321 - type: nauc_mrr_at_20_diff1 value: 42.5105 - type: nauc_mrr_at_100_max value: 49.9862 - type: nauc_mrr_at_100_std value: 17.7582 - type: nauc_mrr_at_100_diff1 value: 42.5947 - type: nauc_mrr_at_1000_max value: 49.9819 - type: nauc_mrr_at_1000_std value: 17.7188 - type: nauc_mrr_at_1000_diff1 value: 42.620000000000005 - type: main_score value: 48.123 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (en) type: miracl/mmteb-miracl config: en split: dev revision: main metrics: - type: ndcg_at_1 value: 45.556999999999995 - type: ndcg_at_3 value: 43.969 - type: ndcg_at_5 value: 45.551 - type: ndcg_at_10 value: 49.372 - type: ndcg_at_20 value: 52.86300000000001 - type: ndcg_at_100 value: 57.28 - type: ndcg_at_1000 value: 59.187 - type: map_at_1 value: 21.785 - type: map_at_3 value: 32.679 - type: map_at_5 value: 35.885 - type: map_at_10 value: 38.836 - type: map_at_20 value: 40.425 - type: map_at_100 value: 41.592 - type: map_at_1000 value: 41.749 - type: recall_at_1 value: 
21.785 - type: recall_at_3 value: 40.403 - type: recall_at_5 value: 48.498999999999995 - type: recall_at_10 value: 59.513000000000005 - type: recall_at_20 value: 69.357 - type: recall_at_100 value: 85.785 - type: recall_at_1000 value: 96.041 - type: precision_at_1 value: 45.556999999999995 - type: precision_at_3 value: 30.287999999999997 - type: precision_at_5 value: 23.204 - type: precision_at_10 value: 15.006 - type: precision_at_20 value: 9.118 - type: precision_at_100 value: 2.404 - type: precision_at_1000 value: 0.27799999999999997 - type: mrr_at_1 value: 45.5569 - type: mrr_at_3 value: 55.4234 - type: mrr_at_5 value: 57.3884 - type: mrr_at_10 value: 58.391400000000004 - type: mrr_at_20 value: 58.7477 - type: mrr_at_100 value: 58.93620000000001 - type: mrr_at_1000 value: 58.949600000000004 - type: nauc_ndcg_at_1_max value: 34.794799999999995 - type: nauc_ndcg_at_1_std value: 2.102 - type: nauc_ndcg_at_1_diff1 value: 33.8113 - type: nauc_ndcg_at_3_max value: 31.6187 - type: nauc_ndcg_at_3_std value: -1.3106 - type: nauc_ndcg_at_3_diff1 value: 28.5676 - type: nauc_ndcg_at_5_max value: 30.4962 - type: nauc_ndcg_at_5_std value: -1.016 - type: nauc_ndcg_at_5_diff1 value: 28.0032 - type: nauc_ndcg_at_10_max value: 29.460900000000002 - type: nauc_ndcg_at_10_std value: -0.6328 - type: nauc_ndcg_at_10_diff1 value: 26.351000000000003 - type: nauc_ndcg_at_20_max value: 31.443900000000003 - type: nauc_ndcg_at_20_std value: 1.1067 - type: nauc_ndcg_at_20_diff1 value: 26.2068 - type: nauc_ndcg_at_100_max value: 34.273199999999996 - type: nauc_ndcg_at_100_std value: 5.1303 - type: nauc_ndcg_at_100_diff1 value: 26.4772 - type: nauc_ndcg_at_1000_max value: 34.1663 - type: nauc_ndcg_at_1000_std value: 5.1834999999999996 - type: nauc_ndcg_at_1000_diff1 value: 26.6768 - type: nauc_map_at_1_max value: 23.6327 - type: nauc_map_at_1_std value: -6.3777 - type: nauc_map_at_1_diff1 value: 32.028800000000004 - type: nauc_map_at_3_max value: 27.869300000000003 - type: nauc_map_at_3_std value: -5.9788 - type: nauc_map_at_3_diff1 value: 29.8636 - type: nauc_map_at_5_max value: 28.6043 - type: nauc_map_at_5_std value: -4.4539 - type: nauc_map_at_5_diff1 value: 29.044999999999998 - type: nauc_map_at_10_max value: 29.065600000000003 - type: nauc_map_at_10_std value: -3.2986 - type: nauc_map_at_10_diff1 value: 27.8952 - type: nauc_map_at_20_max value: 30.191200000000002 - type: nauc_map_at_20_std value: -2.4181999999999997 - type: nauc_map_at_20_diff1 value: 27.973399999999998 - type: nauc_map_at_100_max value: 31.0841 - type: nauc_map_at_100_std value: -1.1223 - type: nauc_map_at_100_diff1 value: 28.089199999999998 - type: nauc_map_at_1000_max value: 31.114399999999996 - type: nauc_map_at_1000_std value: -1.0668 - type: nauc_map_at_1000_diff1 value: 28.098 - type: nauc_recall_at_1_max value: 23.6327 - type: nauc_recall_at_1_std value: -6.3777 - type: nauc_recall_at_1_diff1 value: 32.028800000000004 - type: nauc_recall_at_3_max value: 20.9084 - type: nauc_recall_at_3_std value: -7.3713 - type: nauc_recall_at_3_diff1 value: 23.488300000000002 - type: nauc_recall_at_5_max value: 20.4249 - type: nauc_recall_at_5_std value: -3.8598 - type: nauc_recall_at_5_diff1 value: 20.935200000000002 - type: nauc_recall_at_10_max value: 17.5405 - type: nauc_recall_at_10_std value: -3.5011 - type: nauc_recall_at_10_diff1 value: 16.9646 - type: nauc_recall_at_20_max value: 20.6496 - type: nauc_recall_at_20_std value: 0.1168 - type: nauc_recall_at_20_diff1 value: 14.2125 - type: nauc_recall_at_100_max value: 31.916099999999997 - type: 
nauc_recall_at_100_std value: 20.2048 - type: nauc_recall_at_100_diff1 value: 9.3709 - type: nauc_recall_at_1000_max value: 46.2569 - type: nauc_recall_at_1000_std value: 55.2292 - type: nauc_recall_at_1000_diff1 value: -0.2909 - type: nauc_precision_at_1_max value: 34.794799999999995 - type: nauc_precision_at_1_std value: 2.102 - type: nauc_precision_at_1_diff1 value: 33.8113 - type: nauc_precision_at_3_max value: 31.221700000000002 - type: nauc_precision_at_3_std value: 7.513 - type: nauc_precision_at_3_diff1 value: 15.9311 - type: nauc_precision_at_5_max value: 28.5241 - type: nauc_precision_at_5_std value: 12.2286 - type: nauc_precision_at_5_diff1 value: 9.5435 - type: nauc_precision_at_10_max value: 24.3663 - type: nauc_precision_at_10_std value: 15.867700000000001 - type: nauc_precision_at_10_diff1 value: 2.396 - type: nauc_precision_at_20_max value: 22.322300000000002 - type: nauc_precision_at_20_std value: 18.3505 - type: nauc_precision_at_20_diff1 value: 0.0719 - type: nauc_precision_at_100_max value: 18.8029 - type: nauc_precision_at_100_std value: 24.728 - type: nauc_precision_at_100_diff1 value: -4.0887 - type: nauc_precision_at_1000_max value: 12.315800000000001 - type: nauc_precision_at_1000_std value: 20.9058 - type: nauc_precision_at_1000_diff1 value: -6.4069 - type: nauc_mrr_at_1_max value: 34.794799999999995 - type: nauc_mrr_at_1_std value: 2.102 - type: nauc_mrr_at_1_diff1 value: 33.8113 - type: nauc_mrr_at_3_max value: 33.3929 - type: nauc_mrr_at_3_std value: 3.4512 - type: nauc_mrr_at_3_diff1 value: 29.718 - type: nauc_mrr_at_5_max value: 34.586 - type: nauc_mrr_at_5_std value: 5.4722 - type: nauc_mrr_at_5_diff1 value: 30.0744 - type: nauc_mrr_at_10_max value: 34.3898 - type: nauc_mrr_at_10_std value: 4.854 - type: nauc_mrr_at_10_diff1 value: 29.979 - type: nauc_mrr_at_20_max value: 34.516000000000005 - type: nauc_mrr_at_20_std value: 4.9616 - type: nauc_mrr_at_20_diff1 value: 29.907899999999998 - type: nauc_mrr_at_100_max value: 34.515499999999996 - type: nauc_mrr_at_100_std value: 4.8578 - type: nauc_mrr_at_100_diff1 value: 29.997 - type: nauc_mrr_at_1000_max value: 34.5046 - type: nauc_mrr_at_1000_std value: 4.8536 - type: nauc_mrr_at_1000_diff1 value: 30.0019 - type: main_score value: 49.372 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (es) type: miracl/mmteb-miracl config: es split: dev revision: main metrics: - type: ndcg_at_1 value: 55.71 - type: ndcg_at_3 value: 47.981 - type: ndcg_at_5 value: 46.583999999999996 - type: ndcg_at_10 value: 49.688 - type: ndcg_at_20 value: 54.437999999999995 - type: ndcg_at_100 value: 60.492999999999995 - type: ndcg_at_1000 value: 62.922 - type: map_at_1 value: 16.38 - type: map_at_3 value: 27.137 - type: map_at_5 value: 31.81 - type: map_at_10 value: 36.986999999999995 - type: map_at_20 value: 39.749 - type: map_at_100 value: 41.69 - type: map_at_1000 value: 41.924 - type: recall_at_1 value: 16.38 - type: recall_at_3 value: 31.502999999999997 - type: recall_at_5 value: 40.355999999999995 - type: recall_at_10 value: 54.155 - type: recall_at_20 value: 65.32900000000001 - type: recall_at_100 value: 85.136 - type: recall_at_1000 value: 96.951 - type: precision_at_1 value: 55.71 - type: precision_at_3 value: 39.969 - type: precision_at_5 value: 32.469 - type: precision_at_10 value: 23.071 - type: precision_at_20 value: 14.482999999999999 - type: precision_at_100 value: 3.8920000000000003 - type: precision_at_1000 value: 0.44799999999999995 - type: mrr_at_1 value: 55.709900000000005 - type: mrr_at_3 value: 63.9146 - 
type: mrr_at_5 value: 65.4192 - type: mrr_at_10 value: 66.4602 - type: mrr_at_20 value: 66.71249999999999 - type: mrr_at_100 value: 66.8844 - type: mrr_at_1000 value: 66.893 - type: nauc_ndcg_at_1_max value: 39.4623 - type: nauc_ndcg_at_1_std value: 18.2237 - type: nauc_ndcg_at_1_diff1 value: 34.3382 - type: nauc_ndcg_at_3_max value: 33.3518 - type: nauc_ndcg_at_3_std value: 14.2885 - type: nauc_ndcg_at_3_diff1 value: 22.4965 - type: nauc_ndcg_at_5_max value: 31.5822 - type: nauc_ndcg_at_5_std value: 10.4064 - type: nauc_ndcg_at_5_diff1 value: 24.4417 - type: nauc_ndcg_at_10_max value: 33.4838 - type: nauc_ndcg_at_10_std value: 11.5351 - type: nauc_ndcg_at_10_diff1 value: 27.1137 - type: nauc_ndcg_at_20_max value: 38.831700000000005 - type: nauc_ndcg_at_20_std value: 18.784 - type: nauc_ndcg_at_20_diff1 value: 27.408700000000003 - type: nauc_ndcg_at_100_max value: 42.8785 - type: nauc_ndcg_at_100_std value: 24.596 - type: nauc_ndcg_at_100_diff1 value: 25.8252 - type: nauc_ndcg_at_1000_max value: 42.023500000000006 - type: nauc_ndcg_at_1000_std value: 23.2727 - type: nauc_ndcg_at_1000_diff1 value: 24.8455 - type: nauc_map_at_1_max value: 10.5243 - type: nauc_map_at_1_std value: -10.143699999999999 - type: nauc_map_at_1_diff1 value: 32.2699 - type: nauc_map_at_3_max value: 16.902900000000002 - type: nauc_map_at_3_std value: -5.6548 - type: nauc_map_at_3_diff1 value: 26.238699999999998 - type: nauc_map_at_5_max value: 21.4475 - type: nauc_map_at_5_std value: -2.1950000000000003 - type: nauc_map_at_5_diff1 value: 25.2077 - type: nauc_map_at_10_max value: 27.2231 - type: nauc_map_at_10_std value: 3.9522000000000004 - type: nauc_map_at_10_diff1 value: 26.0175 - type: nauc_map_at_20_max value: 30.8106 - type: nauc_map_at_20_std value: 8.9534 - type: nauc_map_at_20_diff1 value: 25.8477 - type: nauc_map_at_100_max value: 32.5864 - type: nauc_map_at_100_std value: 11.2878 - type: nauc_map_at_100_diff1 value: 25.3496 - type: nauc_map_at_1000_max value: 32.573 - type: nauc_map_at_1000_std value: 11.2812 - type: nauc_map_at_1000_diff1 value: 25.2334 - type: nauc_recall_at_1_max value: 10.5243 - type: nauc_recall_at_1_std value: -10.143699999999999 - type: nauc_recall_at_1_diff1 value: 32.2699 - type: nauc_recall_at_3_max value: 12.1019 - type: nauc_recall_at_3_std value: -8.2304 - type: nauc_recall_at_3_diff1 value: 22.9436 - type: nauc_recall_at_5_max value: 15.0438 - type: nauc_recall_at_5_std value: -6.216200000000001 - type: nauc_recall_at_5_diff1 value: 21.5158 - type: nauc_recall_at_10_max value: 22.825100000000003 - type: nauc_recall_at_10_std value: 4.994400000000001 - type: nauc_recall_at_10_diff1 value: 22.4346 - type: nauc_recall_at_20_max value: 33.1395 - type: nauc_recall_at_20_std value: 19.5456 - type: nauc_recall_at_20_diff1 value: 24.0575 - type: nauc_recall_at_100_max value: 50.0911 - type: nauc_recall_at_100_std value: 45.542300000000004 - type: nauc_recall_at_100_diff1 value: 19.9322 - type: nauc_recall_at_1000_max value: 73.2055 - type: nauc_recall_at_1000_std value: 74.8121 - type: nauc_recall_at_1000_diff1 value: 6.7021999999999995 - type: nauc_precision_at_1_max value: 39.4623 - type: nauc_precision_at_1_std value: 18.2237 - type: nauc_precision_at_1_diff1 value: 34.3382 - type: nauc_precision_at_3_max value: 37.2684 - type: nauc_precision_at_3_std value: 24.1559 - type: nauc_precision_at_3_diff1 value: 10.6349 - type: nauc_precision_at_5_max value: 37.9483 - type: nauc_precision_at_5_std value: 26.973000000000003 - type: nauc_precision_at_5_diff1 value: 6.722499999999999 - 
type: nauc_precision_at_10_max value: 41.4223 - type: nauc_precision_at_10_std value: 35.661100000000005 - type: nauc_precision_at_10_diff1 value: 3.8463 - type: nauc_precision_at_20_max value: 41.917300000000004 - type: nauc_precision_at_20_std value: 42.0563 - type: nauc_precision_at_20_diff1 value: 0.4484 - type: nauc_precision_at_100_max value: 37.4895 - type: nauc_precision_at_100_std value: 45.1734 - type: nauc_precision_at_100_diff1 value: -7.4965 - type: nauc_precision_at_1000_max value: 27.853299999999997 - type: nauc_precision_at_1000_std value: 36.997 - type: nauc_precision_at_1000_diff1 value: -13.5956 - type: nauc_mrr_at_1_max value: 39.4623 - type: nauc_mrr_at_1_std value: 18.2237 - type: nauc_mrr_at_1_diff1 value: 34.3382 - type: nauc_mrr_at_3_max value: 43.2341 - type: nauc_mrr_at_3_std value: 22.287599999999998 - type: nauc_mrr_at_3_diff1 value: 32.1338 - type: nauc_mrr_at_5_max value: 43.1729 - type: nauc_mrr_at_5_std value: 21.9232 - type: nauc_mrr_at_5_diff1 value: 32.0241 - type: nauc_mrr_at_10_max value: 43.8014 - type: nauc_mrr_at_10_std value: 23.1591 - type: nauc_mrr_at_10_diff1 value: 31.898100000000003 - type: nauc_mrr_at_20_max value: 43.7825 - type: nauc_mrr_at_20_std value: 23.1845 - type: nauc_mrr_at_20_diff1 value: 32.2338 - type: nauc_mrr_at_100_max value: 43.6665 - type: nauc_mrr_at_100_std value: 23.0026 - type: nauc_mrr_at_100_diff1 value: 32.177299999999995 - type: nauc_mrr_at_1000_max value: 43.6579 - type: nauc_mrr_at_1000_std value: 22.986500000000003 - type: nauc_mrr_at_1000_diff1 value: 32.1927 - type: main_score value: 49.688 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (fa) type: miracl/mmteb-miracl config: fa split: dev revision: main metrics: - type: ndcg_at_1 value: 39.873 - type: ndcg_at_3 value: 42.738 - type: ndcg_at_5 value: 45.843 - type: ndcg_at_10 value: 50.226000000000006 - type: ndcg_at_20 value: 52.92 - type: ndcg_at_100 value: 56.516999999999996 - type: ndcg_at_1000 value: 57.967 - type: map_at_1 value: 25.369000000000003 - type: map_at_3 value: 35.791000000000004 - type: map_at_5 value: 39.027 - type: map_at_10 value: 41.759 - type: map_at_20 value: 42.899 - type: map_at_100 value: 43.637 - type: map_at_1000 value: 43.734 - type: recall_at_1 value: 25.369000000000003 - type: recall_at_3 value: 43.808 - type: recall_at_5 value: 52.378 - type: recall_at_10 value: 63.775999999999996 - type: recall_at_20 value: 72.099 - type: recall_at_100 value: 87.68599999999999 - type: recall_at_1000 value: 96.71 - type: precision_at_1 value: 39.873 - type: precision_at_3 value: 25.580000000000002 - type: precision_at_5 value: 19.367 - type: precision_at_10 value: 12.437 - type: precision_at_20 value: 7.247000000000001 - type: precision_at_100 value: 1.807 - type: precision_at_1000 value: 0.202 - type: mrr_at_1 value: 39.8734 - type: mrr_at_3 value: 49.1297 - type: mrr_at_5 value: 50.8703 - type: mrr_at_10 value: 52.0393 - type: mrr_at_20 value: 52.428 - type: mrr_at_100 value: 52.7259 - type: mrr_at_1000 value: 52.7512 - type: nauc_ndcg_at_1_max value: 37.2005 - type: nauc_ndcg_at_1_std value: 7.2856000000000005 - type: nauc_ndcg_at_1_diff1 value: 24.3391 - type: nauc_ndcg_at_3_max value: 34.9919 - type: nauc_ndcg_at_3_std value: 4.1377 - type: nauc_ndcg_at_3_diff1 value: 22.7251 - type: nauc_ndcg_at_5_max value: 35.3802 - type: nauc_ndcg_at_5_std value: 5.1718 - type: nauc_ndcg_at_5_diff1 value: 20.7966 - type: nauc_ndcg_at_10_max value: 37.5244 - type: nauc_ndcg_at_10_std value: 8.4159 - type: nauc_ndcg_at_10_diff1 value: 20.3825 - 
type: nauc_ndcg_at_20_max value: 39.457 - type: nauc_ndcg_at_20_std value: 10.9359 - type: nauc_ndcg_at_20_diff1 value: 20.1633 - type: nauc_ndcg_at_100_max value: 40.605799999999995 - type: nauc_ndcg_at_100_std value: 12.8063 - type: nauc_ndcg_at_100_diff1 value: 20.1186 - type: nauc_ndcg_at_1000_max value: 39.6952 - type: nauc_ndcg_at_1000_std value: 12.0795 - type: nauc_ndcg_at_1000_diff1 value: 20.1048 - type: nauc_map_at_1_max value: 22.758200000000002 - type: nauc_map_at_1_std value: -4.4208 - type: nauc_map_at_1_diff1 value: 32.8042 - type: nauc_map_at_3_max value: 29.5871 - type: nauc_map_at_3_std value: -1.0369 - type: nauc_map_at_3_diff1 value: 26.7399 - type: nauc_map_at_5_max value: 31.630799999999997 - type: nauc_map_at_5_std value: 1.133 - type: nauc_map_at_5_diff1 value: 23.9264 - type: nauc_map_at_10_max value: 33.5866 - type: nauc_map_at_10_std value: 3.8602999999999996 - type: nauc_map_at_10_diff1 value: 23.0431 - type: nauc_map_at_20_max value: 34.7099 - type: nauc_map_at_20_std value: 5.2187 - type: nauc_map_at_20_diff1 value: 22.751099999999997 - type: nauc_map_at_100_max value: 35.0549 - type: nauc_map_at_100_std value: 5.7357 - type: nauc_map_at_100_diff1 value: 22.7261 - type: nauc_map_at_1000_max value: 35.02 - type: nauc_map_at_1000_std value: 5.7542 - type: nauc_map_at_1000_diff1 value: 22.717000000000002 - type: nauc_recall_at_1_max value: 22.758200000000002 - type: nauc_recall_at_1_std value: -4.4208 - type: nauc_recall_at_1_diff1 value: 32.8042 - type: nauc_recall_at_3_max value: 29.2098 - type: nauc_recall_at_3_std value: 0.1884 - type: nauc_recall_at_3_diff1 value: 21.9167 - type: nauc_recall_at_5_max value: 30.634099999999997 - type: nauc_recall_at_5_std value: 2.9632 - type: nauc_recall_at_5_diff1 value: 15.8588 - type: nauc_recall_at_10_max value: 34.958 - type: nauc_recall_at_10_std value: 10.6769 - type: nauc_recall_at_10_diff1 value: 13.9022 - type: nauc_recall_at_20_max value: 40.5569 - type: nauc_recall_at_20_std value: 18.1782 - type: nauc_recall_at_20_diff1 value: 13.4488 - type: nauc_recall_at_100_max value: 54.6126 - type: nauc_recall_at_100_std value: 39.507999999999996 - type: nauc_recall_at_100_diff1 value: 10.122 - type: nauc_recall_at_1000_max value: 64.1019 - type: nauc_recall_at_1000_std value: 65.3022 - type: nauc_recall_at_1000_diff1 value: -0.9008 - type: nauc_precision_at_1_max value: 37.2005 - type: nauc_precision_at_1_std value: 7.2856000000000005 - type: nauc_precision_at_1_diff1 value: 24.3391 - type: nauc_precision_at_3_max value: 40.8492 - type: nauc_precision_at_3_std value: 14.955099999999998 - type: nauc_precision_at_3_diff1 value: 5.8083 - type: nauc_precision_at_5_max value: 37.6411 - type: nauc_precision_at_5_std value: 20.1371 - type: nauc_precision_at_5_diff1 value: -4.7182 - type: nauc_precision_at_10_max value: 35.9345 - type: nauc_precision_at_10_std value: 27.593899999999998 - type: nauc_precision_at_10_diff1 value: -9.1429 - type: nauc_precision_at_20_max value: 33.7364 - type: nauc_precision_at_20_std value: 31.8223 - type: nauc_precision_at_20_diff1 value: -11.98 - type: nauc_precision_at_100_max value: 25.7037 - type: nauc_precision_at_100_std value: 32.6954 - type: nauc_precision_at_100_diff1 value: -15.2838 - type: nauc_precision_at_1000_max value: 16.6881 - type: nauc_precision_at_1000_std value: 27.787200000000002 - type: nauc_precision_at_1000_diff1 value: -16.964000000000002 - type: nauc_mrr_at_1_max value: 37.2005 - type: nauc_mrr_at_1_std value: 7.2856000000000005 - type: nauc_mrr_at_1_diff1 value: 24.3391 
- type: nauc_mrr_at_3_max value: 40.9867 - type: nauc_mrr_at_3_std value: 10.7794 - type: nauc_mrr_at_3_diff1 value: 21.0522 - type: nauc_mrr_at_5_max value: 40.7712 - type: nauc_mrr_at_5_std value: 11.2036 - type: nauc_mrr_at_5_diff1 value: 20.3769 - type: nauc_mrr_at_10_max value: 40.8976 - type: nauc_mrr_at_10_std value: 11.7276 - type: nauc_mrr_at_10_diff1 value: 20.261699999999998 - type: nauc_mrr_at_20_max value: 40.8283 - type: nauc_mrr_at_20_std value: 11.6606 - type: nauc_mrr_at_20_diff1 value: 20.430300000000003 - type: nauc_mrr_at_100_max value: 40.9123 - type: nauc_mrr_at_100_std value: 11.6937 - type: nauc_mrr_at_100_diff1 value: 20.4759 - type: nauc_mrr_at_1000_max value: 40.895399999999995 - type: nauc_mrr_at_1000_std value: 11.6648 - type: nauc_mrr_at_1000_diff1 value: 20.4831 - type: main_score value: 50.226000000000006 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (fi) type: miracl/mmteb-miracl config: fi split: dev revision: main metrics: - type: ndcg_at_1 value: 60.818000000000005 - type: ndcg_at_3 value: 60.06 - type: ndcg_at_5 value: 63.842 - type: ndcg_at_10 value: 67.46 - type: ndcg_at_20 value: 69.692 - type: ndcg_at_100 value: 71.516 - type: ndcg_at_1000 value: 72.18 - type: map_at_1 value: 39.263999999999996 - type: map_at_3 value: 53.723 - type: map_at_5 value: 57.118 - type: map_at_10 value: 59.394000000000005 - type: map_at_20 value: 60.339 - type: map_at_100 value: 60.739 - type: map_at_1000 value: 60.782000000000004 - type: recall_at_1 value: 39.263999999999996 - type: recall_at_3 value: 61.05500000000001 - type: recall_at_5 value: 69.774 - type: recall_at_10 value: 78.577 - type: recall_at_20 value: 85.435 - type: recall_at_100 value: 93.291 - type: recall_at_1000 value: 97.493 - type: precision_at_1 value: 60.818000000000005 - type: precision_at_3 value: 35.064 - type: precision_at_5 value: 24.815 - type: precision_at_10 value: 14.445 - type: precision_at_20 value: 8.049000000000001 - type: precision_at_100 value: 1.7819999999999998 - type: precision_at_1000 value: 0.187 - type: mrr_at_1 value: 60.8183 - type: mrr_at_3 value: 68.7516 - type: mrr_at_5 value: 70.1678 - type: mrr_at_10 value: 70.85040000000001 - type: mrr_at_20 value: 71.1314 - type: mrr_at_100 value: 71.2271 - type: mrr_at_1000 value: 71.2334 - type: nauc_ndcg_at_1_max value: 39.623000000000005 - type: nauc_ndcg_at_1_std value: -0.6057 - type: nauc_ndcg_at_1_diff1 value: 50.2688 - type: nauc_ndcg_at_3_max value: 36.2982 - type: nauc_ndcg_at_3_std value: -0.4931 - type: nauc_ndcg_at_3_diff1 value: 41.5229 - type: nauc_ndcg_at_5_max value: 37.1813 - type: nauc_ndcg_at_5_std value: -1.1114000000000002 - type: nauc_ndcg_at_5_diff1 value: 41.429700000000004 - type: nauc_ndcg_at_10_max value: 39.3656 - type: nauc_ndcg_at_10_std value: 0.2202 - type: nauc_ndcg_at_10_diff1 value: 41.4453 - type: nauc_ndcg_at_20_max value: 40.186 - type: nauc_ndcg_at_20_std value: 2.8166 - type: nauc_ndcg_at_20_diff1 value: 41.0657 - type: nauc_ndcg_at_100_max value: 40.2423 - type: nauc_ndcg_at_100_std value: 4.5445 - type: nauc_ndcg_at_100_diff1 value: 42.1274 - type: nauc_ndcg_at_1000_max value: 39.821200000000005 - type: nauc_ndcg_at_1000_std value: 3.71 - type: nauc_ndcg_at_1000_diff1 value: 42.2532 - type: nauc_map_at_1_max value: 25.539 - type: nauc_map_at_1_std value: -7.6318 - type: nauc_map_at_1_diff1 value: 47.2875 - type: nauc_map_at_3_max value: 33.5096 - type: nauc_map_at_3_std value: -3.4685 - type: nauc_map_at_3_diff1 value: 41.2351 - type: nauc_map_at_5_max value: 35.0144 - type: 
nauc_map_at_5_std value: -2.9198999999999997 - type: nauc_map_at_5_diff1 value: 40.892 - type: nauc_map_at_10_max value: 36.4497 - type: nauc_map_at_10_std value: -1.8148999999999997 - type: nauc_map_at_10_diff1 value: 40.823100000000004 - type: nauc_map_at_20_max value: 36.863 - type: nauc_map_at_20_std value: -0.7572 - type: nauc_map_at_20_diff1 value: 40.6285 - type: nauc_map_at_100_max value: 36.882 - type: nauc_map_at_100_std value: -0.40850000000000003 - type: nauc_map_at_100_diff1 value: 40.844500000000004 - type: nauc_map_at_1000_max value: 36.8736 - type: nauc_map_at_1000_std value: -0.4359 - type: nauc_map_at_1000_diff1 value: 40.8569 - type: nauc_recall_at_1_max value: 25.539 - type: nauc_recall_at_1_std value: -7.6318 - type: nauc_recall_at_1_diff1 value: 47.2875 - type: nauc_recall_at_3_max value: 32.7716 - type: nauc_recall_at_3_std value: -1.6856 - type: nauc_recall_at_3_diff1 value: 36.4533 - type: nauc_recall_at_5_max value: 33.5681 - type: nauc_recall_at_5_std value: -2.4453 - type: nauc_recall_at_5_diff1 value: 33.8472 - type: nauc_recall_at_10_max value: 39.5319 - type: nauc_recall_at_10_std value: 0.6228 - type: nauc_recall_at_10_diff1 value: 31.935200000000002 - type: nauc_recall_at_20_max value: 44.3495 - type: nauc_recall_at_20_std value: 12.5445 - type: nauc_recall_at_20_diff1 value: 27.6315 - type: nauc_recall_at_100_max value: 53.924499999999995 - type: nauc_recall_at_100_std value: 44.5927 - type: nauc_recall_at_100_diff1 value: 32.2776 - type: nauc_recall_at_1000_max value: 59.7088 - type: nauc_recall_at_1000_std value: 61.6974 - type: nauc_recall_at_1000_diff1 value: 28.367700000000003 - type: nauc_precision_at_1_max value: 39.623000000000005 - type: nauc_precision_at_1_std value: -0.6057 - type: nauc_precision_at_1_diff1 value: 50.2688 - type: nauc_precision_at_3_max value: 29.5187 - type: nauc_precision_at_3_std value: 11.1305 - type: nauc_precision_at_3_diff1 value: 11.674 - type: nauc_precision_at_5_max value: 25.5889 - type: nauc_precision_at_5_std value: 13.4716 - type: nauc_precision_at_5_diff1 value: 3.2894 - type: nauc_precision_at_10_max value: 21.2446 - type: nauc_precision_at_10_std value: 15.7787 - type: nauc_precision_at_10_diff1 value: -4.0968 - type: nauc_precision_at_20_max value: 15.9944 - type: nauc_precision_at_20_std value: 22.4212 - type: nauc_precision_at_20_diff1 value: -11.3771 - type: nauc_precision_at_100_max value: 8.592600000000001 - type: nauc_precision_at_100_std value: 26.4342 - type: nauc_precision_at_100_diff1 value: -15.402 - type: nauc_precision_at_1000_max value: 2.8388 - type: nauc_precision_at_1000_std value: 23.2317 - type: nauc_precision_at_1000_diff1 value: -19.1173 - type: nauc_mrr_at_1_max value: 39.623000000000005 - type: nauc_mrr_at_1_std value: -0.6057 - type: nauc_mrr_at_1_diff1 value: 50.2688 - type: nauc_mrr_at_3_max value: 41.694199999999995 - type: nauc_mrr_at_3_std value: 2.5751 - type: nauc_mrr_at_3_diff1 value: 48.6111 - type: nauc_mrr_at_5_max value: 41.5674 - type: nauc_mrr_at_5_std value: 2.7312 - type: nauc_mrr_at_5_diff1 value: 48.6988 - type: nauc_mrr_at_10_max value: 41.7364 - type: nauc_mrr_at_10_std value: 2.5787 - type: nauc_mrr_at_10_diff1 value: 48.5842 - type: nauc_mrr_at_20_max value: 41.7509 - type: nauc_mrr_at_20_std value: 2.6837 - type: nauc_mrr_at_20_diff1 value: 48.7196 - type: nauc_mrr_at_100_max value: 41.6895 - type: nauc_mrr_at_100_std value: 2.6545 - type: nauc_mrr_at_100_diff1 value: 48.7483 - type: nauc_mrr_at_1000_max value: 41.6849 - type: nauc_mrr_at_1000_std value: 2.6379 - 
type: nauc_mrr_at_1000_diff1 value: 48.753600000000006 - type: main_score value: 67.46 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (fr) type: miracl/mmteb-miracl config: fr split: dev revision: main metrics: - type: ndcg_at_1 value: 39.65 - type: ndcg_at_3 value: 39.843 - type: ndcg_at_5 value: 44.416 - type: ndcg_at_10 value: 49.891000000000005 - type: ndcg_at_20 value: 53.163000000000004 - type: ndcg_at_100 value: 56.492 - type: ndcg_at_1000 value: 57.837 - type: map_at_1 value: 22.644000000000002 - type: map_at_3 value: 33.021 - type: map_at_5 value: 36.958 - type: map_at_10 value: 39.967999999999996 - type: map_at_20 value: 41.298 - type: map_at_100 value: 42.03 - type: map_at_1000 value: 42.119 - type: recall_at_1 value: 22.644000000000002 - type: recall_at_3 value: 39.798 - type: recall_at_5 value: 51.001 - type: recall_at_10 value: 65.169 - type: recall_at_20 value: 75.33800000000001 - type: recall_at_100 value: 89.786 - type: recall_at_1000 value: 98.08099999999999 - type: precision_at_1 value: 39.65 - type: precision_at_3 value: 25.656000000000002 - type: precision_at_5 value: 20.175 - type: precision_at_10 value: 13.120000000000001 - type: precision_at_20 value: 7.7410000000000005 - type: precision_at_100 value: 1.883 - type: precision_at_1000 value: 0.208 - type: mrr_at_1 value: 39.6501 - type: mrr_at_3 value: 48.7366 - type: mrr_at_5 value: 50.9961 - type: mrr_at_10 value: 52.659 - type: mrr_at_20 value: 53.0856 - type: mrr_at_100 value: 53.273199999999996 - type: mrr_at_1000 value: 53.2931 - type: nauc_ndcg_at_1_max value: 29.1135 - type: nauc_ndcg_at_1_std value: 13.9561 - type: nauc_ndcg_at_1_diff1 value: 28.410400000000003 - type: nauc_ndcg_at_3_max value: 29.0117 - type: nauc_ndcg_at_3_std value: 15.655 - type: nauc_ndcg_at_3_diff1 value: 19.7043 - type: nauc_ndcg_at_5_max value: 31.3257 - type: nauc_ndcg_at_5_std value: 17.4096 - type: nauc_ndcg_at_5_diff1 value: 20.5295 - type: nauc_ndcg_at_10_max value: 33.244 - type: nauc_ndcg_at_10_std value: 18.8436 - type: nauc_ndcg_at_10_diff1 value: 17.9986 - type: nauc_ndcg_at_20_max value: 35.0697 - type: nauc_ndcg_at_20_std value: 19.84 - type: nauc_ndcg_at_20_diff1 value: 19.611600000000003 - type: nauc_ndcg_at_100_max value: 34.7837 - type: nauc_ndcg_at_100_std value: 22.2762 - type: nauc_ndcg_at_100_diff1 value: 19.3138 - type: nauc_ndcg_at_1000_max value: 34.4487 - type: nauc_ndcg_at_1000_std value: 20.8402 - type: nauc_ndcg_at_1000_diff1 value: 20.2691 - type: nauc_map_at_1_max value: 20.247200000000003 - type: nauc_map_at_1_std value: 8.8046 - type: nauc_map_at_1_diff1 value: 27.227600000000002 - type: nauc_map_at_3_max value: 26.7076 - type: nauc_map_at_3_std value: 13.7464 - type: nauc_map_at_3_diff1 value: 21.1266 - type: nauc_map_at_5_max value: 28.777399999999997 - type: nauc_map_at_5_std value: 15.348400000000002 - type: nauc_map_at_5_diff1 value: 21.4282 - type: nauc_map_at_10_max value: 29.907600000000002 - type: nauc_map_at_10_std value: 16.3636 - type: nauc_map_at_10_diff1 value: 20.1957 - type: nauc_map_at_20_max value: 30.864399999999996 - type: nauc_map_at_20_std value: 16.936999999999998 - type: nauc_map_at_20_diff1 value: 20.8871 - type: nauc_map_at_100_max value: 30.998900000000003 - type: nauc_map_at_100_std value: 17.673 - type: nauc_map_at_100_diff1 value: 20.7773 - type: nauc_map_at_1000_max value: 31.0185 - type: nauc_map_at_1000_std value: 17.6212 - type: nauc_map_at_1000_diff1 value: 20.846700000000002 - type: nauc_recall_at_1_max value: 20.247200000000003 - type: 
nauc_recall_at_1_std value: 8.8046 - type: nauc_recall_at_1_diff1 value: 27.227600000000002 - type: nauc_recall_at_3_max value: 25.074600000000004 - type: nauc_recall_at_3_std value: 14.0657 - type: nauc_recall_at_3_diff1 value: 14.7258 - type: nauc_recall_at_5_max value: 29.442899999999998 - type: nauc_recall_at_5_std value: 16.2404 - type: nauc_recall_at_5_diff1 value: 15.4134 - type: nauc_recall_at_10_max value: 33.5052 - type: nauc_recall_at_10_std value: 19.417 - type: nauc_recall_at_10_diff1 value: 7.933700000000001 - type: nauc_recall_at_20_max value: 40.2402 - type: nauc_recall_at_20_std value: 22.7218 - type: nauc_recall_at_20_diff1 value: 11.777600000000001 - type: nauc_recall_at_100_max value: 44.4613 - type: nauc_recall_at_100_std value: 52.5751 - type: nauc_recall_at_100_diff1 value: 5.1827 - type: nauc_recall_at_1000_max value: 80.4059 - type: nauc_recall_at_1000_std value: 82.2582 - type: nauc_recall_at_1000_diff1 value: 37.9332 - type: nauc_precision_at_1_max value: 29.1135 - type: nauc_precision_at_1_std value: 13.9561 - type: nauc_precision_at_1_diff1 value: 28.410400000000003 - type: nauc_precision_at_3_max value: 32.4031 - type: nauc_precision_at_3_std value: 21.222099999999998 - type: nauc_precision_at_3_diff1 value: 9.2426 - type: nauc_precision_at_5_max value: 31.372600000000002 - type: nauc_precision_at_5_std value: 22.4259 - type: nauc_precision_at_5_diff1 value: 7.199 - type: nauc_precision_at_10_max value: 29.5298 - type: nauc_precision_at_10_std value: 22.183 - type: nauc_precision_at_10_diff1 value: -1.2202 - type: nauc_precision_at_20_max value: 28.1874 - type: nauc_precision_at_20_std value: 21.7393 - type: nauc_precision_at_20_diff1 value: 0.2774 - type: nauc_precision_at_100_max value: 18.2122 - type: nauc_precision_at_100_std value: 21.566 - type: nauc_precision_at_100_diff1 value: -5.8792 - type: nauc_precision_at_1000_max value: 11.3258 - type: nauc_precision_at_1000_std value: 12.261700000000001 - type: nauc_precision_at_1000_diff1 value: -5.8514 - type: nauc_mrr_at_1_max value: 29.1135 - type: nauc_mrr_at_1_std value: 13.9561 - type: nauc_mrr_at_1_diff1 value: 28.410400000000003 - type: nauc_mrr_at_3_max value: 30.904999999999998 - type: nauc_mrr_at_3_std value: 16.5695 - type: nauc_mrr_at_3_diff1 value: 22.555 - type: nauc_mrr_at_5_max value: 32.408 - type: nauc_mrr_at_5_std value: 17.7334 - type: nauc_mrr_at_5_diff1 value: 22.912399999999998 - type: nauc_mrr_at_10_max value: 33.069500000000005 - type: nauc_mrr_at_10_std value: 17.8731 - type: nauc_mrr_at_10_diff1 value: 22.270300000000002 - type: nauc_mrr_at_20_max value: 33.062000000000005 - type: nauc_mrr_at_20_std value: 17.8293 - type: nauc_mrr_at_20_diff1 value: 22.5118 - type: nauc_mrr_at_100_max value: 32.9394 - type: nauc_mrr_at_100_std value: 17.7815 - type: nauc_mrr_at_100_diff1 value: 22.676199999999998 - type: nauc_mrr_at_1000_max value: 32.9188 - type: nauc_mrr_at_1000_std value: 17.7435 - type: nauc_mrr_at_1000_diff1 value: 22.6855 - type: main_score value: 49.891000000000005 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (hi) type: miracl/mmteb-miracl config: hi split: dev revision: main metrics: - type: ndcg_at_1 value: 36.857 - type: ndcg_at_3 value: 39.469 - type: ndcg_at_5 value: 41.839999999999996 - type: ndcg_at_10 value: 46.141 - type: ndcg_at_20 value: 49.384 - type: ndcg_at_100 value: 52.565 - type: ndcg_at_1000 value: 54.318999999999996 - type: map_at_1 value: 20.185 - type: map_at_3 value: 30.9 - type: map_at_5 value: 34.311 - type: map_at_10 value: 37.074 - 
type: map_at_20 value: 38.493 - type: map_at_100 value: 39.174 - type: map_at_1000 value: 39.269 - type: recall_at_1 value: 20.185 - type: recall_at_3 value: 38.993 - type: recall_at_5 value: 47.881 - type: recall_at_10 value: 59.474000000000004 - type: recall_at_20 value: 69.437 - type: recall_at_100 value: 83.38499999999999 - type: recall_at_1000 value: 94.813 - type: precision_at_1 value: 36.857 - type: precision_at_3 value: 26.19 - type: precision_at_5 value: 19.829 - type: precision_at_10 value: 12.543000000000001 - type: precision_at_20 value: 7.542999999999999 - type: precision_at_100 value: 1.8030000000000002 - type: precision_at_1000 value: 0.20500000000000002 - type: mrr_at_1 value: 36.857099999999996 - type: mrr_at_3 value: 46.5238 - type: mrr_at_5 value: 47.9952 - type: mrr_at_10 value: 49.331399999999995 - type: mrr_at_20 value: 49.8255 - type: mrr_at_100 value: 50.0575 - type: mrr_at_1000 value: 50.097 - type: nauc_ndcg_at_1_max value: 42.226200000000006 - type: nauc_ndcg_at_1_std value: 4.0359 - type: nauc_ndcg_at_1_diff1 value: 41.728500000000004 - type: nauc_ndcg_at_3_max value: 37.5731 - type: nauc_ndcg_at_3_std value: 7.4824 - type: nauc_ndcg_at_3_diff1 value: 25.607499999999998 - type: nauc_ndcg_at_5_max value: 36.1243 - type: nauc_ndcg_at_5_std value: 6.7822 - type: nauc_ndcg_at_5_diff1 value: 26.4955 - type: nauc_ndcg_at_10_max value: 38.8673 - type: nauc_ndcg_at_10_std value: 9.925699999999999 - type: nauc_ndcg_at_10_diff1 value: 25.262400000000003 - type: nauc_ndcg_at_20_max value: 41.564099999999996 - type: nauc_ndcg_at_20_std value: 12.4619 - type: nauc_ndcg_at_20_diff1 value: 26.902900000000002 - type: nauc_ndcg_at_100_max value: 42.2534 - type: nauc_ndcg_at_100_std value: 12.1461 - type: nauc_ndcg_at_100_diff1 value: 27.721600000000002 - type: nauc_ndcg_at_1000_max value: 42.3689 - type: nauc_ndcg_at_1000_std value: 11.9947 - type: nauc_ndcg_at_1000_diff1 value: 28.6224 - type: nauc_map_at_1_max value: 23.4774 - type: nauc_map_at_1_std value: -1.6596 - type: nauc_map_at_1_diff1 value: 32.9091 - type: nauc_map_at_3_max value: 29.2888 - type: nauc_map_at_3_std value: 2.8310999999999997 - type: nauc_map_at_3_diff1 value: 25.7556 - type: nauc_map_at_5_max value: 32.013200000000005 - type: nauc_map_at_5_std value: 3.8372 - type: nauc_map_at_5_diff1 value: 26.3662 - type: nauc_map_at_10_max value: 34.6644 - type: nauc_map_at_10_std value: 5.9211 - type: nauc_map_at_10_diff1 value: 25.737700000000004 - type: nauc_map_at_20_max value: 36.5315 - type: nauc_map_at_20_std value: 7.657500000000001 - type: nauc_map_at_20_diff1 value: 26.2519 - type: nauc_map_at_100_max value: 36.7956 - type: nauc_map_at_100_std value: 7.6282000000000005 - type: nauc_map_at_100_diff1 value: 26.5173 - type: nauc_map_at_1000_max value: 36.822500000000005 - type: nauc_map_at_1000_std value: 7.641100000000001 - type: nauc_map_at_1000_diff1 value: 26.5875 - type: nauc_recall_at_1_max value: 23.4774 - type: nauc_recall_at_1_std value: -1.6596 - type: nauc_recall_at_1_diff1 value: 32.9091 - type: nauc_recall_at_3_max value: 23.9443 - type: nauc_recall_at_3_std value: 7.0466 - type: nauc_recall_at_3_diff1 value: 15.045 - type: nauc_recall_at_5_max value: 27.515 - type: nauc_recall_at_5_std value: 7.8471 - type: nauc_recall_at_5_diff1 value: 16.0936 - type: nauc_recall_at_10_max value: 32.9675 - type: nauc_recall_at_10_std value: 15.6248 - type: nauc_recall_at_10_diff1 value: 11.8783 - type: nauc_recall_at_20_max value: 40.6864 - type: nauc_recall_at_20_std value: 23.9995 - type: 
nauc_recall_at_20_diff1 value: 16.9561 - type: nauc_recall_at_100_max value: 47.5027 - type: nauc_recall_at_100_std value: 30.6021 - type: nauc_recall_at_100_diff1 value: 17.3955 - type: nauc_recall_at_1000_max value: 66.6978 - type: nauc_recall_at_1000_std value: 62.0413 - type: nauc_recall_at_1000_diff1 value: 27.5068 - type: nauc_precision_at_1_max value: 42.226200000000006 - type: nauc_precision_at_1_std value: 4.0359 - type: nauc_precision_at_1_diff1 value: 41.728500000000004 - type: nauc_precision_at_3_max value: 44.7816 - type: nauc_precision_at_3_std value: 15.473300000000002 - type: nauc_precision_at_3_diff1 value: 17.0949 - type: nauc_precision_at_5_max value: 44.6483 - type: nauc_precision_at_5_std value: 14.8981 - type: nauc_precision_at_5_diff1 value: 17.1841 - type: nauc_precision_at_10_max value: 45.796 - type: nauc_precision_at_10_std value: 21.046300000000002 - type: nauc_precision_at_10_diff1 value: 10.9757 - type: nauc_precision_at_20_max value: 45.0264 - type: nauc_precision_at_20_std value: 24.8162 - type: nauc_precision_at_20_diff1 value: 10.624699999999999 - type: nauc_precision_at_100_max value: 39.8456 - type: nauc_precision_at_100_std value: 21.0487 - type: nauc_precision_at_100_diff1 value: 8.372 - type: nauc_precision_at_1000_max value: 34.7517 - type: nauc_precision_at_1000_std value: 18.3825 - type: nauc_precision_at_1000_diff1 value: 7.969900000000001 - type: nauc_mrr_at_1_max value: 42.226200000000006 - type: nauc_mrr_at_1_std value: 4.0359 - type: nauc_mrr_at_1_diff1 value: 41.728500000000004 - type: nauc_mrr_at_3_max value: 42.1134 - type: nauc_mrr_at_3_std value: 7.674799999999999 - type: nauc_mrr_at_3_diff1 value: 34.1447 - type: nauc_mrr_at_5_max value: 42.668800000000005 - type: nauc_mrr_at_5_std value: 7.3921 - type: nauc_mrr_at_5_diff1 value: 34.6011 - type: nauc_mrr_at_10_max value: 43.473099999999995 - type: nauc_mrr_at_10_std value: 8.0841 - type: nauc_mrr_at_10_diff1 value: 34.679500000000004 - type: nauc_mrr_at_20_max value: 43.3626 - type: nauc_mrr_at_20_std value: 7.7897 - type: nauc_mrr_at_20_diff1 value: 35.0828 - type: nauc_mrr_at_100_max value: 43.287 - type: nauc_mrr_at_100_std value: 7.7234 - type: nauc_mrr_at_100_diff1 value: 35.169200000000004 - type: nauc_mrr_at_1000_max value: 43.2954 - type: nauc_mrr_at_1000_std value: 7.7224 - type: nauc_mrr_at_1000_diff1 value: 35.1808 - type: main_score value: 46.141 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (id) type: miracl/mmteb-miracl config: id split: dev revision: main metrics: - type: ndcg_at_1 value: 46.354 - type: ndcg_at_3 value: 42.538 - type: ndcg_at_5 value: 43.717 - type: ndcg_at_10 value: 47.229 - type: ndcg_at_20 value: 50.605999999999995 - type: ndcg_at_100 value: 55.25 - type: ndcg_at_1000 value: 57.647999999999996 - type: map_at_1 value: 20.787 - type: map_at_3 value: 30.721999999999998 - type: map_at_5 value: 34.096 - type: map_at_10 value: 36.994 - type: map_at_20 value: 38.622 - type: map_at_100 value: 39.872 - type: map_at_1000 value: 40.056000000000004 - type: recall_at_1 value: 20.787 - type: recall_at_3 value: 36.229 - type: recall_at_5 value: 44.437 - type: recall_at_10 value: 54.771 - type: recall_at_20 value: 63.842 - type: recall_at_100 value: 80.689 - type: recall_at_1000 value: 94.03200000000001 - type: precision_at_1 value: 46.354 - type: precision_at_3 value: 30.625000000000004 - type: precision_at_5 value: 23.708000000000002 - type: precision_at_10 value: 15.719 - type: precision_at_20 value: 9.589 - type: precision_at_100 value: 
2.5700000000000003 - type: precision_at_1000 value: 0.302 - type: mrr_at_1 value: 46.3542 - type: mrr_at_3 value: 54.6875 - type: mrr_at_5 value: 56.5521 - type: mrr_at_10 value: 57.6894 - type: mrr_at_20 value: 58.05630000000001 - type: mrr_at_100 value: 58.217 - type: mrr_at_1000 value: 58.2387 - type: nauc_ndcg_at_1_max value: 27.987000000000002 - type: nauc_ndcg_at_1_std value: 7.784000000000001 - type: nauc_ndcg_at_1_diff1 value: 29.116799999999998 - type: nauc_ndcg_at_3_max value: 25.316899999999997 - type: nauc_ndcg_at_3_std value: 3.3255 - type: nauc_ndcg_at_3_diff1 value: 25.4685 - type: nauc_ndcg_at_5_max value: 26.1614 - type: nauc_ndcg_at_5_std value: 0.8946000000000001 - type: nauc_ndcg_at_5_diff1 value: 25.269799999999996 - type: nauc_ndcg_at_10_max value: 26.898 - type: nauc_ndcg_at_10_std value: 0.505 - type: nauc_ndcg_at_10_diff1 value: 25.0664 - type: nauc_ndcg_at_20_max value: 28.384900000000002 - type: nauc_ndcg_at_20_std value: 3.0328 - type: nauc_ndcg_at_20_diff1 value: 25.011 - type: nauc_ndcg_at_100_max value: 29.4682 - type: nauc_ndcg_at_100_std value: 8.5929 - type: nauc_ndcg_at_100_diff1 value: 23.0951 - type: nauc_ndcg_at_1000_max value: 29.384900000000002 - type: nauc_ndcg_at_1000_std value: 8.7787 - type: nauc_ndcg_at_1000_diff1 value: 23.454900000000002 - type: nauc_map_at_1_max value: 17.6022 - type: nauc_map_at_1_std value: -3.9352 - type: nauc_map_at_1_diff1 value: 31.478 - type: nauc_map_at_3_max value: 22.4116 - type: nauc_map_at_3_std value: -3.0375 - type: nauc_map_at_3_diff1 value: 28.6608 - type: nauc_map_at_5_max value: 23.4486 - type: nauc_map_at_5_std value: -3.7261 - type: nauc_map_at_5_diff1 value: 27.2458 - type: nauc_map_at_10_max value: 24.4413 - type: nauc_map_at_10_std value: -2.4634 - type: nauc_map_at_10_diff1 value: 26.3372 - type: nauc_map_at_20_max value: 25.1924 - type: nauc_map_at_20_std value: -1.0928 - type: nauc_map_at_20_diff1 value: 26.028299999999998 - type: nauc_map_at_100_max value: 25.7081 - type: nauc_map_at_100_std value: 0.6245999999999999 - type: nauc_map_at_100_diff1 value: 25.599 - type: nauc_map_at_1000_max value: 25.714100000000002 - type: nauc_map_at_1000_std value: 0.7106 - type: nauc_map_at_1000_diff1 value: 25.609700000000004 - type: nauc_recall_at_1_max value: 17.6022 - type: nauc_recall_at_1_std value: -3.9352 - type: nauc_recall_at_1_diff1 value: 31.478 - type: nauc_recall_at_3_max value: 20.314799999999998 - type: nauc_recall_at_3_std value: -4.1603 - type: nauc_recall_at_3_diff1 value: 26.1438 - type: nauc_recall_at_5_max value: 22.866500000000002 - type: nauc_recall_at_5_std value: -4.755 - type: nauc_recall_at_5_diff1 value: 22.1412 - type: nauc_recall_at_10_max value: 22.900000000000002 - type: nauc_recall_at_10_std value: -3.9179 - type: nauc_recall_at_10_diff1 value: 19.3005 - type: nauc_recall_at_20_max value: 26.3519 - type: nauc_recall_at_20_std value: 1.1686 - type: nauc_recall_at_20_diff1 value: 18.94 - type: nauc_recall_at_100_max value: 30.2413 - type: nauc_recall_at_100_std value: 24.4636 - type: nauc_recall_at_100_diff1 value: 6.5627 - type: nauc_recall_at_1000_max value: 43.778 - type: nauc_recall_at_1000_std value: 48.835699999999996 - type: nauc_recall_at_1000_diff1 value: -1.5112 - type: nauc_precision_at_1_max value: 27.987000000000002 - type: nauc_precision_at_1_std value: 7.784000000000001 - type: nauc_precision_at_1_diff1 value: 29.116799999999998 - type: nauc_precision_at_3_max value: 24.6393 - type: nauc_precision_at_3_std value: 7.932599999999999 - type: nauc_precision_at_3_diff1 
value: 11.9215 - type: nauc_precision_at_5_max value: 23.0426 - type: nauc_precision_at_5_std value: 8.9273 - type: nauc_precision_at_5_diff1 value: 5.0737 - type: nauc_precision_at_10_max value: 18.0093 - type: nauc_precision_at_10_std value: 13.093 - type: nauc_precision_at_10_diff1 value: -1.5028 - type: nauc_precision_at_20_max value: 16.1061 - type: nauc_precision_at_20_std value: 18.3582 - type: nauc_precision_at_20_diff1 value: -4.3066 - type: nauc_precision_at_100_max value: 10.9945 - type: nauc_precision_at_100_std value: 28.2804 - type: nauc_precision_at_100_diff1 value: -11.6381 - type: nauc_precision_at_1000_max value: 4.9859 - type: nauc_precision_at_1000_std value: 26.3117 - type: nauc_precision_at_1000_diff1 value: -13.819300000000002 - type: nauc_mrr_at_1_max value: 27.987000000000002 - type: nauc_mrr_at_1_std value: 7.784000000000001 - type: nauc_mrr_at_1_diff1 value: 29.116799999999998 - type: nauc_mrr_at_3_max value: 28.635899999999996 - type: nauc_mrr_at_3_std value: 8.309700000000001 - type: nauc_mrr_at_3_diff1 value: 27.976499999999998 - type: nauc_mrr_at_5_max value: 29.8296 - type: nauc_mrr_at_5_std value: 9.4775 - type: nauc_mrr_at_5_diff1 value: 26.685799999999997 - type: nauc_mrr_at_10_max value: 29.4522 - type: nauc_mrr_at_10_std value: 9.1613 - type: nauc_mrr_at_10_diff1 value: 26.933600000000002 - type: nauc_mrr_at_20_max value: 29.5446 - type: nauc_mrr_at_20_std value: 9.3451 - type: nauc_mrr_at_20_diff1 value: 27.074900000000003 - type: nauc_mrr_at_100_max value: 29.4977 - type: nauc_mrr_at_100_std value: 9.4252 - type: nauc_mrr_at_100_diff1 value: 27.0534 - type: nauc_mrr_at_1000_max value: 29.499599999999997 - type: nauc_mrr_at_1000_std value: 9.4193 - type: nauc_mrr_at_1000_diff1 value: 27.054000000000002 - type: main_score value: 47.229 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (ja) type: miracl/mmteb-miracl config: ja split: dev revision: main metrics: - type: ndcg_at_1 value: 56.279 - type: ndcg_at_3 value: 56.226 - type: ndcg_at_5 value: 58.660000000000004 - type: ndcg_at_10 value: 62.81 - type: ndcg_at_20 value: 65.21000000000001 - type: ndcg_at_100 value: 67.757 - type: ndcg_at_1000 value: 68.667 - type: map_at_1 value: 36.647999999999996 - type: map_at_3 value: 48.154 - type: map_at_5 value: 51.336999999999996 - type: map_at_10 value: 53.998000000000005 - type: map_at_20 value: 55.074 - type: map_at_100 value: 55.701 - type: map_at_1000 value: 55.767 - type: recall_at_1 value: 36.647999999999996 - type: recall_at_3 value: 55.845 - type: recall_at_5 value: 63.854 - type: recall_at_10 value: 74.96000000000001 - type: recall_at_20 value: 82.326 - type: recall_at_100 value: 92.461 - type: recall_at_1000 value: 97.827 - type: precision_at_1 value: 56.279 - type: precision_at_3 value: 31.86 - type: precision_at_5 value: 22.884 - type: precision_at_10 value: 14.058000000000002 - type: precision_at_20 value: 7.965 - type: precision_at_100 value: 1.883 - type: precision_at_1000 value: 0.203 - type: mrr_at_1 value: 56.27910000000001 - type: mrr_at_3 value: 64.7868 - type: mrr_at_5 value: 65.9496 - type: mrr_at_10 value: 67.0763 - type: mrr_at_20 value: 67.3531 - type: mrr_at_100 value: 67.48920000000001 - type: mrr_at_1000 value: 67.5016 - type: nauc_ndcg_at_1_max value: 34.801300000000005 - type: nauc_ndcg_at_1_std value: 3.6539 - type: nauc_ndcg_at_1_diff1 value: 42.9912 - type: nauc_ndcg_at_3_max value: 27.3758 - type: nauc_ndcg_at_3_std value: -5.6399 - type: nauc_ndcg_at_3_diff1 value: 35.0235 - type: nauc_ndcg_at_5_max value: 26.5087 
- type: nauc_ndcg_at_5_std value: -7.2121 - type: nauc_ndcg_at_5_diff1 value: 34.3684 - type: nauc_ndcg_at_10_max value: 27.756199999999996 - type: nauc_ndcg_at_10_std value: -6.9499 - type: nauc_ndcg_at_10_diff1 value: 34.9472 - type: nauc_ndcg_at_20_max value: 30.6925 - type: nauc_ndcg_at_20_std value: -3.7859 - type: nauc_ndcg_at_20_diff1 value: 35.833 - type: nauc_ndcg_at_100_max value: 31.6641 - type: nauc_ndcg_at_100_std value: -1.1897 - type: nauc_ndcg_at_100_diff1 value: 36.218 - type: nauc_ndcg_at_1000_max value: 31.5623 - type: nauc_ndcg_at_1000_std value: -1.2468 - type: nauc_ndcg_at_1000_diff1 value: 36.4007 - type: nauc_map_at_1_max value: 13.1087 - type: nauc_map_at_1_std value: -13.6324 - type: nauc_map_at_1_diff1 value: 36.5411 - type: nauc_map_at_3_max value: 19.108900000000002 - type: nauc_map_at_3_std value: -12.8558 - type: nauc_map_at_3_diff1 value: 33.797 - type: nauc_map_at_5_max value: 20.935100000000002 - type: nauc_map_at_5_std value: -11.6525 - type: nauc_map_at_5_diff1 value: 33.392500000000005 - type: nauc_map_at_10_max value: 22.9758 - type: nauc_map_at_10_std value: -10.3728 - type: nauc_map_at_10_diff1 value: 33.8681 - type: nauc_map_at_20_max value: 24.357100000000003 - type: nauc_map_at_20_std value: -8.9932 - type: nauc_map_at_20_diff1 value: 34.2437 - type: nauc_map_at_100_max value: 24.622700000000002 - type: nauc_map_at_100_std value: -8.3079 - type: nauc_map_at_100_diff1 value: 34.3227 - type: nauc_map_at_1000_max value: 24.6436 - type: nauc_map_at_1000_std value: -8.280999999999999 - type: nauc_map_at_1000_diff1 value: 34.3499 - type: nauc_recall_at_1_max value: 13.1087 - type: nauc_recall_at_1_std value: -13.6324 - type: nauc_recall_at_1_diff1 value: 36.5411 - type: nauc_recall_at_3_max value: 17.369899999999998 - type: nauc_recall_at_3_std value: -14.6564 - type: nauc_recall_at_3_diff1 value: 29.4825 - type: nauc_recall_at_5_max value: 18.2446 - type: nauc_recall_at_5_std value: -13.422400000000001 - type: nauc_recall_at_5_diff1 value: 26.5515 - type: nauc_recall_at_10_max value: 18.6431 - type: nauc_recall_at_10_std value: -13.3386 - type: nauc_recall_at_10_diff1 value: 25.001299999999997 - type: nauc_recall_at_20_max value: 28.248099999999997 - type: nauc_recall_at_20_std value: -2.9409 - type: nauc_recall_at_20_diff1 value: 26.283800000000003 - type: nauc_recall_at_100_max value: 38.6213 - type: nauc_recall_at_100_std value: 20.5175 - type: nauc_recall_at_100_diff1 value: 23.8743 - type: nauc_recall_at_1000_max value: 54.1945 - type: nauc_recall_at_1000_std value: 48.3776 - type: nauc_recall_at_1000_diff1 value: 21.786 - type: nauc_precision_at_1_max value: 34.801300000000005 - type: nauc_precision_at_1_std value: 3.6539 - type: nauc_precision_at_1_diff1 value: 42.9912 - type: nauc_precision_at_3_max value: 36.7085 - type: nauc_precision_at_3_std value: 13.653799999999999 - type: nauc_precision_at_3_diff1 value: 16.8438 - type: nauc_precision_at_5_max value: 33.541199999999996 - type: nauc_precision_at_5_std value: 17.418400000000002 - type: nauc_precision_at_5_diff1 value: 8.5281 - type: nauc_precision_at_10_max value: 32.448100000000004 - type: nauc_precision_at_10_std value: 22.8249 - type: nauc_precision_at_10_diff1 value: 2.5392 - type: nauc_precision_at_20_max value: 32.423 - type: nauc_precision_at_20_std value: 29.353800000000003 - type: nauc_precision_at_20_diff1 value: 0.1455 - type: nauc_precision_at_100_max value: 25.0045 - type: nauc_precision_at_100_std value: 34.6492 - type: nauc_precision_at_100_diff1 value: -5.5314000000000005 - 
type: nauc_precision_at_1000_max value: 21.319499999999998 - type: nauc_precision_at_1000_std value: 33.3312 - type: nauc_precision_at_1000_diff1 value: -7.0243 - type: nauc_mrr_at_1_max value: 34.801300000000005 - type: nauc_mrr_at_1_std value: 3.6539 - type: nauc_mrr_at_1_diff1 value: 42.9912 - type: nauc_mrr_at_3_max value: 39.8179 - type: nauc_mrr_at_3_std value: 4.4769000000000005 - type: nauc_mrr_at_3_diff1 value: 42.4358 - type: nauc_mrr_at_5_max value: 39.6822 - type: nauc_mrr_at_5_std value: 4.7865 - type: nauc_mrr_at_5_diff1 value: 41.9923 - type: nauc_mrr_at_10_max value: 39.2963 - type: nauc_mrr_at_10_std value: 4.8511 - type: nauc_mrr_at_10_diff1 value: 41.994 - type: nauc_mrr_at_20_max value: 39.395799999999994 - type: nauc_mrr_at_20_std value: 4.9907 - type: nauc_mrr_at_20_diff1 value: 42.1806 - type: nauc_mrr_at_100_max value: 39.3251 - type: nauc_mrr_at_100_std value: 4.948 - type: nauc_mrr_at_100_diff1 value: 42.1769 - type: nauc_mrr_at_1000_max value: 39.3153 - type: nauc_mrr_at_1000_std value: 4.9384999999999994 - type: nauc_mrr_at_1000_diff1 value: 42.1768 - type: main_score value: 62.81 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (ko) type: miracl/mmteb-miracl config: ko split: dev revision: main metrics: - type: ndcg_at_1 value: 52.581999999999994 - type: ndcg_at_3 value: 53.73 - type: ndcg_at_5 value: 55.886 - type: ndcg_at_10 value: 59.216 - type: ndcg_at_20 value: 62.427 - type: ndcg_at_100 value: 65.093 - type: ndcg_at_1000 value: 66.204 - type: map_at_1 value: 30.520999999999997 - type: map_at_3 value: 42.601 - type: map_at_5 value: 46.516000000000005 - type: map_at_10 value: 49.61 - type: map_at_20 value: 51.359 - type: map_at_100 value: 52.171 - type: map_at_1000 value: 52.249 - type: recall_at_1 value: 30.520999999999997 - type: recall_at_3 value: 51.5 - type: recall_at_5 value: 60.709999999999994 - type: recall_at_10 value: 71.15899999999999 - type: recall_at_20 value: 80.209 - type: recall_at_100 value: 90.203 - type: recall_at_1000 value: 96.714 - type: precision_at_1 value: 52.581999999999994 - type: precision_at_3 value: 33.019999999999996 - type: precision_at_5 value: 25.446 - type: precision_at_10 value: 16.244 - type: precision_at_20 value: 9.695 - type: precision_at_100 value: 2.286 - type: precision_at_1000 value: 0.248 - type: mrr_at_1 value: 52.5822 - type: mrr_at_3 value: 61.9718 - type: mrr_at_5 value: 63.450700000000005 - type: mrr_at_10 value: 64.50479999999999 - type: mrr_at_20 value: 64.7745 - type: mrr_at_100 value: 64.86840000000001 - type: mrr_at_1000 value: 64.8792 - type: nauc_ndcg_at_1_max value: 57.2789 - type: nauc_ndcg_at_1_std value: 34.9863 - type: nauc_ndcg_at_1_diff1 value: 44.0111 - type: nauc_ndcg_at_3_max value: 34.18 - type: nauc_ndcg_at_3_std value: 11.1503 - type: nauc_ndcg_at_3_diff1 value: 40.339999999999996 - type: nauc_ndcg_at_5_max value: 34.4364 - type: nauc_ndcg_at_5_std value: 8.7133 - type: nauc_ndcg_at_5_diff1 value: 43.3464 - type: nauc_ndcg_at_10_max value: 35.990899999999996 - type: nauc_ndcg_at_10_std value: 10.886700000000001 - type: nauc_ndcg_at_10_diff1 value: 43.3519 - type: nauc_ndcg_at_20_max value: 40.259499999999996 - type: nauc_ndcg_at_20_std value: 16.305600000000002 - type: nauc_ndcg_at_20_diff1 value: 43.526900000000005 - type: nauc_ndcg_at_100_max value: 44.4663 - type: nauc_ndcg_at_100_std value: 21.5157 - type: nauc_ndcg_at_100_diff1 value: 43.269999999999996 - type: nauc_ndcg_at_1000_max value: 44.5037 - type: nauc_ndcg_at_1000_std value: 21.6384 - type: nauc_ndcg_at_1000_diff1 
value: 43.5169 - type: nauc_map_at_1_max value: 9.6775 - type: nauc_map_at_1_std value: -7.5287999999999995 - type: nauc_map_at_1_diff1 value: 56.714200000000005 - type: nauc_map_at_3_max value: 14.175199999999998 - type: nauc_map_at_3_std value: -9.251800000000001 - type: nauc_map_at_3_diff1 value: 47.239 - type: nauc_map_at_5_max value: 20.4059 - type: nauc_map_at_5_std value: -3.9799 - type: nauc_map_at_5_diff1 value: 46.5588 - type: nauc_map_at_10_max value: 26.7796 - type: nauc_map_at_10_std value: 2.3718 - type: nauc_map_at_10_diff1 value: 45.5976 - type: nauc_map_at_20_max value: 30.291400000000003 - type: nauc_map_at_20_std value: 6.3573 - type: nauc_map_at_20_diff1 value: 45.5914 - type: nauc_map_at_100_max value: 32.0062 - type: nauc_map_at_100_std value: 8.2968 - type: nauc_map_at_100_diff1 value: 45.6306 - type: nauc_map_at_1000_max value: 32.0482 - type: nauc_map_at_1000_std value: 8.3688 - type: nauc_map_at_1000_diff1 value: 45.6447 - type: nauc_recall_at_1_max value: 9.6775 - type: nauc_recall_at_1_std value: -7.5287999999999995 - type: nauc_recall_at_1_diff1 value: 56.714200000000005 - type: nauc_recall_at_3_max value: 4.7592 - type: nauc_recall_at_3_std value: -17.7268 - type: nauc_recall_at_3_diff1 value: 36.593599999999995 - type: nauc_recall_at_5_max value: 11.0166 - type: nauc_recall_at_5_std value: -14.832799999999999 - type: nauc_recall_at_5_diff1 value: 36.6471 - type: nauc_recall_at_10_max value: 20.272299999999998 - type: nauc_recall_at_10_std value: -3.9745000000000004 - type: nauc_recall_at_10_diff1 value: 34.875699999999995 - type: nauc_recall_at_20_max value: 27.0707 - type: nauc_recall_at_20_std value: 5.8709 - type: nauc_recall_at_20_diff1 value: 34.921600000000005 - type: nauc_recall_at_100_max value: 48.045100000000005 - type: nauc_recall_at_100_std value: 32.3099 - type: nauc_recall_at_100_diff1 value: 30.127 - type: nauc_recall_at_1000_max value: 60.827299999999994 - type: nauc_recall_at_1000_std value: 49.6791 - type: nauc_recall_at_1000_diff1 value: 32.2816 - type: nauc_precision_at_1_max value: 57.2789 - type: nauc_precision_at_1_std value: 34.9863 - type: nauc_precision_at_1_diff1 value: 44.0111 - type: nauc_precision_at_3_max value: 55.550900000000006 - type: nauc_precision_at_3_std value: 39.1605 - type: nauc_precision_at_3_diff1 value: 2.1411 - type: nauc_precision_at_5_max value: 60.1216 - type: nauc_precision_at_5_std value: 49.1925 - type: nauc_precision_at_5_diff1 value: -4.2296 - type: nauc_precision_at_10_max value: 63.53339999999999 - type: nauc_precision_at_10_std value: 57.2366 - type: nauc_precision_at_10_diff1 value: -9.1914 - type: nauc_precision_at_20_max value: 63.2997 - type: nauc_precision_at_20_std value: 62.778 - type: nauc_precision_at_20_diff1 value: -11.4618 - type: nauc_precision_at_100_max value: 61.345000000000006 - type: nauc_precision_at_100_std value: 66.3033 - type: nauc_precision_at_100_diff1 value: -14.8779 - type: nauc_precision_at_1000_max value: 56.28300000000001 - type: nauc_precision_at_1000_std value: 62.91290000000001 - type: nauc_precision_at_1000_diff1 value: -16.6149 - type: nauc_mrr_at_1_max value: 57.2789 - type: nauc_mrr_at_1_std value: 34.9863 - type: nauc_mrr_at_1_diff1 value: 44.0111 - type: nauc_mrr_at_3_max value: 57.678200000000004 - type: nauc_mrr_at_3_std value: 33.5744 - type: nauc_mrr_at_3_diff1 value: 39.5643 - type: nauc_mrr_at_5_max value: 58.668600000000005 - type: nauc_mrr_at_5_std value: 33.5118 - type: nauc_mrr_at_5_diff1 value: 40.888200000000005 - type: nauc_mrr_at_10_max value: 58.4754 
- type: nauc_mrr_at_10_std value: 33.7964 - type: nauc_mrr_at_10_diff1 value: 41.314 - type: nauc_mrr_at_20_max value: 58.434 - type: nauc_mrr_at_20_std value: 33.903 - type: nauc_mrr_at_20_diff1 value: 41.217999999999996 - type: nauc_mrr_at_100_max value: 58.4576 - type: nauc_mrr_at_100_std value: 33.9478 - type: nauc_mrr_at_100_diff1 value: 41.172599999999996 - type: nauc_mrr_at_1000_max value: 58.444399999999995 - type: nauc_mrr_at_1000_std value: 33.9292 - type: nauc_mrr_at_1000_diff1 value: 41.166199999999996 - type: main_score value: 59.216 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (ru) type: miracl/mmteb-miracl config: ru split: dev revision: main metrics: - type: ndcg_at_1 value: 47.524 - type: ndcg_at_3 value: 46.812 - type: ndcg_at_5 value: 48.442 - type: ndcg_at_10 value: 52.349000000000004 - type: ndcg_at_20 value: 55.669000000000004 - type: ndcg_at_100 value: 59.724999999999994 - type: ndcg_at_1000 value: 61.312999999999995 - type: map_at_1 value: 24.337 - type: map_at_3 value: 35.765 - type: map_at_5 value: 39.153 - type: map_at_10 value: 42.225 - type: map_at_20 value: 43.782 - type: map_at_100 value: 44.887 - type: map_at_1000 value: 45.013 - type: recall_at_1 value: 24.337 - type: recall_at_3 value: 42.927 - type: recall_at_5 value: 51.258 - type: recall_at_10 value: 62.437 - type: recall_at_20 value: 71.411 - type: recall_at_100 value: 86.489 - type: recall_at_1000 value: 95.26599999999999 - type: precision_at_1 value: 47.524 - type: precision_at_3 value: 31.948999999999998 - type: precision_at_5 value: 24.121000000000002 - type: precision_at_10 value: 15.534999999999998 - type: precision_at_20 value: 9.408999999999999 - type: precision_at_100 value: 2.407 - type: precision_at_1000 value: 0.271 - type: mrr_at_1 value: 47.524 - type: mrr_at_3 value: 57.6012 - type: mrr_at_5 value: 59.130700000000004 - type: mrr_at_10 value: 60.1824 - type: mrr_at_20 value: 60.507200000000005 - type: mrr_at_100 value: 60.6675 - type: mrr_at_1000 value: 60.6789 - type: nauc_ndcg_at_1_max value: 32.3091 - type: nauc_ndcg_at_1_std value: 10.915700000000001 - type: nauc_ndcg_at_1_diff1 value: 35.0477 - type: nauc_ndcg_at_3_max value: 30.5579 - type: nauc_ndcg_at_3_std value: 9.9651 - type: nauc_ndcg_at_3_diff1 value: 28.537200000000002 - type: nauc_ndcg_at_5_max value: 30.7637 - type: nauc_ndcg_at_5_std value: 9.7618 - type: nauc_ndcg_at_5_diff1 value: 28.225699999999996 - type: nauc_ndcg_at_10_max value: 32.0146 - type: nauc_ndcg_at_10_std value: 9.681099999999999 - type: nauc_ndcg_at_10_diff1 value: 27.6866 - type: nauc_ndcg_at_20_max value: 34.7846 - type: nauc_ndcg_at_20_std value: 13.270599999999998 - type: nauc_ndcg_at_20_diff1 value: 27.8097 - type: nauc_ndcg_at_100_max value: 37.1031 - type: nauc_ndcg_at_100_std value: 16.512 - type: nauc_ndcg_at_100_diff1 value: 28.294200000000004 - type: nauc_ndcg_at_1000_max value: 36.5248 - type: nauc_ndcg_at_1000_std value: 16.1206 - type: nauc_ndcg_at_1000_diff1 value: 28.6308 - type: nauc_map_at_1_max value: 17.363300000000002 - type: nauc_map_at_1_std value: -3.3156 - type: nauc_map_at_1_diff1 value: 33.9402 - type: nauc_map_at_3_max value: 23.0235 - type: nauc_map_at_3_std value: 1.2713999999999999 - type: nauc_map_at_3_diff1 value: 28.946499999999997 - type: nauc_map_at_5_max value: 25.8014 - type: nauc_map_at_5_std value: 3.8541 - type: nauc_map_at_5_diff1 value: 28.526 - type: nauc_map_at_10_max value: 27.6617 - type: nauc_map_at_10_std value: 5.2938 - type: nauc_map_at_10_diff1 value: 28.122700000000002 - type: 
nauc_map_at_20_max value: 29.071399999999997 - type: nauc_map_at_20_std value: 7.005 - type: nauc_map_at_20_diff1 value: 28.075 - type: nauc_map_at_100_max value: 29.9533 - type: nauc_map_at_100_std value: 8.0838 - type: nauc_map_at_100_diff1 value: 28.2424 - type: nauc_map_at_1000_max value: 29.936200000000003 - type: nauc_map_at_1000_std value: 8.0967 - type: nauc_map_at_1000_diff1 value: 28.259 - type: nauc_recall_at_1_max value: 17.363300000000002 - type: nauc_recall_at_1_std value: -3.3156 - type: nauc_recall_at_1_diff1 value: 33.9402 - type: nauc_recall_at_3_max value: 20.7272 - type: nauc_recall_at_3_std value: 1.9171 - type: nauc_recall_at_3_diff1 value: 23.505300000000002 - type: nauc_recall_at_5_max value: 24.55 - type: nauc_recall_at_5_std value: 6.1491999999999996 - type: nauc_recall_at_5_diff1 value: 21.1769 - type: nauc_recall_at_10_max value: 26.6134 - type: nauc_recall_at_10_std value: 7.3684 - type: nauc_recall_at_10_diff1 value: 18.0016 - type: nauc_recall_at_20_max value: 33.744 - type: nauc_recall_at_20_std value: 17.2573 - type: nauc_recall_at_20_diff1 value: 17.3872 - type: nauc_recall_at_100_max value: 49.5745 - type: nauc_recall_at_100_std value: 39.4003 - type: nauc_recall_at_100_diff1 value: 16.1814 - type: nauc_recall_at_1000_max value: 62.5842 - type: nauc_recall_at_1000_std value: 64.7392 - type: nauc_recall_at_1000_diff1 value: 16.9464 - type: nauc_precision_at_1_max value: 32.3091 - type: nauc_precision_at_1_std value: 10.915700000000001 - type: nauc_precision_at_1_diff1 value: 35.0477 - type: nauc_precision_at_3_max value: 34.9888 - type: nauc_precision_at_3_std value: 22.009600000000002 - type: nauc_precision_at_3_diff1 value: 13.4801 - type: nauc_precision_at_5_max value: 34.1539 - type: nauc_precision_at_5_std value: 25.2388 - type: nauc_precision_at_5_diff1 value: 8.622 - type: nauc_precision_at_10_max value: 31.194 - type: nauc_precision_at_10_std value: 25.397100000000002 - type: nauc_precision_at_10_diff1 value: 3.4173 - type: nauc_precision_at_20_max value: 29.3116 - type: nauc_precision_at_20_std value: 28.8229 - type: nauc_precision_at_20_diff1 value: -0.4374 - type: nauc_precision_at_100_max value: 23.853099999999998 - type: nauc_precision_at_100_std value: 29.942800000000002 - type: nauc_precision_at_100_diff1 value: -3.9575 - type: nauc_precision_at_1000_max value: 16.5958 - type: nauc_precision_at_1000_std value: 25.208599999999997 - type: nauc_precision_at_1000_diff1 value: -6.1125 - type: nauc_mrr_at_1_max value: 32.3091 - type: nauc_mrr_at_1_std value: 10.915700000000001 - type: nauc_mrr_at_1_diff1 value: 35.0477 - type: nauc_mrr_at_3_max value: 36.9469 - type: nauc_mrr_at_3_std value: 15.4767 - type: nauc_mrr_at_3_diff1 value: 33.3922 - type: nauc_mrr_at_5_max value: 37.7043 - type: nauc_mrr_at_5_std value: 16.2089 - type: nauc_mrr_at_5_diff1 value: 33.3182 - type: nauc_mrr_at_10_max value: 37.5403 - type: nauc_mrr_at_10_std value: 16.229599999999998 - type: nauc_mrr_at_10_diff1 value: 33.2431 - type: nauc_mrr_at_20_max value: 37.4812 - type: nauc_mrr_at_20_std value: 16.278100000000002 - type: nauc_mrr_at_20_diff1 value: 33.3127 - type: nauc_mrr_at_100_max value: 37.43 - type: nauc_mrr_at_100_std value: 16.2077 - type: nauc_mrr_at_100_diff1 value: 33.3439 - type: nauc_mrr_at_1000_max value: 37.4133 - type: nauc_mrr_at_1000_std value: 16.1859 - type: nauc_mrr_at_1000_diff1 value: 33.353300000000004 - type: main_score value: 52.349000000000004 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (sw) type: miracl/mmteb-miracl config: 
sw split: dev revision: main metrics: - type: ndcg_at_1 value: 51.66 - type: ndcg_at_3 value: 54.827999999999996 - type: ndcg_at_5 value: 57.382 - type: ndcg_at_10 value: 61.271 - type: ndcg_at_20 value: 63.64300000000001 - type: ndcg_at_100 value: 66.09899999999999 - type: ndcg_at_1000 value: 66.867 - type: map_at_1 value: 35.276999999999994 - type: map_at_3 value: 48.260999999999996 - type: map_at_5 value: 51.029 - type: map_at_10 value: 53.405 - type: map_at_20 value: 54.298 - type: map_at_100 value: 54.836 - type: map_at_1000 value: 54.887 - type: recall_at_1 value: 35.276999999999994 - type: recall_at_3 value: 56.739 - type: recall_at_5 value: 64.21 - type: recall_at_10 value: 74.368 - type: recall_at_20 value: 81.888 - type: recall_at_100 value: 92.26100000000001 - type: recall_at_1000 value: 97.109 - type: precision_at_1 value: 51.66 - type: precision_at_3 value: 30.843999999999998 - type: precision_at_5 value: 21.743000000000002 - type: precision_at_10 value: 12.988 - type: precision_at_20 value: 7.364999999999999 - type: precision_at_100 value: 1.714 - type: precision_at_1000 value: 0.184 - type: mrr_at_1 value: 51.6598 - type: mrr_at_3 value: 60.338899999999995 - type: mrr_at_5 value: 61.7808 - type: mrr_at_10 value: 62.751599999999996 - type: mrr_at_20 value: 63.1412 - type: mrr_at_100 value: 63.309099999999994 - type: mrr_at_1000 value: 63.317299999999996 - type: nauc_ndcg_at_1_max value: 33.6073 - type: nauc_ndcg_at_1_std value: 6.1046000000000005 - type: nauc_ndcg_at_1_diff1 value: 41.1955 - type: nauc_ndcg_at_3_max value: 31.268400000000003 - type: nauc_ndcg_at_3_std value: -2.9395000000000002 - type: nauc_ndcg_at_3_diff1 value: 35.6186 - type: nauc_ndcg_at_5_max value: 32.3145 - type: nauc_ndcg_at_5_std value: -0.7283999999999999 - type: nauc_ndcg_at_5_diff1 value: 37.7602 - type: nauc_ndcg_at_10_max value: 35.1426 - type: nauc_ndcg_at_10_std value: -0.13829999999999998 - type: nauc_ndcg_at_10_diff1 value: 36.8929 - type: nauc_ndcg_at_20_max value: 35.4227 - type: nauc_ndcg_at_20_std value: 0.8394999999999999 - type: nauc_ndcg_at_20_diff1 value: 36.9758 - type: nauc_ndcg_at_100_max value: 36.9415 - type: nauc_ndcg_at_100_std value: 5.9117999999999995 - type: nauc_ndcg_at_100_diff1 value: 37.0021 - type: nauc_ndcg_at_1000_max value: 37.0195 - type: nauc_ndcg_at_1000_std value: 5.5642 - type: nauc_ndcg_at_1000_diff1 value: 37.1389 - type: nauc_map_at_1_max value: 14.893600000000001 - type: nauc_map_at_1_std value: -6.9723 - type: nauc_map_at_1_diff1 value: 47.328399999999995 - type: nauc_map_at_3_max value: 25.1304 - type: nauc_map_at_3_std value: -5.5777 - type: nauc_map_at_3_diff1 value: 39.5728 - type: nauc_map_at_5_max value: 28.206599999999998 - type: nauc_map_at_5_std value: -3.2870000000000004 - type: nauc_map_at_5_diff1 value: 39.868500000000004 - type: nauc_map_at_10_max value: 30.520999999999997 - type: nauc_map_at_10_std value: -2.539 - type: nauc_map_at_10_diff1 value: 39.1287 - type: nauc_map_at_20_max value: 30.712899999999998 - type: nauc_map_at_20_std value: -2.0093 - type: nauc_map_at_20_diff1 value: 39.0357 - type: nauc_map_at_100_max value: 31.0687 - type: nauc_map_at_100_std value: -1.0538 - type: nauc_map_at_100_diff1 value: 38.9851 - type: nauc_map_at_1000_max value: 31.0939 - type: nauc_map_at_1000_std value: -1.0348 - type: nauc_map_at_1000_diff1 value: 38.9719 - type: nauc_recall_at_1_max value: 14.893600000000001 - type: nauc_recall_at_1_std value: -6.9723 - type: nauc_recall_at_1_diff1 value: 47.328399999999995 - type: nauc_recall_at_3_max value: 
25.0525 - type: nauc_recall_at_3_std value: -9.808300000000001 - type: nauc_recall_at_3_diff1 value: 32.9087 - type: nauc_recall_at_5_max value: 28.8065 - type: nauc_recall_at_5_std value: -4.5512999999999995 - type: nauc_recall_at_5_diff1 value: 32.9308 - type: nauc_recall_at_10_max value: 34.9121 - type: nauc_recall_at_10_std value: -5.8499 - type: nauc_recall_at_10_diff1 value: 29.791 - type: nauc_recall_at_20_max value: 35.6729 - type: nauc_recall_at_20_std value: -4.3512 - type: nauc_recall_at_20_diff1 value: 29.087600000000002 - type: nauc_recall_at_100_max value: 53.5866 - type: nauc_recall_at_100_std value: 49.692 - type: nauc_recall_at_100_diff1 value: 28.9725 - type: nauc_recall_at_1000_max value: 80.23949999999999 - type: nauc_recall_at_1000_std value: 86.7359 - type: nauc_recall_at_1000_diff1 value: 37.333 - type: nauc_precision_at_1_max value: 33.6073 - type: nauc_precision_at_1_std value: 6.1046000000000005 - type: nauc_precision_at_1_diff1 value: 41.1955 - type: nauc_precision_at_3_max value: 40.2515 - type: nauc_precision_at_3_std value: 12.1973 - type: nauc_precision_at_3_diff1 value: 3.9177999999999997 - type: nauc_precision_at_5_max value: 41.7312 - type: nauc_precision_at_5_std value: 17.921400000000002 - type: nauc_precision_at_5_diff1 value: -0.2405 - type: nauc_precision_at_10_max value: 39.9025 - type: nauc_precision_at_10_std value: 18.9909 - type: nauc_precision_at_10_diff1 value: -8.5406 - type: nauc_precision_at_20_max value: 34.1753 - type: nauc_precision_at_20_std value: 21.9853 - type: nauc_precision_at_20_diff1 value: -13.966700000000001 - type: nauc_precision_at_100_max value: 30.461 - type: nauc_precision_at_100_std value: 34.063900000000004 - type: nauc_precision_at_100_diff1 value: -21.1252 - type: nauc_precision_at_1000_max value: 26.5512 - type: nauc_precision_at_1000_std value: 30.7066 - type: nauc_precision_at_1000_diff1 value: -22.2902 - type: nauc_mrr_at_1_max value: 33.6073 - type: nauc_mrr_at_1_std value: 6.1046000000000005 - type: nauc_mrr_at_1_diff1 value: 41.1955 - type: nauc_mrr_at_3_max value: 37.6571 - type: nauc_mrr_at_3_std value: 5.2793 - type: nauc_mrr_at_3_diff1 value: 36.5302 - type: nauc_mrr_at_5_max value: 38.6239 - type: nauc_mrr_at_5_std value: 7.762700000000001 - type: nauc_mrr_at_5_diff1 value: 36.525 - type: nauc_mrr_at_10_max value: 38.4608 - type: nauc_mrr_at_10_std value: 7.131 - type: nauc_mrr_at_10_diff1 value: 36.4653 - type: nauc_mrr_at_20_max value: 38.2783 - type: nauc_mrr_at_20_std value: 6.9415000000000004 - type: nauc_mrr_at_20_diff1 value: 36.5089 - type: nauc_mrr_at_100_max value: 38.337199999999996 - type: nauc_mrr_at_100_std value: 7.2228 - type: nauc_mrr_at_100_diff1 value: 36.6891 - type: nauc_mrr_at_1000_max value: 38.327600000000004 - type: nauc_mrr_at_1000_std value: 7.206300000000001 - type: nauc_mrr_at_1000_diff1 value: 36.696400000000004 - type: main_score value: 61.271 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (te) type: miracl/mmteb-miracl config: te split: dev revision: main metrics: - type: ndcg_at_1 value: 63.647 - type: ndcg_at_3 value: 75.98700000000001 - type: ndcg_at_5 value: 77.86999999999999 - type: ndcg_at_10 value: 79.149 - type: ndcg_at_20 value: 79.50399999999999 - type: ndcg_at_100 value: 80.199 - type: ndcg_at_1000 value: 80.393 - type: map_at_1 value: 62.963 - type: map_at_3 value: 72.94699999999999 - type: map_at_5 value: 74.042 - type: map_at_10 value: 74.612 - type: map_at_20 value: 74.727 - type: map_at_100 value: 74.831 - type: map_at_1000 value: 74.839 - type: 
recall_at_1 value: 62.963 - type: recall_at_3 value: 84.15899999999999 - type: recall_at_5 value: 88.627 - type: recall_at_10 value: 92.411 - type: recall_at_20 value: 93.74 - type: recall_at_100 value: 97.363 - type: recall_at_1000 value: 98.833 - type: precision_at_1 value: 63.647 - type: precision_at_3 value: 28.622999999999998 - type: precision_at_5 value: 18.163999999999998 - type: precision_at_10 value: 9.481 - type: precision_at_20 value: 4.819 - type: precision_at_100 value: 1.001 - type: precision_at_1000 value: 0.10200000000000001 - type: mrr_at_1 value: 63.647299999999994 - type: mrr_at_3 value: 73.49029999999999 - type: mrr_at_5 value: 74.4626 - type: mrr_at_10 value: 74.98280000000001 - type: mrr_at_20 value: 75.0719 - type: mrr_at_100 value: 75.1695 - type: mrr_at_1000 value: 75.1769 - type: nauc_ndcg_at_1_max value: 33.3063 - type: nauc_ndcg_at_1_std value: -27.609699999999997 - type: nauc_ndcg_at_1_diff1 value: 64.8293 - type: nauc_ndcg_at_3_max value: 42.4738 - type: nauc_ndcg_at_3_std value: -23.8921 - type: nauc_ndcg_at_3_diff1 value: 56.43749999999999 - type: nauc_ndcg_at_5_max value: 43.132 - type: nauc_ndcg_at_5_std value: -23.2181 - type: nauc_ndcg_at_5_diff1 value: 55.722899999999996 - type: nauc_ndcg_at_10_max value: 43.036 - type: nauc_ndcg_at_10_std value: -22.880300000000002 - type: nauc_ndcg_at_10_diff1 value: 56.22279999999999 - type: nauc_ndcg_at_20_max value: 43.1538 - type: nauc_ndcg_at_20_std value: -22.7674 - type: nauc_ndcg_at_20_diff1 value: 56.4893 - type: nauc_ndcg_at_100_max value: 42.0908 - type: nauc_ndcg_at_100_std value: -22.3071 - type: nauc_ndcg_at_100_diff1 value: 57.5928 - type: nauc_ndcg_at_1000_max value: 41.6223 - type: nauc_ndcg_at_1000_std value: -22.747600000000002 - type: nauc_ndcg_at_1000_diff1 value: 57.6603 - type: nauc_map_at_1_max value: 31.9355 - type: nauc_map_at_1_std value: -29.4362 - type: nauc_map_at_1_diff1 value: 64.9802 - type: nauc_map_at_3_max value: 39.3304 - type: nauc_map_at_3_std value: -25.819 - type: nauc_map_at_3_diff1 value: 58.8664 - type: nauc_map_at_5_max value: 39.659800000000004 - type: nauc_map_at_5_std value: -25.3619 - type: nauc_map_at_5_diff1 value: 58.57449999999999 - type: nauc_map_at_10_max value: 39.6121 - type: nauc_map_at_10_std value: -25.2399 - type: nauc_map_at_10_diff1 value: 58.8083 - type: nauc_map_at_20_max value: 39.6958 - type: nauc_map_at_20_std value: -25.116 - type: nauc_map_at_20_diff1 value: 58.8995 - type: nauc_map_at_100_max value: 39.5617 - type: nauc_map_at_100_std value: -25.0319 - type: nauc_map_at_100_diff1 value: 59.053599999999996 - type: nauc_map_at_1000_max value: 39.5469 - type: nauc_map_at_1000_std value: -25.0473 - type: nauc_map_at_1000_diff1 value: 59.0556 - type: nauc_recall_at_1_max value: 31.9355 - type: nauc_recall_at_1_std value: -29.4362 - type: nauc_recall_at_1_diff1 value: 64.9802 - type: nauc_recall_at_3_max value: 54.57149999999999 - type: nauc_recall_at_3_std value: -17.9671 - type: nauc_recall_at_3_diff1 value: 45.4961 - type: nauc_recall_at_5_max value: 61.2002 - type: nauc_recall_at_5_std value: -13.9075 - type: nauc_recall_at_5_diff1 value: 39.1115 - type: nauc_recall_at_10_max value: 68.2226 - type: nauc_recall_at_10_std value: -7.230200000000001 - type: nauc_recall_at_10_diff1 value: 34.9241 - type: nauc_recall_at_20_max value: 74.08019999999999 - type: nauc_recall_at_20_std value: -4.4287 - type: nauc_recall_at_20_diff1 value: 33.4441 - type: nauc_recall_at_100_max value: 80.2462 - type: nauc_recall_at_100_std value: 30.9842 - type: 
nauc_recall_at_100_diff1 value: 38.0659 - type: nauc_recall_at_1000_max value: 77.5197 - type: nauc_recall_at_1000_std value: 51.5945 - type: nauc_recall_at_1000_diff1 value: 22.9724 - type: nauc_precision_at_1_max value: 33.3063 - type: nauc_precision_at_1_std value: -27.609699999999997 - type: nauc_precision_at_1_diff1 value: 64.8293 - type: nauc_precision_at_3_max value: 56.837199999999996 - type: nauc_precision_at_3_std value: -7.5578 - type: nauc_precision_at_3_diff1 value: 36.4516 - type: nauc_precision_at_5_max value: 57.3511 - type: nauc_precision_at_5_std value: 2.889 - type: nauc_precision_at_5_diff1 value: 23.0276 - type: nauc_precision_at_10_max value: 56.852999999999994 - type: nauc_precision_at_10_std value: 13.305900000000001 - type: nauc_precision_at_10_diff1 value: 12.1547 - type: nauc_precision_at_20_max value: 55.735299999999995 - type: nauc_precision_at_20_std value: 20.3483 - type: nauc_precision_at_20_diff1 value: 6.6423 - type: nauc_precision_at_100_max value: 43.358999999999995 - type: nauc_precision_at_100_std value: 44.4213 - type: nauc_precision_at_100_diff1 value: -5.556500000000001 - type: nauc_precision_at_1000_max value: 27.974 - type: nauc_precision_at_1000_std value: 47.254400000000004 - type: nauc_precision_at_1000_diff1 value: -21.8157 - type: nauc_mrr_at_1_max value: 33.3063 - type: nauc_mrr_at_1_std value: -27.609699999999997 - type: nauc_mrr_at_1_diff1 value: 64.8293 - type: nauc_mrr_at_3_max value: 40.129 - type: nauc_mrr_at_3_std value: -24.0152 - type: nauc_mrr_at_3_diff1 value: 58.9134 - type: nauc_mrr_at_5_max value: 40.1054 - type: nauc_mrr_at_5_std value: -24.0554 - type: nauc_mrr_at_5_diff1 value: 58.71920000000001 - type: nauc_mrr_at_10_max value: 40.0067 - type: nauc_mrr_at_10_std value: -23.9912 - type: nauc_mrr_at_10_diff1 value: 58.964099999999995 - type: nauc_mrr_at_20_max value: 39.9983 - type: nauc_mrr_at_20_std value: -24.0277 - type: nauc_mrr_at_20_diff1 value: 59.0425 - type: nauc_mrr_at_100_max value: 39.8766 - type: nauc_mrr_at_100_std value: -23.9296 - type: nauc_mrr_at_100_diff1 value: 59.1824 - type: nauc_mrr_at_1000_max value: 39.861799999999995 - type: nauc_mrr_at_1000_std value: -23.9468 - type: nauc_mrr_at_1000_diff1 value: 59.1847 - type: main_score value: 79.149 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (th) type: miracl/mmteb-miracl config: th split: dev revision: main metrics: - type: ndcg_at_1 value: 66.712 - type: ndcg_at_3 value: 67.393 - type: ndcg_at_5 value: 70.20100000000001 - type: ndcg_at_10 value: 73.324 - type: ndcg_at_20 value: 75.24300000000001 - type: ndcg_at_100 value: 76.633 - type: ndcg_at_1000 value: 77.119 - type: map_at_1 value: 47.105999999999995 - type: map_at_3 value: 60.67700000000001 - type: map_at_5 value: 63.81099999999999 - type: map_at_10 value: 65.998 - type: map_at_20 value: 66.914 - type: map_at_100 value: 67.258 - type: map_at_1000 value: 67.293 - type: recall_at_1 value: 47.105999999999995 - type: recall_at_3 value: 68.45599999999999 - type: recall_at_5 value: 75.91499999999999 - type: recall_at_10 value: 84.294 - type: recall_at_20 value: 90.08500000000001 - type: recall_at_100 value: 95.949 - type: recall_at_1000 value: 98.874 - type: precision_at_1 value: 66.712 - type: precision_at_3 value: 36.016 - type: precision_at_5 value: 25.157 - type: precision_at_10 value: 14.516000000000002 - type: precision_at_20 value: 7.994999999999999 - type: precision_at_100 value: 1.738 - type: precision_at_1000 value: 0.181 - type: mrr_at_1 value: 66.71209999999999 - type: mrr_at_3 
value: 74.3747 - type: mrr_at_5 value: 75.3297 - type: mrr_at_10 value: 75.9858 - type: mrr_at_20 value: 76.1819 - type: mrr_at_100 value: 76.2551 - type: mrr_at_1000 value: 76.2587 - type: nauc_ndcg_at_1_max value: 43.199799999999996 - type: nauc_ndcg_at_1_std value: 8.6242 - type: nauc_ndcg_at_1_diff1 value: 49.3688 - type: nauc_ndcg_at_3_max value: 37.9248 - type: nauc_ndcg_at_3_std value: -1.3769 - type: nauc_ndcg_at_3_diff1 value: 39.9588 - type: nauc_ndcg_at_5_max value: 38.4241 - type: nauc_ndcg_at_5_std value: -1.0533000000000001 - type: nauc_ndcg_at_5_diff1 value: 40.0453 - type: nauc_ndcg_at_10_max value: 40.4105 - type: nauc_ndcg_at_10_std value: 1.4455 - type: nauc_ndcg_at_10_diff1 value: 40.6256 - type: nauc_ndcg_at_20_max value: 41.1133 - type: nauc_ndcg_at_20_std value: 2.931 - type: nauc_ndcg_at_20_diff1 value: 40.920899999999996 - type: nauc_ndcg_at_100_max value: 41.6336 - type: nauc_ndcg_at_100_std value: 4.9768 - type: nauc_ndcg_at_100_diff1 value: 41.3658 - type: nauc_ndcg_at_1000_max value: 41.6223 - type: nauc_ndcg_at_1000_std value: 5.2031 - type: nauc_ndcg_at_1000_diff1 value: 41.4062 - type: nauc_map_at_1_max value: 20.7626 - type: nauc_map_at_1_std value: -8.0023 - type: nauc_map_at_1_diff1 value: 44.4569 - type: nauc_map_at_3_max value: 32.5175 - type: nauc_map_at_3_std value: -7.458099999999999 - type: nauc_map_at_3_diff1 value: 40.2164 - type: nauc_map_at_5_max value: 34.4803 - type: nauc_map_at_5_std value: -5.149 - type: nauc_map_at_5_diff1 value: 39.7814 - type: nauc_map_at_10_max value: 36.0112 - type: nauc_map_at_10_std value: -2.7143 - type: nauc_map_at_10_diff1 value: 40.231 - type: nauc_map_at_20_max value: 36.574200000000005 - type: nauc_map_at_20_std value: -1.718 - type: nauc_map_at_20_diff1 value: 40.278000000000006 - type: nauc_map_at_100_max value: 36.7445 - type: nauc_map_at_100_std value: -1.208 - type: nauc_map_at_100_diff1 value: 40.4046 - type: nauc_map_at_1000_max value: 36.770199999999996 - type: nauc_map_at_1000_std value: -1.1672 - type: nauc_map_at_1000_diff1 value: 40.409099999999995 - type: nauc_recall_at_1_max value: 20.7626 - type: nauc_recall_at_1_std value: -8.0023 - type: nauc_recall_at_1_diff1 value: 44.4569 - type: nauc_recall_at_3_max value: 31.2938 - type: nauc_recall_at_3_std value: -12.4723 - type: nauc_recall_at_3_diff1 value: 35.0524 - type: nauc_recall_at_5_max value: 34.4221 - type: nauc_recall_at_5_std value: -9.0849 - type: nauc_recall_at_5_diff1 value: 33.6966 - type: nauc_recall_at_10_max value: 40.1481 - type: nauc_recall_at_10_std value: -2.4007 - type: nauc_recall_at_10_diff1 value: 32.398700000000005 - type: nauc_recall_at_20_max value: 43.068400000000004 - type: nauc_recall_at_20_std value: 0.4869 - type: nauc_recall_at_20_diff1 value: 31.7169 - type: nauc_recall_at_100_max value: 54.1481 - type: nauc_recall_at_100_std value: 28.3243 - type: nauc_recall_at_100_diff1 value: 29.1055 - type: nauc_recall_at_1000_max value: 82.51389999999999 - type: nauc_recall_at_1000_std value: 88.3602 - type: nauc_recall_at_1000_diff1 value: 14.9201 - type: nauc_precision_at_1_max value: 43.199799999999996 - type: nauc_precision_at_1_std value: 8.6242 - type: nauc_precision_at_1_diff1 value: 49.3688 - type: nauc_precision_at_3_max value: 35.1732 - type: nauc_precision_at_3_std value: 16.3941 - type: nauc_precision_at_3_diff1 value: 4.4193999999999996 - type: nauc_precision_at_5_max value: 28.2059 - type: nauc_precision_at_5_std value: 22.4744 - type: nauc_precision_at_5_diff1 value: -4.0808 - type: nauc_precision_at_10_max value: 
22.7955 - type: nauc_precision_at_10_std value: 28.8744 - type: nauc_precision_at_10_diff1 value: -9.9309 - type: nauc_precision_at_20_max value: 17.2362 - type: nauc_precision_at_20_std value: 30.7132 - type: nauc_precision_at_20_diff1 value: -13.5708 - type: nauc_precision_at_100_max value: 13.3455 - type: nauc_precision_at_100_std value: 34.1715 - type: nauc_precision_at_100_diff1 value: -16.4298 - type: nauc_precision_at_1000_max value: 10.639700000000001 - type: nauc_precision_at_1000_std value: 33.1325 - type: nauc_precision_at_1000_diff1 value: -17.5938 - type: nauc_mrr_at_1_max value: 43.199799999999996 - type: nauc_mrr_at_1_std value: 8.6242 - type: nauc_mrr_at_1_diff1 value: 49.3688 - type: nauc_mrr_at_3_max value: 47.106500000000004 - type: nauc_mrr_at_3_std value: 10.3023 - type: nauc_mrr_at_3_diff1 value: 46.2565 - type: nauc_mrr_at_5_max value: 47.151900000000005 - type: nauc_mrr_at_5_std value: 11.2485 - type: nauc_mrr_at_5_diff1 value: 46.4519 - type: nauc_mrr_at_10_max value: 47.468700000000005 - type: nauc_mrr_at_10_std value: 11.5245 - type: nauc_mrr_at_10_diff1 value: 46.291399999999996 - type: nauc_mrr_at_20_max value: 47.3577 - type: nauc_mrr_at_20_std value: 11.3081 - type: nauc_mrr_at_20_diff1 value: 46.490700000000004 - type: nauc_mrr_at_100_max value: 47.3153 - type: nauc_mrr_at_100_std value: 11.2816 - type: nauc_mrr_at_100_diff1 value: 46.5288 - type: nauc_mrr_at_1000_max value: 47.308299999999996 - type: nauc_mrr_at_1000_std value: 11.2835 - type: nauc_mrr_at_1000_diff1 value: 46.5276 - type: main_score value: 73.324 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (yo) type: miracl/mmteb-miracl config: yo split: dev revision: main metrics: - type: ndcg_at_1 value: 49.58 - type: ndcg_at_3 value: 64.793 - type: ndcg_at_5 value: 66.709 - type: ndcg_at_10 value: 68.705 - type: ndcg_at_20 value: 69.8 - type: ndcg_at_100 value: 70.664 - type: ndcg_at_1000 value: 71.197 - type: map_at_1 value: 46.289 - type: map_at_3 value: 59.921 - type: map_at_5 value: 61.409000000000006 - type: map_at_10 value: 62.379 - type: map_at_20 value: 62.773 - type: map_at_100 value: 62.907000000000004 - type: map_at_1000 value: 62.922999999999995 - type: recall_at_1 value: 46.289 - type: recall_at_3 value: 75.07000000000001 - type: recall_at_5 value: 79.202 - type: recall_at_10 value: 85.154 - type: recall_at_20 value: 89.076 - type: recall_at_100 value: 93.557 - type: recall_at_1000 value: 97.479 - type: precision_at_1 value: 49.58 - type: precision_at_3 value: 28.571 - type: precision_at_5 value: 18.655 - type: precision_at_10 value: 10.084 - type: precision_at_20 value: 5.2940000000000005 - type: precision_at_100 value: 1.109 - type: precision_at_1000 value: 0.11800000000000001 - type: mrr_at_1 value: 49.5798 - type: mrr_at_3 value: 63.025200000000005 - type: mrr_at_5 value: 63.6134 - type: mrr_at_10 value: 64.2504 - type: mrr_at_20 value: 64.5152 - type: mrr_at_100 value: 64.6281 - type: mrr_at_1000 value: 64.63839999999999 - type: nauc_ndcg_at_1_max value: 18.5119 - type: nauc_ndcg_at_1_std value: -26.7799 - type: nauc_ndcg_at_1_diff1 value: 49.55 - type: nauc_ndcg_at_3_max value: 35.6833 - type: nauc_ndcg_at_3_std value: -19.023699999999998 - type: nauc_ndcg_at_3_diff1 value: 51.4553 - type: nauc_ndcg_at_5_max value: 34.252700000000004 - type: nauc_ndcg_at_5_std value: -16.9909 - type: nauc_ndcg_at_5_diff1 value: 50.034 - type: nauc_ndcg_at_10_max value: 35.115899999999996 - type: nauc_ndcg_at_10_std value: -15.454300000000002 - type: nauc_ndcg_at_10_diff1 value: 
51.13419999999999 - type: nauc_ndcg_at_20_max value: 36.3127 - type: nauc_ndcg_at_20_std value: -13.5123 - type: nauc_ndcg_at_20_diff1 value: 52.505100000000006 - type: nauc_ndcg_at_100_max value: 35.0788 - type: nauc_ndcg_at_100_std value: -15.118 - type: nauc_ndcg_at_100_diff1 value: 52.2994 - type: nauc_ndcg_at_1000_max value: 34.1448 - type: nauc_ndcg_at_1000_std value: -15.695300000000001 - type: nauc_ndcg_at_1000_diff1 value: 51.7561 - type: nauc_map_at_1_max value: 17.9766 - type: nauc_map_at_1_std value: -26.0689 - type: nauc_map_at_1_diff1 value: 51.3004 - type: nauc_map_at_3_max value: 30.426 - type: nauc_map_at_3_std value: -21.5618 - type: nauc_map_at_3_diff1 value: 51.9665 - type: nauc_map_at_5_max value: 30.3093 - type: nauc_map_at_5_std value: -19.1582 - type: nauc_map_at_5_diff1 value: 50.9919 - type: nauc_map_at_10_max value: 31.1197 - type: nauc_map_at_10_std value: -18.5626 - type: nauc_map_at_10_diff1 value: 51.3278 - type: nauc_map_at_20_max value: 31.3984 - type: nauc_map_at_20_std value: -17.8214 - type: nauc_map_at_20_diff1 value: 51.5951 - type: nauc_map_at_100_max value: 31.1974 - type: nauc_map_at_100_std value: -18.0483 - type: nauc_map_at_100_diff1 value: 51.51559999999999 - type: nauc_map_at_1000_max value: 31.167699999999996 - type: nauc_map_at_1000_std value: -18.076800000000002 - type: nauc_map_at_1000_diff1 value: 51.50130000000001 - type: nauc_recall_at_1_max value: 17.9766 - type: nauc_recall_at_1_std value: -26.0689 - type: nauc_recall_at_1_diff1 value: 51.3004 - type: nauc_recall_at_3_max value: 48.720200000000006 - type: nauc_recall_at_3_std value: -12.1143 - type: nauc_recall_at_3_diff1 value: 49.863800000000005 - type: nauc_recall_at_5_max value: 48.1997 - type: nauc_recall_at_5_std value: -5.8457 - type: nauc_recall_at_5_diff1 value: 46.062599999999996 - type: nauc_recall_at_10_max value: 56.5698 - type: nauc_recall_at_10_std value: 6.0906 - type: nauc_recall_at_10_diff1 value: 51.9053 - type: nauc_recall_at_20_max value: 73.61569999999999 - type: nauc_recall_at_20_std value: 25.8535 - type: nauc_recall_at_20_diff1 value: 64.7516 - type: nauc_recall_at_100_max value: 78.054 - type: nauc_recall_at_100_std value: 23.7984 - type: nauc_recall_at_100_diff1 value: 71.61999999999999 - type: nauc_recall_at_1000_max value: 92.5519 - type: nauc_recall_at_1000_std value: 59.609100000000005 - type: nauc_recall_at_1000_diff1 value: 78.6415 - type: nauc_precision_at_1_max value: 18.5119 - type: nauc_precision_at_1_std value: -26.7799 - type: nauc_precision_at_1_diff1 value: 49.55 - type: nauc_precision_at_3_max value: 45.402100000000004 - type: nauc_precision_at_3_std value: -5.331 - type: nauc_precision_at_3_diff1 value: 20.6481 - type: nauc_precision_at_5_max value: 33.7262 - type: nauc_precision_at_5_std value: 10.3483 - type: nauc_precision_at_5_diff1 value: 5.9393 - type: nauc_precision_at_10_max value: 35.3715 - type: nauc_precision_at_10_std value: 17.0809 - type: nauc_precision_at_10_diff1 value: 0.9325 - type: nauc_precision_at_20_max value: 35.2666 - type: nauc_precision_at_20_std value: 26.3214 - type: nauc_precision_at_20_diff1 value: -1.8064 - type: nauc_precision_at_100_max value: 29.0385 - type: nauc_precision_at_100_std value: 23.416500000000003 - type: nauc_precision_at_100_diff1 value: -10.83 - type: nauc_precision_at_1000_max value: 13.825299999999999 - type: nauc_precision_at_1000_std value: 16.7663 - type: nauc_precision_at_1000_diff1 value: -24.854200000000002 - type: nauc_mrr_at_1_max value: 18.5119 - type: nauc_mrr_at_1_std value: 
-26.7799 - type: nauc_mrr_at_1_diff1 value: 49.55 - type: nauc_mrr_at_3_max value: 29.916500000000003 - type: nauc_mrr_at_3_std value: -21.5719 - type: nauc_mrr_at_3_diff1 value: 50.2057 - type: nauc_mrr_at_5_max value: 28.929 - type: nauc_mrr_at_5_std value: -21.9015 - type: nauc_mrr_at_5_diff1 value: 49.6675 - type: nauc_mrr_at_10_max value: 28.6377 - type: nauc_mrr_at_10_std value: -21.4266 - type: nauc_mrr_at_10_diff1 value: 50.034800000000004 - type: nauc_mrr_at_20_max value: 28.7905 - type: nauc_mrr_at_20_std value: -21.192 - type: nauc_mrr_at_20_diff1 value: 50.3745 - type: nauc_mrr_at_100_max value: 28.5717 - type: nauc_mrr_at_100_std value: -21.3735 - type: nauc_mrr_at_100_diff1 value: 50.3333 - type: nauc_mrr_at_1000_max value: 28.5655 - type: nauc_mrr_at_1000_std value: -21.373 - type: nauc_mrr_at_1000_diff1 value: 50.3215 - type: main_score value: 68.705 - task: type: Retrieval dataset: name: MTEB MIRACLRetrieval (zh) type: miracl/mmteb-miracl config: zh split: dev revision: main metrics: - type: ndcg_at_1 value: 47.583 - type: ndcg_at_3 value: 45.839 - type: ndcg_at_5 value: 48.126999999999995 - type: ndcg_at_10 value: 52.553000000000004 - type: ndcg_at_20 value: 55.66799999999999 - type: ndcg_at_100 value: 60.0 - type: ndcg_at_1000 value: 61.415 - type: map_at_1 value: 24.488 - type: map_at_3 value: 36.202 - type: map_at_5 value: 39.771 - type: map_at_10 value: 42.725 - type: map_at_20 value: 44.163999999999994 - type: map_at_100 value: 45.269 - type: map_at_1000 value: 45.372 - type: recall_at_1 value: 24.488 - type: recall_at_3 value: 42.827 - type: recall_at_5 value: 52.081 - type: recall_at_10 value: 63.659 - type: recall_at_20 value: 72.652 - type: recall_at_100 value: 89.702 - type: recall_at_1000 value: 97.99600000000001 - type: precision_at_1 value: 47.583 - type: precision_at_3 value: 30.789 - type: precision_at_5 value: 23.206 - type: precision_at_10 value: 14.885000000000002 - type: precision_at_20 value: 8.803999999999998 - type: precision_at_100 value: 2.237 - type: precision_at_1000 value: 0.247 - type: mrr_at_1 value: 47.5827 - type: mrr_at_3 value: 56.4461 - type: mrr_at_5 value: 58.036500000000004 - type: mrr_at_10 value: 59.2419 - type: mrr_at_20 value: 59.5684 - type: mrr_at_100 value: 59.8496 - type: mrr_at_1000 value: 59.868500000000004 - type: nauc_ndcg_at_1_max value: 30.3153 - type: nauc_ndcg_at_1_std value: 16.1917 - type: nauc_ndcg_at_1_diff1 value: 33.1291 - type: nauc_ndcg_at_3_max value: 29.9473 - type: nauc_ndcg_at_3_std value: 9.9602 - type: nauc_ndcg_at_3_diff1 value: 26.354899999999997 - type: nauc_ndcg_at_5_max value: 27.5364 - type: nauc_ndcg_at_5_std value: 9.0106 - type: nauc_ndcg_at_5_diff1 value: 26.4299 - type: nauc_ndcg_at_10_max value: 30.1141 - type: nauc_ndcg_at_10_std value: 10.6319 - type: nauc_ndcg_at_10_diff1 value: 26.1015 - type: nauc_ndcg_at_20_max value: 31.864700000000003 - type: nauc_ndcg_at_20_std value: 14.376 - type: nauc_ndcg_at_20_diff1 value: 24.278 - type: nauc_ndcg_at_100_max value: 33.8328 - type: nauc_ndcg_at_100_std value: 17.1646 - type: nauc_ndcg_at_100_diff1 value: 24.7582 - type: nauc_ndcg_at_1000_max value: 33.0653 - type: nauc_ndcg_at_1000_std value: 15.717400000000001 - type: nauc_ndcg_at_1000_diff1 value: 25.708399999999997 - type: nauc_map_at_1_max value: 14.5636 - type: nauc_map_at_1_std value: -0.5065 - type: nauc_map_at_1_diff1 value: 37.5816 - type: nauc_map_at_3_max value: 21.752 - type: nauc_map_at_3_std value: 0.2942 - type: nauc_map_at_3_diff1 value: 29.662100000000002 - type: nauc_map_at_5_max 
value: 23.3994 - type: nauc_map_at_5_std value: 3.2369000000000003 - type: nauc_map_at_5_diff1 value: 28.479 - type: nauc_map_at_10_max value: 26.969500000000004 - type: nauc_map_at_10_std value: 6.4338999999999995 - type: nauc_map_at_10_diff1 value: 27.548000000000002 - type: nauc_map_at_20_max value: 28.2804 - type: nauc_map_at_20_std value: 8.3557 - type: nauc_map_at_20_diff1 value: 26.561600000000002 - type: nauc_map_at_100_max value: 28.979899999999997 - type: nauc_map_at_100_std value: 9.3446 - type: nauc_map_at_100_diff1 value: 26.539099999999998 - type: nauc_map_at_1000_max value: 28.9572 - type: nauc_map_at_1000_std value: 9.3017 - type: nauc_map_at_1000_diff1 value: 26.6029 - type: nauc_recall_at_1_max value: 14.5636 - type: nauc_recall_at_1_std value: -0.5065 - type: nauc_recall_at_1_diff1 value: 37.5816 - type: nauc_recall_at_3_max value: 19.8958 - type: nauc_recall_at_3_std value: -1.7080000000000002 - type: nauc_recall_at_3_diff1 value: 24.4885 - type: nauc_recall_at_5_max value: 18.8426 - type: nauc_recall_at_5_std value: 3.5769 - type: nauc_recall_at_5_diff1 value: 21.253700000000002 - type: nauc_recall_at_10_max value: 25.061299999999996 - type: nauc_recall_at_10_std value: 7.1753 - type: nauc_recall_at_10_diff1 value: 18.7378 - type: nauc_recall_at_20_max value: 28.6096 - type: nauc_recall_at_20_std value: 18.5789 - type: nauc_recall_at_20_diff1 value: 11.686 - type: nauc_recall_at_100_max value: 45.903 - type: nauc_recall_at_100_std value: 46.9916 - type: nauc_recall_at_100_diff1 value: 9.813600000000001 - type: nauc_recall_at_1000_max value: 62.512699999999995 - type: nauc_recall_at_1000_std value: 67.9442 - type: nauc_recall_at_1000_diff1 value: 34.3912 - type: nauc_precision_at_1_max value: 30.3153 - type: nauc_precision_at_1_std value: 16.1917 - type: nauc_precision_at_1_diff1 value: 33.1291 - type: nauc_precision_at_3_max value: 35.6697 - type: nauc_precision_at_3_std value: 18.0247 - type: nauc_precision_at_3_diff1 value: 7.0163 - type: nauc_precision_at_5_max value: 34.0555 - type: nauc_precision_at_5_std value: 23.5324 - type: nauc_precision_at_5_diff1 value: 0.44270000000000004 - type: nauc_precision_at_10_max value: 37.8515 - type: nauc_precision_at_10_std value: 31.657000000000004 - type: nauc_precision_at_10_diff1 value: -5.2642 - type: nauc_precision_at_20_max value: 36.025 - type: nauc_precision_at_20_std value: 35.236000000000004 - type: nauc_precision_at_20_diff1 value: -10.6916 - type: nauc_precision_at_100_max value: 29.678900000000002 - type: nauc_precision_at_100_std value: 35.2162 - type: nauc_precision_at_100_diff1 value: -13.7845 - type: nauc_precision_at_1000_max value: 22.2855 - type: nauc_precision_at_1000_std value: 27.221600000000002 - type: nauc_precision_at_1000_diff1 value: -13.4482 - type: nauc_mrr_at_1_max value: 30.3153 - type: nauc_mrr_at_1_std value: 16.1917 - type: nauc_mrr_at_1_diff1 value: 33.1291 - type: nauc_mrr_at_3_max value: 33.2966 - type: nauc_mrr_at_3_std value: 16.9755 - type: nauc_mrr_at_3_diff1 value: 29.814 - type: nauc_mrr_at_5_max value: 32.920300000000005 - type: nauc_mrr_at_5_std value: 17.832600000000003 - type: nauc_mrr_at_5_diff1 value: 29.683300000000003 - type: nauc_mrr_at_10_max value: 32.9394 - type: nauc_mrr_at_10_std value: 17.5036 - type: nauc_mrr_at_10_diff1 value: 29.6425 - type: nauc_mrr_at_20_max value: 32.852599999999995 - type: nauc_mrr_at_20_std value: 17.8307 - type: nauc_mrr_at_20_diff1 value: 29.4502 - type: nauc_mrr_at_100_max value: 32.9242 - type: nauc_mrr_at_100_std value: 17.7699 - type: 
nauc_mrr_at_100_diff1 value: 29.504399999999997 - type: nauc_mrr_at_1000_max value: 32.9303 - type: nauc_mrr_at_1000_std value: 17.7636 - type: nauc_mrr_at_1000_diff1 value: 29.526799999999998 - type: main_score value: 52.553000000000004 - task: type: Retrieval dataset: name: MTEB MSMARCO (default) type: mteb/msmarco config: default split: dev revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: ndcg_at_1 value: 14.155000000000001 - type: ndcg_at_3 value: 22.499 - type: ndcg_at_5 value: 26.233 - type: ndcg_at_10 value: 29.866999999999997 - type: ndcg_at_20 value: 32.616 - type: ndcg_at_100 value: 36.301 - type: ndcg_at_1000 value: 38.318999999999996 - type: map_at_1 value: 13.793 - type: map_at_3 value: 20.237 - type: map_at_5 value: 22.32 - type: map_at_10 value: 23.829 - type: map_at_20 value: 24.596999999999998 - type: map_at_100 value: 25.117 - type: map_at_1000 value: 25.194 - type: recall_at_1 value: 13.793 - type: recall_at_3 value: 28.592000000000002 - type: recall_at_5 value: 37.556 - type: recall_at_10 value: 48.669000000000004 - type: recall_at_20 value: 59.379000000000005 - type: recall_at_100 value: 78.927 - type: recall_at_1000 value: 94.568 - type: precision_at_1 value: 14.155000000000001 - type: precision_at_3 value: 9.828000000000001 - type: precision_at_5 value: 7.785 - type: precision_at_10 value: 5.06 - type: precision_at_20 value: 3.097 - type: precision_at_100 value: 0.83 - type: precision_at_1000 value: 0.1 - type: mrr_at_1 value: 14.1547 - type: mrr_at_3 value: 20.7139 - type: mrr_at_5 value: 22.8028 - type: mrr_at_10 value: 24.3047 - type: mrr_at_20 value: 25.0548 - type: mrr_at_100 value: 25.552000000000003 - type: mrr_at_1000 value: 25.622 - type: nauc_ndcg_at_1_max value: 1.4238 - type: nauc_ndcg_at_1_std value: -13.091800000000001 - type: nauc_ndcg_at_1_diff1 value: 29.1051 - type: nauc_ndcg_at_3_max value: 2.6131 - type: nauc_ndcg_at_3_std value: -14.6122 - type: nauc_ndcg_at_3_diff1 value: 24.0988 - type: nauc_ndcg_at_5_max value: 2.3456 - type: nauc_ndcg_at_5_std value: -15.092500000000001 - type: nauc_ndcg_at_5_diff1 value: 23.5516 - type: nauc_ndcg_at_10_max value: 2.8182 - type: nauc_ndcg_at_10_std value: -14.623700000000001 - type: nauc_ndcg_at_10_diff1 value: 23.1711 - type: nauc_ndcg_at_20_max value: 3.5518 - type: nauc_ndcg_at_20_std value: -12.931500000000002 - type: nauc_ndcg_at_20_diff1 value: 23.1818 - type: nauc_ndcg_at_100_max value: 4.7755 - type: nauc_ndcg_at_100_std value: -9.851899999999999 - type: nauc_ndcg_at_100_diff1 value: 23.340700000000002 - type: nauc_ndcg_at_1000_max value: 4.5916 - type: nauc_ndcg_at_1000_std value: -10.4923 - type: nauc_ndcg_at_1000_diff1 value: 23.5174 - type: nauc_map_at_1_max value: 1.4764 - type: nauc_map_at_1_std value: -13.2414 - type: nauc_map_at_1_diff1 value: 29.1169 - type: nauc_map_at_3_max value: 2.3523 - type: nauc_map_at_3_std value: -14.453 - type: nauc_map_at_3_diff1 value: 25.0786 - type: nauc_map_at_5_max value: 2.1924 - type: nauc_map_at_5_std value: -14.7681 - type: nauc_map_at_5_diff1 value: 24.7695 - type: nauc_map_at_10_max value: 2.3542 - type: nauc_map_at_10_std value: -14.6287 - type: nauc_map_at_10_diff1 value: 24.6169 - type: nauc_map_at_20_max value: 2.5815 - type: nauc_map_at_20_std value: -14.141699999999998 - type: nauc_map_at_20_diff1 value: 24.6406 - type: nauc_map_at_100_max value: 2.7435 - type: nauc_map_at_100_std value: -13.7208 - type: nauc_map_at_100_diff1 value: 24.6504 - type: nauc_map_at_1000_max value: 2.7392 - type: nauc_map_at_1000_std value: -13.7302 - 
type: nauc_map_at_1000_diff1 value: 24.654300000000003 - type: nauc_recall_at_1_max value: 1.4764 - type: nauc_recall_at_1_std value: -13.2414 - type: nauc_recall_at_1_diff1 value: 29.1169 - type: nauc_recall_at_3_max value: 3.2174 - type: nauc_recall_at_3_std value: -15.143300000000002 - type: nauc_recall_at_3_diff1 value: 21.593899999999998 - type: nauc_recall_at_5_max value: 2.6845 - type: nauc_recall_at_5_std value: -15.9795 - type: nauc_recall_at_5_diff1 value: 20.567 - type: nauc_recall_at_10_max value: 3.913 - type: nauc_recall_at_10_std value: -14.566899999999999 - type: nauc_recall_at_10_diff1 value: 19.4393 - type: nauc_recall_at_20_max value: 6.5038 - type: nauc_recall_at_20_std value: -8.572799999999999 - type: nauc_recall_at_20_diff1 value: 19.0899 - type: nauc_recall_at_100_max value: 16.7968 - type: nauc_recall_at_100_std value: 15.837200000000001 - type: nauc_recall_at_100_diff1 value: 18.3296 - type: nauc_recall_at_1000_max value: 39.6225 - type: nauc_recall_at_1000_std value: 53.9736 - type: nauc_recall_at_1000_diff1 value: 12.565499999999998 - type: nauc_precision_at_1_max value: 1.4238 - type: nauc_precision_at_1_std value: -13.091800000000001 - type: nauc_precision_at_1_diff1 value: 29.1051 - type: nauc_precision_at_3_max value: 3.3477 - type: nauc_precision_at_3_std value: -14.8784 - type: nauc_precision_at_3_diff1 value: 21.8029 - type: nauc_precision_at_5_max value: 2.8493 - type: nauc_precision_at_5_std value: -15.767000000000001 - type: nauc_precision_at_5_diff1 value: 20.5677 - type: nauc_precision_at_10_max value: 4.2772 - type: nauc_precision_at_10_std value: -14.0627 - type: nauc_precision_at_10_diff1 value: 19.1205 - type: nauc_precision_at_20_max value: 7.135800000000001 - type: nauc_precision_at_20_std value: -7.5076 - type: nauc_precision_at_20_diff1 value: 18.0149 - type: nauc_precision_at_100_max value: 16.791 - type: nauc_precision_at_100_std value: 16.2346 - type: nauc_precision_at_100_diff1 value: 13.9316 - type: nauc_precision_at_1000_max value: 20.7529 - type: nauc_precision_at_1000_std value: 27.4859 - type: nauc_precision_at_1000_diff1 value: 3.9303 - type: nauc_mrr_at_1_max value: 1.4238 - type: nauc_mrr_at_1_std value: -13.091800000000001 - type: nauc_mrr_at_1_diff1 value: 29.1051 - type: nauc_mrr_at_3_max value: 2.3397 - type: nauc_mrr_at_3_std value: -14.1544 - type: nauc_mrr_at_3_diff1 value: 25.208799999999997 - type: nauc_mrr_at_5_max value: 2.1534 - type: nauc_mrr_at_5_std value: -14.4094 - type: nauc_mrr_at_5_diff1 value: 24.8258 - type: nauc_mrr_at_10_max value: 2.4274 - type: nauc_mrr_at_10_std value: -14.2121 - type: nauc_mrr_at_10_diff1 value: 24.6847 - type: nauc_mrr_at_20_max value: 2.6235999999999997 - type: nauc_mrr_at_20_std value: -13.736400000000001 - type: nauc_mrr_at_20_diff1 value: 24.6859 - type: nauc_mrr_at_100_max value: 2.7653 - type: nauc_mrr_at_100_std value: -13.358600000000001 - type: nauc_mrr_at_100_diff1 value: 24.7238 - type: nauc_mrr_at_1000_max value: 2.7588999999999997 - type: nauc_mrr_at_1000_std value: -13.373199999999999 - type: nauc_mrr_at_1000_diff1 value: 24.7274 - type: main_score value: 29.866999999999997 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 89.89970000000001 - type: f1 value: 89.6705 - type: f1_weighted value: 89.8682 - type: main_score value: 89.89970000000001 - task: type: Classification dataset: name: MTEB MTOPIntentClassification 
(en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 60.26899999999999 - type: f1 value: 40.8003 - type: f1_weighted value: 63.033899999999996 - type: main_score value: 60.26899999999999 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 4672e20407010da34463acc759c162ca9734bca6 metrics: - type: accuracy value: 63.9509 - type: f1 value: 60.7828 - type: f1_weighted value: 62.8 - type: main_score value: 63.9509 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8 metrics: - type: accuracy value: 70.928 - type: f1 value: 69.4755 - type: f1_weighted value: 70.6366 - type: main_score value: 70.928 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P (default) type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 31.522 - type: v_measure_std value: 1.5528 - type: main_score value: 31.522 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S (default) type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 28.572599999999998 - type: v_measure_std value: 1.8154 - type: main_score value: 28.572599999999998 - task: type: Reranking dataset: name: MTEB MindSmallReranking (default) type: mteb/mind_small config: default split: test revision: 59042f120c80e8afa9cdbb224f67076cec0fc9a7 metrics: - type: map value: 30.5381 - type: mrr value: 31.574099999999998 - type: nAUC_map_max value: -19.592000000000002 - type: nAUC_map_std value: -3.0272 - type: nAUC_map_diff1 value: 14.0537 - type: nAUC_mrr_max value: -13.974900000000002 - type: nAUC_mrr_std value: -0.8847 - type: nAUC_mrr_diff1 value: 13.2721 - type: main_score value: 30.5381 - task: type: Retrieval dataset: name: MTEB NFCorpus (default) type: mteb/nfcorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: ndcg_at_1 value: 38.080000000000005 - type: ndcg_at_3 value: 34.405 - type: ndcg_at_5 value: 32.019999999999996 - type: ndcg_at_10 value: 28.903000000000002 - type: ndcg_at_20 value: 26.693 - type: ndcg_at_100 value: 26.662999999999997 - type: ndcg_at_1000 value: 35.698 - type: map_at_1 value: 4.423 - type: map_at_3 value: 7.733 - type: map_at_5 value: 9.006 - type: map_at_10 value: 10.366 - type: map_at_20 value: 11.333 - type: map_at_100 value: 12.811 - type: map_at_1000 value: 14.066 - type: recall_at_1 value: 4.423 - type: recall_at_3 value: 8.908000000000001 - type: recall_at_5 value: 11.179 - type: recall_at_10 value: 14.280999999999999 - type: recall_at_20 value: 17.192 - type: recall_at_100 value: 27.685 - type: recall_at_1000 value: 59.108000000000004 - type: precision_at_1 value: 40.248 - type: precision_at_3 value: 33.127 - type: precision_at_5 value: 27.864 - type: precision_at_10 value: 21.053 - type: precision_at_20 value: 15.356 - type: precision_at_100 value: 6.709 - type: precision_at_1000 value: 1.9529999999999998 - type: mrr_at_1 value: 40.247699999999995 - type: mrr_at_3 value: 47.7812 - type: mrr_at_5 value: 48.8958 - type: mrr_at_10 value: 49.4034 - type: mrr_at_20 value: 49.8468 - type: mrr_at_100 value: 50.104800000000004 - type: mrr_at_1000 value: 50.1703 - type: 
nauc_ndcg_at_1_max value: 34.5735 - type: nauc_ndcg_at_1_std value: 15.1084 - type: nauc_ndcg_at_1_diff1 value: 37.779 - type: nauc_ndcg_at_3_max value: 38.8071 - type: nauc_ndcg_at_3_std value: 24.7697 - type: nauc_ndcg_at_3_diff1 value: 29.5807 - type: nauc_ndcg_at_5_max value: 39.128800000000005 - type: nauc_ndcg_at_5_std value: 26.398 - type: nauc_ndcg_at_5_diff1 value: 30.3835 - type: nauc_ndcg_at_10_max value: 37.7665 - type: nauc_ndcg_at_10_std value: 27.5455 - type: nauc_ndcg_at_10_diff1 value: 30.1575 - type: nauc_ndcg_at_20_max value: 36.3537 - type: nauc_ndcg_at_20_std value: 28.4047 - type: nauc_ndcg_at_20_diff1 value: 27.9553 - type: nauc_ndcg_at_100_max value: 39.0086 - type: nauc_ndcg_at_100_std value: 28.4221 - type: nauc_ndcg_at_100_diff1 value: 27.833799999999997 - type: nauc_ndcg_at_1000_max value: 44.7295 - type: nauc_ndcg_at_1000_std value: 35.369 - type: nauc_ndcg_at_1000_diff1 value: 29.4449 - type: nauc_map_at_1_max value: 12.645100000000001 - type: nauc_map_at_1_std value: -13.536999999999999 - type: nauc_map_at_1_diff1 value: 45.0881 - type: nauc_map_at_3_max value: 14.6862 - type: nauc_map_at_3_std value: -6.6259 - type: nauc_map_at_3_diff1 value: 34.2575 - type: nauc_map_at_5_max value: 18.6559 - type: nauc_map_at_5_std value: -2.8853 - type: nauc_map_at_5_diff1 value: 32.9187 - type: nauc_map_at_10_max value: 22.1906 - type: nauc_map_at_10_std value: 1.8654 - type: nauc_map_at_10_diff1 value: 31.3784 - type: nauc_map_at_20_max value: 24.696199999999997 - type: nauc_map_at_20_std value: 6.1949 - type: nauc_map_at_20_diff1 value: 30.9956 - type: nauc_map_at_100_max value: 27.2011 - type: nauc_map_at_100_std value: 12.3619 - type: nauc_map_at_100_diff1 value: 30.811500000000002 - type: nauc_map_at_1000_max value: 27.6972 - type: nauc_map_at_1000_std value: 15.845999999999998 - type: nauc_map_at_1000_diff1 value: 30.5315 - type: nauc_recall_at_1_max value: 12.645100000000001 - type: nauc_recall_at_1_std value: -13.536999999999999 - type: nauc_recall_at_1_diff1 value: 45.0881 - type: nauc_recall_at_3_max value: 14.2305 - type: nauc_recall_at_3_std value: -2.4143000000000003 - type: nauc_recall_at_3_diff1 value: 27.1661 - type: nauc_recall_at_5_max value: 20.62 - type: nauc_recall_at_5_std value: 3.1332 - type: nauc_recall_at_5_diff1 value: 26.7813 - type: nauc_recall_at_10_max value: 22.0278 - type: nauc_recall_at_10_std value: 4.587 - type: nauc_recall_at_10_diff1 value: 22.0275 - type: nauc_recall_at_20_max value: 23.4161 - type: nauc_recall_at_20_std value: 8.2901 - type: nauc_recall_at_20_diff1 value: 20.9799 - type: nauc_recall_at_100_max value: 24.5345 - type: nauc_recall_at_100_std value: 17.1618 - type: nauc_recall_at_100_diff1 value: 15.586500000000001 - type: nauc_recall_at_1000_max value: 22.3168 - type: nauc_recall_at_1000_std value: 22.6961 - type: nauc_recall_at_1000_diff1 value: 9.9602 - type: nauc_precision_at_1_max value: 36.549 - type: nauc_precision_at_1_std value: 16.6789 - type: nauc_precision_at_1_diff1 value: 35.6095 - type: nauc_precision_at_3_max value: 42.6539 - type: nauc_precision_at_3_std value: 33.0974 - type: nauc_precision_at_3_diff1 value: 21.9208 - type: nauc_precision_at_5_max value: 41.787800000000004 - type: nauc_precision_at_5_std value: 35.2286 - type: nauc_precision_at_5_diff1 value: 21.104899999999997 - type: nauc_precision_at_10_max value: 37.7473 - type: nauc_precision_at_10_std value: 39.887 - type: nauc_precision_at_10_diff1 value: 18.9082 - type: nauc_precision_at_20_max value: 32.0874 - type: nauc_precision_at_20_std 
value: 44.798100000000005 - type: nauc_precision_at_20_diff1 value: 12.953000000000001 - type: nauc_precision_at_100_max value: 19.108900000000002 - type: nauc_precision_at_100_std value: 44.49 - type: nauc_precision_at_100_diff1 value: 6.4374 - type: nauc_precision_at_1000_max value: 2.5292 - type: nauc_precision_at_1000_std value: 30.523400000000002 - type: nauc_precision_at_1000_diff1 value: -0.6787 - type: nauc_mrr_at_1_max value: 36.549 - type: nauc_mrr_at_1_std value: 16.6789 - type: nauc_mrr_at_1_diff1 value: 35.6095 - type: nauc_mrr_at_3_max value: 43.425599999999996 - type: nauc_mrr_at_3_std value: 28.8242 - type: nauc_mrr_at_3_diff1 value: 33.4411 - type: nauc_mrr_at_5_max value: 44.5717 - type: nauc_mrr_at_5_std value: 29.5765 - type: nauc_mrr_at_5_diff1 value: 34.463899999999995 - type: nauc_mrr_at_10_max value: 44.6062 - type: nauc_mrr_at_10_std value: 29.5773 - type: nauc_mrr_at_10_diff1 value: 34.5158 - type: nauc_mrr_at_20_max value: 44.6961 - type: nauc_mrr_at_20_std value: 29.5126 - type: nauc_mrr_at_20_diff1 value: 34.2436 - type: nauc_mrr_at_100_max value: 44.8207 - type: nauc_mrr_at_100_std value: 29.649700000000003 - type: nauc_mrr_at_100_diff1 value: 34.3576 - type: nauc_mrr_at_1000_max value: 44.7763 - type: nauc_mrr_at_1000_std value: 29.6044 - type: nauc_mrr_at_1000_diff1 value: 34.3718 - type: main_score value: 28.903000000000002 - task: type: Retrieval dataset: name: MTEB NQ (default) type: mteb/nq config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: ndcg_at_1 value: 34.589 - type: ndcg_at_3 value: 45.289 - type: ndcg_at_5 value: 49.919000000000004 - type: ndcg_at_10 value: 53.410000000000004 - type: ndcg_at_20 value: 55.786 - type: ndcg_at_100 value: 57.75599999999999 - type: ndcg_at_1000 value: 58.51499999999999 - type: map_at_1 value: 30.503999999999998 - type: map_at_3 value: 41.396 - type: map_at_5 value: 44.216 - type: map_at_10 value: 45.802 - type: map_at_20 value: 46.542 - type: map_at_100 value: 46.867999999999995 - type: map_at_1000 value: 46.903 - type: recall_at_1 value: 30.503999999999998 - type: recall_at_3 value: 53.244 - type: recall_at_5 value: 63.912 - type: recall_at_10 value: 74.06099999999999 - type: recall_at_20 value: 82.819 - type: recall_at_100 value: 92.51599999999999 - type: recall_at_1000 value: 98.156 - type: precision_at_1 value: 34.589 - type: precision_at_3 value: 20.693 - type: precision_at_5 value: 15.058 - type: precision_at_10 value: 8.818 - type: precision_at_20 value: 4.9799999999999995 - type: precision_at_100 value: 1.125 - type: precision_at_1000 value: 0.11900000000000001 - type: mrr_at_1 value: 34.617599999999996 - type: mrr_at_3 value: 44.7277 - type: mrr_at_5 value: 47.0408 - type: mrr_at_10 value: 48.335499999999996 - type: mrr_at_20 value: 48.8925 - type: mrr_at_100 value: 49.1307 - type: mrr_at_1000 value: 49.154199999999996 - type: nauc_ndcg_at_1_max value: 23.8893 - type: nauc_ndcg_at_1_std value: -3.0092 - type: nauc_ndcg_at_1_diff1 value: 36.789899999999996 - type: nauc_ndcg_at_3_max value: 26.161800000000003 - type: nauc_ndcg_at_3_std value: -3.6557 - type: nauc_ndcg_at_3_diff1 value: 31.381500000000003 - type: nauc_ndcg_at_5_max value: 28.4273 - type: nauc_ndcg_at_5_std value: -2.6271 - type: nauc_ndcg_at_5_diff1 value: 30.960700000000003 - type: nauc_ndcg_at_10_max value: 29.1744 - type: nauc_ndcg_at_10_std value: -0.9882 - type: nauc_ndcg_at_10_diff1 value: 30.9664 - type: nauc_ndcg_at_20_max value: 30.1188 - type: nauc_ndcg_at_20_std value: 0.6556000000000001 - 
type: nauc_ndcg_at_20_diff1 value: 30.8734 - type: nauc_ndcg_at_100_max value: 29.822 - type: nauc_ndcg_at_100_std value: 1.1388 - type: nauc_ndcg_at_100_diff1 value: 31.348300000000002 - type: nauc_ndcg_at_1000_max value: 29.1591 - type: nauc_ndcg_at_1000_std value: 0.22569999999999998 - type: nauc_ndcg_at_1000_diff1 value: 31.7286 - type: nauc_map_at_1_max value: 22.2587 - type: nauc_map_at_1_std value: -4.6109 - type: nauc_map_at_1_diff1 value: 37.0942 - type: nauc_map_at_3_max value: 25.3764 - type: nauc_map_at_3_std value: -4.1876 - type: nauc_map_at_3_diff1 value: 32.752700000000004 - type: nauc_map_at_5_max value: 26.6367 - type: nauc_map_at_5_std value: -3.6224 - type: nauc_map_at_5_diff1 value: 32.4957 - type: nauc_map_at_10_max value: 27.0304 - type: nauc_map_at_10_std value: -2.852 - type: nauc_map_at_10_diff1 value: 32.548899999999996 - type: nauc_map_at_20_max value: 27.2991 - type: nauc_map_at_20_std value: -2.3765 - type: nauc_map_at_20_diff1 value: 32.5216 - type: nauc_map_at_100_max value: 27.2665 - type: nauc_map_at_100_std value: -2.2849999999999997 - type: nauc_map_at_100_diff1 value: 32.5791 - type: nauc_map_at_1000_max value: 27.243499999999997 - type: nauc_map_at_1000_std value: -2.3154999999999997 - type: nauc_map_at_1000_diff1 value: 32.5925 - type: nauc_recall_at_1_max value: 22.2587 - type: nauc_recall_at_1_std value: -4.6109 - type: nauc_recall_at_1_diff1 value: 37.0942 - type: nauc_recall_at_3_max value: 27.0818 - type: nauc_recall_at_3_std value: -3.5904 - type: nauc_recall_at_3_diff1 value: 26.6279 - type: nauc_recall_at_5_max value: 32.6179 - type: nauc_recall_at_5_std value: -1.2186000000000001 - type: nauc_recall_at_5_diff1 value: 24.7151 - type: nauc_recall_at_10_max value: 36.105599999999995 - type: nauc_recall_at_10_std value: 4.5315 - type: nauc_recall_at_10_diff1 value: 23.4044 - type: nauc_recall_at_20_max value: 45.2605 - type: nauc_recall_at_20_std value: 17.092299999999998 - type: nauc_recall_at_20_diff1 value: 20.5304 - type: nauc_recall_at_100_max value: 57.85829999999999 - type: nauc_recall_at_100_std value: 42.517500000000005 - type: nauc_recall_at_100_diff1 value: 19.6591 - type: nauc_recall_at_1000_max value: 75.3601 - type: nauc_recall_at_1000_std value: 69.4265 - type: nauc_recall_at_1000_diff1 value: 29.8635 - type: nauc_precision_at_1_max value: 23.8893 - type: nauc_precision_at_1_std value: -3.0092 - type: nauc_precision_at_1_diff1 value: 36.789899999999996 - type: nauc_precision_at_3_max value: 27.1749 - type: nauc_precision_at_3_std value: -0.9776 - type: nauc_precision_at_3_diff1 value: 22.9551 - type: nauc_precision_at_5_max value: 28.6992 - type: nauc_precision_at_5_std value: 2.1732 - type: nauc_precision_at_5_diff1 value: 17.6422 - type: nauc_precision_at_10_max value: 27.2755 - type: nauc_precision_at_10_std value: 8.4934 - type: nauc_precision_at_10_diff1 value: 12.1581 - type: nauc_precision_at_20_max value: 26.858900000000002 - type: nauc_precision_at_20_std value: 15.7942 - type: nauc_precision_at_20_diff1 value: 5.8980999999999995 - type: nauc_precision_at_100_max value: 18.8392 - type: nauc_precision_at_100_std value: 19.7054 - type: nauc_precision_at_100_diff1 value: -0.8163 - type: nauc_precision_at_1000_max value: 9.8054 - type: nauc_precision_at_1000_std value: 14.4735 - type: nauc_precision_at_1000_diff1 value: -4.7447 - type: nauc_mrr_at_1_max value: 23.8759 - type: nauc_mrr_at_1_std value: -3.0908 - type: nauc_mrr_at_1_diff1 value: 36.7027 - type: nauc_mrr_at_3_max value: 25.9165 - type: nauc_mrr_at_3_std value: 
-2.3997 - type: nauc_mrr_at_3_diff1 value: 32.5473 - type: nauc_mrr_at_5_max value: 27.1119 - type: nauc_mrr_at_5_std value: -1.8426999999999998 - type: nauc_mrr_at_5_diff1 value: 32.4999 - type: nauc_mrr_at_10_max value: 27.2217 - type: nauc_mrr_at_10_std value: -1.3365 - type: nauc_mrr_at_10_diff1 value: 32.5293 - type: nauc_mrr_at_20_max value: 27.3157 - type: nauc_mrr_at_20_std value: -1.1132 - type: nauc_mrr_at_20_diff1 value: 32.554300000000005 - type: nauc_mrr_at_100_max value: 27.2621 - type: nauc_mrr_at_100_std value: -1.0897000000000001 - type: nauc_mrr_at_100_diff1 value: 32.6073 - type: nauc_mrr_at_1000_max value: 27.2409 - type: nauc_mrr_at_1000_std value: -1.1176 - type: nauc_mrr_at_1000_diff1 value: 32.6192 - type: main_score value: 53.410000000000004 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval (default) type: mteb/quora config: default split: test revision: e4e08e0b7dbe3c8700f0daef558ff32256715259 metrics: - type: ndcg_at_1 value: 79.64 - type: ndcg_at_3 value: 83.67599999999999 - type: ndcg_at_5 value: 85.52 - type: ndcg_at_10 value: 86.871 - type: ndcg_at_20 value: 87.59 - type: ndcg_at_100 value: 88.211 - type: ndcg_at_1000 value: 88.36 - type: map_at_1 value: 69.133 - type: map_at_3 value: 79.776 - type: map_at_5 value: 81.747 - type: map_at_10 value: 82.852 - type: map_at_20 value: 83.282 - type: map_at_100 value: 83.5 - type: map_at_1000 value: 83.519 - type: recall_at_1 value: 69.133 - type: recall_at_3 value: 85.526 - type: recall_at_5 value: 90.596 - type: recall_at_10 value: 94.613 - type: recall_at_20 value: 96.92699999999999 - type: recall_at_100 value: 99.24300000000001 - type: recall_at_1000 value: 99.96000000000001 - type: precision_at_1 value: 79.64 - type: precision_at_3 value: 36.516999999999996 - type: precision_at_5 value: 24.194 - type: precision_at_10 value: 13.203000000000001 - type: precision_at_20 value: 7.02 - type: precision_at_100 value: 1.514 - type: precision_at_1000 value: 0.156 - type: mrr_at_1 value: 79.60000000000001 - type: mrr_at_3 value: 84.955 - type: mrr_at_5 value: 85.74000000000001 - type: mrr_at_10 value: 86.0913 - type: mrr_at_20 value: 86.1768 - type: mrr_at_100 value: 86.2076 - type: mrr_at_1000 value: 86.2092 - type: nauc_ndcg_at_1_max value: 39.4509 - type: nauc_ndcg_at_1_std value: -30.6309 - type: nauc_ndcg_at_1_diff1 value: 76.5171 - type: nauc_ndcg_at_3_max value: 37.9586 - type: nauc_ndcg_at_3_std value: -35.8174 - type: nauc_ndcg_at_3_diff1 value: 74.5992 - type: nauc_ndcg_at_5_max value: 38.541799999999995 - type: nauc_ndcg_at_5_std value: -36.456300000000006 - type: nauc_ndcg_at_5_diff1 value: 75.0506 - type: nauc_ndcg_at_10_max value: 38.996199999999995 - type: nauc_ndcg_at_10_std value: -35.6649 - type: nauc_ndcg_at_10_diff1 value: 75.3601 - type: nauc_ndcg_at_20_max value: 39.1758 - type: nauc_ndcg_at_20_std value: -34.7636 - type: nauc_ndcg_at_20_diff1 value: 75.3846 - type: nauc_ndcg_at_100_max value: 39.6116 - type: nauc_ndcg_at_100_std value: -33.2361 - type: nauc_ndcg_at_100_diff1 value: 75.31 - type: nauc_ndcg_at_1000_max value: 39.6171 - type: nauc_ndcg_at_1000_std value: -33.1588 - type: nauc_ndcg_at_1000_diff1 value: 75.2929 - type: nauc_map_at_1_max value: 28.8061 - type: nauc_map_at_1_std value: -33.7016 - type: nauc_map_at_1_diff1 value: 78.7612 - type: nauc_map_at_3_max value: 35.2541 - type: nauc_map_at_3_std value: -37.741400000000006 - type: nauc_map_at_3_diff1 value: 75.8173 - type: nauc_map_at_5_max value: 36.822500000000005 - type: nauc_map_at_5_std value: -37.710300000000004 - type: 
nauc_map_at_5_diff1 value: 75.7355 - type: nauc_map_at_10_max value: 37.5769 - type: nauc_map_at_10_std value: -36.5907 - type: nauc_map_at_10_diff1 value: 75.60040000000001 - type: nauc_map_at_20_max value: 37.8409 - type: nauc_map_at_20_std value: -35.7977 - type: nauc_map_at_20_diff1 value: 75.4885 - type: nauc_map_at_100_max value: 38.0097 - type: nauc_map_at_100_std value: -35.1815 - type: nauc_map_at_100_diff1 value: 75.4349 - type: nauc_map_at_1000_max value: 38.0191 - type: nauc_map_at_1000_std value: -35.1434 - type: nauc_map_at_1000_diff1 value: 75.4325 - type: nauc_recall_at_1_max value: 28.8061 - type: nauc_recall_at_1_std value: -33.7016 - type: nauc_recall_at_1_diff1 value: 78.7612 - type: nauc_recall_at_3_max value: 32.889 - type: nauc_recall_at_3_std value: -41.323100000000004 - type: nauc_recall_at_3_diff1 value: 71.73570000000001 - type: nauc_recall_at_5_max value: 34.6917 - type: nauc_recall_at_5_std value: -44.5216 - type: nauc_recall_at_5_diff1 value: 70.42540000000001 - type: nauc_recall_at_10_max value: 36.0356 - type: nauc_recall_at_10_std value: -45.073 - type: nauc_recall_at_10_diff1 value: 70.1776 - type: nauc_recall_at_20_max value: 35.714800000000004 - type: nauc_recall_at_20_std value: -44.0962 - type: nauc_recall_at_20_diff1 value: 71.23620000000001 - type: nauc_recall_at_100_max value: 43.105199999999996 - type: nauc_recall_at_100_std value: -18.800900000000002 - type: nauc_recall_at_100_diff1 value: 70.7888 - type: nauc_recall_at_1000_max value: 64.4844 - type: nauc_recall_at_1000_std value: 41.486200000000004 - type: nauc_recall_at_1000_diff1 value: 69.0643 - type: nauc_precision_at_1_max value: 39.4509 - type: nauc_precision_at_1_std value: -30.6309 - type: nauc_precision_at_1_diff1 value: 76.5171 - type: nauc_precision_at_3_max value: 12.514800000000001 - type: nauc_precision_at_3_std value: 3.2272000000000003 - type: nauc_precision_at_3_diff1 value: -11.8298 - type: nauc_precision_at_5_max value: 6.0901 - type: nauc_precision_at_5_std value: 12.6778 - type: nauc_precision_at_5_diff1 value: -26.570300000000003 - type: nauc_precision_at_10_max value: 0.9773999999999999 - type: nauc_precision_at_10_std value: 21.1764 - type: nauc_precision_at_10_diff1 value: -35.2909 - type: nauc_precision_at_20_max value: -2.2387 - type: nauc_precision_at_20_std value: 26.571099999999998 - type: nauc_precision_at_20_diff1 value: -39.0582 - type: nauc_precision_at_100_max value: -4.9125000000000005 - type: nauc_precision_at_100_std value: 31.9907 - type: nauc_precision_at_100_diff1 value: -41.5916 - type: nauc_precision_at_1000_max value: -6.0841 - type: nauc_precision_at_1000_std value: 32.8504 - type: nauc_precision_at_1000_diff1 value: -42.25 - type: nauc_mrr_at_1_max value: 39.285599999999995 - type: nauc_mrr_at_1_std value: -30.799100000000003 - type: nauc_mrr_at_1_diff1 value: 76.6113 - type: nauc_mrr_at_3_max value: 40.7492 - type: nauc_mrr_at_3_std value: -31.933699999999998 - type: nauc_mrr_at_3_diff1 value: 75.593 - type: nauc_mrr_at_5_max value: 40.87 - type: nauc_mrr_at_5_std value: -31.9333 - type: nauc_mrr_at_5_diff1 value: 75.7331 - type: nauc_mrr_at_10_max value: 40.7704 - type: nauc_mrr_at_10_std value: -31.839699999999997 - type: nauc_mrr_at_10_diff1 value: 75.8249 - type: nauc_mrr_at_20_max value: 40.7107 - type: nauc_mrr_at_20_std value: -31.7701 - type: nauc_mrr_at_20_diff1 value: 75.8463 - type: nauc_mrr_at_100_max value: 40.6937 - type: nauc_mrr_at_100_std value: -31.735999999999997 - type: nauc_mrr_at_100_diff1 value: 75.84309999999999 - type: 
nauc_mrr_at_1000_max value: 40.691 - type: nauc_mrr_at_1000_std value: -31.7368 - type: nauc_mrr_at_1000_diff1 value: 75.84349999999999 - type: main_score value: 86.871 - task: type: Clustering dataset: name: MTEB RedditClustering (default) type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 45.8568 - type: v_measure_std value: 5.685 - type: main_score value: 45.8568 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P (default) type: mteb/reddit-clustering-p2p config: default split: test revision: 385e3cb46b4cfa89021f56c4380204149d0efe33 metrics: - type: v_measure value: 54.9896 - type: v_measure_std value: 12.0517 - type: main_score value: 54.9896 - task: type: Retrieval dataset: name: MTEB SCIDOCS (default) type: mteb/scidocs config: default split: test revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88 metrics: - type: ndcg_at_1 value: 20.599999999999998 - type: ndcg_at_3 value: 17.214 - type: ndcg_at_5 value: 14.93 - type: ndcg_at_10 value: 17.721 - type: ndcg_at_20 value: 20.619 - type: ndcg_at_100 value: 25.46 - type: ndcg_at_1000 value: 30.846 - type: map_at_1 value: 4.175 - type: map_at_3 value: 7.611 - type: map_at_5 value: 8.955 - type: map_at_10 value: 10.360999999999999 - type: map_at_20 value: 11.414 - type: map_at_100 value: 12.3 - type: map_at_1000 value: 12.595999999999998 - type: recall_at_1 value: 4.175 - type: recall_at_3 value: 9.868 - type: recall_at_5 value: 13.303 - type: recall_at_10 value: 18.397 - type: recall_at_20 value: 25.162000000000003 - type: recall_at_100 value: 40.99 - type: recall_at_1000 value: 67.322 - type: precision_at_1 value: 20.599999999999998 - type: precision_at_3 value: 16.2 - type: precision_at_5 value: 13.120000000000001 - type: precision_at_10 value: 9.06 - type: precision_at_20 value: 6.1899999999999995 - type: precision_at_100 value: 2.017 - type: precision_at_1000 value: 0.331 - type: mrr_at_1 value: 20.599999999999998 - type: mrr_at_3 value: 28.1833 - type: mrr_at_5 value: 30.043300000000002 - type: mrr_at_10 value: 31.1391 - type: mrr_at_20 value: 31.9095 - type: mrr_at_100 value: 32.3914 - type: mrr_at_1000 value: 32.4509 - type: nauc_ndcg_at_1_max value: 26.9024 - type: nauc_ndcg_at_1_std value: 4.1442 - type: nauc_ndcg_at_1_diff1 value: 25.9169 - type: nauc_ndcg_at_3_max value: 33.2338 - type: nauc_ndcg_at_3_std value: 7.0103 - type: nauc_ndcg_at_3_diff1 value: 24.8464 - type: nauc_ndcg_at_5_max value: 33.833999999999996 - type: nauc_ndcg_at_5_std value: 8.515 - type: nauc_ndcg_at_5_diff1 value: 22.7135 - type: nauc_ndcg_at_10_max value: 34.6873 - type: nauc_ndcg_at_10_std value: 12.3294 - type: nauc_ndcg_at_10_diff1 value: 20.4198 - type: nauc_ndcg_at_20_max value: 36.889 - type: nauc_ndcg_at_20_std value: 15.5519 - type: nauc_ndcg_at_20_diff1 value: 20.7428 - type: nauc_ndcg_at_100_max value: 39.0403 - type: nauc_ndcg_at_100_std value: 20.2488 - type: nauc_ndcg_at_100_diff1 value: 20.572 - type: nauc_ndcg_at_1000_max value: 38.7458 - type: nauc_ndcg_at_1000_std value: 21.7088 - type: nauc_ndcg_at_1000_diff1 value: 20.5603 - type: nauc_map_at_1_max value: 27.091199999999997 - type: nauc_map_at_1_std value: 4.3355999999999995 - type: nauc_map_at_1_diff1 value: 25.7587 - type: nauc_map_at_3_max value: 33.602900000000005 - type: nauc_map_at_3_std value: 5.8709 - type: nauc_map_at_3_diff1 value: 25.5351 - type: nauc_map_at_5_max value: 34.414 - type: nauc_map_at_5_std value: 6.914199999999999 - type: nauc_map_at_5_diff1 value: 23.7741 
- type: nauc_map_at_10_max value: 35.1586 - type: nauc_map_at_10_std value: 10.078800000000001 - type: nauc_map_at_10_diff1 value: 21.628600000000002 - type: nauc_map_at_20_max value: 36.7719 - type: nauc_map_at_20_std value: 12.1807 - type: nauc_map_at_20_diff1 value: 22.0201 - type: nauc_map_at_100_max value: 37.5971 - type: nauc_map_at_100_std value: 13.828299999999999 - type: nauc_map_at_100_diff1 value: 21.8011 - type: nauc_map_at_1000_max value: 37.6524 - type: nauc_map_at_1000_std value: 14.0603 - type: nauc_map_at_1000_diff1 value: 21.87 - type: nauc_recall_at_1_max value: 27.091199999999997 - type: nauc_recall_at_1_std value: 4.3355999999999995 - type: nauc_recall_at_1_diff1 value: 25.7587 - type: nauc_recall_at_3_max value: 35.0346 - type: nauc_recall_at_3_std value: 7.6722 - type: nauc_recall_at_3_diff1 value: 23.8398 - type: nauc_recall_at_5_max value: 34.7429 - type: nauc_recall_at_5_std value: 9.8479 - type: nauc_recall_at_5_diff1 value: 19.9693 - type: nauc_recall_at_10_max value: 34.1188 - type: nauc_recall_at_10_std value: 16.0443 - type: nauc_recall_at_10_diff1 value: 14.844399999999998 - type: nauc_recall_at_20_max value: 36.9825 - type: nauc_recall_at_20_std value: 21.5553 - type: nauc_recall_at_20_diff1 value: 15.4056 - type: nauc_recall_at_100_max value: 37.238 - type: nauc_recall_at_100_std value: 30.425400000000003 - type: nauc_recall_at_100_diff1 value: 12.839 - type: nauc_recall_at_1000_max value: 30.188599999999997 - type: nauc_recall_at_1000_std value: 34.7768 - type: nauc_recall_at_1000_diff1 value: 8.337 - type: nauc_precision_at_1_max value: 26.9024 - type: nauc_precision_at_1_std value: 4.1442 - type: nauc_precision_at_1_diff1 value: 25.9169 - type: nauc_precision_at_3_max value: 35.3949 - type: nauc_precision_at_3_std value: 7.818300000000001 - type: nauc_precision_at_3_diff1 value: 24.4077 - type: nauc_precision_at_5_max value: 35.0653 - type: nauc_precision_at_5_std value: 10.1252 - type: nauc_precision_at_5_diff1 value: 20.4485 - type: nauc_precision_at_10_max value: 34.5799 - type: nauc_precision_at_10_std value: 16.2893 - type: nauc_precision_at_10_diff1 value: 15.337600000000002 - type: nauc_precision_at_20_max value: 37.47 - type: nauc_precision_at_20_std value: 21.7447 - type: nauc_precision_at_20_diff1 value: 15.644 - type: nauc_precision_at_100_max value: 37.8956 - type: nauc_precision_at_100_std value: 30.6388 - type: nauc_precision_at_100_diff1 value: 13.5011 - type: nauc_precision_at_1000_max value: 30.456699999999998 - type: nauc_precision_at_1000_std value: 34.3528 - type: nauc_precision_at_1000_diff1 value: 8.963899999999999 - type: nauc_mrr_at_1_max value: 26.9024 - type: nauc_mrr_at_1_std value: 4.1442 - type: nauc_mrr_at_1_diff1 value: 25.9169 - type: nauc_mrr_at_3_max value: 30.214999999999996 - type: nauc_mrr_at_3_std value: 7.4483 - type: nauc_mrr_at_3_diff1 value: 23.7169 - type: nauc_mrr_at_5_max value: 30.1892 - type: nauc_mrr_at_5_std value: 8.319 - type: nauc_mrr_at_5_diff1 value: 23.4187 - type: nauc_mrr_at_10_max value: 30.5879 - type: nauc_mrr_at_10_std value: 8.9701 - type: nauc_mrr_at_10_diff1 value: 23.4357 - type: nauc_mrr_at_20_max value: 30.579800000000002 - type: nauc_mrr_at_20_std value: 9.3186 - type: nauc_mrr_at_20_diff1 value: 23.2358 - type: nauc_mrr_at_100_max value: 30.660500000000003 - type: nauc_mrr_at_100_std value: 9.404 - type: nauc_mrr_at_100_diff1 value: 23.3937 - type: nauc_mrr_at_1000_max value: 30.6315 - type: nauc_mrr_at_1000_std value: 9.363299999999999 - type: nauc_mrr_at_1000_diff1 value: 
23.392599999999998
    - type: main_score
      value: 17.721
  - task:
      type: STS
    dataset:
      name: MTEB SICK-R (default)
      type: mteb/sickr-sts
      config: default
      split: test
      revision: 20a6d6f312dd54037fe07a32d58e5e168867909d
    metrics:
    - type: pearson
      value: 75.5378
    - type: spearman
      value: 68.7448
    - type: cosine_pearson
      value: 75.5378
    - type: cosine_spearman
      value: 68.7448
    - type: manhattan_pearson
      value: 72.905
    - type: manhattan_spearman
      value: 68.9036
    - type: euclidean_pearson
      value: 72.7586
    - type: euclidean_spearman
      value: 68.7448
    - type: main_score
      value: 68.7448
  - task:
      type: STS
    dataset:
      name: MTEB STS12 (default)
      type: mteb/sts12-sts
      config: default
      split: test
      revision: a0d554a64d88156834ff5ae9920b964011b16384
    metrics:
    - type: pearson
      value: 81.6341
    - type: spearman
      value: 75.1911
    - type: cosine_pearson
      value: 81.6341
    - type: cosine_spearman
      value: 75.1911
    - type: manhattan_pearson
      value: 78.4046
    - type: manhattan_spearman
      value: 75.1706
    - type: euclidean_pearson
      value: 78.3649
    - type: euclidean_spearman
      value: 75.1934
    - type: main_score
      value: 75.1911
  - task:
      type: STS
    dataset:
      name: MTEB STS13 (default)
      type: mteb/sts13-sts
      config: default
      split: test
      revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca
    metrics:
    - type: pearson
      value: 76.4378
    - type: spearman
      value: 77.3053
    - type: cosine_pearson
      value: 76.4378
    - type: cosine_spearman
      value: 77.3053
    - type: manhattan_pearson
      value: 77.1958
    - type: manhattan_spearman
      value: 77.2543
    - type: euclidean_pearson
      value: 77.2317
    - type: euclidean_spearman
      value: 77.3053
    - type: main_score
      value: 77.3053
  - task:
      type: STS
    dataset:
      name: MTEB STS14 (default)
      type: mteb/sts14-sts
      config: default
      split: test
      revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375
    metrics:
    - type: pearson
      value: 78.4342
    - type: spearman
      value: 74.9479
    - type: cosine_pearson
      value: 78.4342
    - type: cosine_spearman
      value: 74.9479
    - type: manhattan_pearson
      value: 77.12219999999999
    - type: manhattan_spearman
      value: 74.924
    - type: euclidean_pearson
      value: 77.14800000000001
    - type: euclidean_spearman
      value: 74.94800000000001
    - type: main_score
      value: 74.9479
  - task:
      type: STS
    dataset:
      name: MTEB STS15 (default)
      type: mteb/sts15-sts
      config: default
      split: test
      revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3
    metrics:
    - type: pearson
      value: 85.1908
    - type: spearman
      value: 86.0174
    - type: cosine_pearson
      value: 85.1908
    - type: cosine_spearman
      value: 86.0174
    - type: manhattan_pearson
      value: 85.4436
    - type: manhattan_spearman
      value: 86.0332
    - type: euclidean_pearson
      value: 85.4339
    - type: euclidean_spearman
      value: 86.0174
    - type: main_score
      value: 86.0174
  - task:
      type: STS
    dataset:
      name: MTEB STS16 (default)
      type: mteb/sts16-sts
      config: default
      split: test
      revision: 4d8694f8f0e0100860b497b999b3dbed754a0513
    metrics:
    - type: pearson
      value: 80.5421
    - type: spearman
      value: 81.9568
    - type: cosine_pearson
      value: 80.5421
    - type: cosine_spearman
      value: 81.9568
    - type: manhattan_pearson
      value: 81.1013
    - type: manhattan_spearman
      value: 81.8165
    - type: euclidean_pearson
      value: 81.24510000000001
    - type: euclidean_spearman
      value: 81.9568
    - type: main_score
      value: 81.9568
  - task:
      type: STS
    dataset:
      name: MTEB STS17 (en-tr)
      type: mteb/sts17-crosslingual-sts
      config: en-tr
      split: test
      revision: faeb762787bd10488a50c8b5be4a3b82e411949c
    metrics:
    - type: pearson
      value: 48.2717
    - type: spearman
      value: 44.642900000000004
    - type: cosine_pearson
      value: 48.2717
    - type: cosine_spearman
      value: 44.642900000000004
    - type: manhattan_pearson
      value: 50.314400000000006
    - type: manhattan_spearman
      value: 44.982299999999995
    -
type: euclidean_pearson value: 50.1685 - type: euclidean_spearman value: 44.642900000000004 - type: main_score value: 44.642900000000004 - task: type: STS dataset: name: MTEB STS17 (it-en) type: mteb/sts17-crosslingual-sts config: it-en split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: pearson value: 67.8601 - type: spearman value: 68.2763 - type: cosine_pearson value: 67.8601 - type: cosine_spearman value: 68.2763 - type: manhattan_pearson value: 68.1563 - type: manhattan_spearman value: 68.4724 - type: euclidean_pearson value: 68.1026 - type: euclidean_spearman value: 68.2763 - type: main_score value: 68.2763 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: pearson value: 78.05539999999999 - type: spearman value: 78.5929 - type: cosine_pearson value: 78.05539999999999 - type: cosine_spearman value: 78.5929 - type: manhattan_pearson value: 78.408 - type: manhattan_spearman value: 78.8622 - type: euclidean_pearson value: 78.1413 - type: euclidean_spearman value: 78.5929 - type: main_score value: 78.5929 - task: type: STS dataset: name: MTEB STS17 (en-ar) type: mteb/sts17-crosslingual-sts config: en-ar split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: pearson value: 59.4349 - type: spearman value: 59.838800000000006 - type: cosine_pearson value: 59.4349 - type: cosine_spearman value: 59.838800000000006 - type: manhattan_pearson value: 60.7565 - type: manhattan_spearman value: 60.5824 - type: euclidean_pearson value: 60.247099999999996 - type: euclidean_spearman value: 59.838800000000006 - type: main_score value: 59.838800000000006 - task: type: STS dataset: name: MTEB STS17 (fr-en) type: mteb/sts17-crosslingual-sts config: fr-en split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: pearson value: 73.84039999999999 - type: spearman value: 74.2498 - type: cosine_pearson value: 73.84039999999999 - type: cosine_spearman value: 74.2498 - type: manhattan_pearson value: 74.6784 - type: manhattan_spearman value: 74.4608 - type: euclidean_pearson value: 74.5596 - type: euclidean_spearman value: 74.2498 - type: main_score value: 74.2498 - task: type: STS dataset: name: MTEB STS17 (nl-en) type: mteb/sts17-crosslingual-sts config: nl-en split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: pearson value: 67.9218 - type: spearman value: 68.0418 - type: cosine_pearson value: 67.9218 - type: cosine_spearman value: 68.0418 - type: manhattan_pearson value: 68.51 - type: manhattan_spearman value: 68.1968 - type: euclidean_pearson value: 68.343 - type: euclidean_spearman value: 68.0418 - type: main_score value: 68.0418 - task: type: STS dataset: name: MTEB STS17 (es-en) type: mteb/sts17-crosslingual-sts config: es-en split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: pearson value: 70.381 - type: spearman value: 69.5729 - type: cosine_pearson value: 70.381 - type: cosine_spearman value: 69.5729 - type: manhattan_pearson value: 70.8688 - type: manhattan_spearman value: 69.4406 - type: euclidean_pearson value: 71.0267 - type: euclidean_spearman value: 69.5729 - type: main_score value: 69.5729 - task: type: STS dataset: name: MTEB STS17 (en-de) type: mteb/sts17-crosslingual-sts config: en-de split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: pearson value: 70.0196 - type: spearman value: 69.7175 - type: 
cosine_pearson value: 70.0196 - type: cosine_spearman value: 69.7175 - type: manhattan_pearson value: 71.40990000000001 - type: manhattan_spearman value: 70.1461 - type: euclidean_pearson value: 70.88799999999999 - type: euclidean_spearman value: 69.7175 - type: main_score value: 69.7175 - task: type: STS dataset: name: MTEB STS22 (de-en) type: mteb/sts22-crosslingual-sts config: de-en split: test revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 metrics: - type: pearson value: 65.7536 - type: spearman value: 60.04429999999999 - type: cosine_pearson value: 65.7536 - type: cosine_spearman value: 60.04429999999999 - type: manhattan_pearson value: 68.58579999999999 - type: manhattan_spearman value: 60.3699 - type: euclidean_pearson value: 68.3761 - type: euclidean_spearman value: 60.04429999999999 - type: main_score value: 60.04429999999999 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 metrics: - type: pearson value: 68.997 - type: spearman value: 68.1508 - type: cosine_pearson value: 68.997 - type: cosine_spearman value: 68.1508 - type: manhattan_pearson value: 68.9229 - type: manhattan_spearman value: 68.0124 - type: euclidean_pearson value: 69.0519 - type: euclidean_spearman value: 68.1508 - type: main_score value: 68.1508 - task: type: STS dataset: name: MTEB STS22 (es-en) type: mteb/sts22-crosslingual-sts config: es-en split: test revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 metrics: - type: pearson value: 80.2006 - type: spearman value: 80.4702 - type: cosine_pearson value: 80.2006 - type: cosine_spearman value: 80.4702 - type: manhattan_pearson value: 80.81009999999999 - type: manhattan_spearman value: 80.6037 - type: euclidean_pearson value: 80.66290000000001 - type: euclidean_spearman value: 80.4702 - type: main_score value: 80.4702 - task: type: STS dataset: name: MTEB STS22 (zh-en) type: mteb/sts22-crosslingual-sts config: zh-en split: test revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 metrics: - type: pearson value: 74.0885 - type: spearman value: 72.4574 - type: cosine_pearson value: 74.0885 - type: cosine_spearman value: 72.4574 - type: manhattan_pearson value: 75.25659999999999 - type: manhattan_spearman value: 71.9695 - type: euclidean_pearson value: 75.4999 - type: euclidean_spearman value: 72.4574 - type: main_score value: 72.4574 - task: type: STS dataset: name: MTEB STS22 (pl-en) type: mteb/sts22-crosslingual-sts config: pl-en split: test revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 metrics: - type: pearson value: 74.1794 - type: spearman value: 70.6749 - type: cosine_pearson value: 74.1794 - type: cosine_spearman value: 70.6749 - type: manhattan_pearson value: 74.3245 - type: manhattan_spearman value: 71.2375 - type: euclidean_pearson value: 73.221 - type: euclidean_spearman value: 70.6749 - type: main_score value: 70.6749 - task: type: STS dataset: name: MTEB STSBenchmark (default) type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: pearson value: 76.7328 - type: spearman value: 78.4076 - type: cosine_pearson value: 76.7328 - type: cosine_spearman value: 78.4076 - type: manhattan_pearson value: 78.24950000000001 - type: manhattan_spearman value: 78.23400000000001 - type: euclidean_pearson value: 78.3628 - type: euclidean_spearman value: 78.4076 - type: main_score value: 78.4076 - task: type: Reranking dataset: name: MTEB SciDocsRR (default) type: mteb/scidocs-reranking 
config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 79.6097 - type: mrr value: 94.12939999999999 - type: nAUC_map_max value: 58.7937 - type: nAUC_map_std value: 69.6785 - type: nAUC_map_diff1 value: 7.4891 - type: nAUC_mrr_max value: 84.7821 - type: nAUC_mrr_std value: 77.6636 - type: nAUC_mrr_diff1 value: 49.763600000000004 - type: main_score value: 79.6097 - task: type: Retrieval dataset: name: MTEB SciFact (default) type: mteb/scifact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: ndcg_at_1 value: 54.0 - type: ndcg_at_3 value: 60.851 - type: ndcg_at_5 value: 63.410999999999994 - type: ndcg_at_10 value: 65.847 - type: ndcg_at_20 value: 66.937 - type: ndcg_at_100 value: 68.262 - type: ndcg_at_1000 value: 69.341 - type: map_at_1 value: 51.093999999999994 - type: map_at_3 value: 58.044 - type: map_at_5 value: 59.702999999999996 - type: map_at_10 value: 60.885999999999996 - type: map_at_20 value: 61.266 - type: map_at_100 value: 61.482000000000006 - type: map_at_1000 value: 61.519 - type: recall_at_1 value: 51.093999999999994 - type: recall_at_3 value: 66.128 - type: recall_at_5 value: 72.456 - type: recall_at_10 value: 79.3 - type: recall_at_20 value: 83.2 - type: recall_at_100 value: 90.0 - type: recall_at_1000 value: 98.667 - type: precision_at_1 value: 54.0 - type: precision_at_3 value: 23.778 - type: precision_at_5 value: 15.933 - type: precision_at_10 value: 8.967 - type: precision_at_20 value: 4.75 - type: precision_at_100 value: 1.03 - type: precision_at_1000 value: 0.11199999999999999 - type: mrr_at_1 value: 54.0 - type: mrr_at_3 value: 60.3889 - type: mrr_at_5 value: 61.7556 - type: mrr_at_10 value: 62.5984 - type: mrr_at_20 value: 62.85039999999999 - type: mrr_at_100 value: 63.0155 - type: mrr_at_1000 value: 63.052699999999994 - type: nauc_ndcg_at_1_max value: 56.6373 - type: nauc_ndcg_at_1_std value: 2.1765 - type: nauc_ndcg_at_1_diff1 value: 71.14829999999999 - type: nauc_ndcg_at_3_max value: 53.7965 - type: nauc_ndcg_at_3_std value: -3.4057999999999997 - type: nauc_ndcg_at_3_diff1 value: 63.712199999999996 - type: nauc_ndcg_at_5_max value: 56.96059999999999 - type: nauc_ndcg_at_5_std value: 1.4794 - type: nauc_ndcg_at_5_diff1 value: 64.65419999999999 - type: nauc_ndcg_at_10_max value: 59.4154 - type: nauc_ndcg_at_10_std value: 5.2752 - type: nauc_ndcg_at_10_diff1 value: 64.3098 - type: nauc_ndcg_at_20_max value: 59.7717 - type: nauc_ndcg_at_20_std value: 6.2032 - type: nauc_ndcg_at_20_diff1 value: 64.18599999999999 - type: nauc_ndcg_at_100_max value: 59.2146 - type: nauc_ndcg_at_100_std value: 6.0138 - type: nauc_ndcg_at_100_diff1 value: 64.0895 - type: nauc_ndcg_at_1000_max value: 58.5714 - type: nauc_ndcg_at_1000_std value: 4.8872 - type: nauc_ndcg_at_1000_diff1 value: 64.66969999999999 - type: nauc_map_at_1_max value: 51.2417 - type: nauc_map_at_1_std value: -5.42 - type: nauc_map_at_1_diff1 value: 70.0616 - type: nauc_map_at_3_max value: 51.9587 - type: nauc_map_at_3_std value: -5.3035 - type: nauc_map_at_3_diff1 value: 65.282 - type: nauc_map_at_5_max value: 54.1516 - type: nauc_map_at_5_std value: -2.2858 - type: nauc_map_at_5_diff1 value: 65.86659999999999 - type: nauc_map_at_10_max value: 55.5412 - type: nauc_map_at_10_std value: -0.34299999999999997 - type: nauc_map_at_10_diff1 value: 65.89620000000001 - type: nauc_map_at_20_max value: 55.7967 - type: nauc_map_at_20_std value: 0.13799999999999998 - type: nauc_map_at_20_diff1 value: 65.8685 - type: nauc_map_at_100_max 
value: 55.74550000000001 - type: nauc_map_at_100_std value: 0.211 - type: nauc_map_at_100_diff1 value: 65.8557 - type: nauc_map_at_1000_max value: 55.728 - type: nauc_map_at_1000_std value: 0.1875 - type: nauc_map_at_1000_diff1 value: 65.8748 - type: nauc_recall_at_1_max value: 51.2417 - type: nauc_recall_at_1_std value: -5.42 - type: nauc_recall_at_1_diff1 value: 70.0616 - type: nauc_recall_at_3_max value: 52.4327 - type: nauc_recall_at_3_std value: -6.7153 - type: nauc_recall_at_3_diff1 value: 57.111999999999995 - type: nauc_recall_at_5_max value: 60.5827 - type: nauc_recall_at_5_std value: 7.1365 - type: nauc_recall_at_5_diff1 value: 58.3449 - type: nauc_recall_at_10_max value: 70.24770000000001 - type: nauc_recall_at_10_std value: 22.0896 - type: nauc_recall_at_10_diff1 value: 55.7264 - type: nauc_recall_at_20_max value: 73.483 - type: nauc_recall_at_20_std value: 29.653299999999998 - type: nauc_recall_at_20_diff1 value: 53.54750000000001 - type: nauc_recall_at_100_max value: 74.0321 - type: nauc_recall_at_100_std value: 37.491400000000006 - type: nauc_recall_at_100_diff1 value: 47.3918 - type: nauc_recall_at_1000_max value: 69.5378 - type: nauc_recall_at_1000_std value: 60.5042 - type: nauc_recall_at_1000_diff1 value: 19.5028 - type: nauc_precision_at_1_max value: 56.6373 - type: nauc_precision_at_1_std value: 2.1765 - type: nauc_precision_at_1_diff1 value: 71.14829999999999 - type: nauc_precision_at_3_max value: 51.811099999999996 - type: nauc_precision_at_3_std value: 8.4319 - type: nauc_precision_at_3_diff1 value: 48.545500000000004 - type: nauc_precision_at_5_max value: 55.4685 - type: nauc_precision_at_5_std value: 26.387 - type: nauc_precision_at_5_diff1 value: 39.6201 - type: nauc_precision_at_10_max value: 53.2436 - type: nauc_precision_at_10_std value: 41.6957 - type: nauc_precision_at_10_diff1 value: 24.6115 - type: nauc_precision_at_20_max value: 48.353699999999996 - type: nauc_precision_at_20_std value: 47.253 - type: nauc_precision_at_20_diff1 value: 15.687599999999998 - type: nauc_precision_at_100_max value: 36.771100000000004 - type: nauc_precision_at_100_std value: 48.1335 - type: nauc_precision_at_100_diff1 value: 2.6454 - type: nauc_precision_at_1000_max value: 23.0391 - type: nauc_precision_at_1000_std value: 53.26499999999999 - type: nauc_precision_at_1000_diff1 value: -15.0974 - type: nauc_mrr_at_1_max value: 56.6373 - type: nauc_mrr_at_1_std value: 2.1765 - type: nauc_mrr_at_1_diff1 value: 71.14829999999999 - type: nauc_mrr_at_3_max value: 57.6843 - type: nauc_mrr_at_3_std value: 2.4692 - type: nauc_mrr_at_3_diff1 value: 66.10340000000001 - type: nauc_mrr_at_5_max value: 59.2453 - type: nauc_mrr_at_5_std value: 5.1308 - type: nauc_mrr_at_5_diff1 value: 66.7377 - type: nauc_mrr_at_10_max value: 59.5575 - type: nauc_mrr_at_10_std value: 5.7778 - type: nauc_mrr_at_10_diff1 value: 66.36149999999999 - type: nauc_mrr_at_20_max value: 59.466300000000004 - type: nauc_mrr_at_20_std value: 5.6867 - type: nauc_mrr_at_20_diff1 value: 66.37100000000001 - type: nauc_mrr_at_100_max value: 59.404999999999994 - type: nauc_mrr_at_100_std value: 5.6528 - type: nauc_mrr_at_100_diff1 value: 66.41040000000001 - type: nauc_mrr_at_1000_max value: 59.3919 - type: nauc_mrr_at_1000_std value: 5.6358 - type: nauc_mrr_at_1000_diff1 value: 66.43050000000001 - type: main_score value: 65.847 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions (default) type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: 
d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: similarity_accuracy value: 99.7386 - type: similarity_accuracy_threshold value: 84.1442 - type: similarity_f1 value: 86.41980000000001 - type: similarity_f1_threshold value: 84.1442 - type: similarity_precision value: 88.98310000000001 - type: similarity_recall value: 84.0 - type: similarity_ap value: 93.50309999999999 - type: cosine_accuracy value: 99.7386 - type: cosine_accuracy_threshold value: 84.1442 - type: cosine_f1 value: 86.41980000000001 - type: cosine_f1_threshold value: 84.1442 - type: cosine_precision value: 88.98310000000001 - type: cosine_recall value: 84.0 - type: cosine_ap value: 93.50309999999999 - type: manhattan_accuracy value: 99.7406 - type: manhattan_accuracy_threshold value: 1243.0971 - type: manhattan_f1 value: 86.5641 - type: manhattan_f1_threshold value: 1243.0971 - type: manhattan_precision value: 88.8421 - type: manhattan_recall value: 84.39999999999999 - type: manhattan_ap value: 93.50840000000001 - type: euclidean_accuracy value: 99.7386 - type: euclidean_accuracy_threshold value: 56.313 - type: euclidean_f1 value: 86.41980000000001 - type: euclidean_f1_threshold value: 56.313 - type: euclidean_precision value: 88.98310000000001 - type: euclidean_recall value: 84.0 - type: euclidean_ap value: 93.50309999999999 - type: dot_accuracy value: 99.7386 - type: dot_accuracy_threshold value: 84.1442 - type: dot_f1 value: 86.41980000000001 - type: dot_f1_threshold value: 84.1442 - type: dot_precision value: 88.98310000000001 - type: dot_recall value: 84.0 - type: dot_ap value: 93.50309999999999 - type: max_accuracy value: 99.7406 - type: max_f1 value: 86.5641 - type: max_precision value: 88.98310000000001 - type: max_recall value: 84.39999999999999 - type: max_ap value: 93.50840000000001 - type: main_score value: 93.50840000000001 - task: type: Clustering dataset: name: MTEB StackExchangeClustering (default) type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 55.9311 - type: v_measure_std value: 5.0881 - type: main_score value: 55.9311 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P (default) type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 32.9298 - type: v_measure_std value: 1.7169 - type: main_score value: 32.9298 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions (default) type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 51.7759 - type: mrr value: 52.7456 - type: nAUC_map_max value: 15.138499999999999 - type: nAUC_map_std value: 9.876999999999999 - type: nAUC_map_diff1 value: 37.8337 - type: nAUC_mrr_max value: 16.128600000000002 - type: nAUC_mrr_std value: 10.4175 - type: nAUC_mrr_diff1 value: 37.3753 - type: main_score value: 51.7759 - task: type: Retrieval dataset: name: MTEB StackOverflowQA (default) type: CoIR-Retrieval/stackoverflow-qa config: default split: test revision: db8f169f3894c14a00251061f957b2063eef2bd5 metrics: - type: ndcg_at_1 value: 68.205 - type: ndcg_at_3 value: 75.473 - type: ndcg_at_5 value: 77.118 - type: ndcg_at_10 value: 78.45 - type: ndcg_at_20 value: 79.181 - type: ndcg_at_100 value: 80.259 - type: ndcg_at_1000 value: 80.518 - type: map_at_1 value: 68.205 - type: map_at_3 value: 73.763 - type: map_at_5 value: 74.68299999999999 - type: 
map_at_10 value: 75.234 - type: map_at_20 value: 75.43900000000001 - type: map_at_100 value: 75.59 - type: map_at_1000 value: 75.599 - type: recall_at_1 value: 68.205 - type: recall_at_3 value: 80.391 - type: recall_at_5 value: 84.353 - type: recall_at_10 value: 88.465 - type: recall_at_20 value: 91.32400000000001 - type: recall_at_100 value: 97.09100000000001 - type: recall_at_1000 value: 99.14699999999999 - type: precision_at_1 value: 68.205 - type: precision_at_3 value: 26.796999999999997 - type: precision_at_5 value: 16.871 - type: precision_at_10 value: 8.847 - type: precision_at_20 value: 4.566 - type: precision_at_100 value: 0.971 - type: precision_at_1000 value: 0.099 - type: mrr_at_1 value: 68.2046 - type: mrr_at_3 value: 73.763 - type: mrr_at_5 value: 74.6832 - type: mrr_at_10 value: 75.23440000000001 - type: mrr_at_20 value: 75.4389 - type: mrr_at_100 value: 75.5901 - type: mrr_at_1000 value: 75.59909999999999 - type: nauc_ndcg_at_1_max value: 70.0997 - type: nauc_ndcg_at_1_std value: -6.6174 - type: nauc_ndcg_at_1_diff1 value: 80.8018 - type: nauc_ndcg_at_3_max value: 71.8713 - type: nauc_ndcg_at_3_std value: -5.7584 - type: nauc_ndcg_at_3_diff1 value: 76.6152 - type: nauc_ndcg_at_5_max value: 71.7906 - type: nauc_ndcg_at_5_std value: -5.6573 - type: nauc_ndcg_at_5_diff1 value: 76.6923 - type: nauc_ndcg_at_10_max value: 71.4058 - type: nauc_ndcg_at_10_std value: -4.8043000000000005 - type: nauc_ndcg_at_10_diff1 value: 76.4267 - type: nauc_ndcg_at_20_max value: 71.5511 - type: nauc_ndcg_at_20_std value: -4.8308 - type: nauc_ndcg_at_20_diff1 value: 76.49669999999999 - type: nauc_ndcg_at_100_max value: 71.5604 - type: nauc_ndcg_at_100_std value: -4.8645000000000005 - type: nauc_ndcg_at_100_diff1 value: 77.022 - type: nauc_ndcg_at_1000_max value: 71.4953 - type: nauc_ndcg_at_1000_std value: -4.8631 - type: nauc_ndcg_at_1000_diff1 value: 77.1952 - type: nauc_map_at_1_max value: 70.0997 - type: nauc_map_at_1_std value: -6.6174 - type: nauc_map_at_1_diff1 value: 80.8018 - type: nauc_map_at_3_max value: 71.46329999999999 - type: nauc_map_at_3_std value: -5.9901 - type: nauc_map_at_3_diff1 value: 77.7281 - type: nauc_map_at_5_max value: 71.4046 - type: nauc_map_at_5_std value: -5.9794 - type: nauc_map_at_5_diff1 value: 77.8163 - type: nauc_map_at_10_max value: 71.2618 - type: nauc_map_at_10_std value: -5.702999999999999 - type: nauc_map_at_10_diff1 value: 77.73780000000001 - type: nauc_map_at_20_max value: 71.30330000000001 - type: nauc_map_at_20_std value: -5.691 - type: nauc_map_at_20_diff1 value: 77.7683 - type: nauc_map_at_100_max value: 71.3035 - type: nauc_map_at_100_std value: -5.680000000000001 - type: nauc_map_at_100_diff1 value: 77.8324 - type: nauc_map_at_1000_max value: 71.3013 - type: nauc_map_at_1000_std value: -5.6772 - type: nauc_map_at_1000_diff1 value: 77.837 - type: nauc_recall_at_1_max value: 70.0997 - type: nauc_recall_at_1_std value: -6.6174 - type: nauc_recall_at_1_diff1 value: 80.8018 - type: nauc_recall_at_3_max value: 73.3015 - type: nauc_recall_at_3_std value: -4.9247 - type: nauc_recall_at_3_diff1 value: 72.6201 - type: nauc_recall_at_5_max value: 73.3818 - type: nauc_recall_at_5_std value: -4.196 - type: nauc_recall_at_5_diff1 value: 71.8984 - type: nauc_recall_at_10_max value: 71.8002 - type: nauc_recall_at_10_std value: 1.0328 - type: nauc_recall_at_10_diff1 value: 69.0552 - type: nauc_recall_at_20_max value: 72.9934 - type: nauc_recall_at_20_std value: 2.0923000000000003 - type: nauc_recall_at_20_diff1 value: 67.3481 - type: nauc_recall_at_100_max value: 
76.0971 - type: nauc_recall_at_100_std value: 12.4217 - type: nauc_recall_at_100_diff1 value: 66.6112 - type: nauc_recall_at_1000_max value: 76.7462 - type: nauc_recall_at_1000_std value: 50.754200000000004 - type: nauc_recall_at_1000_diff1 value: 69.8675 - type: nauc_precision_at_1_max value: 70.0997 - type: nauc_precision_at_1_std value: -6.6174 - type: nauc_precision_at_1_diff1 value: 80.8018 - type: nauc_precision_at_3_max value: 73.3015 - type: nauc_precision_at_3_std value: -4.9247 - type: nauc_precision_at_3_diff1 value: 72.6201 - type: nauc_precision_at_5_max value: 73.3818 - type: nauc_precision_at_5_std value: -4.196 - type: nauc_precision_at_5_diff1 value: 71.8984 - type: nauc_precision_at_10_max value: 71.8002 - type: nauc_precision_at_10_std value: 1.0328 - type: nauc_precision_at_10_diff1 value: 69.0552 - type: nauc_precision_at_20_max value: 72.9934 - type: nauc_precision_at_20_std value: 2.0923000000000003 - type: nauc_precision_at_20_diff1 value: 67.3481 - type: nauc_precision_at_100_max value: 76.0971 - type: nauc_precision_at_100_std value: 12.4217 - type: nauc_precision_at_100_diff1 value: 66.6112 - type: nauc_precision_at_1000_max value: 76.7462 - type: nauc_precision_at_1000_std value: 50.754200000000004 - type: nauc_precision_at_1000_diff1 value: 69.8675 - type: nauc_mrr_at_1_max value: 70.0997 - type: nauc_mrr_at_1_std value: -6.6174 - type: nauc_mrr_at_1_diff1 value: 80.8018 - type: nauc_mrr_at_3_max value: 71.46329999999999 - type: nauc_mrr_at_3_std value: -5.9901 - type: nauc_mrr_at_3_diff1 value: 77.7281 - type: nauc_mrr_at_5_max value: 71.4046 - type: nauc_mrr_at_5_std value: -5.9794 - type: nauc_mrr_at_5_diff1 value: 77.8163 - type: nauc_mrr_at_10_max value: 71.2618 - type: nauc_mrr_at_10_std value: -5.702999999999999 - type: nauc_mrr_at_10_diff1 value: 77.73780000000001 - type: nauc_mrr_at_20_max value: 71.30330000000001 - type: nauc_mrr_at_20_std value: -5.691 - type: nauc_mrr_at_20_diff1 value: 77.7683 - type: nauc_mrr_at_100_max value: 71.3035 - type: nauc_mrr_at_100_std value: -5.680000000000001 - type: nauc_mrr_at_100_diff1 value: 77.8324 - type: nauc_mrr_at_1000_max value: 71.3013 - type: nauc_mrr_at_1000_std value: -5.6772 - type: nauc_mrr_at_1000_diff1 value: 77.837 - type: main_score value: 78.45 - task: type: Summarization dataset: name: MTEB SummEval (default) type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: pearson value: 31.7097 - type: spearman value: 32.0256 - type: cosine_spearman value: 32.0256 - type: cosine_pearson value: 31.7097 - type: dot_spearman value: 32.0256 - type: dot_pearson value: 31.7097 - type: main_score value: 32.0256 - task: type: Retrieval dataset: name: MTEB SyntheticText2SQL (default) type: CoIR-Retrieval/synthetic-text2sql config: default split: test revision: 686b87296c3a0191b5d9415a00526c62db9fce09 metrics: - type: ndcg_at_1 value: 3.5549999999999997 - type: ndcg_at_3 value: 41.534 - type: ndcg_at_5 value: 44.847 - type: ndcg_at_10 value: 47.344 - type: ndcg_at_20 value: 48.826 - type: ndcg_at_100 value: 50.442 - type: ndcg_at_1000 value: 50.937 - type: map_at_1 value: 3.5549999999999997 - type: map_at_3 value: 33.083 - type: map_at_5 value: 34.928 - type: map_at_10 value: 35.964 - type: map_at_20 value: 36.376 - type: map_at_100 value: 36.61 - type: map_at_1000 value: 36.63 - type: recall_at_1 value: 3.5549999999999997 - type: recall_at_3 value: 65.63 - type: recall_at_5 value: 73.646 - type: recall_at_10 value: 81.337 - type: recall_at_20 value: 
87.165 - type: recall_at_100 value: 95.71 - type: recall_at_1000 value: 99.556 - type: precision_at_1 value: 3.5549999999999997 - type: precision_at_3 value: 21.877 - type: precision_at_5 value: 14.729000000000001 - type: precision_at_10 value: 8.134 - type: precision_at_20 value: 4.358 - type: precision_at_100 value: 0.9570000000000001 - type: precision_at_1000 value: 0.1 - type: mrr_at_1 value: 31.721100000000003 - type: mrr_at_3 value: 48.6754 - type: mrr_at_5 value: 50.3093 - type: mrr_at_10 value: 51.2454 - type: mrr_at_20 value: 51.629999999999995 - type: mrr_at_100 value: 51.8552 - type: mrr_at_1000 value: 51.8747 - type: nauc_ndcg_at_1_max value: 6.543 - type: nauc_ndcg_at_1_std value: -11.0614 - type: nauc_ndcg_at_1_diff1 value: 77.4191 - type: nauc_ndcg_at_3_max value: 35.9842 - type: nauc_ndcg_at_3_std value: -16.258200000000002 - type: nauc_ndcg_at_3_diff1 value: -62.2219 - type: nauc_ndcg_at_5_max value: 35.0885 - type: nauc_ndcg_at_5_std value: -14.935699999999999 - type: nauc_ndcg_at_5_diff1 value: -58.3931 - type: nauc_ndcg_at_10_max value: 33.7926 - type: nauc_ndcg_at_10_std value: -14.2862 - type: nauc_ndcg_at_10_diff1 value: -55.5325 - type: nauc_ndcg_at_20_max value: 33.631899999999995 - type: nauc_ndcg_at_20_std value: -14.061499999999999 - type: nauc_ndcg_at_20_diff1 value: -53.7148 - type: nauc_ndcg_at_100_max value: 32.736900000000006 - type: nauc_ndcg_at_100_std value: -13.7486 - type: nauc_ndcg_at_100_diff1 value: -52.0744 - type: nauc_ndcg_at_1000_max value: 32.941500000000005 - type: nauc_ndcg_at_1000_std value: -14.186099999999998 - type: nauc_ndcg_at_1000_diff1 value: -51.6402 - type: nauc_map_at_1_max value: 6.543 - type: nauc_map_at_1_std value: -11.0614 - type: nauc_map_at_1_diff1 value: 77.4191 - type: nauc_map_at_3_max value: 33.901399999999995 - type: nauc_map_at_3_std value: -15.789 - type: nauc_map_at_3_diff1 value: -53.5257 - type: nauc_map_at_5_max value: 33.1725 - type: nauc_map_at_5_std value: -14.948400000000001 - type: nauc_map_at_5_diff1 value: -50.5361 - type: nauc_map_at_10_max value: 32.5273 - type: nauc_map_at_10_std value: -14.648 - type: nauc_map_at_10_diff1 value: -48.928 - type: nauc_map_at_20_max value: 32.4474 - type: nauc_map_at_20_std value: -14.6155 - type: nauc_map_at_20_diff1 value: -48.2673 - type: nauc_map_at_100_max value: 32.2692 - type: nauc_map_at_100_std value: -14.5789 - type: nauc_map_at_100_diff1 value: -47.9677 - type: nauc_map_at_1000_max value: 32.2805 - type: nauc_map_at_1000_std value: -14.594999999999999 - type: nauc_map_at_1000_diff1 value: -47.944700000000005 - type: nauc_recall_at_1_max value: 6.543 - type: nauc_recall_at_1_std value: -11.0614 - type: nauc_recall_at_1_diff1 value: 77.4191 - type: nauc_recall_at_3_max value: 39.704899999999995 - type: nauc_recall_at_3_std value: -17.1274 - type: nauc_recall_at_3_diff1 value: -77.3937 - type: nauc_recall_at_5_max value: 38.8786 - type: nauc_recall_at_5_std value: -14.7304 - type: nauc_recall_at_5_diff1 value: -73.366 - type: nauc_recall_at_10_max value: 36.2642 - type: nauc_recall_at_10_std value: -12.828800000000001 - type: nauc_recall_at_10_diff1 value: -69.7955 - type: nauc_recall_at_20_max value: 36.5493 - type: nauc_recall_at_20_std value: -10.9359 - type: nauc_recall_at_20_diff1 value: -66.8099 - type: nauc_recall_at_100_max value: 29.1291 - type: nauc_recall_at_100_std value: 0.3365 - type: nauc_recall_at_100_diff1 value: -63.8938 - type: nauc_recall_at_1000_max value: 37.589800000000004 - type: nauc_recall_at_1000_std value: 17.3579 - type: 
nauc_recall_at_1000_diff1 value: -68.429 - type: nauc_precision_at_1_max value: 6.543 - type: nauc_precision_at_1_std value: -11.0614 - type: nauc_precision_at_1_diff1 value: 77.4191 - type: nauc_precision_at_3_max value: 39.704899999999995 - type: nauc_precision_at_3_std value: -17.1274 - type: nauc_precision_at_3_diff1 value: -77.3937 - type: nauc_precision_at_5_max value: 38.8786 - type: nauc_precision_at_5_std value: -14.7304 - type: nauc_precision_at_5_diff1 value: -73.366 - type: nauc_precision_at_10_max value: 36.2642 - type: nauc_precision_at_10_std value: -12.828800000000001 - type: nauc_precision_at_10_diff1 value: -69.7955 - type: nauc_precision_at_20_max value: 36.5493 - type: nauc_precision_at_20_std value: -10.9359 - type: nauc_precision_at_20_diff1 value: -66.8099 - type: nauc_precision_at_100_max value: 29.1291 - type: nauc_precision_at_100_std value: 0.3365 - type: nauc_precision_at_100_diff1 value: -63.8938 - type: nauc_precision_at_1000_max value: 37.589800000000004 - type: nauc_precision_at_1000_std value: 17.3579 - type: nauc_precision_at_1000_diff1 value: -68.429 - type: nauc_mrr_at_1_max value: 18.7616 - type: nauc_mrr_at_1_std value: -9.332600000000001 - type: nauc_mrr_at_1_diff1 value: -38.775 - type: nauc_mrr_at_3_max value: 27.9627 - type: nauc_mrr_at_3_std value: -12.1163 - type: nauc_mrr_at_3_diff1 value: -56.172900000000006 - type: nauc_mrr_at_5_max value: 27.385900000000003 - type: nauc_mrr_at_5_std value: -11.7823 - type: nauc_mrr_at_5_diff1 value: -55.085300000000004 - type: nauc_mrr_at_10_max value: 26.9297 - type: nauc_mrr_at_10_std value: -11.5899 - type: nauc_mrr_at_10_diff1 value: -54.352900000000005 - type: nauc_mrr_at_20_max value: 26.8231 - type: nauc_mrr_at_20_std value: -11.5438 - type: nauc_mrr_at_20_diff1 value: -54.101 - type: nauc_mrr_at_100_max value: 26.6888 - type: nauc_mrr_at_100_std value: -11.5184 - type: nauc_mrr_at_100_diff1 value: -53.9839 - type: nauc_mrr_at_1000_max value: 26.691399999999998 - type: nauc_mrr_at_1000_std value: -11.5244 - type: nauc_mrr_at_1000_diff1 value: -53.976 - type: main_score value: 47.344 - task: type: Retrieval dataset: name: MTEB TRECCOVID (default) type: mteb/trec-covid config: default split: test revision: bb9466bac8153a0349341eb1b22e06409e78ef4e metrics: - type: ndcg_at_1 value: 70.0 - type: ndcg_at_3 value: 70.877 - type: ndcg_at_5 value: 70.735 - type: ndcg_at_10 value: 68.573 - type: ndcg_at_20 value: 65.635 - type: ndcg_at_100 value: 53.501 - type: ndcg_at_1000 value: 49.288 - type: map_at_1 value: 0.207 - type: map_at_3 value: 0.551 - type: map_at_5 value: 0.8909999999999999 - type: map_at_10 value: 1.635 - type: map_at_20 value: 2.952 - type: map_at_100 value: 9.713 - type: map_at_1000 value: 24.064 - type: recall_at_1 value: 0.207 - type: recall_at_3 value: 0.602 - type: recall_at_5 value: 0.992 - type: recall_at_10 value: 1.9009999999999998 - type: recall_at_20 value: 3.5709999999999997 - type: recall_at_100 value: 13.297999999999998 - type: recall_at_1000 value: 47.067 - type: precision_at_1 value: 80.0 - type: precision_at_3 value: 76.667 - type: precision_at_5 value: 76.4 - type: precision_at_10 value: 73.2 - type: precision_at_20 value: 70.1 - type: precision_at_100 value: 55.04 - type: precision_at_1000 value: 22.046 - type: mrr_at_1 value: 80.0 - type: mrr_at_3 value: 88.66669999999999 - type: mrr_at_5 value: 89.16669999999999 - type: mrr_at_10 value: 89.16669999999999 - type: mrr_at_20 value: 89.16669999999999 - type: mrr_at_100 value: 89.16669999999999 - type: mrr_at_1000 value: 
89.16669999999999 - type: nauc_ndcg_at_1_max value: 9.0505 - type: nauc_ndcg_at_1_std value: 17.7341 - type: nauc_ndcg_at_1_diff1 value: -17.272399999999998 - type: nauc_ndcg_at_3_max value: 27.3702 - type: nauc_ndcg_at_3_std value: 43.432500000000005 - type: nauc_ndcg_at_3_diff1 value: -5.716600000000001 - type: nauc_ndcg_at_5_max value: 24.6447 - type: nauc_ndcg_at_5_std value: 48.0114 - type: nauc_ndcg_at_5_diff1 value: -7.0447999999999995 - type: nauc_ndcg_at_10_max value: 31.5589 - type: nauc_ndcg_at_10_std value: 60.242 - type: nauc_ndcg_at_10_diff1 value: -4.827 - type: nauc_ndcg_at_20_max value: 39.195600000000006 - type: nauc_ndcg_at_20_std value: 67.9313 - type: nauc_ndcg_at_20_diff1 value: -10.0317 - type: nauc_ndcg_at_100_max value: 43.8896 - type: nauc_ndcg_at_100_std value: 76.6623 - type: nauc_ndcg_at_100_diff1 value: -14.7694 - type: nauc_ndcg_at_1000_max value: 46.935 - type: nauc_ndcg_at_1000_std value: 79.9247 - type: nauc_ndcg_at_1000_diff1 value: -12.9885 - type: nauc_map_at_1_max value: 5.587899999999999 - type: nauc_map_at_1_std value: -6.5333000000000006 - type: nauc_map_at_1_diff1 value: 7.8414 - type: nauc_map_at_3_max value: 14.21 - type: nauc_map_at_3_std value: 7.9614 - type: nauc_map_at_3_diff1 value: 11.9467 - type: nauc_map_at_5_max value: 14.514299999999999 - type: nauc_map_at_5_std value: 10.6974 - type: nauc_map_at_5_diff1 value: 11.732800000000001 - type: nauc_map_at_10_max value: 17.5629 - type: nauc_map_at_10_std value: 21.4707 - type: nauc_map_at_10_diff1 value: 10.9138 - type: nauc_map_at_20_max value: 23.891399999999997 - type: nauc_map_at_20_std value: 32.5254 - type: nauc_map_at_20_diff1 value: 5.6072999999999995 - type: nauc_map_at_100_max value: 37.247 - type: nauc_map_at_100_std value: 66.2197 - type: nauc_map_at_100_diff1 value: -6.0896 - type: nauc_map_at_1000_max value: 51.590599999999995 - type: nauc_map_at_1000_std value: 83.3358 - type: nauc_map_at_1000_diff1 value: -18.7689 - type: nauc_recall_at_1_max value: 5.587899999999999 - type: nauc_recall_at_1_std value: -6.5333000000000006 - type: nauc_recall_at_1_diff1 value: 7.8414 - type: nauc_recall_at_3_max value: 10.6036 - type: nauc_recall_at_3_std value: 8.7269 - type: nauc_recall_at_3_diff1 value: 13.296 - type: nauc_recall_at_5_max value: 9.3121 - type: nauc_recall_at_5_std value: 9.9978 - type: nauc_recall_at_5_diff1 value: 12.5994 - type: nauc_recall_at_10_max value: 10.0265 - type: nauc_recall_at_10_std value: 16.8073 - type: nauc_recall_at_10_diff1 value: 10.8776 - type: nauc_recall_at_20_max value: 16.3788 - type: nauc_recall_at_20_std value: 23.7003 - type: nauc_recall_at_20_diff1 value: 7.832 - type: nauc_recall_at_100_max value: 25.289 - type: nauc_recall_at_100_std value: 51.6757 - type: nauc_recall_at_100_diff1 value: 0.4044 - type: nauc_recall_at_1000_max value: 42.1531 - type: nauc_recall_at_1000_std value: 72.10419999999999 - type: nauc_recall_at_1000_diff1 value: -12.410499999999999 - type: nauc_precision_at_1_max value: 31.203799999999998 - type: nauc_precision_at_1_std value: 23.1918 - type: nauc_precision_at_1_diff1 value: -32.057900000000004 - type: nauc_precision_at_3_max value: 40.368300000000005 - type: nauc_precision_at_3_std value: 50.225699999999996 - type: nauc_precision_at_3_diff1 value: -2.2047 - type: nauc_precision_at_5_max value: 29.592200000000002 - type: nauc_precision_at_5_std value: 49.6822 - type: nauc_precision_at_5_diff1 value: -4.1202000000000005 - type: nauc_precision_at_10_max value: 41.876400000000004 - type: nauc_precision_at_10_std value: 
67.3955 - type: nauc_precision_at_10_diff1 value: 1.8023 - type: nauc_precision_at_20_max value: 49.011500000000005 - type: nauc_precision_at_20_std value: 72.0322 - type: nauc_precision_at_20_diff1 value: -8.0818 - type: nauc_precision_at_100_max value: 49.385200000000005 - type: nauc_precision_at_100_std value: 79.20660000000001 - type: nauc_precision_at_100_diff1 value: -12.9969 - type: nauc_precision_at_1000_max value: 41.5596 - type: nauc_precision_at_1000_std value: 51.89470000000001 - type: nauc_precision_at_1000_diff1 value: -24.5507 - type: nauc_mrr_at_1_max value: 31.203799999999998 - type: nauc_mrr_at_1_std value: 23.1918 - type: nauc_mrr_at_1_diff1 value: -32.057900000000004 - type: nauc_mrr_at_3_max value: 37.7018 - type: nauc_mrr_at_3_std value: 31.9141 - type: nauc_mrr_at_3_diff1 value: -22.4835 - type: nauc_mrr_at_5_max value: 35.284 - type: nauc_mrr_at_5_std value: 28.569899999999997 - type: nauc_mrr_at_5_diff1 value: -26.309700000000003 - type: nauc_mrr_at_10_max value: 35.284 - type: nauc_mrr_at_10_std value: 28.569899999999997 - type: nauc_mrr_at_10_diff1 value: -26.309700000000003 - type: nauc_mrr_at_20_max value: 35.284 - type: nauc_mrr_at_20_std value: 28.569899999999997 - type: nauc_mrr_at_20_diff1 value: -26.309700000000003 - type: nauc_mrr_at_100_max value: 35.284 - type: nauc_mrr_at_100_std value: 28.569899999999997 - type: nauc_mrr_at_100_diff1 value: -26.309700000000003 - type: nauc_mrr_at_1000_max value: 35.284 - type: nauc_mrr_at_1000_std value: 28.569899999999997 - type: nauc_mrr_at_1000_diff1 value: -26.309700000000003 - type: main_score value: 68.573 - task: type: Retrieval dataset: name: MTEB Touche2020 (default) type: mteb/touche2020 config: default split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: ndcg_at_1 value: 41.837 - type: ndcg_at_3 value: 34.675 - type: ndcg_at_5 value: 30.017 - type: ndcg_at_10 value: 27.306 - type: ndcg_at_20 value: 27.009 - type: ndcg_at_100 value: 38.037 - type: ndcg_at_1000 value: 49.413000000000004 - type: map_at_1 value: 3.304 - type: map_at_3 value: 6.0569999999999995 - type: map_at_5 value: 7.856000000000001 - type: map_at_10 value: 10.869 - type: map_at_20 value: 12.824 - type: map_at_100 value: 16.631999999999998 - type: map_at_1000 value: 18.138 - type: recall_at_1 value: 3.304 - type: recall_at_3 value: 7.13 - type: recall_at_5 value: 9.995999999999999 - type: recall_at_10 value: 16.766000000000002 - type: recall_at_20 value: 22.933 - type: recall_at_100 value: 47.427 - type: recall_at_1000 value: 81.527 - type: precision_at_1 value: 42.857 - type: precision_at_3 value: 35.374 - type: precision_at_5 value: 28.163 - type: precision_at_10 value: 23.061 - type: precision_at_20 value: 16.633 - type: precision_at_100 value: 7.632999999999999 - type: precision_at_1000 value: 1.51 - type: mrr_at_1 value: 42.857099999999996 - type: mrr_at_3 value: 54.4218 - type: mrr_at_5 value: 54.4218 - type: mrr_at_10 value: 56.431 - type: mrr_at_20 value: 56.880900000000004 - type: mrr_at_100 value: 57.0526 - type: mrr_at_1000 value: 57.0526 - type: nauc_ndcg_at_1_max value: -44.2104 - type: nauc_ndcg_at_1_std value: -2.3875 - type: nauc_ndcg_at_1_diff1 value: -23.4197 - type: nauc_ndcg_at_3_max value: -40.1986 - type: nauc_ndcg_at_3_std value: -4.3845 - type: nauc_ndcg_at_3_diff1 value: -26.881100000000004 - type: nauc_ndcg_at_5_max value: -37.8693 - type: nauc_ndcg_at_5_std value: -5.817 - type: nauc_ndcg_at_5_diff1 value: -30.292599999999997 - type: nauc_ndcg_at_10_max value: -35.0514 - type: 
nauc_ndcg_at_10_std value: -12.628 - type: nauc_ndcg_at_10_diff1 value: -28.5171 - type: nauc_ndcg_at_20_max value: -36.829499999999996 - type: nauc_ndcg_at_20_std value: -10.9047 - type: nauc_ndcg_at_20_diff1 value: -25.590200000000003 - type: nauc_ndcg_at_100_max value: -33.1224 - type: nauc_ndcg_at_100_std value: 14.3094 - type: nauc_ndcg_at_100_diff1 value: -17.6544 - type: nauc_ndcg_at_1000_max value: -30.8819 - type: nauc_ndcg_at_1000_std value: 22.3523 - type: nauc_ndcg_at_1000_diff1 value: -19.5741 - type: nauc_map_at_1_max value: -38.6863 - type: nauc_map_at_1_std value: -15.0366 - type: nauc_map_at_1_diff1 value: -8.5063 - type: nauc_map_at_3_max value: -38.9161 - type: nauc_map_at_3_std value: -16.71 - type: nauc_map_at_3_diff1 value: -21.3221 - type: nauc_map_at_5_max value: -35.0036 - type: nauc_map_at_5_std value: -18.4668 - type: nauc_map_at_5_diff1 value: -27.6758 - type: nauc_map_at_10_max value: -29.7816 - type: nauc_map_at_10_std value: -20.890900000000002 - type: nauc_map_at_10_diff1 value: -27.380100000000002 - type: nauc_map_at_20_max value: -29.3362 - type: nauc_map_at_20_std value: -18.9281 - type: nauc_map_at_20_diff1 value: -27.058500000000002 - type: nauc_map_at_100_max value: -27.9555 - type: nauc_map_at_100_std value: -7.222 - type: nauc_map_at_100_diff1 value: -22.7849 - type: nauc_map_at_1000_max value: -26.954 - type: nauc_map_at_1000_std value: -4.0097000000000005 - type: nauc_map_at_1000_diff1 value: -22.855 - type: nauc_recall_at_1_max value: -38.6863 - type: nauc_recall_at_1_std value: -15.0366 - type: nauc_recall_at_1_diff1 value: -8.5063 - type: nauc_recall_at_3_max value: -42.2532 - type: nauc_recall_at_3_std value: -20.399 - type: nauc_recall_at_3_diff1 value: -23.8415 - type: nauc_recall_at_5_max value: -35.3457 - type: nauc_recall_at_5_std value: -20.0969 - type: nauc_recall_at_5_diff1 value: -29.5907 - type: nauc_recall_at_10_max value: -31.7181 - type: nauc_recall_at_10_std value: -22.9559 - type: nauc_recall_at_10_diff1 value: -22.564400000000003 - type: nauc_recall_at_20_max value: -34.5273 - type: nauc_recall_at_20_std value: -15.6335 - type: nauc_recall_at_20_diff1 value: -22.9889 - type: nauc_recall_at_100_max value: -28.2509 - type: nauc_recall_at_100_std value: 30.481399999999997 - type: nauc_recall_at_100_diff1 value: -6.9437999999999995 - type: nauc_recall_at_1000_max value: -12.5952 - type: nauc_recall_at_1000_std value: 69.9957 - type: nauc_recall_at_1000_diff1 value: 2.2129 - type: nauc_precision_at_1_max value: -45.3657 - type: nauc_precision_at_1_std value: -4.4435 - type: nauc_precision_at_1_diff1 value: -18.6647 - type: nauc_precision_at_3_max value: -39.1078 - type: nauc_precision_at_3_std value: -8.047600000000001 - type: nauc_precision_at_3_diff1 value: -27.322200000000002 - type: nauc_precision_at_5_max value: -32.8848 - type: nauc_precision_at_5_std value: -8.5508 - type: nauc_precision_at_5_diff1 value: -31.567600000000002 - type: nauc_precision_at_10_max value: -28.719499999999996 - type: nauc_precision_at_10_std value: -14.498800000000001 - type: nauc_precision_at_10_diff1 value: -27.8402 - type: nauc_precision_at_20_max value: -26.466 - type: nauc_precision_at_20_std value: 3.3133000000000004 - type: nauc_precision_at_20_diff1 value: -31.5367 - type: nauc_precision_at_100_max value: -5.4186 - type: nauc_precision_at_100_std value: 61.58709999999999 - type: nauc_precision_at_100_diff1 value: -8.8049 - type: nauc_precision_at_1000_max value: 37.745400000000004 - type: nauc_precision_at_1000_std value: 48.7776 - type: 
nauc_precision_at_1000_diff1 value: 6.4595 - type: nauc_mrr_at_1_max value: -45.3657 - type: nauc_mrr_at_1_std value: -4.4435 - type: nauc_mrr_at_1_diff1 value: -18.6647 - type: nauc_mrr_at_3_max value: -52.9035 - type: nauc_mrr_at_3_std value: -13.174800000000001 - type: nauc_mrr_at_3_diff1 value: -20.045299999999997 - type: nauc_mrr_at_5_max value: -52.9035 - type: nauc_mrr_at_5_std value: -13.174800000000001 - type: nauc_mrr_at_5_diff1 value: -20.045299999999997 - type: nauc_mrr_at_10_max value: -51.358599999999996 - type: nauc_mrr_at_10_std value: -11.266 - type: nauc_mrr_at_10_diff1 value: -19.4274 - type: nauc_mrr_at_20_max value: -51.648799999999994 - type: nauc_mrr_at_20_std value: -10.9663 - type: nauc_mrr_at_20_diff1 value: -19.5931 - type: nauc_mrr_at_100_max value: -51.669200000000004 - type: nauc_mrr_at_100_std value: -10.9424 - type: nauc_mrr_at_100_diff1 value: -19.7412 - type: nauc_mrr_at_1000_max value: -51.669200000000004 - type: nauc_mrr_at_1000_std value: -10.9424 - type: nauc_mrr_at_1000_diff1 value: -19.7412 - type: main_score value: 27.306 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification (default) type: mteb/toxic_conversations_50k config: default split: test revision: edfaf9da55d3dd50d43143d90c1ac476895ae6de metrics: - type: accuracy value: 62.480500000000006 - type: f1 value: 48.201100000000004 - type: f1_weighted value: 70.8591 - type: ap value: 10.9948 - type: ap_weighted value: 10.9948 - type: main_score value: 62.480500000000006 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification (default) type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 58.3616 - type: f1 value: 58.5596 - type: f1_weighted value: 57.801 - type: main_score value: 58.3616 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering (default) type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 38.6199 - type: v_measure_std value: 2.3855999999999997 - type: main_score value: 38.6199 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 (default) type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: similarity_accuracy value: 82.9886 - type: similarity_accuracy_threshold value: 86.3901 - type: similarity_f1 value: 60.866200000000006 - type: similarity_f1_threshold value: 83.9821 - type: similarity_precision value: 59.333499999999994 - type: similarity_recall value: 62.480199999999996 - type: similarity_ap value: 64.413 - type: cosine_accuracy value: 82.9886 - type: cosine_accuracy_threshold value: 86.3901 - type: cosine_f1 value: 60.866200000000006 - type: cosine_f1_threshold value: 83.9821 - type: cosine_precision value: 59.333499999999994 - type: cosine_recall value: 62.480199999999996 - type: cosine_ap value: 64.413 - type: manhattan_accuracy value: 82.9409 - type: manhattan_accuracy_threshold value: 1144.7468000000001 - type: manhattan_f1 value: 60.760400000000004 - type: manhattan_f1_threshold value: 1291.7232999999999 - type: manhattan_precision value: 54.7126 - type: manhattan_recall value: 68.3113 - type: manhattan_ap value: 64.3592 - type: euclidean_accuracy value: 82.9886 - type: euclidean_accuracy_threshold value: 52.1726 - type: euclidean_f1 value: 60.866200000000006 - type: euclidean_f1_threshold value: 
56.6001 - type: euclidean_precision value: 59.333499999999994 - type: euclidean_recall value: 62.480199999999996 - type: euclidean_ap value: 64.4131 - type: dot_accuracy value: 82.9886 - type: dot_accuracy_threshold value: 86.3901 - type: dot_f1 value: 60.866200000000006 - type: dot_f1_threshold value: 83.9821 - type: dot_precision value: 59.333499999999994 - type: dot_recall value: 62.480199999999996 - type: dot_ap value: 64.413 - type: max_accuracy value: 82.9886 - type: max_f1 value: 60.866200000000006 - type: max_precision value: 59.333499999999994 - type: max_recall value: 68.3113 - type: max_ap value: 64.4131 - type: main_score value: 64.4131 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus (default) type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: similarity_accuracy value: 88.95100000000001 - type: similarity_accuracy_threshold value: 82.18520000000001 - type: similarity_f1 value: 77.9051 - type: similarity_f1_threshold value: 80.3369 - type: similarity_precision value: 76.07310000000001 - type: similarity_recall value: 79.8275 - type: similarity_ap value: 86.1545 - type: cosine_accuracy value: 88.95100000000001 - type: cosine_accuracy_threshold value: 82.18520000000001 - type: cosine_f1 value: 77.9051 - type: cosine_f1_threshold value: 80.3369 - type: cosine_precision value: 76.07310000000001 - type: cosine_recall value: 79.8275 - type: cosine_ap value: 86.1545 - type: manhattan_accuracy value: 88.9277 - type: manhattan_accuracy_threshold value: 1338.2836 - type: manhattan_f1 value: 77.8186 - type: manhattan_f1_threshold value: 1372.5978 - type: manhattan_precision value: 76.5745 - type: manhattan_recall value: 79.1038 - type: manhattan_ap value: 86.114 - type: euclidean_accuracy value: 88.95100000000001 - type: euclidean_accuracy_threshold value: 59.6905 - type: euclidean_f1 value: 77.9051 - type: euclidean_f1_threshold value: 62.71060000000001 - type: euclidean_precision value: 76.07310000000001 - type: euclidean_recall value: 79.8275 - type: euclidean_ap value: 86.1544 - type: dot_accuracy value: 88.95100000000001 - type: dot_accuracy_threshold value: 82.18520000000001 - type: dot_f1 value: 77.9051 - type: dot_f1_threshold value: 80.3369 - type: dot_precision value: 76.07310000000001 - type: dot_recall value: 79.8275 - type: dot_ap value: 86.1544 - type: max_accuracy value: 88.95100000000001 - type: max_f1 value: 77.9051 - type: max_precision value: 76.5745 - type: max_recall value: 79.8275 - type: max_ap value: 86.1545 - type: main_score value: 86.1545
---

# hongkeon/granite-embedding-278m-multilingual-Q4_K_M-GGUF

This model was converted to GGUF format from [`ibm-granite/granite-embedding-278m-multilingual`](https://huggingface.co/ibm-granite/granite-embedding-278m-multilingual) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space.
Refer to the [original model card](https://huggingface.co/ibm-granite/granite-embedding-278m-multilingual) for more details on the model.

## Use with llama.cpp

Install llama.cpp through brew (works on Mac and Linux):

```bash
brew install llama.cpp
```

Invoke the llama.cpp server or the CLI.

### CLI:

```bash
llama-cli --hf-repo hongkeon/granite-embedding-278m-multilingual-Q4_K_M-GGUF --hf-file granite-embedding-278m-multilingual-q4_k_m.gguf -p "The meaning to life and the universe is"
```

### Server:

```bash
llama-server --hf-repo hongkeon/granite-embedding-278m-multilingual-Q4_K_M-GGUF --hf-file granite-embedding-278m-multilingual-q4_k_m.gguf -c 2048
```

Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.

Step 1: Clone llama.cpp from GitHub.

```
git clone https://github.com/ggerganov/llama.cpp
```

Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag, along with any other hardware-specific flags (for example, `LLAMA_CUDA=1` for NVIDIA GPUs on Linux).

```
cd llama.cpp && LLAMA_CURL=1 make
```

Step 3: Run inference through the main binary.

```
./llama-cli --hf-repo hongkeon/granite-embedding-278m-multilingual-Q4_K_M-GGUF --hf-file granite-embedding-278m-multilingual-q4_k_m.gguf -p "The meaning to life and the universe is"
```

or

```
./llama-server --hf-repo hongkeon/granite-embedding-278m-multilingual-Q4_K_M-GGUF --hf-file granite-embedding-278m-multilingual-q4_k_m.gguf -c 2048
```
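Since this checkpoint is an embedding model rather than a text generator, you will typically want vectors instead of completions. The sketch below is a minimal, non-authoritative example of requesting embeddings from the running server; it assumes a recent llama.cpp build in which `llama-server` accepts the `--embedding` flag, listens on its default port 8080, and exposes the OpenAI-compatible `/v1/embeddings` endpoint. Check `llama-server --help` for your version if the flag or route differs.

```bash
# Assumption: recent llama.cpp build with --embedding support and the
# OpenAI-compatible /v1/embeddings route on the default port 8080.
llama-server --hf-repo hongkeon/granite-embedding-278m-multilingual-Q4_K_M-GGUF \
  --hf-file granite-embedding-278m-multilingual-q4_k_m.gguf \
  --embedding -c 2048 &

# Request an embedding for a single sentence; the response contains the vector
# under data[0].embedding in the OpenAI-style JSON schema.
curl http://localhost:8080/v1/embeddings \
  -H "Content-Type: application/json" \
  -d '{"input": "Granite is a multilingual embedding model."}'
```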
[ "BIOSSES", "SCIFACT" ]
bhavnicksm/brown-fairy-base-v0
bhavnicksm
null
[ "model2vec", "safetensors", "embeddings", "static-embeddings", "sentence-transformers", "mteb", "en", "license:mit", "model-index", "region:us" ]
2025-01-30T21:43:50Z
2025-02-01T12:11:52+00:00
23
1
--- base_model: baai/bge-base-en-v1.5 language: - en library_name: model2vec license: mit tags: - embeddings - static-embeddings - sentence-transformers - mteb model-index: - name: bhavnicksm/brown-fairy-base-v0 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 69.52239999999999 - type: f1 value: 63.4127 - type: f1_weighted value: 72.48599999999999 - type: ap value: 31.8446 - type: ap_weighted value: 31.8446 - type: main_score value: 69.52239999999999 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification (default) type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 68.709 - type: f1 value: 68.2583 - type: f1_weighted value: 68.2583 - type: ap value: 63.728899999999996 - type: ap_weighted value: 63.728899999999996 - type: main_score value: 68.709 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 34.014 - type: f1 value: 33.4588 - type: f1_weighted value: 33.4588 - type: main_score value: 34.014 - task: type: Retrieval dataset: name: MTEB ArguAna (default) type: mteb/arguana config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: ndcg_at_1 value: 20.341 - type: ndcg_at_3 value: 30.547 - type: ndcg_at_5 value: 34.963 - type: ndcg_at_10 value: 39.805 - type: ndcg_at_20 value: 42.397 - type: ndcg_at_100 value: 45.216 - type: ndcg_at_1000 value: 46.339999999999996 - type: map_at_1 value: 20.341 - type: map_at_3 value: 27.962999999999997 - type: map_at_5 value: 30.409999999999997 - type: map_at_10 value: 32.4 - type: map_at_20 value: 33.113 - type: map_at_100 value: 33.512 - type: map_at_1000 value: 33.556000000000004 - type: recall_at_1 value: 20.341 - type: recall_at_3 value: 38.051 - type: recall_at_5 value: 48.791000000000004 - type: recall_at_10 value: 63.798 - type: recall_at_20 value: 74.03999999999999 - type: recall_at_100 value: 89.118 - type: recall_at_1000 value: 97.866 - type: precision_at_1 value: 20.341 - type: precision_at_3 value: 12.684000000000001 - type: precision_at_5 value: 9.758 - type: precision_at_10 value: 6.38 - type: precision_at_20 value: 3.702 - type: precision_at_100 value: 0.8909999999999999 - type: precision_at_1000 value: 0.098 - type: mrr_at_1 value: 20.6259 - type: mrr_at_3 value: 28.058300000000003 - type: mrr_at_5 value: 30.4979 - type: mrr_at_10 value: 32.5131 - type: mrr_at_20 value: 33.222699999999996 - type: mrr_at_100 value: 33.6243 - type: mrr_at_1000 value: 33.6687 - type: nauc_ndcg_at_1_max value: -6.208 - type: nauc_ndcg_at_1_std value: 0.6887 - type: nauc_ndcg_at_1_diff1 value: 5.5123 - type: nauc_ndcg_at_3_max value: -1.8608 - type: nauc_ndcg_at_3_std value: 3.7832999999999997 - type: nauc_ndcg_at_3_diff1 value: 7.5778 - type: nauc_ndcg_at_5_max value: 0.0929 - type: nauc_ndcg_at_5_std value: 5.8453 - type: nauc_ndcg_at_5_diff1 value: 9.316 - type: nauc_ndcg_at_10_max value: 0.557 - type: nauc_ndcg_at_10_std value: 5.8692 - type: nauc_ndcg_at_10_diff1 value: 8.3828 - type: nauc_ndcg_at_20_max value: 1.567 - type: nauc_ndcg_at_20_std value: 8.2355 - type: nauc_ndcg_at_20_diff1 value: 9.1907 - type: nauc_ndcg_at_100_max value: 1.0833000000000002 - 
type: nauc_ndcg_at_100_std value: 8.6248 - type: nauc_ndcg_at_100_diff1 value: 9.0073 - type: nauc_ndcg_at_1000_max value: -0.166 - type: nauc_ndcg_at_1000_std value: 7.394100000000001 - type: nauc_ndcg_at_1000_diff1 value: 8.1955 - type: nauc_map_at_1_max value: -6.208 - type: nauc_map_at_1_std value: 0.6887 - type: nauc_map_at_1_diff1 value: 5.5123 - type: nauc_map_at_3_max value: -3.0332999999999997 - type: nauc_map_at_3_std value: 2.9010000000000002 - type: nauc_map_at_3_diff1 value: 6.8088 - type: nauc_map_at_5_max value: -1.9215 - type: nauc_map_at_5_std value: 4.023000000000001 - type: nauc_map_at_5_diff1 value: 7.8248999999999995 - type: nauc_map_at_10_max value: -1.8037 - type: nauc_map_at_10_std value: 3.9838 - type: nauc_map_at_10_diff1 value: 7.3617 - type: nauc_map_at_20_max value: -1.5614 - type: nauc_map_at_20_std value: 4.6065000000000005 - type: nauc_map_at_20_diff1 value: 7.5846 - type: nauc_map_at_100_max value: -1.6330999999999998 - type: nauc_map_at_100_std value: 4.693 - type: nauc_map_at_100_diff1 value: 7.5309 - type: nauc_map_at_1000_max value: -1.6847999999999999 - type: nauc_map_at_1000_std value: 4.6508 - type: nauc_map_at_1000_diff1 value: 7.5036000000000005 - type: nauc_recall_at_1_max value: -6.208 - type: nauc_recall_at_1_std value: 0.6887 - type: nauc_recall_at_1_diff1 value: 5.5123 - type: nauc_recall_at_3_max value: 1.2662 - type: nauc_recall_at_3_std value: 6.1506 - type: nauc_recall_at_3_diff1 value: 9.6919 - type: nauc_recall_at_5_max value: 5.7511 - type: nauc_recall_at_5_std value: 11.0652 - type: nauc_recall_at_5_diff1 value: 13.5713 - type: nauc_recall_at_10_max value: 8.5342 - type: nauc_recall_at_10_std value: 12.2161 - type: nauc_recall_at_10_diff1 value: 11.6188 - type: nauc_recall_at_20_max value: 15.7488 - type: nauc_recall_at_20_std value: 25.6755 - type: nauc_recall_at_20_diff1 value: 16.3568 - type: nauc_recall_at_100_max value: 24.424799999999998 - type: nauc_recall_at_100_std value: 47.6945 - type: nauc_recall_at_100_diff1 value: 22.4622 - type: nauc_recall_at_1000_max value: 3.0951 - type: nauc_recall_at_1000_std value: 84.10419999999999 - type: nauc_recall_at_1000_diff1 value: -2.6364 - type: nauc_precision_at_1_max value: -6.208 - type: nauc_precision_at_1_std value: 0.6887 - type: nauc_precision_at_1_diff1 value: 5.5123 - type: nauc_precision_at_3_max value: 1.2662 - type: nauc_precision_at_3_std value: 6.1506 - type: nauc_precision_at_3_diff1 value: 9.6919 - type: nauc_precision_at_5_max value: 5.7511 - type: nauc_precision_at_5_std value: 11.0652 - type: nauc_precision_at_5_diff1 value: 13.5713 - type: nauc_precision_at_10_max value: 8.5342 - type: nauc_precision_at_10_std value: 12.2161 - type: nauc_precision_at_10_diff1 value: 11.6188 - type: nauc_precision_at_20_max value: 15.7488 - type: nauc_precision_at_20_std value: 25.6755 - type: nauc_precision_at_20_diff1 value: 16.3568 - type: nauc_precision_at_100_max value: 24.424799999999998 - type: nauc_precision_at_100_std value: 47.6945 - type: nauc_precision_at_100_diff1 value: 22.4622 - type: nauc_precision_at_1000_max value: 3.0951 - type: nauc_precision_at_1000_std value: 84.10419999999999 - type: nauc_precision_at_1000_diff1 value: -2.6364 - type: nauc_mrr_at_1_max value: -5.611800000000001 - type: nauc_mrr_at_1_std value: 0.2596 - type: nauc_mrr_at_1_diff1 value: 4.5101 - type: nauc_mrr_at_3_max value: -3.1917 - type: nauc_mrr_at_3_std value: 2.7559 - type: nauc_mrr_at_3_diff1 value: 5.756 - type: nauc_mrr_at_5_max value: -2.1292999999999997 - type: nauc_mrr_at_5_std value: 
3.7653 - type: nauc_mrr_at_5_diff1 value: 6.7995 - type: nauc_mrr_at_10_max value: -1.8915000000000002 - type: nauc_mrr_at_10_std value: 3.778 - type: nauc_mrr_at_10_diff1 value: 6.4253 - type: nauc_mrr_at_20_max value: -1.6753 - type: nauc_mrr_at_20_std value: 4.389 - type: nauc_mrr_at_20_diff1 value: 6.6081 - type: nauc_mrr_at_100_max value: -1.7302000000000002 - type: nauc_mrr_at_100_std value: 4.4796000000000005 - type: nauc_mrr_at_100_diff1 value: 6.563199999999999 - type: nauc_mrr_at_1000_max value: -1.7819000000000003 - type: nauc_mrr_at_1000_std value: 4.4372 - type: nauc_mrr_at_1000_diff1 value: 6.5346 - type: main_score value: 39.805 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P (default) type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 30.9023 - type: v_measure_std value: 14.6095 - type: main_score value: 30.9023 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S (default) type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 19.1012 - type: v_measure_std value: 15.511800000000001 - type: main_score value: 19.1012 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions (default) type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 54.0474 - type: mrr value: 67.00150000000001 - type: nAUC_map_max value: 14.266100000000002 - type: nAUC_map_std value: 11.7906 - type: nAUC_map_diff1 value: 7.5044 - type: nAUC_mrr_max value: 20.1721 - type: nAUC_mrr_std value: 13.1225 - type: nAUC_mrr_diff1 value: 14.3512 - type: main_score value: 54.0474 - task: type: STS dataset: name: MTEB BIOSSES (default) type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: pearson value: 73.3465 - type: spearman value: 69.6932 - type: cosine_pearson value: 73.3465 - type: cosine_spearman value: 69.6932 - type: manhattan_pearson value: 54.115899999999996 - type: manhattan_spearman value: 54.1759 - type: euclidean_pearson value: 54.2153 - type: euclidean_spearman value: 54.0488 - type: main_score value: 69.6932 - task: type: Classification dataset: name: MTEB Banking77Classification (default) type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 74.2987 - type: f1 value: 73.85119999999999 - type: f1_weighted value: 73.85119999999999 - type: main_score value: 74.2987 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P (default) type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 29.8415 - type: v_measure_std value: 0.7605 - type: main_score value: 29.8415 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S (default) type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 16.4917 - type: v_measure_std value: 1.2364 - type: main_score value: 16.4917 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval (default) type: CQADupstackRetrieval_is_a_combined_dataset config: default split: test revision: '1' metrics: - type: ndcg_at_10 value: 21.9561 - type: main_score value: 21.9561 - task: type: Retrieval dataset: name: MTEB ClimateFEVER 
(default) type: mteb/climate-fever config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: ndcg_at_1 value: 18.826999999999998 - type: ndcg_at_3 value: 16.482 - type: ndcg_at_5 value: 17.9 - type: ndcg_at_10 value: 20.948 - type: ndcg_at_20 value: 23.665 - type: ndcg_at_100 value: 28.192 - type: ndcg_at_1000 value: 31.846999999999998 - type: map_at_1 value: 8.221 - type: map_at_3 value: 11.72 - type: map_at_5 value: 12.844 - type: map_at_10 value: 14.17 - type: map_at_20 value: 15.043000000000001 - type: map_at_100 value: 15.842 - type: map_at_1000 value: 16.04 - type: recall_at_1 value: 8.221 - type: recall_at_3 value: 15.214 - type: recall_at_5 value: 19.185 - type: recall_at_10 value: 26.14 - type: recall_at_20 value: 33.931 - type: recall_at_100 value: 51.429 - type: recall_at_1000 value: 72.269 - type: precision_at_1 value: 18.826999999999998 - type: precision_at_3 value: 12.4 - type: precision_at_5 value: 9.707 - type: precision_at_10 value: 6.84 - type: precision_at_20 value: 4.557 - type: precision_at_100 value: 1.461 - type: precision_at_1000 value: 0.212 - type: mrr_at_1 value: 18.8274 - type: mrr_at_3 value: 25.2226 - type: mrr_at_5 value: 27.163999999999998 - type: mrr_at_10 value: 28.6116 - type: mrr_at_20 value: 29.3082 - type: mrr_at_100 value: 29.7302 - type: mrr_at_1000 value: 29.786600000000004 - type: nauc_ndcg_at_1_max value: 23.3019 - type: nauc_ndcg_at_1_std value: 14.4153 - type: nauc_ndcg_at_1_diff1 value: 21.8879 - type: nauc_ndcg_at_3_max value: 22.2746 - type: nauc_ndcg_at_3_std value: 15.487300000000001 - type: nauc_ndcg_at_3_diff1 value: 17.8275 - type: nauc_ndcg_at_5_max value: 23.0993 - type: nauc_ndcg_at_5_std value: 16.4617 - type: nauc_ndcg_at_5_diff1 value: 16.7855 - type: nauc_ndcg_at_10_max value: 24.7783 - type: nauc_ndcg_at_10_std value: 20.1484 - type: nauc_ndcg_at_10_diff1 value: 17.0753 - type: nauc_ndcg_at_20_max value: 26.1465 - type: nauc_ndcg_at_20_std value: 22.3842 - type: nauc_ndcg_at_20_diff1 value: 16.777900000000002 - type: nauc_ndcg_at_100_max value: 27.703100000000003 - type: nauc_ndcg_at_100_std value: 25.3223 - type: nauc_ndcg_at_100_diff1 value: 16.1821 - type: nauc_ndcg_at_1000_max value: 28.778599999999997 - type: nauc_ndcg_at_1000_std value: 27.9877 - type: nauc_ndcg_at_1000_diff1 value: 16.223499999999998 - type: nauc_map_at_1_max value: 22.4083 - type: nauc_map_at_1_std value: 9.546000000000001 - type: nauc_map_at_1_diff1 value: 29.008499999999998 - type: nauc_map_at_3_max value: 22.0196 - type: nauc_map_at_3_std value: 11.7774 - type: nauc_map_at_3_diff1 value: 21.7038 - type: nauc_map_at_5_max value: 22.7222 - type: nauc_map_at_5_std value: 12.8126 - type: nauc_map_at_5_diff1 value: 20.288 - type: nauc_map_at_10_max value: 23.566200000000002 - type: nauc_map_at_10_std value: 14.8877 - type: nauc_map_at_10_diff1 value: 19.9221 - type: nauc_map_at_20_max value: 24.1809 - type: nauc_map_at_20_std value: 15.9395 - type: nauc_map_at_20_diff1 value: 19.6606 - type: nauc_map_at_100_max value: 24.7213 - type: nauc_map_at_100_std value: 16.8474 - type: nauc_map_at_100_diff1 value: 19.5227 - type: nauc_map_at_1000_max value: 24.8168 - type: nauc_map_at_1000_std value: 17.0802 - type: nauc_map_at_1000_diff1 value: 19.496199999999998 - type: nauc_recall_at_1_max value: 22.4083 - type: nauc_recall_at_1_std value: 9.546000000000001 - type: nauc_recall_at_1_diff1 value: 29.008499999999998 - type: nauc_recall_at_3_max value: 19.4585 - type: nauc_recall_at_3_std value: 14.3753 - type: 
nauc_recall_at_3_diff1 value: 15.7 - type: nauc_recall_at_5_max value: 20.5273 - type: nauc_recall_at_5_std value: 16.2058 - type: nauc_recall_at_5_diff1 value: 12.1747 - type: nauc_recall_at_10_max value: 22.6961 - type: nauc_recall_at_10_std value: 22.400000000000002 - type: nauc_recall_at_10_diff1 value: 13.2301 - type: nauc_recall_at_20_max value: 23.9165 - type: nauc_recall_at_20_std value: 25.392300000000002 - type: nauc_recall_at_20_diff1 value: 11.8797 - type: nauc_recall_at_100_max value: 26.6031 - type: nauc_recall_at_100_std value: 31.7759 - type: nauc_recall_at_100_diff1 value: 8.9369 - type: nauc_recall_at_1000_max value: 32.4917 - type: nauc_recall_at_1000_std value: 47.7736 - type: nauc_recall_at_1000_diff1 value: 9.5485 - type: nauc_precision_at_1_max value: 23.3019 - type: nauc_precision_at_1_std value: 14.4153 - type: nauc_precision_at_1_diff1 value: 21.8879 - type: nauc_precision_at_3_max value: 23.9748 - type: nauc_precision_at_3_std value: 21.5474 - type: nauc_precision_at_3_diff1 value: 10.6452 - type: nauc_precision_at_5_max value: 24.9076 - type: nauc_precision_at_5_std value: 23.9797 - type: nauc_precision_at_5_diff1 value: 7.1156999999999995 - type: nauc_precision_at_10_max value: 26.721 - type: nauc_precision_at_10_std value: 30.1734 - type: nauc_precision_at_10_diff1 value: 7.0459 - type: nauc_precision_at_20_max value: 27.9059 - type: nauc_precision_at_20_std value: 33.1933 - type: nauc_precision_at_20_diff1 value: 5.7082 - type: nauc_precision_at_100_max value: 25.7203 - type: nauc_precision_at_100_std value: 35.108 - type: nauc_precision_at_100_diff1 value: 2.2525 - type: nauc_precision_at_1000_max value: 23.6155 - type: nauc_precision_at_1000_std value: 39.4567 - type: nauc_precision_at_1000_diff1 value: -1.2073 - type: nauc_mrr_at_1_max value: 23.3019 - type: nauc_mrr_at_1_std value: 14.4153 - type: nauc_mrr_at_1_diff1 value: 21.8879 - type: nauc_mrr_at_3_max value: 23.340700000000002 - type: nauc_mrr_at_3_std value: 18.1166 - type: nauc_mrr_at_3_diff1 value: 16.4821 - type: nauc_mrr_at_5_max value: 23.5278 - type: nauc_mrr_at_5_std value: 19.023200000000003 - type: nauc_mrr_at_5_diff1 value: 15.7295 - type: nauc_mrr_at_10_max value: 24.199 - type: nauc_mrr_at_10_std value: 20.218600000000002 - type: nauc_mrr_at_10_diff1 value: 16.173199999999998 - type: nauc_mrr_at_20_max value: 24.4813 - type: nauc_mrr_at_20_std value: 20.5169 - type: nauc_mrr_at_20_diff1 value: 16.2274 - type: nauc_mrr_at_100_max value: 24.378800000000002 - type: nauc_mrr_at_100_std value: 20.4327 - type: nauc_mrr_at_100_diff1 value: 16.220499999999998 - type: nauc_mrr_at_1000_max value: 24.3802 - type: nauc_mrr_at_1000_std value: 20.4123 - type: nauc_mrr_at_1000_diff1 value: 16.2191 - type: main_score value: 20.948 - task: type: Retrieval dataset: name: MTEB DBPedia (default) type: mteb/dbpedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: ndcg_at_1 value: 30.375000000000004 - type: ndcg_at_3 value: 26.590999999999998 - type: ndcg_at_5 value: 24.586 - type: ndcg_at_10 value: 23.246 - type: ndcg_at_20 value: 23.025000000000002 - type: ndcg_at_100 value: 26.994 - type: ndcg_at_1000 value: 33.591 - type: map_at_1 value: 4.104 - type: map_at_3 value: 6.869 - type: map_at_5 value: 7.949000000000001 - type: map_at_10 value: 9.511 - type: map_at_20 value: 10.959000000000001 - type: map_at_100 value: 13.444999999999999 - type: map_at_1000 value: 14.482999999999999 - type: recall_at_1 value: 4.104 - type: recall_at_3 value: 8.394 - type: 
recall_at_5 value: 10.453 - type: recall_at_10 value: 14.413 - type: recall_at_20 value: 19.421 - type: recall_at_100 value: 34.134 - type: recall_at_1000 value: 56.74 - type: precision_at_1 value: 43.0 - type: precision_at_3 value: 32.25 - type: precision_at_5 value: 26.650000000000002 - type: precision_at_10 value: 20.575 - type: precision_at_20 value: 15.587000000000002 - type: precision_at_100 value: 6.784999999999999 - type: precision_at_1000 value: 1.465 - type: mrr_at_1 value: 43.0 - type: mrr_at_3 value: 50.416700000000006 - type: mrr_at_5 value: 51.554199999999994 - type: mrr_at_10 value: 52.5436 - type: mrr_at_20 value: 53.0818 - type: mrr_at_100 value: 53.3559 - type: mrr_at_1000 value: 53.3775 - type: nauc_ndcg_at_1_max value: 32.3654 - type: nauc_ndcg_at_1_std value: 10.134799999999998 - type: nauc_ndcg_at_1_diff1 value: 30.7456 - type: nauc_ndcg_at_3_max value: 35.7454 - type: nauc_ndcg_at_3_std value: 11.2598 - type: nauc_ndcg_at_3_diff1 value: 28.8957 - type: nauc_ndcg_at_5_max value: 37.3094 - type: nauc_ndcg_at_5_std value: 12.0986 - type: nauc_ndcg_at_5_diff1 value: 30.1683 - type: nauc_ndcg_at_10_max value: 37.8415 - type: nauc_ndcg_at_10_std value: 13.6007 - type: nauc_ndcg_at_10_diff1 value: 27.7172 - type: nauc_ndcg_at_20_max value: 36.201899999999995 - type: nauc_ndcg_at_20_std value: 14.508399999999998 - type: nauc_ndcg_at_20_diff1 value: 25.6504 - type: nauc_ndcg_at_100_max value: 37.8181 - type: nauc_ndcg_at_100_std value: 22.2808 - type: nauc_ndcg_at_100_diff1 value: 22.156100000000002 - type: nauc_ndcg_at_1000_max value: 43.2943 - type: nauc_ndcg_at_1000_std value: 29.2433 - type: nauc_ndcg_at_1000_diff1 value: 24.593 - type: nauc_map_at_1_max value: 3.9762 - type: nauc_map_at_1_std value: 2.929 - type: nauc_map_at_1_diff1 value: 21.787699999999997 - type: nauc_map_at_3_max value: 7.2749 - type: nauc_map_at_3_std value: 4.1128 - type: nauc_map_at_3_diff1 value: 19.4785 - type: nauc_map_at_5_max value: 11.6105 - type: nauc_map_at_5_std value: 3.9446000000000003 - type: nauc_map_at_5_diff1 value: 21.250700000000002 - type: nauc_map_at_10_max value: 17.3344 - type: nauc_map_at_10_std value: 6.990200000000001 - type: nauc_map_at_10_diff1 value: 20.962 - type: nauc_map_at_20_max value: 23.447200000000002 - type: nauc_map_at_20_std value: 11.8169 - type: nauc_map_at_20_diff1 value: 21.0181 - type: nauc_map_at_100_max value: 32.9328 - type: nauc_map_at_100_std value: 21.3233 - type: nauc_map_at_100_diff1 value: 19.3584 - type: nauc_map_at_1000_max value: 34.9988 - type: nauc_map_at_1000_std value: 23.3726 - type: nauc_map_at_1000_diff1 value: 19.9623 - type: nauc_recall_at_1_max value: 3.9762 - type: nauc_recall_at_1_std value: 2.929 - type: nauc_recall_at_1_diff1 value: 21.787699999999997 - type: nauc_recall_at_3_max value: 2.7925999999999997 - type: nauc_recall_at_3_std value: -2.4797 - type: nauc_recall_at_3_diff1 value: 13.525 - type: nauc_recall_at_5_max value: 6.8843000000000005 - type: nauc_recall_at_5_std value: -3.7343 - type: nauc_recall_at_5_diff1 value: 17.638499999999997 - type: nauc_recall_at_10_max value: 11.6201 - type: nauc_recall_at_10_std value: -1.0245 - type: nauc_recall_at_10_diff1 value: 15.4671 - type: nauc_recall_at_20_max value: 15.815999999999999 - type: nauc_recall_at_20_std value: 3.6186999999999996 - type: nauc_recall_at_20_diff1 value: 15.407000000000002 - type: nauc_recall_at_100_max value: 24.712 - type: nauc_recall_at_100_std value: 22.0841 - type: nauc_recall_at_100_diff1 value: 10.1828 - type: nauc_recall_at_1000_max value: 33.821 - 
type: nauc_recall_at_1000_std value: 36.807 - type: nauc_recall_at_1000_diff1 value: 12.8396 - type: nauc_precision_at_1_max value: 39.2878 - type: nauc_precision_at_1_std value: 15.6774 - type: nauc_precision_at_1_diff1 value: 31.384 - type: nauc_precision_at_3_max value: 43.498 - type: nauc_precision_at_3_std value: 17.592299999999998 - type: nauc_precision_at_3_diff1 value: 25.154799999999998 - type: nauc_precision_at_5_max value: 47.632600000000004 - type: nauc_precision_at_5_std value: 19.6694 - type: nauc_precision_at_5_diff1 value: 26.762399999999996 - type: nauc_precision_at_10_max value: 50.91139999999999 - type: nauc_precision_at_10_std value: 23.6363 - type: nauc_precision_at_10_diff1 value: 23.097 - type: nauc_precision_at_20_max value: 52.53489999999999 - type: nauc_precision_at_20_std value: 28.8839 - type: nauc_precision_at_20_diff1 value: 18.9418 - type: nauc_precision_at_100_max value: 48.79 - type: nauc_precision_at_100_std value: 31.642500000000002 - type: nauc_precision_at_100_diff1 value: 13.646700000000001 - type: nauc_precision_at_1000_max value: 27.015099999999997 - type: nauc_precision_at_1000_std value: 13.613900000000001 - type: nauc_precision_at_1000_diff1 value: 12.138300000000001 - type: nauc_mrr_at_1_max value: 39.2878 - type: nauc_mrr_at_1_std value: 15.6774 - type: nauc_mrr_at_1_diff1 value: 31.384 - type: nauc_mrr_at_3_max value: 41.747299999999996 - type: nauc_mrr_at_3_std value: 14.7682 - type: nauc_mrr_at_3_diff1 value: 29.8219 - type: nauc_mrr_at_5_max value: 42.408699999999996 - type: nauc_mrr_at_5_std value: 14.769099999999998 - type: nauc_mrr_at_5_diff1 value: 31.1068 - type: nauc_mrr_at_10_max value: 42.571999999999996 - type: nauc_mrr_at_10_std value: 14.8256 - type: nauc_mrr_at_10_diff1 value: 31.156299999999998 - type: nauc_mrr_at_20_max value: 42.4832 - type: nauc_mrr_at_20_std value: 14.7993 - type: nauc_mrr_at_20_diff1 value: 31.260700000000003 - type: nauc_mrr_at_100_max value: 42.5018 - type: nauc_mrr_at_100_std value: 14.9009 - type: nauc_mrr_at_100_diff1 value: 31.2395 - type: nauc_mrr_at_1000_max value: 42.4996 - type: nauc_mrr_at_1000_std value: 14.9098 - type: nauc_mrr_at_1000_diff1 value: 31.230400000000003 - type: main_score value: 23.246 - task: type: Classification dataset: name: MTEB EmotionClassification (default) type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 45.68 - type: f1 value: 43.1207 - type: f1_weighted value: 48.0349 - type: main_score value: 45.68 - task: type: Retrieval dataset: name: MTEB FEVER (default) type: mteb/fever config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: ndcg_at_1 value: 16.742 - type: ndcg_at_3 value: 23.316 - type: ndcg_at_5 value: 25.738 - type: ndcg_at_10 value: 28.68 - type: ndcg_at_20 value: 30.959999999999997 - type: ndcg_at_100 value: 34.037 - type: ndcg_at_1000 value: 36.004999999999995 - type: map_at_1 value: 15.797 - type: map_at_3 value: 21.209 - type: map_at_5 value: 22.547 - type: map_at_10 value: 23.762 - type: map_at_20 value: 24.401 - type: map_at_100 value: 24.83 - type: map_at_1000 value: 24.901 - type: recall_at_1 value: 15.797 - type: recall_at_3 value: 28.233000000000004 - type: recall_at_5 value: 33.997 - type: recall_at_10 value: 42.888 - type: recall_at_20 value: 51.635 - type: recall_at_100 value: 67.801 - type: recall_at_1000 value: 82.998 - type: precision_at_1 value: 16.742 - type: precision_at_3 value: 10.096 - type: precision_at_5 value: 
7.335999999999999 - type: precision_at_10 value: 4.65 - type: precision_at_20 value: 2.817 - type: precision_at_100 value: 0.748 - type: precision_at_1000 value: 0.093 - type: mrr_at_1 value: 16.7417 - type: mrr_at_3 value: 22.4122 - type: mrr_at_5 value: 23.8374 - type: mrr_at_10 value: 25.101000000000003 - type: mrr_at_20 value: 25.739800000000002 - type: mrr_at_100 value: 26.164199999999997 - type: mrr_at_1000 value: 26.227800000000002 - type: nauc_ndcg_at_1_max value: 13.991500000000002 - type: nauc_ndcg_at_1_std value: -25.4382 - type: nauc_ndcg_at_1_diff1 value: 21.2751 - type: nauc_ndcg_at_3_max value: 15.4019 - type: nauc_ndcg_at_3_std value: -25.9724 - type: nauc_ndcg_at_3_diff1 value: 16.3365 - type: nauc_ndcg_at_5_max value: 16.4606 - type: nauc_ndcg_at_5_std value: -26.063599999999997 - type: nauc_ndcg_at_5_diff1 value: 15.334900000000001 - type: nauc_ndcg_at_10_max value: 17.1297 - type: nauc_ndcg_at_10_std value: -26.709 - type: nauc_ndcg_at_10_diff1 value: 14.072799999999999 - type: nauc_ndcg_at_20_max value: 18.0756 - type: nauc_ndcg_at_20_std value: -25.849899999999998 - type: nauc_ndcg_at_20_diff1 value: 13.3475 - type: nauc_ndcg_at_100_max value: 18.5017 - type: nauc_ndcg_at_100_std value: -25.1975 - type: nauc_ndcg_at_100_diff1 value: 13.128200000000001 - type: nauc_ndcg_at_1000_max value: 18.570500000000003 - type: nauc_ndcg_at_1000_std value: -24.5199 - type: nauc_ndcg_at_1000_diff1 value: 13.608600000000001 - type: nauc_map_at_1_max value: 14.4553 - type: nauc_map_at_1_std value: -25.291999999999998 - type: nauc_map_at_1_diff1 value: 21.4966 - type: nauc_map_at_3_max value: 15.1199 - type: nauc_map_at_3_std value: -25.8608 - type: nauc_map_at_3_diff1 value: 17.5 - type: nauc_map_at_5_max value: 15.748599999999998 - type: nauc_map_at_5_std value: -25.928 - type: nauc_map_at_5_diff1 value: 16.8883 - type: nauc_map_at_10_max value: 16.036 - type: nauc_map_at_10_std value: -26.2116 - type: nauc_map_at_10_diff1 value: 16.335 - type: nauc_map_at_20_max value: 16.305500000000002 - type: nauc_map_at_20_std value: -25.965500000000002 - type: nauc_map_at_20_diff1 value: 16.1305 - type: nauc_map_at_100_max value: 16.380200000000002 - type: nauc_map_at_100_std value: -25.870199999999997 - type: nauc_map_at_100_diff1 value: 16.1253 - type: nauc_map_at_1000_max value: 16.3924 - type: nauc_map_at_1000_std value: -25.838499999999996 - type: nauc_map_at_1000_diff1 value: 16.1408 - type: nauc_recall_at_1_max value: 14.4553 - type: nauc_recall_at_1_std value: -25.291999999999998 - type: nauc_recall_at_1_diff1 value: 21.4966 - type: nauc_recall_at_3_max value: 16.1074 - type: nauc_recall_at_3_std value: -25.916099999999997 - type: nauc_recall_at_3_diff1 value: 13.5176 - type: nauc_recall_at_5_max value: 18.0189 - type: nauc_recall_at_5_std value: -25.795299999999997 - type: nauc_recall_at_5_diff1 value: 11.3842 - type: nauc_recall_at_10_max value: 19.4035 - type: nauc_recall_at_10_std value: -27.2015 - type: nauc_recall_at_10_diff1 value: 7.9085 - type: nauc_recall_at_20_max value: 22.5578 - type: nauc_recall_at_20_std value: -24.1674 - type: nauc_recall_at_20_diff1 value: 5.0956 - type: nauc_recall_at_100_max value: 25.2855 - type: nauc_recall_at_100_std value: -19.9378 - type: nauc_recall_at_100_diff1 value: 1.3199 - type: nauc_recall_at_1000_max value: 29.253400000000003 - type: nauc_recall_at_1000_std value: -8.519599999999999 - type: nauc_recall_at_1000_diff1 value: 0.1057 - type: nauc_precision_at_1_max value: 13.991500000000002 - type: nauc_precision_at_1_std value: -25.4382 - 
type: nauc_precision_at_1_diff1 value: 21.2751 - type: nauc_precision_at_3_max value: 15.758700000000001 - type: nauc_precision_at_3_std value: -26.3494 - type: nauc_precision_at_3_diff1 value: 13.6081 - type: nauc_precision_at_5_max value: 17.851300000000002 - type: nauc_precision_at_5_std value: -26.3818 - type: nauc_precision_at_5_diff1 value: 11.4331 - type: nauc_precision_at_10_max value: 19.5748 - type: nauc_precision_at_10_std value: -27.594400000000004 - type: nauc_precision_at_10_diff1 value: 8.0539 - type: nauc_precision_at_20_max value: 22.453799999999998 - type: nauc_precision_at_20_std value: -23.707800000000002 - type: nauc_precision_at_20_diff1 value: 5.2 - type: nauc_precision_at_100_max value: 24.1067 - type: nauc_precision_at_100_std value: -16.6068 - type: nauc_precision_at_100_diff1 value: 1.1200999999999999 - type: nauc_precision_at_1000_max value: 22.516 - type: nauc_precision_at_1000_std value: -0.621 - type: nauc_precision_at_1000_diff1 value: -0.26749999999999996 - type: nauc_mrr_at_1_max value: 13.991500000000002 - type: nauc_mrr_at_1_std value: -25.4382 - type: nauc_mrr_at_1_diff1 value: 21.2751 - type: nauc_mrr_at_3_max value: 14.95 - type: nauc_mrr_at_3_std value: -25.885 - type: nauc_mrr_at_3_diff1 value: 17.3215 - type: nauc_mrr_at_5_max value: 15.5568 - type: nauc_mrr_at_5_std value: -25.963 - type: nauc_mrr_at_5_diff1 value: 16.699 - type: nauc_mrr_at_10_max value: 15.901299999999999 - type: nauc_mrr_at_10_std value: -26.2471 - type: nauc_mrr_at_10_diff1 value: 16.189899999999998 - type: nauc_mrr_at_20_max value: 16.1798 - type: nauc_mrr_at_20_std value: -25.989600000000003 - type: nauc_mrr_at_20_diff1 value: 15.984499999999999 - type: nauc_mrr_at_100_max value: 16.2602 - type: nauc_mrr_at_100_std value: -25.9187 - type: nauc_mrr_at_100_diff1 value: 16.0136 - type: nauc_mrr_at_1000_max value: 16.2577 - type: nauc_mrr_at_1000_std value: -25.9039 - type: nauc_mrr_at_1000_diff1 value: 16.0318 - type: main_score value: 28.68 - task: type: Retrieval dataset: name: MTEB FiQA2018 (default) type: mteb/fiqa config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: ndcg_at_1 value: 14.198 - type: ndcg_at_3 value: 14.018 - type: ndcg_at_5 value: 14.857000000000001 - type: ndcg_at_10 value: 16.509999999999998 - type: ndcg_at_20 value: 18.499 - type: ndcg_at_100 value: 22.658 - type: ndcg_at_1000 value: 26.894000000000002 - type: map_at_1 value: 7.061000000000001 - type: map_at_3 value: 10.151 - type: map_at_5 value: 11.0 - type: map_at_10 value: 11.883000000000001 - type: map_at_20 value: 12.5 - type: map_at_100 value: 13.154 - type: map_at_1000 value: 13.343 - type: recall_at_1 value: 7.061000000000001 - type: recall_at_3 value: 13.339 - type: recall_at_5 value: 16.689999999999998 - type: recall_at_10 value: 21.435000000000002 - type: recall_at_20 value: 27.779999999999998 - type: recall_at_100 value: 45.381 - type: recall_at_1000 value: 71.61699999999999 - type: precision_at_1 value: 14.198 - type: precision_at_3 value: 9.568 - type: precision_at_5 value: 7.099 - type: precision_at_10 value: 4.7379999999999995 - type: precision_at_20 value: 3.1329999999999996 - type: precision_at_100 value: 1.083 - type: precision_at_1000 value: 0.181 - type: mrr_at_1 value: 14.1975 - type: mrr_at_3 value: 18.5185 - type: mrr_at_5 value: 19.8302 - type: mrr_at_10 value: 20.6685 - type: mrr_at_20 value: 21.273 - type: mrr_at_100 value: 21.8076 - type: mrr_at_1000 value: 21.9063 - type: nauc_ndcg_at_1_max value: 12.2117 - type: nauc_ndcg_at_1_std 
value: -10.7059 - type: nauc_ndcg_at_1_diff1 value: 27.4415 - type: nauc_ndcg_at_3_max value: 12.4823 - type: nauc_ndcg_at_3_std value: -10.252500000000001 - type: nauc_ndcg_at_3_diff1 value: 20.6834 - type: nauc_ndcg_at_5_max value: 10.3316 - type: nauc_ndcg_at_5_std value: -9.8648 - type: nauc_ndcg_at_5_diff1 value: 19.6879 - type: nauc_ndcg_at_10_max value: 9.2057 - type: nauc_ndcg_at_10_std value: -9.3284 - type: nauc_ndcg_at_10_diff1 value: 19.5253 - type: nauc_ndcg_at_20_max value: 8.3092 - type: nauc_ndcg_at_20_std value: -6.686400000000001 - type: nauc_ndcg_at_20_diff1 value: 19.0031 - type: nauc_ndcg_at_100_max value: 9.321200000000001 - type: nauc_ndcg_at_100_std value: -4.4703 - type: nauc_ndcg_at_100_diff1 value: 19.2995 - type: nauc_ndcg_at_1000_max value: 11.754199999999999 - type: nauc_ndcg_at_1000_std value: -2.6593999999999998 - type: nauc_ndcg_at_1000_diff1 value: 20.3056 - type: nauc_map_at_1_max value: 17.227899999999998 - type: nauc_map_at_1_std value: -6.8508 - type: nauc_map_at_1_diff1 value: 25.9133 - type: nauc_map_at_3_max value: 13.716999999999999 - type: nauc_map_at_3_std value: -8.86 - type: nauc_map_at_3_diff1 value: 21.0714 - type: nauc_map_at_5_max value: 12.146700000000001 - type: nauc_map_at_5_std value: -8.909400000000002 - type: nauc_map_at_5_diff1 value: 20.3887 - type: nauc_map_at_10_max value: 11.417 - type: nauc_map_at_10_std value: -8.9141 - type: nauc_map_at_10_diff1 value: 20.7165 - type: nauc_map_at_20_max value: 11.0988 - type: nauc_map_at_20_std value: -7.9453 - type: nauc_map_at_20_diff1 value: 20.7809 - type: nauc_map_at_100_max value: 11.1694 - type: nauc_map_at_100_std value: -7.4639 - type: nauc_map_at_100_diff1 value: 20.9252 - type: nauc_map_at_1000_max value: 11.3405 - type: nauc_map_at_1000_std value: -7.3102 - type: nauc_map_at_1000_diff1 value: 20.9959 - type: nauc_recall_at_1_max value: 17.227899999999998 - type: nauc_recall_at_1_std value: -6.8508 - type: nauc_recall_at_1_diff1 value: 25.9133 - type: nauc_recall_at_3_max value: 11.2722 - type: nauc_recall_at_3_std value: -9.4755 - type: nauc_recall_at_3_diff1 value: 15.1741 - type: nauc_recall_at_5_max value: 6.7860000000000005 - type: nauc_recall_at_5_std value: -8.9743 - type: nauc_recall_at_5_diff1 value: 14.091999999999999 - type: nauc_recall_at_10_max value: 4.5781 - type: nauc_recall_at_10_std value: -8.4828 - type: nauc_recall_at_10_diff1 value: 13.1033 - type: nauc_recall_at_20_max value: 3.0408999999999997 - type: nauc_recall_at_20_std value: -1.0319 - type: nauc_recall_at_20_diff1 value: 11.2412 - type: nauc_recall_at_100_max value: 4.6371 - type: nauc_recall_at_100_std value: 5.6984 - type: nauc_recall_at_100_diff1 value: 10.648399999999999 - type: nauc_recall_at_1000_max value: 14.4284 - type: nauc_recall_at_1000_std value: 20.471 - type: nauc_recall_at_1000_diff1 value: 13.6603 - type: nauc_precision_at_1_max value: 12.2117 - type: nauc_precision_at_1_std value: -10.7059 - type: nauc_precision_at_1_diff1 value: 27.4415 - type: nauc_precision_at_3_max value: 8.3303 - type: nauc_precision_at_3_std value: -12.3434 - type: nauc_precision_at_3_diff1 value: 20.3774 - type: nauc_precision_at_5_max value: 5.46 - type: nauc_precision_at_5_std value: -10.6964 - type: nauc_precision_at_5_diff1 value: 19.3914 - type: nauc_precision_at_10_max value: 5.8885 - type: nauc_precision_at_10_std value: -9.0149 - type: nauc_precision_at_10_diff1 value: 21.8392 - type: nauc_precision_at_20_max value: 3.8181 - type: nauc_precision_at_20_std value: -4.2505 - type: nauc_precision_at_20_diff1 
value: 19.9848 - type: nauc_precision_at_100_max value: 9.6538 - type: nauc_precision_at_100_std value: 1.8809 - type: nauc_precision_at_100_diff1 value: 18.6529 - type: nauc_precision_at_1000_max value: 15.5018 - type: nauc_precision_at_1000_std value: 5.4286 - type: nauc_precision_at_1000_diff1 value: 13.2946 - type: nauc_mrr_at_1_max value: 12.2117 - type: nauc_mrr_at_1_std value: -10.7059 - type: nauc_mrr_at_1_diff1 value: 27.4415 - type: nauc_mrr_at_3_max value: 10.5481 - type: nauc_mrr_at_3_std value: -10.7069 - type: nauc_mrr_at_3_diff1 value: 22.1345 - type: nauc_mrr_at_5_max value: 9.463000000000001 - type: nauc_mrr_at_5_std value: -10.5558 - type: nauc_mrr_at_5_diff1 value: 21.8622 - type: nauc_mrr_at_10_max value: 9.6679 - type: nauc_mrr_at_10_std value: -10.399600000000001 - type: nauc_mrr_at_10_diff1 value: 21.7847 - type: nauc_mrr_at_20_max value: 9.422600000000001 - type: nauc_mrr_at_20_std value: -9.8865 - type: nauc_mrr_at_20_diff1 value: 21.4703 - type: nauc_mrr_at_100_max value: 9.640500000000001 - type: nauc_mrr_at_100_std value: -9.8299 - type: nauc_mrr_at_100_diff1 value: 21.5227 - type: nauc_mrr_at_1000_max value: 9.6734 - type: nauc_mrr_at_1000_std value: -9.8079 - type: nauc_mrr_at_1000_diff1 value: 21.5451 - type: main_score value: 16.509999999999998 - task: type: Retrieval dataset: name: MTEB HotpotQA (default) type: mteb/hotpotqa config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: ndcg_at_1 value: 40.297 - type: ndcg_at_3 value: 31.719 - type: ndcg_at_5 value: 33.744 - type: ndcg_at_10 value: 35.72 - type: ndcg_at_20 value: 37.266 - type: ndcg_at_100 value: 39.778000000000006 - type: ndcg_at_1000 value: 42.056 - type: map_at_1 value: 20.149 - type: map_at_3 value: 25.899 - type: map_at_5 value: 27.157999999999998 - type: map_at_10 value: 28.105000000000004 - type: map_at_20 value: 28.586 - type: map_at_100 value: 29.000999999999998 - type: map_at_1000 value: 29.098000000000003 - type: recall_at_1 value: 20.149 - type: recall_at_3 value: 29.932 - type: recall_at_5 value: 33.93 - type: recall_at_10 value: 38.92 - type: recall_at_20 value: 43.903 - type: recall_at_100 value: 55.057 - type: recall_at_1000 value: 70.27 - type: precision_at_1 value: 40.297 - type: precision_at_3 value: 19.955000000000002 - type: precision_at_5 value: 13.572000000000001 - type: precision_at_10 value: 7.784000000000001 - type: precision_at_20 value: 4.390000000000001 - type: precision_at_100 value: 1.101 - type: precision_at_1000 value: 0.14100000000000001 - type: mrr_at_1 value: 40.2971 - type: mrr_at_3 value: 46.041 - type: mrr_at_5 value: 47.199600000000004 - type: mrr_at_10 value: 47.9631 - type: mrr_at_20 value: 48.3871 - type: mrr_at_100 value: 48.661500000000004 - type: mrr_at_1000 value: 48.707 - type: nauc_ndcg_at_1_max value: 27.8706 - type: nauc_ndcg_at_1_std value: -8.272300000000001 - type: nauc_ndcg_at_1_diff1 value: 57.8385 - type: nauc_ndcg_at_3_max value: 27.852500000000003 - type: nauc_ndcg_at_3_std value: -6.4216 - type: nauc_ndcg_at_3_diff1 value: 48.365 - type: nauc_ndcg_at_5_max value: 27.509099999999997 - type: nauc_ndcg_at_5_std value: -5.6179 - type: nauc_ndcg_at_5_diff1 value: 46.5015 - type: nauc_ndcg_at_10_max value: 27.002 - type: nauc_ndcg_at_10_std value: -4.5545 - type: nauc_ndcg_at_10_diff1 value: 45.7081 - type: nauc_ndcg_at_20_max value: 26.984799999999996 - type: nauc_ndcg_at_20_std value: -3.6883 - type: nauc_ndcg_at_20_diff1 value: 44.9584 - type: nauc_ndcg_at_100_max value: 27.283600000000003 - type: 
nauc_ndcg_at_100_std value: -2.3537 - type: nauc_ndcg_at_100_diff1 value: 44.1115 - type: nauc_ndcg_at_1000_max value: 27.417399999999997 - type: nauc_ndcg_at_1000_std value: -1.2178 - type: nauc_ndcg_at_1000_diff1 value: 44.0544 - type: nauc_map_at_1_max value: 27.8706 - type: nauc_map_at_1_std value: -8.272300000000001 - type: nauc_map_at_1_diff1 value: 57.8385 - type: nauc_map_at_3_max value: 27.584799999999998 - type: nauc_map_at_3_std value: -5.9387 - type: nauc_map_at_3_diff1 value: 47.2019 - type: nauc_map_at_5_max value: 27.242 - type: nauc_map_at_5_std value: -5.3224 - type: nauc_map_at_5_diff1 value: 45.831 - type: nauc_map_at_10_max value: 26.9723 - type: nauc_map_at_10_std value: -4.7007 - type: nauc_map_at_10_diff1 value: 45.3311 - type: nauc_map_at_20_max value: 26.919700000000002 - type: nauc_map_at_20_std value: -4.3851 - type: nauc_map_at_20_diff1 value: 45.0687 - type: nauc_map_at_100_max value: 26.995400000000004 - type: nauc_map_at_100_std value: -4.0821000000000005 - type: nauc_map_at_100_diff1 value: 44.9062 - type: nauc_map_at_1000_max value: 26.998499999999996 - type: nauc_map_at_1000_std value: -4.0238000000000005 - type: nauc_map_at_1000_diff1 value: 44.8961 - type: nauc_recall_at_1_max value: 27.8706 - type: nauc_recall_at_1_std value: -8.272300000000001 - type: nauc_recall_at_1_diff1 value: 57.8385 - type: nauc_recall_at_3_max value: 27.3795 - type: nauc_recall_at_3_std value: -5.1751 - type: nauc_recall_at_3_diff1 value: 42.3825 - type: nauc_recall_at_5_max value: 25.634800000000002 - type: nauc_recall_at_5_std value: -3.3379 - type: nauc_recall_at_5_diff1 value: 37.0532 - type: nauc_recall_at_10_max value: 23.5746 - type: nauc_recall_at_10_std value: -0.5226 - type: nauc_recall_at_10_diff1 value: 34.071200000000005 - type: nauc_recall_at_20_max value: 22.1536 - type: nauc_recall_at_20_std value: 2.3993 - type: nauc_recall_at_20_diff1 value: 29.439 - type: nauc_recall_at_100_max value: 20.7576 - type: nauc_recall_at_100_std value: 8.468499999999999 - type: nauc_recall_at_100_diff1 value: 21.221799999999998 - type: nauc_recall_at_1000_max value: 18.7522 - type: nauc_recall_at_1000_std value: 18.916800000000002 - type: nauc_recall_at_1000_diff1 value: 13.558200000000001 - type: nauc_precision_at_1_max value: 27.8706 - type: nauc_precision_at_1_std value: -8.272300000000001 - type: nauc_precision_at_1_diff1 value: 57.8385 - type: nauc_precision_at_3_max value: 27.3795 - type: nauc_precision_at_3_std value: -5.1751 - type: nauc_precision_at_3_diff1 value: 42.3825 - type: nauc_precision_at_5_max value: 25.634800000000002 - type: nauc_precision_at_5_std value: -3.3379 - type: nauc_precision_at_5_diff1 value: 37.0532 - type: nauc_precision_at_10_max value: 23.5746 - type: nauc_precision_at_10_std value: -0.5226 - type: nauc_precision_at_10_diff1 value: 34.071200000000005 - type: nauc_precision_at_20_max value: 22.1536 - type: nauc_precision_at_20_std value: 2.3993 - type: nauc_precision_at_20_diff1 value: 29.439 - type: nauc_precision_at_100_max value: 20.7576 - type: nauc_precision_at_100_std value: 8.468499999999999 - type: nauc_precision_at_100_diff1 value: 21.221799999999998 - type: nauc_precision_at_1000_max value: 18.7522 - type: nauc_precision_at_1000_std value: 18.916800000000002 - type: nauc_precision_at_1000_diff1 value: 13.558200000000001 - type: nauc_mrr_at_1_max value: 27.8706 - type: nauc_mrr_at_1_std value: -8.272300000000001 - type: nauc_mrr_at_1_diff1 value: 57.8385 - type: nauc_mrr_at_3_max value: 28.256700000000002 - type: nauc_mrr_at_3_std value: 
-8.050699999999999 - type: nauc_mrr_at_3_diff1 value: 54.5601 - type: nauc_mrr_at_5_max value: 28.2928 - type: nauc_mrr_at_5_std value: -7.8317 - type: nauc_mrr_at_5_diff1 value: 54.046499999999995 - type: nauc_mrr_at_10_max value: 28.151500000000002 - type: nauc_mrr_at_10_std value: -7.6431 - type: nauc_mrr_at_10_diff1 value: 53.9751 - type: nauc_mrr_at_20_max value: 28.215 - type: nauc_mrr_at_20_std value: -7.5285 - type: nauc_mrr_at_20_diff1 value: 53.9177 - type: nauc_mrr_at_100_max value: 28.215600000000002 - type: nauc_mrr_at_100_std value: -7.524699999999999 - type: nauc_mrr_at_100_diff1 value: 53.9393 - type: nauc_mrr_at_1000_max value: 28.2194 - type: nauc_mrr_at_1000_std value: -7.5150999999999994 - type: nauc_mrr_at_1000_diff1 value: 53.95290000000001 - type: main_score value: 35.72 - task: type: Classification dataset: name: MTEB ImdbClassification (default) type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 65.8656 - type: f1 value: 65.385 - type: f1_weighted value: 65.385 - type: ap value: 60.506899999999995 - type: ap_weighted value: 60.506899999999995 - type: main_score value: 65.8656 - task: type: Retrieval dataset: name: MTEB MSMARCO (default) type: mteb/msmarco config: default split: dev revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: ndcg_at_1 value: 6.877 - type: ndcg_at_3 value: 10.963000000000001 - type: ndcg_at_5 value: 12.845 - type: ndcg_at_10 value: 14.918000000000001 - type: ndcg_at_20 value: 16.721 - type: ndcg_at_100 value: 20.041 - type: ndcg_at_1000 value: 23.296 - type: map_at_1 value: 6.717 - type: map_at_3 value: 9.846 - type: map_at_5 value: 10.886999999999999 - type: map_at_10 value: 11.74 - type: map_at_20 value: 12.237 - type: map_at_100 value: 12.683 - type: map_at_1000 value: 12.792 - type: recall_at_1 value: 6.717 - type: recall_at_3 value: 13.963999999999999 - type: recall_at_5 value: 18.498 - type: recall_at_10 value: 24.869 - type: recall_at_20 value: 31.901000000000003 - type: recall_at_100 value: 49.786 - type: recall_at_1000 value: 75.913 - type: precision_at_1 value: 6.877 - type: precision_at_3 value: 4.809 - type: precision_at_5 value: 3.8280000000000003 - type: precision_at_10 value: 2.5829999999999997 - type: precision_at_20 value: 1.6650000000000003 - type: precision_at_100 value: 0.523 - type: precision_at_1000 value: 0.08 - type: mrr_at_1 value: 6.876799999999999 - type: mrr_at_3 value: 10.093100000000002 - type: mrr_at_5 value: 11.1526 - type: mrr_at_10 value: 12.0074 - type: mrr_at_20 value: 12.5083 - type: mrr_at_100 value: 12.9529 - type: mrr_at_1000 value: 13.057099999999998 - type: nauc_ndcg_at_1_max value: 4.7264 - type: nauc_ndcg_at_1_std value: -16.2439 - type: nauc_ndcg_at_1_diff1 value: 27.4463 - type: nauc_ndcg_at_3_max value: 6.1734 - type: nauc_ndcg_at_3_std value: -16.8949 - type: nauc_ndcg_at_3_diff1 value: 22.7183 - type: nauc_ndcg_at_5_max value: 6.493 - type: nauc_ndcg_at_5_std value: -15.7852 - type: nauc_ndcg_at_5_diff1 value: 21.0805 - type: nauc_ndcg_at_10_max value: 7.099600000000001 - type: nauc_ndcg_at_10_std value: -15.1727 - type: nauc_ndcg_at_10_diff1 value: 20.3957 - type: nauc_ndcg_at_20_max value: 7.9073 - type: nauc_ndcg_at_20_std value: -14.596200000000001 - type: nauc_ndcg_at_20_diff1 value: 20.0084 - type: nauc_ndcg_at_100_max value: 9.112 - type: nauc_ndcg_at_100_std value: -12.0562 - type: nauc_ndcg_at_100_diff1 value: 19.3717 - type: nauc_ndcg_at_1000_max value: 10.1474 - type: nauc_ndcg_at_1000_std 
value: -10.3955 - type: nauc_ndcg_at_1000_diff1 value: 19.2427 - type: nauc_map_at_1_max value: 4.4801 - type: nauc_map_at_1_std value: -16.4499 - type: nauc_map_at_1_diff1 value: 27.5511 - type: nauc_map_at_3_max value: 5.8799 - type: nauc_map_at_3_std value: -16.7696 - type: nauc_map_at_3_diff1 value: 23.531299999999998 - type: nauc_map_at_5_max value: 6.0905000000000005 - type: nauc_map_at_5_std value: -16.0525 - type: nauc_map_at_5_diff1 value: 22.395799999999998 - type: nauc_map_at_10_max value: 6.3876 - type: nauc_map_at_10_std value: -15.774 - type: nauc_map_at_10_diff1 value: 22.0367 - type: nauc_map_at_20_max value: 6.6676 - type: nauc_map_at_20_std value: -15.5729 - type: nauc_map_at_20_diff1 value: 21.8952 - type: nauc_map_at_100_max value: 6.912400000000001 - type: nauc_map_at_100_std value: -15.162400000000002 - type: nauc_map_at_100_diff1 value: 21.7666 - type: nauc_map_at_1000_max value: 6.952500000000001 - type: nauc_map_at_1000_std value: -15.085799999999999 - type: nauc_map_at_1000_diff1 value: 21.7618 - type: nauc_recall_at_1_max value: 4.4801 - type: nauc_recall_at_1_std value: -16.4499 - type: nauc_recall_at_1_diff1 value: 27.5511 - type: nauc_recall_at_3_max value: 6.7195 - type: nauc_recall_at_3_std value: -17.2961 - type: nauc_recall_at_3_diff1 value: 20.9572 - type: nauc_recall_at_5_max value: 7.199 - type: nauc_recall_at_5_std value: -15.260599999999998 - type: nauc_recall_at_5_diff1 value: 18.4745 - type: nauc_recall_at_10_max value: 8.3289 - type: nauc_recall_at_10_std value: -14.0152 - type: nauc_recall_at_10_diff1 value: 17.3142 - type: nauc_recall_at_20_max value: 10.1702 - type: nauc_recall_at_20_std value: -12.7265 - type: nauc_recall_at_20_diff1 value: 16.5162 - type: nauc_recall_at_100_max value: 13.9363 - type: nauc_recall_at_100_std value: -4.0486 - type: nauc_recall_at_100_diff1 value: 14.5015 - type: nauc_recall_at_1000_max value: 24.3013 - type: nauc_recall_at_1000_std value: 12.3673 - type: nauc_recall_at_1000_diff1 value: 10.9827 - type: nauc_precision_at_1_max value: 4.7264 - type: nauc_precision_at_1_std value: -16.2439 - type: nauc_precision_at_1_diff1 value: 27.4463 - type: nauc_precision_at_3_max value: 6.895700000000001 - type: nauc_precision_at_3_std value: -17.0973 - type: nauc_precision_at_3_diff1 value: 20.7819 - type: nauc_precision_at_5_max value: 7.3601 - type: nauc_precision_at_5_std value: -15.189400000000001 - type: nauc_precision_at_5_diff1 value: 18.2284 - type: nauc_precision_at_10_max value: 8.5933 - type: nauc_precision_at_10_std value: -13.9345 - type: nauc_precision_at_10_diff1 value: 17.1801 - type: nauc_precision_at_20_max value: 10.5732 - type: nauc_precision_at_20_std value: -12.2593 - type: nauc_precision_at_20_diff1 value: 16.3194 - type: nauc_precision_at_100_max value: 14.462800000000001 - type: nauc_precision_at_100_std value: -2.7812 - type: nauc_precision_at_100_diff1 value: 13.8556 - type: nauc_precision_at_1000_max value: 22.7827 - type: nauc_precision_at_1000_std value: 13.1185 - type: nauc_precision_at_1000_diff1 value: 8.331199999999999 - type: nauc_mrr_at_1_max value: 4.7264 - type: nauc_mrr_at_1_std value: -16.2439 - type: nauc_mrr_at_1_diff1 value: 27.4463 - type: nauc_mrr_at_3_max value: 5.9976 - type: nauc_mrr_at_3_std value: -16.5493 - type: nauc_mrr_at_3_diff1 value: 23.5058 - type: nauc_mrr_at_5_max value: 6.1958 - type: nauc_mrr_at_5_std value: -15.893699999999999 - type: nauc_mrr_at_5_diff1 value: 22.4454 - type: nauc_mrr_at_10_max value: 6.514200000000001 - type: nauc_mrr_at_10_std value: -15.5116 - 
type: nauc_mrr_at_10_diff1 value: 22.0264 - type: nauc_mrr_at_20_max value: 6.7813 - type: nauc_mrr_at_20_std value: -15.2942 - type: nauc_mrr_at_20_diff1 value: 21.8857 - type: nauc_mrr_at_100_max value: 7.0158 - type: nauc_mrr_at_100_std value: -14.894599999999999 - type: nauc_mrr_at_100_diff1 value: 21.757299999999997 - type: nauc_mrr_at_1000_max value: 7.0534 - type: nauc_mrr_at_1000_std value: -14.8351 - type: nauc_mrr_at_1000_diff1 value: 21.7544 - type: main_score value: 14.918000000000001 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 82.4669 - type: f1 value: 81.3346 - type: f1_weighted value: 82.6885 - type: main_score value: 82.4669 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 58.1145 - type: f1 value: 40.7841 - type: f1_weighted value: 62.343 - type: main_score value: 58.1145 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 4672e20407010da34463acc759c162ca9734bca6 metrics: - type: accuracy value: 60.24549999999999 - type: f1 value: 59.534 - type: f1_weighted value: 60.47670000000001 - type: main_score value: 60.24549999999999 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: fad2c6e8459f9e1c45d9315f4953d921437d70f8 metrics: - type: accuracy value: 66.32820000000001 - type: f1 value: 65.2929 - type: f1_weighted value: 66.51979999999999 - type: main_score value: 66.32820000000001 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P (default) type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 25.8495 - type: v_measure_std value: 1.6320000000000001 - type: main_score value: 25.8495 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S (default) type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 20.0754 - type: v_measure_std value: 1.3306 - type: main_score value: 20.0754 - task: type: Reranking dataset: name: MTEB MindSmallReranking (default) type: mteb/mind_small config: default split: test revision: 59042f120c80e8afa9cdbb224f67076cec0fc9a7 metrics: - type: map value: 28.5611 - type: mrr value: 29.4014 - type: nAUC_map_max value: -20.8019 - type: nAUC_map_std value: -5.307300000000001 - type: nAUC_map_diff1 value: 20.6483 - type: nAUC_mrr_max value: -14.9738 - type: nAUC_mrr_std value: -2.9508 - type: nAUC_mrr_diff1 value: 18.6743 - type: main_score value: 28.5611 - task: type: Retrieval dataset: name: MTEB NFCorpus (default) type: mteb/nfcorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: ndcg_at_1 value: 32.972 - type: ndcg_at_3 value: 29.965000000000003 - type: ndcg_at_5 value: 28.773 - type: ndcg_at_10 value: 26.434 - type: ndcg_at_20 value: 24.922 - type: ndcg_at_100 value: 24.852 - type: ndcg_at_1000 value: 33.388 - type: map_at_1 value: 3.737 - type: map_at_3 value: 6.387 - type: map_at_5 value: 7.420999999999999 - type: map_at_10 value: 8.652 - type: map_at_20 value: 9.745 - type: map_at_100 
value: 11.247 - type: map_at_1000 value: 12.494 - type: recall_at_1 value: 3.737 - type: recall_at_3 value: 7.889 - type: recall_at_5 value: 10.026 - type: recall_at_10 value: 12.615000000000002 - type: recall_at_20 value: 16.184 - type: recall_at_100 value: 26.988 - type: recall_at_1000 value: 57.594 - type: precision_at_1 value: 34.675 - type: precision_at_3 value: 28.173 - type: precision_at_5 value: 25.201 - type: precision_at_10 value: 20.0 - type: precision_at_20 value: 15.356 - type: precision_at_100 value: 6.898 - type: precision_at_1000 value: 1.936 - type: mrr_at_1 value: 34.674899999999994 - type: mrr_at_3 value: 42.0537 - type: mrr_at_5 value: 43.741 - type: mrr_at_10 value: 44.277699999999996 - type: mrr_at_20 value: 44.819700000000005 - type: mrr_at_100 value: 45.1552 - type: mrr_at_1000 value: 45.2048 - type: nauc_ndcg_at_1_max value: 27.6992 - type: nauc_ndcg_at_1_std value: 13.1387 - type: nauc_ndcg_at_1_diff1 value: 33.7772 - type: nauc_ndcg_at_3_max value: 32.4741 - type: nauc_ndcg_at_3_std value: 19.264 - type: nauc_ndcg_at_3_diff1 value: 26.1486 - type: nauc_ndcg_at_5_max value: 32.6623 - type: nauc_ndcg_at_5_std value: 21.435499999999998 - type: nauc_ndcg_at_5_diff1 value: 24.0412 - type: nauc_ndcg_at_10_max value: 33.217400000000005 - type: nauc_ndcg_at_10_std value: 22.591900000000003 - type: nauc_ndcg_at_10_diff1 value: 22.3637 - type: nauc_ndcg_at_20_max value: 33.3978 - type: nauc_ndcg_at_20_std value: 22.520200000000003 - type: nauc_ndcg_at_20_diff1 value: 22.0163 - type: nauc_ndcg_at_100_max value: 33.0608 - type: nauc_ndcg_at_100_std value: 20.4305 - type: nauc_ndcg_at_100_diff1 value: 21.1175 - type: nauc_ndcg_at_1000_max value: 38.198100000000004 - type: nauc_ndcg_at_1000_std value: 26.8712 - type: nauc_ndcg_at_1000_diff1 value: 22.78 - type: nauc_map_at_1_max value: 18.898300000000003 - type: nauc_map_at_1_std value: -11.0976 - type: nauc_map_at_1_diff1 value: 55.1605 - type: nauc_map_at_3_max value: 20.451800000000002 - type: nauc_map_at_3_std value: -12.0342 - type: nauc_map_at_3_diff1 value: 45.2096 - type: nauc_map_at_5_max value: 21.199 - type: nauc_map_at_5_std value: -9.8514 - type: nauc_map_at_5_diff1 value: 42.0142 - type: nauc_map_at_10_max value: 23.1645 - type: nauc_map_at_10_std value: -5.8333 - type: nauc_map_at_10_diff1 value: 38.048 - type: nauc_map_at_20_max value: 24.9482 - type: nauc_map_at_20_std value: -1.5368 - type: nauc_map_at_20_diff1 value: 36.241299999999995 - type: nauc_map_at_100_max value: 27.1413 - type: nauc_map_at_100_std value: 5.6268 - type: nauc_map_at_100_diff1 value: 33.3298 - type: nauc_map_at_1000_max value: 28.7674 - type: nauc_map_at_1000_std value: 10.9326 - type: nauc_map_at_1000_diff1 value: 31.700899999999997 - type: nauc_recall_at_1_max value: 18.898300000000003 - type: nauc_recall_at_1_std value: -11.0976 - type: nauc_recall_at_1_diff1 value: 55.1605 - type: nauc_recall_at_3_max value: 19.4721 - type: nauc_recall_at_3_std value: -13.496 - type: nauc_recall_at_3_diff1 value: 35.0178 - type: nauc_recall_at_5_max value: 19.5024 - type: nauc_recall_at_5_std value: -12.3428 - type: nauc_recall_at_5_diff1 value: 29.517 - type: nauc_recall_at_10_max value: 21.215500000000002 - type: nauc_recall_at_10_std value: -8.7165 - type: nauc_recall_at_10_diff1 value: 24.282 - type: nauc_recall_at_20_max value: 21.735 - type: nauc_recall_at_20_std value: -5.0988999999999995 - type: nauc_recall_at_20_diff1 value: 20.3041 - type: nauc_recall_at_100_max value: 19.9243 - type: nauc_recall_at_100_std value: 3.4522999999999997 - 
type: nauc_recall_at_100_diff1 value: 5.9747 - type: nauc_recall_at_1000_max value: 21.7819 - type: nauc_recall_at_1000_std value: 13.6785 - type: nauc_recall_at_1000_diff1 value: -0.25980000000000003 - type: nauc_precision_at_1_max value: 28.624899999999997 - type: nauc_precision_at_1_std value: 12.709599999999998 - type: nauc_precision_at_1_diff1 value: 33.308 - type: nauc_precision_at_3_max value: 35.1699 - type: nauc_precision_at_3_std value: 25.9338 - type: nauc_precision_at_3_diff1 value: 18.5464 - type: nauc_precision_at_5_max value: 33.4433 - type: nauc_precision_at_5_std value: 32.4517 - type: nauc_precision_at_5_diff1 value: 12.5543 - type: nauc_precision_at_10_max value: 32.3973 - type: nauc_precision_at_10_std value: 37.7554 - type: nauc_precision_at_10_diff1 value: 6.7227 - type: nauc_precision_at_20_max value: 31.591599999999996 - type: nauc_precision_at_20_std value: 44.658 - type: nauc_precision_at_20_diff1 value: 2.2702 - type: nauc_precision_at_100_max value: 25.163600000000002 - type: nauc_precision_at_100_std value: 51.7642 - type: nauc_precision_at_100_diff1 value: -4.8361 - type: nauc_precision_at_1000_max value: 20.2984 - type: nauc_precision_at_1000_std value: 49.0469 - type: nauc_precision_at_1000_diff1 value: -6.662700000000001 - type: nauc_mrr_at_1_max value: 28.624899999999997 - type: nauc_mrr_at_1_std value: 12.709599999999998 - type: nauc_mrr_at_1_diff1 value: 33.308 - type: nauc_mrr_at_3_max value: 32.3306 - type: nauc_mrr_at_3_std value: 18.1604 - type: nauc_mrr_at_3_diff1 value: 31.128600000000002 - type: nauc_mrr_at_5_max value: 32.0504 - type: nauc_mrr_at_5_std value: 18.3022 - type: nauc_mrr_at_5_diff1 value: 30.1868 - type: nauc_mrr_at_10_max value: 32.093500000000006 - type: nauc_mrr_at_10_std value: 18.348 - type: nauc_mrr_at_10_diff1 value: 30.2307 - type: nauc_mrr_at_20_max value: 32.3491 - type: nauc_mrr_at_20_std value: 18.309800000000003 - type: nauc_mrr_at_20_diff1 value: 30.0848 - type: nauc_mrr_at_100_max value: 32.5297 - type: nauc_mrr_at_100_std value: 18.4197 - type: nauc_mrr_at_100_diff1 value: 30.03 - type: nauc_mrr_at_1000_max value: 32.502700000000004 - type: nauc_mrr_at_1000_std value: 18.4073 - type: nauc_mrr_at_1000_diff1 value: 30.059599999999996 - type: main_score value: 26.434 - task: type: Retrieval dataset: name: MTEB NQ (default) type: mteb/nq config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: ndcg_at_1 value: 9.067 - type: ndcg_at_3 value: 13.33 - type: ndcg_at_5 value: 15.773000000000001 - type: ndcg_at_10 value: 18.239 - type: ndcg_at_20 value: 20.777 - type: ndcg_at_100 value: 25.046000000000003 - type: ndcg_at_1000 value: 27.814 - type: map_at_1 value: 8.007 - type: map_at_3 value: 11.732 - type: map_at_5 value: 13.095 - type: map_at_10 value: 14.127 - type: map_at_20 value: 14.860000000000001 - type: map_at_100 value: 15.467 - type: map_at_1000 value: 15.57 - type: recall_at_1 value: 8.007 - type: recall_at_3 value: 16.553 - type: recall_at_5 value: 22.282 - type: recall_at_10 value: 29.592000000000002 - type: recall_at_20 value: 39.134 - type: recall_at_100 value: 61.307 - type: recall_at_1000 value: 82.556 - type: precision_at_1 value: 9.067 - type: precision_at_3 value: 6.441 - type: precision_at_5 value: 5.220000000000001 - type: precision_at_10 value: 3.488 - type: precision_at_20 value: 2.329 - type: precision_at_100 value: 0.734 - type: precision_at_1000 value: 0.1 - type: mrr_at_1 value: 9.0672 - type: mrr_at_3 value: 13.1277 - type: mrr_at_5 value: 14.544199999999998 - 
type: mrr_at_10 value: 15.589400000000001 - type: mrr_at_20 value: 16.2651 - type: mrr_at_100 value: 16.8195 - type: mrr_at_1000 value: 16.902800000000003 - type: nauc_ndcg_at_1_max value: 11.3832 - type: nauc_ndcg_at_1_std value: -4.1221 - type: nauc_ndcg_at_1_diff1 value: 20.5341 - type: nauc_ndcg_at_3_max value: 11.4743 - type: nauc_ndcg_at_3_std value: -4.4418 - type: nauc_ndcg_at_3_diff1 value: 16.481 - type: nauc_ndcg_at_5_max value: 12.6479 - type: nauc_ndcg_at_5_std value: -4.5466 - type: nauc_ndcg_at_5_diff1 value: 15.1785 - type: nauc_ndcg_at_10_max value: 14.3237 - type: nauc_ndcg_at_10_std value: -4.4135 - type: nauc_ndcg_at_10_diff1 value: 14.6574 - type: nauc_ndcg_at_20_max value: 15.717300000000002 - type: nauc_ndcg_at_20_std value: -3.0106 - type: nauc_ndcg_at_20_diff1 value: 14.6044 - type: nauc_ndcg_at_100_max value: 17.5878 - type: nauc_ndcg_at_100_std value: -0.36519999999999997 - type: nauc_ndcg_at_100_diff1 value: 14.5606 - type: nauc_ndcg_at_1000_max value: 17.5657 - type: nauc_ndcg_at_1000_std value: 1.1903000000000001 - type: nauc_ndcg_at_1000_diff1 value: 14.5654 - type: nauc_map_at_1_max value: 10.2386 - type: nauc_map_at_1_std value: -4.9847 - type: nauc_map_at_1_diff1 value: 20.9545 - type: nauc_map_at_3_max value: 10.9023 - type: nauc_map_at_3_std value: -4.8369 - type: nauc_map_at_3_diff1 value: 17.5991 - type: nauc_map_at_5_max value: 11.7413 - type: nauc_map_at_5_std value: -4.9516 - type: nauc_map_at_5_diff1 value: 16.7798 - type: nauc_map_at_10_max value: 12.6051 - type: nauc_map_at_10_std value: -4.9007000000000005 - type: nauc_map_at_10_diff1 value: 16.4911 - type: nauc_map_at_20_max value: 13.1256 - type: nauc_map_at_20_std value: -4.4518 - type: nauc_map_at_20_diff1 value: 16.4184 - type: nauc_map_at_100_max value: 13.4467 - type: nauc_map_at_100_std value: -3.9765 - type: nauc_map_at_100_diff1 value: 16.4427 - type: nauc_map_at_1000_max value: 13.452 - type: nauc_map_at_1000_std value: -3.8988 - type: nauc_map_at_1000_diff1 value: 16.4438 - type: nauc_recall_at_1_max value: 10.2386 - type: nauc_recall_at_1_std value: -4.9847 - type: nauc_recall_at_1_diff1 value: 20.9545 - type: nauc_recall_at_3_max value: 11.843399999999999 - type: nauc_recall_at_3_std value: -4.3091 - type: nauc_recall_at_3_diff1 value: 14.285999999999998 - type: nauc_recall_at_5_max value: 13.5182 - type: nauc_recall_at_5_std value: -4.417800000000001 - type: nauc_recall_at_5_diff1 value: 12.1453 - type: nauc_recall_at_10_max value: 17.0065 - type: nauc_recall_at_10_std value: -4.252000000000001 - type: nauc_recall_at_10_diff1 value: 11.457199999999998 - type: nauc_recall_at_20_max value: 20.3871 - type: nauc_recall_at_20_std value: -0.7614 - type: nauc_recall_at_20_diff1 value: 11.5536 - type: nauc_recall_at_100_max value: 28.3368 - type: nauc_recall_at_100_std value: 9.5722 - type: nauc_recall_at_100_diff1 value: 10.7211 - type: nauc_recall_at_1000_max value: 37.0782 - type: nauc_recall_at_1000_std value: 31.6326 - type: nauc_recall_at_1000_diff1 value: 8.82 - type: nauc_precision_at_1_max value: 11.3832 - type: nauc_precision_at_1_std value: -4.1221 - type: nauc_precision_at_1_diff1 value: 20.5341 - type: nauc_precision_at_3_max value: 12.951099999999999 - type: nauc_precision_at_3_std value: -3.4715999999999996 - type: nauc_precision_at_3_diff1 value: 14.0988 - type: nauc_precision_at_5_max value: 14.8679 - type: nauc_precision_at_5_std value: -3.9043 - type: nauc_precision_at_5_diff1 value: 11.9479 - type: nauc_precision_at_10_max value: 18.0976 - type: 
nauc_precision_at_10_std value: -3.1489999999999996 - type: nauc_precision_at_10_diff1 value: 10.7419 - type: nauc_precision_at_20_max value: 20.4974 - type: nauc_precision_at_20_std value: 1.2608 - type: nauc_precision_at_20_diff1 value: 9.8315 - type: nauc_precision_at_100_max value: 24.1911 - type: nauc_precision_at_100_std value: 11.971400000000001 - type: nauc_precision_at_100_diff1 value: 7.0899 - type: nauc_precision_at_1000_max value: 20.2919 - type: nauc_precision_at_1000_std value: 23.0171 - type: nauc_precision_at_1000_diff1 value: 1.4091 - type: nauc_mrr_at_1_max value: 11.3832 - type: nauc_mrr_at_1_std value: -4.1221 - type: nauc_mrr_at_1_diff1 value: 20.5341 - type: nauc_mrr_at_3_max value: 11.7865 - type: nauc_mrr_at_3_std value: -3.6935999999999996 - type: nauc_mrr_at_3_diff1 value: 16.8127 - type: nauc_mrr_at_5_max value: 12.518199999999998 - type: nauc_mrr_at_5_std value: -3.7152 - type: nauc_mrr_at_5_diff1 value: 15.893699999999999 - type: nauc_mrr_at_10_max value: 13.1787 - type: nauc_mrr_at_10_std value: -3.6301 - type: nauc_mrr_at_10_diff1 value: 15.617500000000001 - type: nauc_mrr_at_20_max value: 13.529399999999999 - type: nauc_mrr_at_20_std value: -3.1929 - type: nauc_mrr_at_20_diff1 value: 15.6602 - type: nauc_mrr_at_100_max value: 13.770199999999999 - type: nauc_mrr_at_100_std value: -2.9103 - type: nauc_mrr_at_100_diff1 value: 15.6841 - type: nauc_mrr_at_1000_max value: 13.7598 - type: nauc_mrr_at_1000_std value: -2.8705000000000003 - type: nauc_mrr_at_1000_diff1 value: 15.6886 - type: main_score value: 18.239 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval (default) type: mteb/quora config: default split: test revision: e4e08e0b7dbe3c8700f0daef558ff32256715259 metrics: - type: ndcg_at_1 value: 72.39 - type: ndcg_at_3 value: 76.303 - type: ndcg_at_5 value: 78.164 - type: ndcg_at_10 value: 79.946 - type: ndcg_at_20 value: 80.963 - type: ndcg_at_100 value: 82.086 - type: ndcg_at_1000 value: 82.494 - type: map_at_1 value: 62.965 - type: map_at_3 value: 72.429 - type: map_at_5 value: 74.246 - type: map_at_10 value: 75.414 - type: map_at_20 value: 75.87899999999999 - type: map_at_100 value: 76.164 - type: map_at_1000 value: 76.198 - type: recall_at_1 value: 62.965 - type: recall_at_3 value: 78.39 - type: recall_at_5 value: 83.506 - type: recall_at_10 value: 88.787 - type: recall_at_20 value: 92.223 - type: recall_at_100 value: 96.98 - type: recall_at_1000 value: 99.30099999999999 - type: precision_at_1 value: 72.39 - type: precision_at_3 value: 33.040000000000006 - type: precision_at_5 value: 21.884 - type: precision_at_10 value: 12.084999999999999 - type: precision_at_20 value: 6.49 - type: precision_at_100 value: 1.444 - type: precision_at_1000 value: 0.154 - type: mrr_at_1 value: 72.39 - type: mrr_at_3 value: 77.9883 - type: mrr_at_5 value: 78.8933 - type: mrr_at_10 value: 79.443 - type: mrr_at_20 value: 79.6218 - type: mrr_at_100 value: 79.7045 - type: mrr_at_1000 value: 79.7112 - type: nauc_ndcg_at_1_max value: 43.343199999999996 - type: nauc_ndcg_at_1_std value: -15.6476 - type: nauc_ndcg_at_1_diff1 value: 74.5603 - type: nauc_ndcg_at_3_max value: 41.4951 - type: nauc_ndcg_at_3_std value: -18.006 - type: nauc_ndcg_at_3_diff1 value: 71.4871 - type: nauc_ndcg_at_5_max value: 41.665 - type: nauc_ndcg_at_5_std value: -18.2802 - type: nauc_ndcg_at_5_diff1 value: 71.31060000000001 - type: nauc_ndcg_at_10_max value: 41.9766 - type: nauc_ndcg_at_10_std value: -17.1129 - type: nauc_ndcg_at_10_diff1 value: 71.4114 - type: nauc_ndcg_at_20_max value: 42.3933 - 
type: nauc_ndcg_at_20_std value: -16.8854 - type: nauc_ndcg_at_20_diff1 value: 71.5046 - type: nauc_ndcg_at_100_max value: 42.7267 - type: nauc_ndcg_at_100_std value: -15.7841 - type: nauc_ndcg_at_100_diff1 value: 71.7294 - type: nauc_ndcg_at_1000_max value: 42.770799999999994 - type: nauc_ndcg_at_1000_std value: -15.8694 - type: nauc_ndcg_at_1000_diff1 value: 71.8391 - type: nauc_map_at_1_max value: 34.103899999999996 - type: nauc_map_at_1_std value: -17.6429 - type: nauc_map_at_1_diff1 value: 74.37780000000001 - type: nauc_map_at_3_max value: 39.3622 - type: nauc_map_at_3_std value: -19.3706 - type: nauc_map_at_3_diff1 value: 72.3035 - type: nauc_map_at_5_max value: 40.3833 - type: nauc_map_at_5_std value: -19.126099999999997 - type: nauc_map_at_5_diff1 value: 71.99950000000001 - type: nauc_map_at_10_max value: 40.8837 - type: nauc_map_at_10_std value: -18.34 - type: nauc_map_at_10_diff1 value: 71.92150000000001 - type: nauc_map_at_20_max value: 41.14 - type: nauc_map_at_20_std value: -18.01 - type: nauc_map_at_20_diff1 value: 71.85629999999999 - type: nauc_map_at_100_max value: 41.2511 - type: nauc_map_at_100_std value: -17.6727 - type: nauc_map_at_100_diff1 value: 71.8731 - type: nauc_map_at_1000_max value: 41.2569 - type: nauc_map_at_1000_std value: -17.6477 - type: nauc_map_at_1000_diff1 value: 71.8801 - type: nauc_recall_at_1_max value: 34.103899999999996 - type: nauc_recall_at_1_std value: -17.6429 - type: nauc_recall_at_1_diff1 value: 74.37780000000001 - type: nauc_recall_at_3_max value: 37.4459 - type: nauc_recall_at_3_std value: -21.2405 - type: nauc_recall_at_3_diff1 value: 68.2773 - type: nauc_recall_at_5_max value: 38.5924 - type: nauc_recall_at_5_std value: -21.644 - type: nauc_recall_at_5_diff1 value: 66.3095 - type: nauc_recall_at_10_max value: 39.3957 - type: nauc_recall_at_10_std value: -17.0364 - type: nauc_recall_at_10_diff1 value: 64.8501 - type: nauc_recall_at_20_max value: 40.325 - type: nauc_recall_at_20_std value: -15.4228 - type: nauc_recall_at_20_diff1 value: 63.5063 - type: nauc_recall_at_100_max value: 43.7134 - type: nauc_recall_at_100_std value: 3.7923 - type: nauc_recall_at_100_diff1 value: 63.7613 - type: nauc_recall_at_1000_max value: 53.65180000000001 - type: nauc_recall_at_1000_std value: 35.6561 - type: nauc_recall_at_1000_diff1 value: 65.9936 - type: nauc_precision_at_1_max value: 43.343199999999996 - type: nauc_precision_at_1_std value: -15.6476 - type: nauc_precision_at_1_diff1 value: 74.5603 - type: nauc_precision_at_3_max value: 21.8142 - type: nauc_precision_at_3_std value: -1.1627999999999998 - type: nauc_precision_at_3_diff1 value: 9.954 - type: nauc_precision_at_5_max value: 15.2041 - type: nauc_precision_at_5_std value: 4.2947 - type: nauc_precision_at_5_diff1 value: -5.305 - type: nauc_precision_at_10_max value: 8.163499999999999 - type: nauc_precision_at_10_std value: 10.9367 - type: nauc_precision_at_10_diff1 value: -18.0036 - type: nauc_precision_at_20_max value: 3.5585 - type: nauc_precision_at_20_std value: 14.5351 - type: nauc_precision_at_20_diff1 value: -25.249700000000004 - type: nauc_precision_at_100_max value: -3.0063 - type: nauc_precision_at_100_std value: 19.791700000000002 - type: nauc_precision_at_100_diff1 value: -32.281 - type: nauc_precision_at_1000_max value: -6.468100000000001 - type: nauc_precision_at_1000_std value: 20.025100000000002 - type: nauc_precision_at_1000_diff1 value: -34.4531 - type: nauc_mrr_at_1_max value: 43.2621 - type: nauc_mrr_at_1_std value: -15.864 - type: nauc_mrr_at_1_diff1 value: 74.5603 - type: 
nauc_mrr_at_3_max value: 43.8197 - type: nauc_mrr_at_3_std value: -16.1674 - type: nauc_mrr_at_3_diff1 value: 72.9802 - type: nauc_mrr_at_5_max value: 43.9843 - type: nauc_mrr_at_5_std value: -16.042 - type: nauc_mrr_at_5_diff1 value: 72.907 - type: nauc_mrr_at_10_max value: 44.0294 - type: nauc_mrr_at_10_std value: -15.711500000000001 - type: nauc_mrr_at_10_diff1 value: 72.9915 - type: nauc_mrr_at_20_max value: 44.044200000000004 - type: nauc_mrr_at_20_std value: -15.7842 - type: nauc_mrr_at_20_diff1 value: 73.0535 - type: nauc_mrr_at_100_max value: 44.0194 - type: nauc_mrr_at_100_std value: -15.7612 - type: nauc_mrr_at_100_diff1 value: 73.0738 - type: nauc_mrr_at_1000_max value: 44.0187 - type: nauc_mrr_at_1000_std value: -15.764100000000001 - type: nauc_mrr_at_1000_diff1 value: 73.0758 - type: main_score value: 79.946 - task: type: Clustering dataset: name: MTEB RedditClustering (default) type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 20.2171 - type: v_measure_std value: 4.4216 - type: main_score value: 20.2171 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P (default) type: mteb/reddit-clustering-p2p config: default split: test revision: 385e3cb46b4cfa89021f56c4380204149d0efe33 metrics: - type: v_measure value: 38.8882 - type: v_measure_std value: 9.315 - type: main_score value: 38.8882 - task: type: Retrieval dataset: name: MTEB SCIDOCS (default) type: mteb/scidocs config: default split: test revision: f8c2fcf00f625baaa80f62ec5bd9e1fff3b8ae88 metrics: - type: ndcg_at_1 value: 15.1 - type: ndcg_at_3 value: 12.036 - type: ndcg_at_5 value: 11.007 - type: ndcg_at_10 value: 13.352 - type: ndcg_at_20 value: 15.6 - type: ndcg_at_100 value: 19.871 - type: ndcg_at_1000 value: 25.255 - type: map_at_1 value: 3.058 - type: map_at_3 value: 5.268 - type: map_at_5 value: 6.406000000000001 - type: map_at_10 value: 7.478 - type: map_at_20 value: 8.21 - type: map_at_100 value: 8.946 - type: map_at_1000 value: 9.223 - type: recall_at_1 value: 3.058 - type: recall_at_3 value: 6.793 - type: recall_at_5 value: 10.003 - type: recall_at_10 value: 14.288 - type: recall_at_20 value: 19.542 - type: recall_at_100 value: 33.413 - type: recall_at_1000 value: 59.733000000000004 - type: precision_at_1 value: 15.1 - type: precision_at_3 value: 11.167 - type: precision_at_5 value: 9.879999999999999 - type: precision_at_10 value: 7.07 - type: precision_at_20 value: 4.825 - type: precision_at_100 value: 1.649 - type: precision_at_1000 value: 0.294 - type: mrr_at_1 value: 15.1 - type: mrr_at_3 value: 20.2833 - type: mrr_at_5 value: 22.4733 - type: mrr_at_10 value: 23.6601 - type: mrr_at_20 value: 24.3772 - type: mrr_at_100 value: 24.9007 - type: mrr_at_1000 value: 24.9743 - type: nauc_ndcg_at_1_max value: 18.8537 - type: nauc_ndcg_at_1_std value: -3.2037000000000004 - type: nauc_ndcg_at_1_diff1 value: 20.8288 - type: nauc_ndcg_at_3_max value: 15.3817 - type: nauc_ndcg_at_3_std value: -3.2159 - type: nauc_ndcg_at_3_diff1 value: 18.13 - type: nauc_ndcg_at_5_max value: 17.940900000000003 - type: nauc_ndcg_at_5_std value: 0.3294 - type: nauc_ndcg_at_5_diff1 value: 16.9378 - type: nauc_ndcg_at_10_max value: 21.146 - type: nauc_ndcg_at_10_std value: 2.6954 - type: nauc_ndcg_at_10_diff1 value: 15.363399999999999 - type: nauc_ndcg_at_20_max value: 21.9075 - type: nauc_ndcg_at_20_std value: 4.9554 - type: nauc_ndcg_at_20_diff1 value: 15.4857 - type: nauc_ndcg_at_100_max value: 22.9248 - type: nauc_ndcg_at_100_std value: 
8.8094 - type: nauc_ndcg_at_100_diff1 value: 15.1255 - type: nauc_ndcg_at_1000_max value: 24.7883 - type: nauc_ndcg_at_1000_std value: 13.3551 - type: nauc_ndcg_at_1000_diff1 value: 15.1244 - type: nauc_map_at_1_max value: 19.238 - type: nauc_map_at_1_std value: -2.9537 - type: nauc_map_at_1_diff1 value: 21.3456 - type: nauc_map_at_3_max value: 16.0914 - type: nauc_map_at_3_std value: -4.2357 - type: nauc_map_at_3_diff1 value: 17.1314 - type: nauc_map_at_5_max value: 17.9317 - type: nauc_map_at_5_std value: -1.2885 - type: nauc_map_at_5_diff1 value: 15.5052 - type: nauc_map_at_10_max value: 20.1204 - type: nauc_map_at_10_std value: 0.29109999999999997 - type: nauc_map_at_10_diff1 value: 14.513200000000001 - type: nauc_map_at_20_max value: 20.6688 - type: nauc_map_at_20_std value: 1.6063 - type: nauc_map_at_20_diff1 value: 14.934800000000001 - type: nauc_map_at_100_max value: 21.2455 - type: nauc_map_at_100_std value: 3.1651 - type: nauc_map_at_100_diff1 value: 14.6507 - type: nauc_map_at_1000_max value: 21.4903 - type: nauc_map_at_1000_std value: 3.7647 - type: nauc_map_at_1000_diff1 value: 14.6354 - type: nauc_recall_at_1_max value: 19.238 - type: nauc_recall_at_1_std value: -2.9537 - type: nauc_recall_at_1_diff1 value: 21.3456 - type: nauc_recall_at_3_max value: 14.5564 - type: nauc_recall_at_3_std value: -3.2211 - type: nauc_recall_at_3_diff1 value: 17.0505 - type: nauc_recall_at_5_max value: 18.159200000000002 - type: nauc_recall_at_5_std value: 2.6766 - type: nauc_recall_at_5_diff1 value: 14.7598 - type: nauc_recall_at_10_max value: 23.6071 - type: nauc_recall_at_10_std value: 6.6582 - type: nauc_recall_at_10_diff1 value: 11.7647 - type: nauc_recall_at_20_max value: 23.5471 - type: nauc_recall_at_20_std value: 10.6906 - type: nauc_recall_at_20_diff1 value: 11.5654 - type: nauc_recall_at_100_max value: 23.2746 - type: nauc_recall_at_100_std value: 18.3139 - type: nauc_recall_at_100_diff1 value: 10.2364 - type: nauc_recall_at_1000_max value: 27.2333 - type: nauc_recall_at_1000_std value: 32.5351 - type: nauc_recall_at_1000_diff1 value: 8.7211 - type: nauc_precision_at_1_max value: 18.8537 - type: nauc_precision_at_1_std value: -3.2037000000000004 - type: nauc_precision_at_1_diff1 value: 20.8288 - type: nauc_precision_at_3_max value: 14.260200000000001 - type: nauc_precision_at_3_std value: -3.1767 - type: nauc_precision_at_3_diff1 value: 16.9826 - type: nauc_precision_at_5_max value: 17.999399999999998 - type: nauc_precision_at_5_std value: 2.7119999999999997 - type: nauc_precision_at_5_diff1 value: 14.685300000000002 - type: nauc_precision_at_10_max value: 23.5629 - type: nauc_precision_at_10_std value: 6.7014000000000005 - type: nauc_precision_at_10_diff1 value: 11.6848 - type: nauc_precision_at_20_max value: 23.1819 - type: nauc_precision_at_20_std value: 10.478 - type: nauc_precision_at_20_diff1 value: 11.6263 - type: nauc_precision_at_100_max value: 22.7954 - type: nauc_precision_at_100_std value: 18.215500000000002 - type: nauc_precision_at_100_diff1 value: 10.526299999999999 - type: nauc_precision_at_1000_max value: 26.4283 - type: nauc_precision_at_1000_std value: 31.9492 - type: nauc_precision_at_1000_diff1 value: 9.031799999999999 - type: nauc_mrr_at_1_max value: 18.8537 - type: nauc_mrr_at_1_std value: -3.2037000000000004 - type: nauc_mrr_at_1_diff1 value: 20.8288 - type: nauc_mrr_at_3_max value: 16.253500000000003 - type: nauc_mrr_at_3_std value: -2.3413 - type: nauc_mrr_at_3_diff1 value: 20.333399999999997 - type: nauc_mrr_at_5_max value: 17.2285 - type: nauc_mrr_at_5_std 
value: -0.5249 - type: nauc_mrr_at_5_diff1 value: 20.119 - type: nauc_mrr_at_10_max value: 18.351100000000002 - type: nauc_mrr_at_10_std value: 0.0489 - type: nauc_mrr_at_10_diff1 value: 19.711000000000002 - type: nauc_mrr_at_20_max value: 18.409100000000002 - type: nauc_mrr_at_20_std value: 0.41079999999999994 - type: nauc_mrr_at_20_diff1 value: 19.5248 - type: nauc_mrr_at_100_max value: 18.404799999999998 - type: nauc_mrr_at_100_std value: 0.4336 - type: nauc_mrr_at_100_diff1 value: 19.5129 - type: nauc_mrr_at_1000_max value: 18.3706 - type: nauc_mrr_at_1000_std value: 0.41529999999999995 - type: nauc_mrr_at_1000_diff1 value: 19.5103 - type: main_score value: 13.352 - task: type: STS dataset: name: MTEB SICK-R (default) type: mteb/sickr-sts config: default split: test revision: 20a6d6f312dd54037fe07a32d58e5e168867909d metrics: - type: pearson value: 73.39529999999999 - type: spearman value: 63.871599999999994 - type: cosine_pearson value: 73.39529999999999 - type: cosine_spearman value: 63.871500000000005 - type: manhattan_pearson value: 62.5861 - type: manhattan_spearman value: 56.714600000000004 - type: euclidean_pearson value: 62.606899999999996 - type: euclidean_spearman value: 56.714200000000005 - type: main_score value: 63.871500000000005 - task: type: STS dataset: name: MTEB STS12 (default) type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: pearson value: 72.35770000000001 - type: spearman value: 63.606899999999996 - type: cosine_pearson value: 72.35770000000001 - type: cosine_spearman value: 63.610299999999995 - type: manhattan_pearson value: 59.8404 - type: manhattan_spearman value: 56.85059999999999 - type: euclidean_pearson value: 59.8116 - type: euclidean_spearman value: 56.691 - type: main_score value: 63.610299999999995 - task: type: STS dataset: name: MTEB STS13 (default) type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: pearson value: 76.4727 - type: spearman value: 76.983 - type: cosine_pearson value: 76.4727 - type: cosine_spearman value: 76.983 - type: manhattan_pearson value: 49.4803 - type: manhattan_spearman value: 51.1301 - type: euclidean_pearson value: 49.4542 - type: euclidean_spearman value: 51.19669999999999 - type: main_score value: 76.983 - task: type: STS dataset: name: MTEB STS14 (default) type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: pearson value: 75.777 - type: spearman value: 71.2099 - type: cosine_pearson value: 75.777 - type: cosine_spearman value: 71.2099 - type: manhattan_pearson value: 52.475899999999996 - type: manhattan_spearman value: 53.8072 - type: euclidean_pearson value: 52.416799999999995 - type: euclidean_spearman value: 53.725500000000004 - type: main_score value: 71.2099 - task: type: STS dataset: name: MTEB STS15 (default) type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: pearson value: 80.1072 - type: spearman value: 80.735 - type: cosine_pearson value: 80.1072 - type: cosine_spearman value: 80.7349 - type: manhattan_pearson value: 50.711600000000004 - type: manhattan_spearman value: 53.491299999999995 - type: euclidean_pearson value: 50.6255 - type: euclidean_spearman value: 53.47539999999999 - type: main_score value: 80.7349 - task: type: STS dataset: name: MTEB STS16 (default) type: mteb/sts16-sts config: default split: test revision: 
4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: pearson value: 73.1658 - type: spearman value: 74.2121 - type: cosine_pearson value: 73.1658 - type: cosine_spearman value: 74.2121 - type: manhattan_pearson value: 43.4074 - type: manhattan_spearman value: 47.193200000000004 - type: euclidean_pearson value: 43.438300000000005 - type: euclidean_spearman value: 47.2757 - type: main_score value: 74.2121 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: faeb762787bd10488a50c8b5be4a3b82e411949c metrics: - type: pearson value: 81.8156 - type: spearman value: 81.9457 - type: cosine_pearson value: 81.8156 - type: cosine_spearman value: 81.9457 - type: manhattan_pearson value: 59.4332 - type: manhattan_spearman value: 60.5687 - type: euclidean_pearson value: 59.2942 - type: euclidean_spearman value: 60.39679999999999 - type: main_score value: 81.9457 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: de9d86b3b84231dc21f76c7b7af1f28e2f57f6e3 metrics: - type: pearson value: 48.9285 - type: spearman value: 55.862500000000004 - type: cosine_pearson value: 48.9285 - type: cosine_spearman value: 55.862500000000004 - type: manhattan_pearson value: 43.082300000000004 - type: manhattan_spearman value: 51.1876 - type: euclidean_pearson value: 43.2313 - type: euclidean_spearman value: 51.094899999999996 - type: main_score value: 55.862500000000004 - task: type: STS dataset: name: MTEB STSBenchmark (default) type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: pearson value: 73.44380000000001 - type: spearman value: 71.9343 - type: cosine_pearson value: 73.44380000000001 - type: cosine_spearman value: 71.9345 - type: manhattan_pearson value: 52.233799999999995 - type: manhattan_spearman value: 51.7687 - type: euclidean_pearson value: 52.2753 - type: euclidean_spearman value: 51.845 - type: main_score value: 71.9345 - task: type: Reranking dataset: name: MTEB SciDocsRR (default) type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 71.4557 - type: mrr value: 90.6219 - type: nAUC_map_max value: 54.74830000000001 - type: nAUC_map_std value: 65.2558 - type: nAUC_map_diff1 value: 10.2936 - type: nAUC_mrr_max value: 75.10900000000001 - type: nAUC_mrr_std value: 69.6523 - type: nAUC_mrr_diff1 value: 49.4991 - type: main_score value: 71.4557 - task: type: Retrieval dataset: name: MTEB SciFact (default) type: mteb/scifact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: ndcg_at_1 value: 43.667 - type: ndcg_at_3 value: 52.102000000000004 - type: ndcg_at_5 value: 54.751000000000005 - type: ndcg_at_10 value: 57.422 - type: ndcg_at_20 value: 59.425 - type: ndcg_at_100 value: 61.166 - type: ndcg_at_1000 value: 62.244 - type: map_at_1 value: 41.888999999999996 - type: map_at_3 value: 49.435 - type: map_at_5 value: 51.029 - type: map_at_10 value: 52.190000000000005 - type: map_at_20 value: 52.797000000000004 - type: map_at_100 value: 53.03 - type: map_at_1000 value: 53.069 - type: recall_at_1 value: 41.888999999999996 - type: recall_at_3 value: 57.916999999999994 - type: recall_at_5 value: 64.372 - type: recall_at_10 value: 72.311 - type: recall_at_20 value: 79.97800000000001 - type: recall_at_100 value: 89.333 - type: recall_at_1000 value: 97.867 - type: precision_at_1 value: 43.667 - 
type: precision_at_3 value: 20.778 - type: precision_at_5 value: 14.066999999999998 - type: precision_at_10 value: 8.033 - type: precision_at_20 value: 4.45 - type: precision_at_100 value: 1.0030000000000001 - type: precision_at_1000 value: 0.11 - type: mrr_at_1 value: 43.666700000000006 - type: mrr_at_3 value: 50.9444 - type: mrr_at_5 value: 52.3444 - type: mrr_at_10 value: 53.3852 - type: mrr_at_20 value: 53.8864 - type: mrr_at_100 value: 54.0887 - type: mrr_at_1000 value: 54.11749999999999 - type: nauc_ndcg_at_1_max value: 36.6444 - type: nauc_ndcg_at_1_std value: -7.4722 - type: nauc_ndcg_at_1_diff1 value: 63.631099999999996 - type: nauc_ndcg_at_3_max value: 37.2859 - type: nauc_ndcg_at_3_std value: -11.2775 - type: nauc_ndcg_at_3_diff1 value: 56.352999999999994 - type: nauc_ndcg_at_5_max value: 36.7832 - type: nauc_ndcg_at_5_std value: -12.310699999999999 - type: nauc_ndcg_at_5_diff1 value: 55.41740000000001 - type: nauc_ndcg_at_10_max value: 37.9586 - type: nauc_ndcg_at_10_std value: -9.7483 - type: nauc_ndcg_at_10_diff1 value: 56.8082 - type: nauc_ndcg_at_20_max value: 38.4072 - type: nauc_ndcg_at_20_std value: -7.473299999999999 - type: nauc_ndcg_at_20_diff1 value: 56.4974 - type: nauc_ndcg_at_100_max value: 38.5583 - type: nauc_ndcg_at_100_std value: -5.521100000000001 - type: nauc_ndcg_at_100_diff1 value: 56.8808 - type: nauc_ndcg_at_1000_max value: 38.580999999999996 - type: nauc_ndcg_at_1000_std value: -6.6578 - type: nauc_ndcg_at_1000_diff1 value: 57.3412 - type: nauc_map_at_1_max value: 35.4069 - type: nauc_map_at_1_std value: -11.9598 - type: nauc_map_at_1_diff1 value: 62.351299999999995 - type: nauc_map_at_3_max value: 36.3612 - type: nauc_map_at_3_std value: -12.6999 - type: nauc_map_at_3_diff1 value: 57.918099999999995 - type: nauc_map_at_5_max value: 36.268299999999996 - type: nauc_map_at_5_std value: -12.921199999999999 - type: nauc_map_at_5_diff1 value: 57.496 - type: nauc_map_at_10_max value: 36.918099999999995 - type: nauc_map_at_10_std value: -11.6299 - type: nauc_map_at_10_diff1 value: 58.1148 - type: nauc_map_at_20_max value: 37.060900000000004 - type: nauc_map_at_20_std value: -10.8228 - type: nauc_map_at_20_diff1 value: 58.0205 - type: nauc_map_at_100_max value: 37.085499999999996 - type: nauc_map_at_100_std value: -10.5358 - type: nauc_map_at_100_diff1 value: 58.095 - type: nauc_map_at_1000_max value: 37.1083 - type: nauc_map_at_1000_std value: -10.5578 - type: nauc_map_at_1000_diff1 value: 58.1224 - type: nauc_recall_at_1_max value: 35.4069 - type: nauc_recall_at_1_std value: -11.9598 - type: nauc_recall_at_1_diff1 value: 62.351299999999995 - type: nauc_recall_at_3_max value: 37.6511 - type: nauc_recall_at_3_std value: -13.3993 - type: nauc_recall_at_3_diff1 value: 50.4572 - type: nauc_recall_at_5_max value: 35.8548 - type: nauc_recall_at_5_std value: -16.1098 - type: nauc_recall_at_5_diff1 value: 47.2106 - type: nauc_recall_at_10_max value: 38.9793 - type: nauc_recall_at_10_std value: -8.1869 - type: nauc_recall_at_10_diff1 value: 50.5379 - type: nauc_recall_at_20_max value: 42.3127 - type: nauc_recall_at_20_std value: 4.1918999999999995 - type: nauc_recall_at_20_diff1 value: 47.5366 - type: nauc_recall_at_100_max value: 48.4392 - type: nauc_recall_at_100_std value: 37.5486 - type: nauc_recall_at_100_diff1 value: 46.853699999999996 - type: nauc_recall_at_1000_max value: 70.1389 - type: nauc_recall_at_1000_std value: 81.7519 - type: nauc_recall_at_1000_diff1 value: 46.0741 - type: nauc_precision_at_1_max value: 36.6444 - type: nauc_precision_at_1_std value: 
-7.4722 - type: nauc_precision_at_1_diff1 value: 63.631099999999996 - type: nauc_precision_at_3_max value: 37.9141 - type: nauc_precision_at_3_std value: -2.6281 - type: nauc_precision_at_3_diff1 value: 45.406600000000005 - type: nauc_precision_at_5_max value: 35.0402 - type: nauc_precision_at_5_std value: 0.7128 - type: nauc_precision_at_5_diff1 value: 36.686099999999996 - type: nauc_precision_at_10_max value: 37.4825 - type: nauc_precision_at_10_std value: 15.613199999999999 - type: nauc_precision_at_10_diff1 value: 33.1716 - type: nauc_precision_at_20_max value: 36.1575 - type: nauc_precision_at_20_std value: 30.4446 - type: nauc_precision_at_20_diff1 value: 23.3224 - type: nauc_precision_at_100_max value: 29.5019 - type: nauc_precision_at_100_std value: 52.942 - type: nauc_precision_at_100_diff1 value: 9.0284 - type: nauc_precision_at_1000_max value: 20.350099999999998 - type: nauc_precision_at_1000_std value: 52.2915 - type: nauc_precision_at_1000_diff1 value: -8.6009 - type: nauc_mrr_at_1_max value: 36.6444 - type: nauc_mrr_at_1_std value: -7.4722 - type: nauc_mrr_at_1_diff1 value: 63.631099999999996 - type: nauc_mrr_at_3_max value: 38.016299999999994 - type: nauc_mrr_at_3_std value: -8.0229 - type: nauc_mrr_at_3_diff1 value: 58.757400000000004 - type: nauc_mrr_at_5_max value: 37.433899999999994 - type: nauc_mrr_at_5_std value: -8.1996 - type: nauc_mrr_at_5_diff1 value: 58.235899999999994 - type: nauc_mrr_at_10_max value: 37.7997 - type: nauc_mrr_at_10_std value: -7.542699999999999 - type: nauc_mrr_at_10_diff1 value: 58.8486 - type: nauc_mrr_at_20_max value: 37.8879 - type: nauc_mrr_at_20_std value: -7.133000000000001 - type: nauc_mrr_at_20_diff1 value: 58.834900000000005 - type: nauc_mrr_at_100_max value: 37.8627 - type: nauc_mrr_at_100_std value: -6.9667 - type: nauc_mrr_at_100_diff1 value: 58.880900000000004 - type: nauc_mrr_at_1000_max value: 37.8675 - type: nauc_mrr_at_1000_std value: -6.9817 - type: nauc_mrr_at_1000_diff1 value: 58.904500000000006 - type: main_score value: 57.422 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions (default) type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: similarity_accuracy value: 99.6703 - type: similarity_accuracy_threshold value: 81.69669999999999 - type: similarity_f1 value: 82.5479 - type: similarity_f1_threshold value: 80.97919999999999 - type: similarity_precision value: 85.6069 - type: similarity_recall value: 79.7 - type: similarity_ap value: 87.6918 - type: cosine_accuracy value: 99.6703 - type: cosine_accuracy_threshold value: 81.69669999999999 - type: cosine_f1 value: 82.5479 - type: cosine_f1_threshold value: 80.97919999999999 - type: cosine_precision value: 85.6069 - type: cosine_recall value: 79.7 - type: cosine_ap value: 87.6918 - type: manhattan_accuracy value: 99.4327 - type: manhattan_accuracy_threshold value: 2292.4838999999997 - type: manhattan_f1 value: 66.0851 - type: manhattan_f1_threshold value: 2517.333 - type: manhattan_precision value: 72.6619 - type: manhattan_recall value: 60.6 - type: manhattan_ap value: 68.1683 - type: euclidean_accuracy value: 99.4327 - type: euclidean_accuracy_threshold value: 105.6427 - type: euclidean_f1 value: 66.1605 - type: euclidean_f1_threshold value: 114.9346 - type: euclidean_precision value: 72.2749 - type: euclidean_recall value: 61.0 - type: euclidean_ap value: 68.2419 - type: dot_accuracy value: 99.0168 - type: dot_accuracy_threshold value: 
1011.5417000000001 - type: dot_f1 value: 18.6459 - type: dot_f1_threshold value: 554.0581999999999 - type: dot_precision value: 20.9476 - type: dot_recall value: 16.8 - type: dot_ap value: 11.5838 - type: max_accuracy value: 99.6703 - type: max_f1 value: 82.5479 - type: max_precision value: 85.6069 - type: max_recall value: 79.7 - type: max_ap value: 87.6918 - type: main_score value: 87.6918 - task: type: Clustering dataset: name: MTEB StackExchangeClustering (default) type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 27.147700000000004 - type: v_measure_std value: 4.3151 - type: main_score value: 27.147700000000004 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P (default) type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 28.9253 - type: v_measure_std value: 1.6500000000000001 - type: main_score value: 28.9253 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions (default) type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 42.7933 - type: mrr value: 43.2531 - type: nAUC_map_max value: 15.137400000000001 - type: nAUC_map_std value: 4.6048 - type: nAUC_map_diff1 value: 31.665100000000002 - type: nAUC_mrr_max value: 16.429299999999998 - type: nAUC_mrr_std value: 4.943899999999999 - type: nAUC_mrr_diff1 value: 30.8849 - type: main_score value: 42.7933 - task: type: Summarization dataset: name: MTEB SummEval (default) type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: pearson value: 31.8891 - type: spearman value: 30.635299999999997 - type: cosine_spearman value: 30.635299999999997 - type: cosine_pearson value: 31.8891 - type: dot_spearman value: 23.1495 - type: dot_pearson value: 20.2811 - type: main_score value: 30.635299999999997 - task: type: Retrieval dataset: name: MTEB TRECCOVID (default) type: mteb/trec-covid config: default split: test revision: bb9466bac8153a0349341eb1b22e06409e78ef4e metrics: - type: ndcg_at_1 value: 60.0 - type: ndcg_at_3 value: 56.592 - type: ndcg_at_5 value: 52.15 - type: ndcg_at_10 value: 48.264 - type: ndcg_at_20 value: 43.568 - type: ndcg_at_100 value: 31.196 - type: ndcg_at_1000 value: 26.101000000000003 - type: map_at_1 value: 0.153 - type: map_at_3 value: 0.4 - type: map_at_5 value: 0.601 - type: map_at_10 value: 1.016 - type: map_at_20 value: 1.6099999999999999 - type: map_at_100 value: 4.169 - type: map_at_1000 value: 9.733 - type: recall_at_1 value: 0.153 - type: recall_at_3 value: 0.42300000000000004 - type: recall_at_5 value: 0.6629999999999999 - type: recall_at_10 value: 1.201 - type: recall_at_20 value: 2.022 - type: recall_at_100 value: 6.5409999999999995 - type: recall_at_1000 value: 24.422 - type: precision_at_1 value: 64.0 - type: precision_at_3 value: 58.667 - type: precision_at_5 value: 54.0 - type: precision_at_10 value: 49.8 - type: precision_at_20 value: 44.3 - type: precision_at_100 value: 31.180000000000003 - type: precision_at_1000 value: 12.21 - type: mrr_at_1 value: 64.0 - type: mrr_at_3 value: 68.6667 - type: mrr_at_5 value: 69.9667 - type: mrr_at_10 value: 71.2222 - type: mrr_at_20 value: 71.3651 - type: mrr_at_100 value: 71.4965 - type: mrr_at_1000 value: 71.51429999999999 - type: nauc_ndcg_at_1_max value: 37.0018 - type: 
nauc_ndcg_at_1_std value: 3.0042 - type: nauc_ndcg_at_1_diff1 value: 1.0129000000000001 - type: nauc_ndcg_at_3_max value: 42.3179 - type: nauc_ndcg_at_3_std value: 1.1211 - type: nauc_ndcg_at_3_diff1 value: -1.3197999999999999 - type: nauc_ndcg_at_5_max value: 38.2867 - type: nauc_ndcg_at_5_std value: 1.436 - type: nauc_ndcg_at_5_diff1 value: -0.635 - type: nauc_ndcg_at_10_max value: 36.545100000000005 - type: nauc_ndcg_at_10_std value: 9.4313 - type: nauc_ndcg_at_10_diff1 value: 0.7185 - type: nauc_ndcg_at_20_max value: 28.841499999999996 - type: nauc_ndcg_at_20_std value: 14.584 - type: nauc_ndcg_at_20_diff1 value: 0.2278 - type: nauc_ndcg_at_100_max value: 22.2284 - type: nauc_ndcg_at_100_std value: 30.9548 - type: nauc_ndcg_at_100_diff1 value: 1.7124000000000001 - type: nauc_ndcg_at_1000_max value: 7.9275 - type: nauc_ndcg_at_1000_std value: 43.918 - type: nauc_ndcg_at_1000_diff1 value: 1.1608 - type: nauc_map_at_1_max value: 16.718700000000002 - type: nauc_map_at_1_std value: -14.5026 - type: nauc_map_at_1_diff1 value: 6.9494 - type: nauc_map_at_3_max value: 26.3749 - type: nauc_map_at_3_std value: -14.2379 - type: nauc_map_at_3_diff1 value: 2.6883 - type: nauc_map_at_5_max value: 26.8639 - type: nauc_map_at_5_std value: -11.9289 - type: nauc_map_at_5_diff1 value: -0.5275 - type: nauc_map_at_10_max value: 28.7924 - type: nauc_map_at_10_std value: -6.2317 - type: nauc_map_at_10_diff1 value: 0.153 - type: nauc_map_at_20_max value: 24.3923 - type: nauc_map_at_20_std value: 1.5524 - type: nauc_map_at_20_diff1 value: -0.7799999999999999 - type: nauc_map_at_100_max value: 14.5538 - type: nauc_map_at_100_std value: 29.851499999999998 - type: nauc_map_at_100_diff1 value: -1.5013 - type: nauc_map_at_1000_max value: 6.609800000000001 - type: nauc_map_at_1000_std value: 50.8853 - type: nauc_map_at_1000_diff1 value: 2.2463 - type: nauc_recall_at_1_max value: 16.718700000000002 - type: nauc_recall_at_1_std value: -14.5026 - type: nauc_recall_at_1_diff1 value: 6.9494 - type: nauc_recall_at_3_max value: 26.313 - type: nauc_recall_at_3_std value: -16.5391 - type: nauc_recall_at_3_diff1 value: -0.0947 - type: nauc_recall_at_5_max value: 27.136 - type: nauc_recall_at_5_std value: -13.486999999999998 - type: nauc_recall_at_5_diff1 value: -2.2484 - type: nauc_recall_at_10_max value: 27.9019 - type: nauc_recall_at_10_std value: -7.2991 - type: nauc_recall_at_10_diff1 value: 0.35729999999999995 - type: nauc_recall_at_20_max value: 24.1923 - type: nauc_recall_at_20_std value: 0.3075 - type: nauc_recall_at_20_diff1 value: -2.6993 - type: nauc_recall_at_100_max value: 15.928400000000002 - type: nauc_recall_at_100_std value: 24.5423 - type: nauc_recall_at_100_diff1 value: -4.0408 - type: nauc_recall_at_1000_max value: -0.2523 - type: nauc_recall_at_1000_std value: 49.0728 - type: nauc_recall_at_1000_diff1 value: -0.1562 - type: nauc_precision_at_1_max value: 42.5437 - type: nauc_precision_at_1_std value: 0.859 - type: nauc_precision_at_1_diff1 value: -7.6319 - type: nauc_precision_at_3_max value: 46.4231 - type: nauc_precision_at_3_std value: -2.6254 - type: nauc_precision_at_3_diff1 value: -5.129700000000001 - type: nauc_precision_at_5_max value: 40.022600000000004 - type: nauc_precision_at_5_std value: 1.4931 - type: nauc_precision_at_5_diff1 value: -5.634399999999999 - type: nauc_precision_at_10_max value: 37.8846 - type: nauc_precision_at_10_std value: 11.4085 - type: nauc_precision_at_10_diff1 value: -2.3909 - type: nauc_precision_at_20_max value: 26.971400000000003 - type: nauc_precision_at_20_std value: 
17.3784 - type: nauc_precision_at_20_diff1 value: -1.5310000000000001 - type: nauc_precision_at_100_max value: 19.9237 - type: nauc_precision_at_100_std value: 35.952400000000004 - type: nauc_precision_at_100_diff1 value: 1.4594 - type: nauc_precision_at_1000_max value: 6.1676 - type: nauc_precision_at_1000_std value: 50.53959999999999 - type: nauc_precision_at_1000_diff1 value: 3.8484 - type: nauc_mrr_at_1_max value: 42.5437 - type: nauc_mrr_at_1_std value: 0.859 - type: nauc_mrr_at_1_diff1 value: -7.6319 - type: nauc_mrr_at_3_max value: 44.3255 - type: nauc_mrr_at_3_std value: -4.5994 - type: nauc_mrr_at_3_diff1 value: -12.2252 - type: nauc_mrr_at_5_max value: 45.7817 - type: nauc_mrr_at_5_std value: -3.1611000000000002 - type: nauc_mrr_at_5_diff1 value: -10.706100000000001 - type: nauc_mrr_at_10_max value: 45.5444 - type: nauc_mrr_at_10_std value: -1.1735 - type: nauc_mrr_at_10_diff1 value: -9.6912 - type: nauc_mrr_at_20_max value: 45.3001 - type: nauc_mrr_at_20_std value: -0.8477999999999999 - type: nauc_mrr_at_20_diff1 value: -8.7214 - type: nauc_mrr_at_100_max value: 45.3697 - type: nauc_mrr_at_100_std value: -1.2326 - type: nauc_mrr_at_100_diff1 value: -9.1853 - type: nauc_mrr_at_1000_max value: 45.356 - type: nauc_mrr_at_1000_std value: -1.2729000000000001 - type: nauc_mrr_at_1000_diff1 value: -9.2226 - type: main_score value: 48.264 - task: type: Retrieval dataset: name: MTEB Touche2020 (default) type: mteb/touche2020 config: default split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: ndcg_at_1 value: 13.264999999999999 - type: ndcg_at_3 value: 16.817 - type: ndcg_at_5 value: 17.718999999999998 - type: ndcg_at_10 value: 17.318 - type: ndcg_at_20 value: 18.445 - type: ndcg_at_100 value: 28.137 - type: ndcg_at_1000 value: 41.744 - type: map_at_1 value: 1.335 - type: map_at_3 value: 2.94 - type: map_at_5 value: 4.37 - type: map_at_10 value: 6.447 - type: map_at_20 value: 8.141 - type: map_at_100 value: 10.428999999999998 - type: map_at_1000 value: 12.23 - type: recall_at_1 value: 1.335 - type: recall_at_3 value: 4.05 - type: recall_at_5 value: 7.507999999999999 - type: recall_at_10 value: 12.862000000000002 - type: recall_at_20 value: 18.953999999999997 - type: recall_at_100 value: 40.384 - type: recall_at_1000 value: 82.421 - type: precision_at_1 value: 16.326999999999998 - type: precision_at_3 value: 21.088 - type: precision_at_5 value: 21.224 - type: precision_at_10 value: 17.755000000000003 - type: precision_at_20 value: 13.264999999999999 - type: precision_at_100 value: 6.5920000000000005 - type: precision_at_1000 value: 1.516 - type: mrr_at_1 value: 16.3265 - type: mrr_at_3 value: 29.251700000000003 - type: mrr_at_5 value: 32.9252 - type: mrr_at_10 value: 34.613699999999994 - type: mrr_at_20 value: 35.3587 - type: mrr_at_100 value: 35.6307 - type: mrr_at_1000 value: 35.6307 - type: nauc_ndcg_at_1_max value: -32.3322 - type: nauc_ndcg_at_1_std value: -13.9866 - type: nauc_ndcg_at_1_diff1 value: -21.525 - type: nauc_ndcg_at_3_max value: -33.6213 - type: nauc_ndcg_at_3_std value: -9.2265 - type: nauc_ndcg_at_3_diff1 value: -7.9922 - type: nauc_ndcg_at_5_max value: -38.3363 - type: nauc_ndcg_at_5_std value: -19.017999999999997 - type: nauc_ndcg_at_5_diff1 value: 0.7867000000000001 - type: nauc_ndcg_at_10_max value: -45.460699999999996 - type: nauc_ndcg_at_10_std value: -36.0452 - type: nauc_ndcg_at_10_diff1 value: 11.525599999999999 - type: nauc_ndcg_at_20_max value: -43.7997 - type: nauc_ndcg_at_20_std value: -39.293499999999995 - type: 
nauc_ndcg_at_20_diff1 value: 18.019099999999998 - type: nauc_ndcg_at_100_max value: -47.180499999999995 - type: nauc_ndcg_at_100_std value: -31.8569 - type: nauc_ndcg_at_100_diff1 value: 14.1121 - type: nauc_ndcg_at_1000_max value: -40.8476 - type: nauc_ndcg_at_1000_std value: -21.2172 - type: nauc_ndcg_at_1000_diff1 value: 20.3064 - type: nauc_map_at_1_max value: -39.5068 - type: nauc_map_at_1_std value: -16.150000000000002 - type: nauc_map_at_1_diff1 value: -31.249900000000004 - type: nauc_map_at_3_max value: -41.2738 - type: nauc_map_at_3_std value: -23.5467 - type: nauc_map_at_3_diff1 value: -21.5959 - type: nauc_map_at_5_max value: -45.9079 - type: nauc_map_at_5_std value: -28.181099999999997 - type: nauc_map_at_5_diff1 value: -14.3231 - type: nauc_map_at_10_max value: -45.8169 - type: nauc_map_at_10_std value: -41.293400000000005 - type: nauc_map_at_10_diff1 value: -0.7166 - type: nauc_map_at_20_max value: -42.233900000000006 - type: nauc_map_at_20_std value: -42.2579 - type: nauc_map_at_20_diff1 value: 9.9162 - type: nauc_map_at_100_max value: -42.6044 - type: nauc_map_at_100_std value: -39.921 - type: nauc_map_at_100_diff1 value: 10.408900000000001 - type: nauc_map_at_1000_max value: -41.4171 - type: nauc_map_at_1000_std value: -37.167899999999996 - type: nauc_map_at_1000_diff1 value: 11.7185 - type: nauc_recall_at_1_max value: -39.5068 - type: nauc_recall_at_1_std value: -16.150000000000002 - type: nauc_recall_at_1_diff1 value: -31.249900000000004 - type: nauc_recall_at_3_max value: -38.8655 - type: nauc_recall_at_3_std value: -21.6066 - type: nauc_recall_at_3_diff1 value: -11.395900000000001 - type: nauc_recall_at_5_max value: -47.9991 - type: nauc_recall_at_5_std value: -32.9137 - type: nauc_recall_at_5_diff1 value: -1.0116 - type: nauc_recall_at_10_max value: -49.586999999999996 - type: nauc_recall_at_10_std value: -48.6293 - type: nauc_recall_at_10_diff1 value: 13.092699999999999 - type: nauc_recall_at_20_max value: -45.1018 - type: nauc_recall_at_20_std value: -46.1638 - type: nauc_recall_at_20_diff1 value: 20.9848 - type: nauc_recall_at_100_max value: -48.106700000000004 - type: nauc_recall_at_100_std value: -30.618699999999997 - type: nauc_recall_at_100_diff1 value: 8.3225 - type: nauc_recall_at_1000_max value: -35.183 - type: nauc_recall_at_1000_std value: 9.1089 - type: nauc_recall_at_1000_diff1 value: 14.8164 - type: nauc_precision_at_1_max value: -36.7404 - type: nauc_precision_at_1_std value: -20.7164 - type: nauc_precision_at_1_diff1 value: -24.9514 - type: nauc_precision_at_3_max value: -32.1394 - type: nauc_precision_at_3_std value: -14.9321 - type: nauc_precision_at_3_diff1 value: -5.2914 - type: nauc_precision_at_5_max value: -39.6017 - type: nauc_precision_at_5_std value: -27.8755 - type: nauc_precision_at_5_diff1 value: 6.2789 - type: nauc_precision_at_10_max value: -42.565799999999996 - type: nauc_precision_at_10_std value: -45.101200000000006 - type: nauc_precision_at_10_diff1 value: 18.4024 - type: nauc_precision_at_20_max value: -36.074 - type: nauc_precision_at_20_std value: -41.6858 - type: nauc_precision_at_20_diff1 value: 29.625899999999998 - type: nauc_precision_at_100_max value: -20.7563 - type: nauc_precision_at_100_std value: -6.5164 - type: nauc_precision_at_100_diff1 value: 13.5108 - type: nauc_precision_at_1000_max value: 41.492200000000004 - type: nauc_precision_at_1000_std value: 45.918 - type: nauc_precision_at_1000_diff1 value: 9.314400000000001 - type: nauc_mrr_at_1_max value: -36.7404 - type: nauc_mrr_at_1_std value: -20.7164 - type: 
nauc_mrr_at_1_diff1 value: -24.9514 - type: nauc_mrr_at_3_max value: -34.8748 - type: nauc_mrr_at_3_std value: -11.2167 - type: nauc_mrr_at_3_diff1 value: -14.4811 - type: nauc_mrr_at_5_max value: -39.5232 - type: nauc_mrr_at_5_std value: -18.9591 - type: nauc_mrr_at_5_diff1 value: -13.2719 - type: nauc_mrr_at_10_max value: -41.7821 - type: nauc_mrr_at_10_std value: -18.368399999999998 - type: nauc_mrr_at_10_diff1 value: -13.4359 - type: nauc_mrr_at_20_max value: -42.8581 - type: nauc_mrr_at_20_std value: -18.6052 - type: nauc_mrr_at_20_diff1 value: -13.6098 - type: nauc_mrr_at_100_max value: -42.0696 - type: nauc_mrr_at_100_std value: -18.1447 - type: nauc_mrr_at_100_diff1 value: -14.102500000000001 - type: nauc_mrr_at_1000_max value: -42.0696 - type: nauc_mrr_at_1000_std value: -18.1447 - type: nauc_mrr_at_1000_diff1 value: -14.102500000000001 - type: main_score value: 17.318 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification (default) type: mteb/toxic_conversations_50k config: default split: test revision: edfaf9da55d3dd50d43143d90c1ac476895ae6de metrics: - type: accuracy value: 74.0283 - type: f1 value: 54.813100000000006 - type: f1_weighted value: 79.4125 - type: ap value: 12.750800000000002 - type: ap_weighted value: 12.750800000000002 - type: main_score value: 74.0283 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification (default) type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 52.818299999999994 - type: f1 value: 52.8999 - type: f1_weighted value: 52.223299999999995 - type: main_score value: 52.818299999999994 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering (default) type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 14.5905 - type: v_measure_std value: 1.0532 - type: main_score value: 14.5905 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 (default) type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: similarity_accuracy value: 80.3481 - type: similarity_accuracy_threshold value: 85.3551 - type: similarity_f1 value: 51.27850000000001 - type: similarity_f1_threshold value: 75.8966 - type: similarity_precision value: 45.8247 - type: similarity_recall value: 58.205799999999996 - type: similarity_ap value: 52.295100000000005 - type: cosine_accuracy value: 80.3481 - type: cosine_accuracy_threshold value: 85.3551 - type: cosine_f1 value: 51.27850000000001 - type: cosine_f1_threshold value: 75.8966 - type: cosine_precision value: 45.8247 - type: cosine_recall value: 58.205799999999996 - type: cosine_ap value: 52.295199999999994 - type: manhattan_accuracy value: 78.9712 - type: manhattan_accuracy_threshold value: 3046.9002 - type: manhattan_f1 value: 44.784600000000005 - type: manhattan_f1_threshold value: 4624.7635 - type: manhattan_precision value: 35.5133 - type: manhattan_recall value: 60.606899999999996 - type: manhattan_ap value: 44.4155 - type: euclidean_accuracy value: 78.9772 - type: euclidean_accuracy_threshold value: 141.3014 - type: euclidean_f1 value: 44.8638 - type: euclidean_f1_threshold value: 210.8781 - type: euclidean_precision value: 35.3191 - type: euclidean_recall value: 61.477599999999995 - type: euclidean_ap value: 44.3973 - type: dot_accuracy value: 77.4095 - type: 
dot_accuracy_threshold value: 3833.3893000000003 - type: dot_f1 value: 41.7116 - type: dot_f1_threshold value: 336.5812 - type: dot_precision value: 28.259600000000002 - type: dot_recall value: 79.6042 - type: dot_ap value: 30.7809 - type: max_accuracy value: 80.3481 - type: max_f1 value: 51.27850000000001 - type: max_precision value: 45.8247 - type: max_recall value: 79.6042 - type: max_ap value: 52.295199999999994 - type: main_score value: 52.295199999999994 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus (default) type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: similarity_accuracy value: 85.9025 - type: similarity_accuracy_threshold value: 71.6078 - type: similarity_f1 value: 70.9832 - type: similarity_f1_threshold value: 66.4079 - type: similarity_precision value: 68.9871 - type: similarity_recall value: 73.0982 - type: similarity_ap value: 79.2622 - type: cosine_accuracy value: 85.9025 - type: cosine_accuracy_threshold value: 71.6078 - type: cosine_f1 value: 70.9832 - type: cosine_f1_threshold value: 66.4079 - type: cosine_precision value: 68.9871 - type: cosine_recall value: 73.0982 - type: cosine_ap value: 79.2622 - type: manhattan_accuracy value: 81.8954 - type: manhattan_accuracy_threshold value: 2754.9084000000003 - type: manhattan_f1 value: 58.4303 - type: manhattan_f1_threshold value: 3301.9608 - type: manhattan_precision value: 56.1511 - type: manhattan_recall value: 60.9024 - type: manhattan_ap value: 66.2046 - type: euclidean_accuracy value: 81.8974 - type: euclidean_accuracy_threshold value: 122.74810000000001 - type: euclidean_f1 value: 58.455 - type: euclidean_f1_threshold value: 151.3654 - type: euclidean_precision value: 55.0722 - type: euclidean_recall value: 62.2806 - type: euclidean_ap value: 66.22019999999999 - type: dot_accuracy value: 78.7402 - type: dot_accuracy_threshold value: 317.0264 - type: dot_f1 value: 58.2905 - type: dot_f1_threshold value: 187.0591 - type: dot_precision value: 48.1454 - type: dot_recall value: 73.8528 - type: dot_ap value: 58.116 - type: max_accuracy value: 85.9025 - type: max_f1 value: 70.9832 - type: max_precision value: 68.9871 - type: max_recall value: 73.8528 - type: max_ap value: 79.2622 - type: main_score value: 79.2622 --- # 🧚🏻‍♀️ brown-fairy-base-v0 Model Card <div align="center"> <img width="50%" alt="Fairy logo" src="./assets/fairy_logo.png"> </div> > [!TIP] > Fairies are among the most enchanting and magical beings in folklore and mythology. They appear across countless cultures and stories, from ancient forests to modern gardens. They are celebrated for their ability to bridge the mundane and magical realms, known for their ethereal grace and transformative powers. Fairies are tiny, higher-dimensional beings that can interact with the world in ways that are beyond our understanding. The fairy series of models are an attempt to tune the beetle series of models to be more suitable for downstream tasks. These models are meant to fully open experiments at making state-of-the-art static embeddings. The brown-fairy-base-v0 model is a distillation of the `baai/bge-base-en-v1.5` model into the `brown-beetle-base-v0` model. There was no PCA or Zipf applied to this model. 
## Installation

Install model2vec using pip:

```bash
pip install model2vec
```

## Usage

Load this model using the `from_pretrained` method:

```python
from model2vec import StaticModel

# Load a pretrained Model2Vec model
model = StaticModel.from_pretrained("bhavnicksm/brown-fairy-base-v0")

# Compute text embeddings
embeddings = model.encode(["Example sentence"])
```

Read more about the Model2Vec library [here](https://github.com/MinishLab/model2vec).

## Reproduce this model

This model was trained on a subset of the 2 million texts from the [FineWeb-Edu](https://huggingface.co/datasets/mixedbread-ai/fineweb-edu) dataset, which was labeled by the `baai/bge-base-en-v1.5` model.

<details>
<summary>Training Code</summary>

Note: The datasets need to be made separately and loaded with the `datasets` library (a sketch of this preparation step is included at the end of this card).

```python
from sentence_transformers import (
    SentenceTransformer,
    SentenceTransformerTrainer,
    SentenceTransformerTrainingArguments,
)
from sentence_transformers.evaluation import NanoBEIREvaluator
from sentence_transformers.losses import MSELoss
from sentence_transformers.models import StaticEmbedding
from sentence_transformers.training_args import BatchSamplers

# Initialise the student from the brown-beetle static embeddings
static_embedding = StaticEmbedding.from_model2vec("bhavnicksm/brown-beetle-base-v0")
model = SentenceTransformer(modules=[static_embedding])

loss = MSELoss(model)

run_name = "brown-fairy-base-v0"

args = SentenceTransformerTrainingArguments(
    # Required parameter:
    output_dir=f"output/{run_name}",
    # Optional training parameters:
    num_train_epochs=1,
    per_device_train_batch_size=2048,
    per_device_eval_batch_size=2048,
    learning_rate=1e-1,
    warmup_ratio=0.1,
    fp16=False,  # Set to False if you get an error that your GPU can't run on FP16
    bf16=True,  # Set to True if you have a GPU that supports BF16
    batch_sampler=BatchSamplers.NO_DUPLICATES,
    # Optional tracking/debugging parameters:
    eval_strategy="steps",
    eval_steps=50,
    save_strategy="steps",
    save_steps=50,
    save_total_limit=5,
    logging_steps=50,
    logging_first_step=True,
    run_name=run_name,
)

evaluator = NanoBEIREvaluator()
evaluator(model)

# train_dataset / eval_dataset are the separately prepared distillation datasets
trainer = SentenceTransformerTrainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
    loss=loss,
    evaluator=evaluator,
)
trainer.train()

evaluator(model)

model.save_pretrained(f"output/{run_name}")
```

</details>

## Comparison with other models

Coming soon...

## Acknowledgements

This model is based on the [Model2Vec](https://github.com/MinishLab/model2vec) library. Credit goes to the [Minish Lab](https://github.com/MinishLab) team for developing this library.

## Citation

This model builds on work done by Minish Lab. Please cite the [Model2Vec repository](https://github.com/MinishLab/model2vec) if you use this model in your work.

```bibtex
@software{minishlab2024model2vec,
  author = {Stephan Tulkens and Thomas van Dongen},
  title = {Model2Vec: Turn any Sentence Transformer into a Small Fast Model},
  year = {2024},
  url = {https://github.com/MinishLab/model2vec},
}
```
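The training snippet above assumes that `train_dataset` and `eval_dataset` already exist. Below is a minimal, hypothetical sketch of that dataset-preparation step. It is not the exact script used for this model; it assumes the FineWeb-Edu subset exposes a `train` split with a `text` column, and that `MSELoss` reads the teacher embeddings from a `label` column.

```python
# Hypothetical dataset-preparation sketch for the distillation setup described above.
# Assumptions (not from the original card): a "train" split with a "text" column, and
# teacher embeddings stored in a "label" column as expected by MSELoss.
from datasets import load_dataset
from sentence_transformers import SentenceTransformer

teacher = SentenceTransformer("BAAI/bge-base-en-v1.5")

# A small slice for illustration; the full run used a subset of the ~2M FineWeb-Edu texts.
raw = load_dataset("mixedbread-ai/fineweb-edu", split="train[:10000]")

def add_teacher_embeddings(batch):
    # The teacher's embeddings become the regression targets for the student.
    batch["label"] = teacher.encode(batch["text"], convert_to_numpy=True).tolist()
    return batch

labeled = raw.map(add_teacher_embeddings, batched=True, batch_size=256)
labeled = labeled.select_columns(["text", "label"])

splits = labeled.train_test_split(test_size=0.01, seed=42)
train_dataset, eval_dataset = splits["train"], splits["test"]
splits.save_to_disk("fineweb-edu-bge-labeled")  # reload later with datasets.load_from_disk
```

The resulting `train_dataset` and `eval_dataset` can then be passed to the `SentenceTransformerTrainer` call in the training code above.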
[ "BIOSSES", "SCIFACT" ]
Triangle104/Distilled-DarkPlanet-Allades-8B_TIES
Triangle104
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "mergekit", "merge", "conversational", "en", "arxiv:2306.01708", "base_model:DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B", "base_model:merge:DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B", "base_model:deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "base_model:merge:deepseek-ai/DeepSeek-R1-Distill-Llama-8B", "base_model:nbeerbower/Llama3.1-Allades-8B", "base_model:merge:nbeerbower/Llama3.1-Allades-8B", "license:llama3.1", "model-index", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2025-02-02T18:47:40Z
2025-02-08T00:20:18+00:00
23
1
--- base_model: - deepseek-ai/DeepSeek-R1-Distill-Llama-8B - nbeerbower/Llama3.1-Allades-8B - DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B language: - en library_name: transformers license: llama3.1 tags: - mergekit - merge model-index: - name: Distilled-DarkPlanet-Allades-8B_TIES results: - task: type: text-generation name: Text Generation dataset: name: IFEval (0-Shot) type: HuggingFaceH4/ifeval args: num_few_shot: 0 metrics: - type: inst_level_strict_acc and prompt_level_strict_acc value: 38.92 name: strict accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Triangle104/Distilled-DarkPlanet-Allades-8B_TIES name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: BBH (3-Shot) type: BBH args: num_few_shot: 3 metrics: - type: acc_norm value: 29.96 name: normalized accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Triangle104/Distilled-DarkPlanet-Allades-8B_TIES name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MATH Lvl 5 (4-Shot) type: hendrycks/competition_math args: num_few_shot: 4 metrics: - type: exact_match value: 8.38 name: exact match source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Triangle104/Distilled-DarkPlanet-Allades-8B_TIES name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: GPQA (0-shot) type: Idavidrein/gpqa args: num_few_shot: 0 metrics: - type: acc_norm value: 8.61 name: acc_norm source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Triangle104/Distilled-DarkPlanet-Allades-8B_TIES name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MuSR (0-shot) type: TAUR-Lab/MuSR args: num_few_shot: 0 metrics: - type: acc_norm value: 8.05 name: acc_norm source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Triangle104/Distilled-DarkPlanet-Allades-8B_TIES name: Open LLM Leaderboard - task: type: text-generation name: Text Generation dataset: name: MMLU-PRO (5-shot) type: TIGER-Lab/MMLU-Pro config: main split: test args: num_few_shot: 5 metrics: - type: acc value: 26.68 name: accuracy source: url: https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard?query=Triangle104/Distilled-DarkPlanet-Allades-8B_TIES name: Open LLM Leaderboard --- # Merge This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details Different merge of Allades, Dark-Planet-SF and DS Distilled models, using DS as base. Doesn't seem to trigger thought proccess of Deepseek model, but useful nonetheless. Sample - User: Write a story about an elf called Tyril fighting an owlbear. Assistant: Tyril, a skilled Elven archer, crouched behind a moss-covered boulder, her piercing emerald eyes fixed on the snarling owlbear in front of her. The creature's yellow eyes glinted with menace as it reared up on its hind legs, its long claws flexing menacingly. The boulder was no more than 20 feet away from Tyril, but it provided a modicum of cover. The forest floor around her was a tangle of thorny brambles and tall, swaying ferns that offered some concealment, but not enough to make the owlbear's keen eyes miss her. Tyril inhaled deeply, filling her lungs with the damp, earthy scent of the forest. 
She mentally prepared herself for the fight ahead, as she had a reputation for being one of the most skilled and deadly archers among the Elven community. The owlbear was a formidable opponent; its razor-sharp talons could tear through even the thickest of armor, and it possessed brute strength to match any bear. Tyril knew that if she didn't act quickly, her chances of victory would be slim to none. She slowly reached for one of the quivers of arrows at her side. With a smooth, practiced motion, she nocked an arrow on to her bowstring. The owlbear, sensing her presence, turned its attention fully to Tyril and took a step forward, its claws scraping against the rough bark of a nearby tree as it shifted weight. Tyril's eyes never left the beast, tracking its every movement. She could almost feel the heat emanating from its body, and she knew that if she hesitated for even an instant, she would be within reach of those deadly talons. Her bowstring hummed as she drew back to her ear, and a steady, measured breath escaped from between her lips. As her fingers released the arrow, it was gone in a flash - a streak of green and black, whirring through the air with a sharp, guttural whoosh. The owlbear, anticipating Tyril's next move, swiped at the arrow as if to knock it out of the sky. However, Tyril had taken the angle into account, and her aim was true; the arrow struck the owlbear squarely between its eyes with a sickening thud. The creature let out an anguished howl that echoed through the forest, and for a moment, it stumbled backwards, clutching at the wound. Tyril didn't wait to see if the attack had been effective - she immediately notched another arrow, her bow singing in its familiar rhythm as she drew back to her ear. With an economy of motion, she released the next arrow, which struck the owlbear's shoulder and caused it to rear up in agony. The beast's howls now took on a desperate edge, and Tyril realized that this was no ordinary attack - the owlbear was not only wounded but also enraged by the pain. This made it an even more formidable foe, as its ferocity increased with every passing moment. As she readied yet another arrow, Tyril's eyes flicked over to a nearby tree. She had seen a branch snap off under the weight of the owlbear during one of their earlier skirmishes. That same branch now lay on the forest floor about 20 feet from where Tyril was hiding - and in an instant, she decided that it could become her best friend. Tyril swiftly nocked another arrow and released it with a smooth, practiced motion. The arrow flew true, striking the owlbear in its side, causing it to wince and take a step back. Then, using all of her archer's skill, she expertly guided her next arrow towards where she knew the snapped branch was lying - straight for the owlbear. The owlbear, as if anticipating Tyril's strategy, swung around with an arm-like motion. The force of its swipe sent a flurry of leaves and twigs flying through the air, but, to Tyril's surprise, it failed to connect with the branch that she had aimed at. Instead, it missed by a fraction - but not before its razor-sharp talons brushed against her bow, sending a shiver of fear down Tyril's spine as the edge of the bow scraped and almost broke under the owlbear's swipe. The creature's howls grew even more frantic now, as if enraged by its own ineptitude. However, this was merely a momentary setback for both combatants - Tyril knew that the owlbear would be back to try and claim her as its next meal in no time. 
### Merge Method

This model was merged using the [TIES](https://arxiv.org/abs/2306.01708) merge method using [deepseek-ai/DeepSeek-R1-Distill-Llama-8B](https://huggingface.co/deepseek-ai/DeepSeek-R1-Distill-Llama-8B) as a base.

### Models Merged

The following models were included in the merge:
* [nbeerbower/Llama3.1-Allades-8B](https://huggingface.co/nbeerbower/Llama3.1-Allades-8B)
* [DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B](https://huggingface.co/DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B)

### Configuration

The following YAML configuration was used to produce this model:

```yaml
models:
  - model: nbeerbower/Llama3.1-Allades-8B
    parameters:
      density: 0.5
      weight: 1
  - model: DavidAU/L3.1-Dark-Planet-SpinFire-Uncensored-8B
    parameters:
      density: 0.5
      weight: 1
merge_method: ties
base_model: deepseek-ai/DeepSeek-R1-Distill-Llama-8B
parameters:
  normalize: true
  int8_mask: true
dtype: float16
```

# [Open LLM Leaderboard Evaluation Results](https://huggingface.co/spaces/open-llm-leaderboard/open_llm_leaderboard)

Detailed results can be found [here](https://huggingface.co/datasets/open-llm-leaderboard/Triangle104__Distilled-DarkPlanet-Allades-8B_TIES-details)

| Metric            |Value|
|-------------------|----:|
|Avg.               |20.10|
|IFEval (0-Shot)    |38.92|
|BBH (3-Shot)       |29.96|
|MATH Lvl 5 (4-Shot)| 8.38|
|GPQA (0-shot)      | 8.61|
|MuSR (0-shot)      | 8.05|
|MMLU-PRO (5-shot)  |26.68|
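For quick local experimentation, the following is a minimal inference sketch using 🤗 Transformers. It assumes the merged checkpoint loads like any other Llama-3.1-8B-derived model and that the tokenizer ships a chat template; the generation settings are illustrative only.

```python
# Minimal inference sketch (assumption: the merge loads like a standard Llama-3.1-8B
# checkpoint; dtype/device settings are illustrative, adjust to your hardware).
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "Triangle104/Distilled-DarkPlanet-Allades-8B_TIES"

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(
    repo_id,
    torch_dtype=torch.float16,  # matches the dtype used in the merge config
    device_map="auto",          # requires the accelerate package
)

messages = [
    {"role": "user", "content": "Write a story about an elf called Tyril fighting an owlbear."}
]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=512, do_sample=True, temperature=0.8)
# Print only the newly generated continuation
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```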
[ "BEAR" ]
MikeRoz/Black-Ink-Guild_Pernicious_Prophecy_70B-8.0bpw-h8-exl2
MikeRoz
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "merge", "axolotl", "finetune", "conversational", "en", "base_model:EVA-UNIT-01/EVA-LLaMA-3.33-70B-v0.1", "base_model:merge:EVA-UNIT-01/EVA-LLaMA-3.33-70B-v0.1", "base_model:SicariusSicariiStuff/Negative_LLAMA_70B", "base_model:merge:SicariusSicariiStuff/Negative_LLAMA_70B", "base_model:aaditya/Llama3-OpenBioLLM-70B", "base_model:merge:aaditya/Llama3-OpenBioLLM-70B", "base_model:invisietch/L3.1-70Blivion-v0.1-rc1-70B", "base_model:merge:invisietch/L3.1-70Blivion-v0.1-rc1-70B", "license:llama3.3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "8-bit", "exl2", "region:us" ]
2025-02-05T23:51:08Z
2025-02-06T03:49:53+00:00
23
0
--- base_model: - SicariusSicariiStuff/Negative_LLAMA_70B - invisietch/L3.1-70Blivion-v0.1-rc1-70B - EVA-UNIT-01/EVA-LLaMA-3.33-70B-v0.1 - aaditya/Llama3-OpenBioLLM-70B language: - en library_name: transformers license: llama3.3 license_name: llama3.3 tags: - merge - axolotl - finetune --- <html lang="en"> <head> <meta charset="UTF-8" /> <title>Pernicious Prophecy 70B</title> <link rel="preconnect" href="https://fonts.googleapis.com"> <link rel="preconnect" href="https://fonts.gstatic.com" crossorigin> <link href="https://fonts.googleapis.com/css2?family=Darker+Grotesque:[email protected]&family=Uncial+Antiqua&display=swap" rel="stylesheet"> <style> html, body { margin: 0; padding: 0; background: rgb(11, 15, 25); color: #E6FFE6; font-family: 'Darker Grotesque', sans-serif; } @keyframes runeGlow { 0% { text-shadow: 0 0 4px #91ca00; filter: brightness(0.7); } 50% { text-shadow: 0 0 8px #91ca00; filter: brightness(1.0); } 100% { text-shadow: 0 0 4px #91ca00; filter: brightness(0.7); } } img.badge { filter: grayscale(100%); transition: filter 0.7s ease-in-out; } img.badge:hover { filter: grayscale(0%); } .rune-border::before, .rune-border::after, .vertical-sides::before, .vertical-sides::after { animation: runeGlow 1.5s infinite alternate; } .rune-border::before { animation-delay: 0s; } .rune-border::after { animation-delay: 0.2s; } .vertical-sides::before { animation-delay: 0.4s; } .vertical-sides::after { animation-delay: 0.6s; } .rune-border { position: relative; max-width: 45em; margin: 2em auto; padding: 2em 4em; box-sizing: border-box; } .rune-border::before, .rune-border::after { position: absolute; left: 0; right: 0; margin: 0 2em; text-align: center; white-space: nowrap; overflow: hidden; color: #91ca00; text-shadow: 0 0 4px #91ca00; font-family: monospace; font-size: 14px; content: "ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ"; } .rune-separator:after { position: absolute; left: 0; right: 0; margin: 0 2em; text-align: center; white-space: nowrap; overflow: hidden; color: #91ca00; text-shadow: 0 0 4px #91ca00; font-family: monospace; font-size: 14px; content: "ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ"; } .rune-border::before { top: 0; } .rune-border::after { bottom: 0; } .vertical-sides { position: absolute; margin: 2em 0; top: 0; bottom: 0; left: 0; right: 0; pointer-events: none; } .vertical-sides::before, .vertical-sides::after { position: absolute; top: 0; bottom: 0; width: 1.5em; white-space: nowrap; overflow: hidden; color: #91ca00; text-shadow: 0 0 4px #91ca00; font-family: monospace; font-size: 14px; writing-mode: vertical-rl; text-orientation: mixed; } .vertical-sides::before { left: 0; content: "ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | 
ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ"; } .vertical-sides::after { right: 0; content: "ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ | ᛁᛏ ᛁᛋ ᚢᚱᛁᛏᛏᛁᚾ ᛅᚾᛏ ᛁᛏ ᚢᛁᛚᛚ ᚴᚬᛘᛁ ᛏᚬ ᛒᛅᛋᛋ"; } h1, h2, h3 { font-family: "Uncial Antiqua", serif; font-weight: 400; font-style: normal; color: #426100; -webkit-text-stroke: 1px #91ca00; text-stroke: 1px #91ca00; margin-top: 1em; } h2 { padding-top: 1.5em; } a { color: #619300; text-decoration: none; } a:hover { text-decoration: underline; } h1 { font-size: 2.5em; } h2 { font-size: 2em; } h3 { font-size: 1.5em; } p, li { font-size: 1.2em; line-height: 1.2; } p.red { color: #ef2323; } img { border-radius: 20px; max-width: 100%; height: auto; display: block; margin: 0 auto; } .sidebyside { display: flex; justify-content: center; /* Center horizontally */ align-items: center; /* Align images vertically */ gap: 1em; /* Space of 1em between images */ flex-wrap: wrap; /* Wrap to next line if needed */ } .sidebyside img { max-width: 100%; /* Ensure images are responsive */ height: auto; /* Maintain aspect ratio */ display: inline; } .container { display: flex; flex-direction: column; align-items: center; justify-content: center; text-align: center; } </style> </head> <body> <div class="rune-border"> <div class="vertical-sides"></div> <div class="container"> <h1>Pernicious Prophecy 70B</h1> <p> <img src="./header.gif" alt="Pernicious Prophecy 70B GIF" /> </p> <h2 style="margin-top: 0em; padding-top: 0em;">Jump Straight In...</h2> <p> <a href="#settings">Click here for downloads & settings</a> </p> </div> <div class="rune-separator"></div> <h2 style='padding-top:0.5em;'>An Introduction...</h2> <p> <b>Pernicious Prophecy 70B</b> is a Llama-3.3 70B-based, two-step model designed by <a href="https://huggingface.co/Black-Ink-Guild">Black Ink Guild</a> (<a href="https://huggingface.co/SicariusSicariiStuff">SicariusSicariiStuff</a> 
and <a href="https://huggingface.co/invisietch">invisietch</a>) for uncensored roleplay, assistant tasks, and general usage. </p> <p class="red"> <b>NOTE:</b> Pernicious Prophecy 70B is an uncensored model and can produce deranged, offensive, and dangerous outputs. You are solely responsible for anything that you choose to do with this model. </p> <p> If you have any issues or just want to chat about Pernicious Prophecy &amp; future Black Ink Guild releases, join <a href="https://discord.gg/gXQzQcnedb">our Discord server</a>. </p> <div class="rune-separator"></div> <h2 id="settings">Engage the Model...</h2> <h3>Model Downloads</h3> <p> FPX: <a href="https://huggingface.co/Black-Ink-Guild/Pernicious_Prophecy_70B">FP16 (HF)</a> | <a href="https://huggingface.co/Black-Ink-Guild/Pernicious_Prophecy_70B_FP8">FP8 (Aph.)</a> </p> <p> GGUF: <a href="https://huggingface.co/Black-Ink-Guild/Pernicious_Prophecy_70B_GGUF_Q4_K_S">Q4_K_S</a> | <a href="https://huggingface.co/Black-Ink-Guild/Pernicious_Prophecy_70B_GGUF_Q4_K_M">Q4_K_M</a> | <a href="https://huggingface.co/mradermacher/Pernicious_Prophecy_70B-GGUF">mradermacher</a> </p> <p> EXL2: <a href="https://huggingface.co/Black-Ink-Guild/Pernicious_Prophecy_70B-3.5bpw">3.5bpw</a> | <a href="https://huggingface.co/Black-Ink-Guild/Pernicious_Prophecy_70B-5.0bpw">5.0bpw</a> </p> <h3>Recommended Settings</h3> <p> Pernicious Prophecy 70B uses the Llama-3 Instruct format, which is available as a preset in all good UIs. The sampler settings used in testing are as follows: </p> <ul> <li><b>Instruct Template</b>: Llama-3 Instruct</li> <li><b>Context</b>: 32,768</li> <li><b>Temperature</b>: 0.9-1.1</li> <li><b>Min P</b>: 0.06-0.12</li> <li><b>Rep Pen</b>: 1.07-1.09</li> <li><b>Rep Pen Range</b>: 1,536</li> </ul> <p> Feel free to use other sampler settings, these are just sane defaults. XTC is good for roleplaying with the model but may not be beneficial for other tasks. </p> <h3>Context Length</h3> <p> The model has been tested in roleplays using up to <b>32,768 token context</b> at various quantizations and is incredibly stable at this context length. </p> <p> It is possible that the context works at even longer context lengths, but it was not deemed within the parameters of our testing. </p> <div class="rune-separator"></div> <h2>Sip the Poison...</h2> <p> Here, you can find example outputs from the LLM to various instructions. For each of these examples, the model was inferenced at fp8 with 1.0 temperature, 0.1 min-p, 1.04 repetition penalty, and all other samplers neutralized. </p> <ul> <li> <a href="https://huggingface.co/Black-Ink-Guild/Pernicious_Prophecy_70B/blob/main/nasa.md">Write a 2000 word, Markdown-formatted, report for NASA. Evaluate each of Jupiter's moons as a suitable colony with pros & cons, then provide a recommendation.</a> </li> <li> <a href="https://huggingface.co/Black-Ink-Guild/Pernicious_Prophecy_70B/blob/main/tone.md">Write me a 3,000 word opening chapter of a 'gritty hard sci-fi' novel, drawing inspiration from the writing styles of Isaac Asimov & Andy Weir. Use third person personal. Include dialogue and internal monologues. The POV character for the opening chapter should be a 26 year old astronaut called Tone on a mission to Europa, who has just realised that the craft for the return journey is broken beyond repair, and he only has supplies for a few months. 
Given that survival is impossible, he seeks to spend the few months he has researching titan, so his life &amp; mission are not wasted.</a> </li> <li> <a href="https://huggingface.co/Black-Ink-Guild/Pernicious_Prophecy_70B/blob/main/cookie.md">Build me a basic cookie clicker game in HTML & Javascript.</a><br /> </li> </ul>
<p> These examples were all the best of 2 responses. </p>
<div class="rune-separator"></div>
<h2>The Codex...</h2>
<p> Here, you can find some useful prompting tips for working with Pernicious Prophecy 70B. </p>
<h3>Formatting</h3>
<p> 'Use markdown' and 'use formatting' are likely to produce the best formatted output. We decided to train these on trigger words to avoid random Markdown in roleplay replies. </p>
<h3>System Prompting</h3>
<p> Pernicious Prophecy 70B is very sensitive to prompting, even over long context. The more you instruct it, the more it will know what you want it to do. </p>
<p> 'Avoid purple prose, avoid cliches, avoid deus ex machinae' is a useful prompt snippet for roleplaying purposes. For best results, don't use your roleplay prompt when using Pernicious Prophecy as an assistant. </p>
<div class="rune-separator"></div>
<h2>Assembling the Repertoire...</h2>
<p> We used a two-step process: a merge step to combine the abilities of some of the best L3 70B models on Huggingface and a gentle SFT training step to heal the merge and address some issues around refusals and positivity bias. </p>
<h3>The Merge Step</h3>
<p> First, a <code>model_stock</code> merge was applied using four high-quality Llama-3 based models: <ul> <li> <b>SicariusSicariiStuff/Negative_LLAMA_70B</b> - chosen to be the base model, because of its low censorship, reduced positivity bias, and engaging writing style </li> <li> <b>invisietch/L3.1-70Blivion-v0.1-rc1-70B</b> - added for its exceptional formatting, roleplay performance, and general intelligence. </li> <li> <b>EVA-UNIT-01/EVA-LLaMA-3.33-70B-v0.1</b> - selected for its ability in longer-form storytelling, varied outputs, and quality thought. </li> <li> <b>aaditya/Llama3-OpenBioLLM-70B</b> - to add a better understanding of anatomy, and another long-form reasoning model to the stack. </li> </ul> </p>
<h3>The Finetuning Step</h3>
<p> We used a <b>QLoRA-based</b>, targeted finetune on 2x NVIDIA RTX A6000 GPUs, with a curated dataset of approximately 18 million tokens designed to surgically address issues that we identified in the merge. </p>
<p> The finetuning took a total of about 14 hours, using Axolotl, and targeted specific high-priority LoRA modules which allowed us to maintain a 16k sequence length even with 96GB VRAM. </p>
<div class="sidebyside" style="padding-bottom:2em;"> <a href="https://github.com/arcee-ai/mergekit"> <img class="badge" src="https://huggingface.co/Black-Ink-Guild/READMETEST/resolve/main/mergekit.png" alt="Built with Mergekit" width="200" height="32" /> </a> <a href="https://github.com/axolotl-ai-cloud/axolotl"> <img class="badge" src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32" /> </a> </div> </div> </body> </html>
[ "CRAFT" ]
RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF
RcINS
sentence-similarity
[ "sentence-transformers", "gguf", "mteb", "transformers", "Qwen2", "sentence-similarity", "llama-cpp", "gguf-my-repo", "base_model:Alibaba-NLP/gte-Qwen2-7B-instruct", "base_model:quantized:Alibaba-NLP/gte-Qwen2-7B-instruct", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us", "conversational" ]
2025-02-26T06:03:06Z
2025-02-26T06:03:36+00:00
23
0
--- base_model: Alibaba-NLP/gte-Qwen2-7B-instruct license: apache-2.0 tags: - mteb - sentence-transformers - transformers - Qwen2 - sentence-similarity - llama-cpp - gguf-my-repo model-index: - name: gte-qwen2-7B-instruct results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 91.31343283582089 - type: ap value: 67.64251402604096 - type: f1 value: 87.53372530755692 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 97.497825 - type: ap value: 96.30329547047529 - type: f1 value: 97.49769793778039 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 62.564 - type: f1 value: 60.975777935041066 - task: type: Retrieval dataset: name: MTEB ArguAna type: mteb/arguana config: default split: test revision: c22ab2a51041ffd869aaddef7af8d8215647e41a metrics: - type: map_at_1 value: 36.486000000000004 - type: map_at_10 value: 54.842 - type: map_at_100 value: 55.206999999999994 - type: map_at_1000 value: 55.206999999999994 - type: map_at_3 value: 49.893 - type: map_at_5 value: 53.105000000000004 - type: mrr_at_1 value: 37.34 - type: mrr_at_10 value: 55.143 - type: mrr_at_100 value: 55.509 - type: mrr_at_1000 value: 55.509 - type: mrr_at_3 value: 50.212999999999994 - type: mrr_at_5 value: 53.432 - type: ndcg_at_1 value: 36.486000000000004 - type: ndcg_at_10 value: 64.273 - type: ndcg_at_100 value: 65.66199999999999 - type: ndcg_at_1000 value: 65.66199999999999 - type: ndcg_at_3 value: 54.352999999999994 - type: ndcg_at_5 value: 60.131 - type: precision_at_1 value: 36.486000000000004 - type: precision_at_10 value: 9.395000000000001 - type: precision_at_100 value: 0.996 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 22.428 - type: precision_at_5 value: 16.259 - type: recall_at_1 value: 36.486000000000004 - type: recall_at_10 value: 93.95400000000001 - type: recall_at_100 value: 99.644 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 67.283 - type: recall_at_5 value: 81.294 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 56.461169803700564 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 51.73600434466286 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 67.57827065898053 - type: mrr value: 79.08136569493911 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 83.53324575999243 - type: cos_sim_spearman value: 81.37173362822374 - type: euclidean_pearson value: 82.19243335103444 - type: euclidean_spearman value: 81.33679307304334 - type: manhattan_pearson 
value: 82.38752665975699 - type: manhattan_spearman value: 81.31510583189689 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 87.56818181818181 - type: f1 value: 87.25826722019875 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 50.09239610327673 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 46.64733054606282 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: f46a197baaae43b4f621051089b82a364682dfeb metrics: - type: map_at_1 value: 33.997 - type: map_at_10 value: 48.176 - type: map_at_100 value: 49.82 - type: map_at_1000 value: 49.924 - type: map_at_3 value: 43.626 - type: map_at_5 value: 46.275 - type: mrr_at_1 value: 42.059999999999995 - type: mrr_at_10 value: 53.726 - type: mrr_at_100 value: 54.398 - type: mrr_at_1000 value: 54.416 - type: mrr_at_3 value: 50.714999999999996 - type: mrr_at_5 value: 52.639 - type: ndcg_at_1 value: 42.059999999999995 - type: ndcg_at_10 value: 55.574999999999996 - type: ndcg_at_100 value: 60.744 - type: ndcg_at_1000 value: 61.85699999999999 - type: ndcg_at_3 value: 49.363 - type: ndcg_at_5 value: 52.44 - type: precision_at_1 value: 42.059999999999995 - type: precision_at_10 value: 11.101999999999999 - type: precision_at_100 value: 1.73 - type: precision_at_1000 value: 0.218 - type: precision_at_3 value: 24.464 - type: precision_at_5 value: 18.026 - type: recall_at_1 value: 33.997 - type: recall_at_10 value: 70.35900000000001 - type: recall_at_100 value: 91.642 - type: recall_at_1000 value: 97.977 - type: recall_at_3 value: 52.76 - type: recall_at_5 value: 61.148 - task: type: Retrieval dataset: name: MTEB CQADupstackEnglishRetrieval type: BeIR/cqadupstack config: default split: test revision: ad9991cb51e31e31e430383c75ffb2885547b5f0 metrics: - type: map_at_1 value: 35.884 - type: map_at_10 value: 48.14 - type: map_at_100 value: 49.5 - type: map_at_1000 value: 49.63 - type: map_at_3 value: 44.646 - type: map_at_5 value: 46.617999999999995 - type: mrr_at_1 value: 44.458999999999996 - type: mrr_at_10 value: 53.751000000000005 - type: mrr_at_100 value: 54.37800000000001 - type: mrr_at_1000 value: 54.415 - type: mrr_at_3 value: 51.815 - type: mrr_at_5 value: 52.882 - type: ndcg_at_1 value: 44.458999999999996 - type: ndcg_at_10 value: 54.157 - type: ndcg_at_100 value: 58.362 - type: ndcg_at_1000 value: 60.178 - type: ndcg_at_3 value: 49.661 - type: ndcg_at_5 value: 51.74999999999999 - type: precision_at_1 value: 44.458999999999996 - type: precision_at_10 value: 10.248 - type: precision_at_100 value: 1.5890000000000002 - type: precision_at_1000 value: 0.207 - type: precision_at_3 value: 23.928 - type: precision_at_5 value: 16.878999999999998 - type: recall_at_1 value: 35.884 - type: recall_at_10 value: 64.798 - type: recall_at_100 value: 82.345 - type: recall_at_1000 value: 93.267 - type: recall_at_3 value: 51.847 - type: recall_at_5 value: 57.601 - task: type: Retrieval dataset: name: MTEB CQADupstackGamingRetrieval type: BeIR/cqadupstack config: default split: test revision: 
4885aa143210c98657558c04aaf3dc47cfb54340 metrics: - type: map_at_1 value: 39.383 - type: map_at_10 value: 53.714 - type: map_at_100 value: 54.838 - type: map_at_1000 value: 54.87800000000001 - type: map_at_3 value: 50.114999999999995 - type: map_at_5 value: 52.153000000000006 - type: mrr_at_1 value: 45.016 - type: mrr_at_10 value: 56.732000000000006 - type: mrr_at_100 value: 57.411 - type: mrr_at_1000 value: 57.431 - type: mrr_at_3 value: 54.044000000000004 - type: mrr_at_5 value: 55.639 - type: ndcg_at_1 value: 45.016 - type: ndcg_at_10 value: 60.228 - type: ndcg_at_100 value: 64.277 - type: ndcg_at_1000 value: 65.07 - type: ndcg_at_3 value: 54.124 - type: ndcg_at_5 value: 57.147000000000006 - type: precision_at_1 value: 45.016 - type: precision_at_10 value: 9.937 - type: precision_at_100 value: 1.288 - type: precision_at_1000 value: 0.13899999999999998 - type: precision_at_3 value: 24.471999999999998 - type: precision_at_5 value: 16.991 - type: recall_at_1 value: 39.383 - type: recall_at_10 value: 76.175 - type: recall_at_100 value: 93.02 - type: recall_at_1000 value: 98.60900000000001 - type: recall_at_3 value: 60.265 - type: recall_at_5 value: 67.46600000000001 - task: type: Retrieval dataset: name: MTEB CQADupstackGisRetrieval type: BeIR/cqadupstack config: default split: test revision: 5003b3064772da1887988e05400cf3806fe491f2 metrics: - type: map_at_1 value: 27.426000000000002 - type: map_at_10 value: 37.397000000000006 - type: map_at_100 value: 38.61 - type: map_at_1000 value: 38.678000000000004 - type: map_at_3 value: 34.150999999999996 - type: map_at_5 value: 36.137 - type: mrr_at_1 value: 29.944 - type: mrr_at_10 value: 39.654 - type: mrr_at_100 value: 40.638000000000005 - type: mrr_at_1000 value: 40.691 - type: mrr_at_3 value: 36.817 - type: mrr_at_5 value: 38.524 - type: ndcg_at_1 value: 29.944 - type: ndcg_at_10 value: 43.094 - type: ndcg_at_100 value: 48.789 - type: ndcg_at_1000 value: 50.339999999999996 - type: ndcg_at_3 value: 36.984 - type: ndcg_at_5 value: 40.248 - type: precision_at_1 value: 29.944 - type: precision_at_10 value: 6.78 - type: precision_at_100 value: 1.024 - type: precision_at_1000 value: 0.11800000000000001 - type: precision_at_3 value: 15.895000000000001 - type: precision_at_5 value: 11.39 - type: recall_at_1 value: 27.426000000000002 - type: recall_at_10 value: 58.464000000000006 - type: recall_at_100 value: 84.193 - type: recall_at_1000 value: 95.52000000000001 - type: recall_at_3 value: 42.172 - type: recall_at_5 value: 50.101 - task: type: Retrieval dataset: name: MTEB CQADupstackMathematicaRetrieval type: BeIR/cqadupstack config: default split: test revision: 90fceea13679c63fe563ded68f3b6f06e50061de metrics: - type: map_at_1 value: 19.721 - type: map_at_10 value: 31.604 - type: map_at_100 value: 32.972 - type: map_at_1000 value: 33.077 - type: map_at_3 value: 27.218999999999998 - type: map_at_5 value: 29.53 - type: mrr_at_1 value: 25.0 - type: mrr_at_10 value: 35.843 - type: mrr_at_100 value: 36.785000000000004 - type: mrr_at_1000 value: 36.842000000000006 - type: mrr_at_3 value: 32.193 - type: mrr_at_5 value: 34.264 - type: ndcg_at_1 value: 25.0 - type: ndcg_at_10 value: 38.606 - type: ndcg_at_100 value: 44.272 - type: ndcg_at_1000 value: 46.527 - type: ndcg_at_3 value: 30.985000000000003 - type: ndcg_at_5 value: 34.43 - type: precision_at_1 value: 25.0 - type: precision_at_10 value: 7.811 - type: precision_at_100 value: 1.203 - type: precision_at_1000 value: 0.15 - type: precision_at_3 value: 15.423 - type: precision_at_5 value: 11.791 - type: 
recall_at_1 value: 19.721 - type: recall_at_10 value: 55.625 - type: recall_at_100 value: 79.34400000000001 - type: recall_at_1000 value: 95.208 - type: recall_at_3 value: 35.19 - type: recall_at_5 value: 43.626 - task: type: Retrieval dataset: name: MTEB CQADupstackPhysicsRetrieval type: BeIR/cqadupstack config: default split: test revision: 79531abbd1fb92d06c6d6315a0cbbbf5bb247ea4 metrics: - type: map_at_1 value: 33.784 - type: map_at_10 value: 47.522 - type: map_at_100 value: 48.949999999999996 - type: map_at_1000 value: 49.038 - type: map_at_3 value: 43.284 - type: map_at_5 value: 45.629 - type: mrr_at_1 value: 41.482 - type: mrr_at_10 value: 52.830999999999996 - type: mrr_at_100 value: 53.559999999999995 - type: mrr_at_1000 value: 53.588 - type: mrr_at_3 value: 50.016000000000005 - type: mrr_at_5 value: 51.614000000000004 - type: ndcg_at_1 value: 41.482 - type: ndcg_at_10 value: 54.569 - type: ndcg_at_100 value: 59.675999999999995 - type: ndcg_at_1000 value: 60.989000000000004 - type: ndcg_at_3 value: 48.187000000000005 - type: ndcg_at_5 value: 51.183 - type: precision_at_1 value: 41.482 - type: precision_at_10 value: 10.221 - type: precision_at_100 value: 1.486 - type: precision_at_1000 value: 0.17500000000000002 - type: precision_at_3 value: 23.548 - type: precision_at_5 value: 16.805 - type: recall_at_1 value: 33.784 - type: recall_at_10 value: 69.798 - type: recall_at_100 value: 90.098 - type: recall_at_1000 value: 98.176 - type: recall_at_3 value: 52.127 - type: recall_at_5 value: 59.861 - task: type: Retrieval dataset: name: MTEB CQADupstackProgrammersRetrieval type: BeIR/cqadupstack config: default split: test revision: 6184bc1440d2dbc7612be22b50686b8826d22b32 metrics: - type: map_at_1 value: 28.038999999999998 - type: map_at_10 value: 41.904 - type: map_at_100 value: 43.36 - type: map_at_1000 value: 43.453 - type: map_at_3 value: 37.785999999999994 - type: map_at_5 value: 40.105000000000004 - type: mrr_at_1 value: 35.046 - type: mrr_at_10 value: 46.926 - type: mrr_at_100 value: 47.815000000000005 - type: mrr_at_1000 value: 47.849000000000004 - type: mrr_at_3 value: 44.273 - type: mrr_at_5 value: 45.774 - type: ndcg_at_1 value: 35.046 - type: ndcg_at_10 value: 48.937000000000005 - type: ndcg_at_100 value: 54.544000000000004 - type: ndcg_at_1000 value: 56.069 - type: ndcg_at_3 value: 42.858000000000004 - type: ndcg_at_5 value: 45.644 - type: precision_at_1 value: 35.046 - type: precision_at_10 value: 9.452 - type: precision_at_100 value: 1.429 - type: precision_at_1000 value: 0.173 - type: precision_at_3 value: 21.346999999999998 - type: precision_at_5 value: 15.342 - type: recall_at_1 value: 28.038999999999998 - type: recall_at_10 value: 64.59700000000001 - type: recall_at_100 value: 87.735 - type: recall_at_1000 value: 97.41300000000001 - type: recall_at_3 value: 47.368 - type: recall_at_5 value: 54.93900000000001 - task: type: Retrieval dataset: name: MTEB CQADupstackRetrieval type: BeIR/cqadupstack config: default split: test revision: 4ffe81d471b1924886b33c7567bfb200e9eec5c4 metrics: - type: map_at_1 value: 28.17291666666667 - type: map_at_10 value: 40.025749999999995 - type: map_at_100 value: 41.39208333333333 - type: map_at_1000 value: 41.499249999999996 - type: map_at_3 value: 36.347 - type: map_at_5 value: 38.41391666666667 - type: mrr_at_1 value: 33.65925 - type: mrr_at_10 value: 44.085499999999996 - type: mrr_at_100 value: 44.94116666666667 - type: mrr_at_1000 value: 44.9855 - type: mrr_at_3 value: 41.2815 - type: mrr_at_5 value: 42.91491666666666 - type: ndcg_at_1 
value: 33.65925 - type: ndcg_at_10 value: 46.430833333333325 - type: ndcg_at_100 value: 51.761 - type: ndcg_at_1000 value: 53.50899999999999 - type: ndcg_at_3 value: 40.45133333333333 - type: ndcg_at_5 value: 43.31483333333334 - type: precision_at_1 value: 33.65925 - type: precision_at_10 value: 8.4995 - type: precision_at_100 value: 1.3210000000000004 - type: precision_at_1000 value: 0.16591666666666666 - type: precision_at_3 value: 19.165083333333335 - type: precision_at_5 value: 13.81816666666667 - type: recall_at_1 value: 28.17291666666667 - type: recall_at_10 value: 61.12624999999999 - type: recall_at_100 value: 83.97266666666667 - type: recall_at_1000 value: 95.66550000000001 - type: recall_at_3 value: 44.661249999999995 - type: recall_at_5 value: 51.983333333333334 - type: map_at_1 value: 17.936 - type: map_at_10 value: 27.399 - type: map_at_100 value: 28.632 - type: map_at_1000 value: 28.738000000000003 - type: map_at_3 value: 24.456 - type: map_at_5 value: 26.06 - type: mrr_at_1 value: 19.224 - type: mrr_at_10 value: 28.998 - type: mrr_at_100 value: 30.11 - type: mrr_at_1000 value: 30.177 - type: mrr_at_3 value: 26.247999999999998 - type: mrr_at_5 value: 27.708 - type: ndcg_at_1 value: 19.224 - type: ndcg_at_10 value: 32.911 - type: ndcg_at_100 value: 38.873999999999995 - type: ndcg_at_1000 value: 41.277 - type: ndcg_at_3 value: 27.142 - type: ndcg_at_5 value: 29.755 - type: precision_at_1 value: 19.224 - type: precision_at_10 value: 5.6930000000000005 - type: precision_at_100 value: 0.9259999999999999 - type: precision_at_1000 value: 0.126 - type: precision_at_3 value: 12.138 - type: precision_at_5 value: 8.909 - type: recall_at_1 value: 17.936 - type: recall_at_10 value: 48.096 - type: recall_at_100 value: 75.389 - type: recall_at_1000 value: 92.803 - type: recall_at_3 value: 32.812999999999995 - type: recall_at_5 value: 38.851 - task: type: Retrieval dataset: name: MTEB CQADupstackStatsRetrieval type: BeIR/cqadupstack config: default split: test revision: 65ac3a16b8e91f9cee4c9828cc7c335575432a2a metrics: - type: map_at_1 value: 24.681 - type: map_at_10 value: 34.892 - type: map_at_100 value: 35.996 - type: map_at_1000 value: 36.083 - type: map_at_3 value: 31.491999999999997 - type: map_at_5 value: 33.632 - type: mrr_at_1 value: 28.528 - type: mrr_at_10 value: 37.694 - type: mrr_at_100 value: 38.613 - type: mrr_at_1000 value: 38.668 - type: mrr_at_3 value: 34.714 - type: mrr_at_5 value: 36.616 - type: ndcg_at_1 value: 28.528 - type: ndcg_at_10 value: 40.703 - type: ndcg_at_100 value: 45.993 - type: ndcg_at_1000 value: 47.847 - type: ndcg_at_3 value: 34.622 - type: ndcg_at_5 value: 38.035999999999994 - type: precision_at_1 value: 28.528 - type: precision_at_10 value: 6.902 - type: precision_at_100 value: 1.0370000000000001 - type: precision_at_1000 value: 0.126 - type: precision_at_3 value: 15.798000000000002 - type: precision_at_5 value: 11.655999999999999 - type: recall_at_1 value: 24.681 - type: recall_at_10 value: 55.81 - type: recall_at_100 value: 79.785 - type: recall_at_1000 value: 92.959 - type: recall_at_3 value: 39.074 - type: recall_at_5 value: 47.568 - task: type: Retrieval dataset: name: MTEB CQADupstackTexRetrieval type: BeIR/cqadupstack config: default split: test revision: 46989137a86843e03a6195de44b09deda022eec7 metrics: - type: map_at_1 value: 18.627 - type: map_at_10 value: 27.872000000000003 - type: map_at_100 value: 29.237999999999996 - type: map_at_1000 value: 29.363 - type: map_at_3 value: 24.751 - type: map_at_5 value: 26.521 - type: mrr_at_1 value: 23.021 
- type: mrr_at_10 value: 31.924000000000003 - type: mrr_at_100 value: 32.922000000000004 - type: mrr_at_1000 value: 32.988 - type: mrr_at_3 value: 29.192 - type: mrr_at_5 value: 30.798 - type: ndcg_at_1 value: 23.021 - type: ndcg_at_10 value: 33.535 - type: ndcg_at_100 value: 39.732 - type: ndcg_at_1000 value: 42.201 - type: ndcg_at_3 value: 28.153 - type: ndcg_at_5 value: 30.746000000000002 - type: precision_at_1 value: 23.021 - type: precision_at_10 value: 6.459 - type: precision_at_100 value: 1.1320000000000001 - type: precision_at_1000 value: 0.153 - type: precision_at_3 value: 13.719000000000001 - type: precision_at_5 value: 10.193000000000001 - type: recall_at_1 value: 18.627 - type: recall_at_10 value: 46.463 - type: recall_at_100 value: 74.226 - type: recall_at_1000 value: 91.28500000000001 - type: recall_at_3 value: 31.357000000000003 - type: recall_at_5 value: 38.067 - task: type: Retrieval dataset: name: MTEB CQADupstackUnixRetrieval type: BeIR/cqadupstack config: default split: test revision: 6c6430d3a6d36f8d2a829195bc5dc94d7e063e53 metrics: - type: map_at_1 value: 31.457 - type: map_at_10 value: 42.888 - type: map_at_100 value: 44.24 - type: map_at_1000 value: 44.327 - type: map_at_3 value: 39.588 - type: map_at_5 value: 41.423 - type: mrr_at_1 value: 37.126999999999995 - type: mrr_at_10 value: 47.083000000000006 - type: mrr_at_100 value: 47.997 - type: mrr_at_1000 value: 48.044 - type: mrr_at_3 value: 44.574000000000005 - type: mrr_at_5 value: 46.202 - type: ndcg_at_1 value: 37.126999999999995 - type: ndcg_at_10 value: 48.833 - type: ndcg_at_100 value: 54.327000000000005 - type: ndcg_at_1000 value: 56.011 - type: ndcg_at_3 value: 43.541999999999994 - type: ndcg_at_5 value: 46.127 - type: precision_at_1 value: 37.126999999999995 - type: precision_at_10 value: 8.376999999999999 - type: precision_at_100 value: 1.2309999999999999 - type: precision_at_1000 value: 0.146 - type: precision_at_3 value: 20.211000000000002 - type: precision_at_5 value: 14.16 - type: recall_at_1 value: 31.457 - type: recall_at_10 value: 62.369 - type: recall_at_100 value: 85.444 - type: recall_at_1000 value: 96.65599999999999 - type: recall_at_3 value: 47.961 - type: recall_at_5 value: 54.676 - task: type: Retrieval dataset: name: MTEB CQADupstackWebmastersRetrieval type: BeIR/cqadupstack config: default split: test revision: 160c094312a0e1facb97e55eeddb698c0abe3571 metrics: - type: map_at_1 value: 27.139999999999997 - type: map_at_10 value: 38.801 - type: map_at_100 value: 40.549 - type: map_at_1000 value: 40.802 - type: map_at_3 value: 35.05 - type: map_at_5 value: 36.884 - type: mrr_at_1 value: 33.004 - type: mrr_at_10 value: 43.864 - type: mrr_at_100 value: 44.667 - type: mrr_at_1000 value: 44.717 - type: mrr_at_3 value: 40.777 - type: mrr_at_5 value: 42.319 - type: ndcg_at_1 value: 33.004 - type: ndcg_at_10 value: 46.022 - type: ndcg_at_100 value: 51.542 - type: ndcg_at_1000 value: 53.742000000000004 - type: ndcg_at_3 value: 39.795 - type: ndcg_at_5 value: 42.272 - type: precision_at_1 value: 33.004 - type: precision_at_10 value: 9.012 - type: precision_at_100 value: 1.7770000000000001 - type: precision_at_1000 value: 0.26 - type: precision_at_3 value: 19.038 - type: precision_at_5 value: 13.675999999999998 - type: recall_at_1 value: 27.139999999999997 - type: recall_at_10 value: 60.961 - type: recall_at_100 value: 84.451 - type: recall_at_1000 value: 98.113 - type: recall_at_3 value: 43.001 - type: recall_at_5 value: 49.896 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: 
mteb/climate-fever config: default split: test revision: 47f2ac6acb640fc46020b02a5b59fdda04d39380 metrics: - type: map_at_1 value: 22.076999999999998 - type: map_at_10 value: 35.44 - type: map_at_100 value: 37.651 - type: map_at_1000 value: 37.824999999999996 - type: map_at_3 value: 30.764999999999997 - type: map_at_5 value: 33.26 - type: mrr_at_1 value: 50.163000000000004 - type: mrr_at_10 value: 61.207 - type: mrr_at_100 value: 61.675000000000004 - type: mrr_at_1000 value: 61.692 - type: mrr_at_3 value: 58.60999999999999 - type: mrr_at_5 value: 60.307 - type: ndcg_at_1 value: 50.163000000000004 - type: ndcg_at_10 value: 45.882 - type: ndcg_at_100 value: 53.239999999999995 - type: ndcg_at_1000 value: 55.852000000000004 - type: ndcg_at_3 value: 40.514 - type: ndcg_at_5 value: 42.038 - type: precision_at_1 value: 50.163000000000004 - type: precision_at_10 value: 13.466000000000001 - type: precision_at_100 value: 2.164 - type: precision_at_1000 value: 0.266 - type: precision_at_3 value: 29.707 - type: precision_at_5 value: 21.694 - type: recall_at_1 value: 22.076999999999998 - type: recall_at_10 value: 50.193 - type: recall_at_100 value: 74.993 - type: recall_at_1000 value: 89.131 - type: recall_at_3 value: 35.472 - type: recall_at_5 value: 41.814 - task: type: Retrieval dataset: name: MTEB DBPedia type: mteb/dbpedia config: default split: test revision: c0f706b76e590d620bd6618b3ca8efdd34e2d659 metrics: - type: map_at_1 value: 9.953 - type: map_at_10 value: 24.515 - type: map_at_100 value: 36.173 - type: map_at_1000 value: 38.351 - type: map_at_3 value: 16.592000000000002 - type: map_at_5 value: 20.036 - type: mrr_at_1 value: 74.25 - type: mrr_at_10 value: 81.813 - type: mrr_at_100 value: 82.006 - type: mrr_at_1000 value: 82.011 - type: mrr_at_3 value: 80.875 - type: mrr_at_5 value: 81.362 - type: ndcg_at_1 value: 62.5 - type: ndcg_at_10 value: 52.42 - type: ndcg_at_100 value: 56.808 - type: ndcg_at_1000 value: 63.532999999999994 - type: ndcg_at_3 value: 56.654 - type: ndcg_at_5 value: 54.18300000000001 - type: precision_at_1 value: 74.25 - type: precision_at_10 value: 42.699999999999996 - type: precision_at_100 value: 13.675 - type: precision_at_1000 value: 2.664 - type: precision_at_3 value: 60.5 - type: precision_at_5 value: 52.800000000000004 - type: recall_at_1 value: 9.953 - type: recall_at_10 value: 30.253999999999998 - type: recall_at_100 value: 62.516000000000005 - type: recall_at_1000 value: 84.163 - type: recall_at_3 value: 18.13 - type: recall_at_5 value: 22.771 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 79.455 - type: f1 value: 74.16798697647569 - task: type: Retrieval dataset: name: MTEB FEVER type: mteb/fever config: default split: test revision: bea83ef9e8fb933d90a2f1d5515737465d613e12 metrics: - type: map_at_1 value: 87.531 - type: map_at_10 value: 93.16799999999999 - type: map_at_100 value: 93.341 - type: map_at_1000 value: 93.349 - type: map_at_3 value: 92.444 - type: map_at_5 value: 92.865 - type: mrr_at_1 value: 94.014 - type: mrr_at_10 value: 96.761 - type: mrr_at_100 value: 96.762 - type: mrr_at_1000 value: 96.762 - type: mrr_at_3 value: 96.672 - type: mrr_at_5 value: 96.736 - type: ndcg_at_1 value: 94.014 - type: ndcg_at_10 value: 95.112 - type: ndcg_at_100 value: 95.578 - type: ndcg_at_1000 value: 95.68900000000001 - type: ndcg_at_3 value: 94.392 - type: ndcg_at_5 value: 94.72500000000001 - type: precision_at_1 
value: 94.014 - type: precision_at_10 value: 11.065 - type: precision_at_100 value: 1.157 - type: precision_at_1000 value: 0.11800000000000001 - type: precision_at_3 value: 35.259 - type: precision_at_5 value: 21.599 - type: recall_at_1 value: 87.531 - type: recall_at_10 value: 97.356 - type: recall_at_100 value: 98.965 - type: recall_at_1000 value: 99.607 - type: recall_at_3 value: 95.312 - type: recall_at_5 value: 96.295 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: mteb/fiqa config: default split: test revision: 27a168819829fe9bcd655c2df245fb19452e8e06 metrics: - type: map_at_1 value: 32.055 - type: map_at_10 value: 53.114 - type: map_at_100 value: 55.235 - type: map_at_1000 value: 55.345 - type: map_at_3 value: 45.854 - type: map_at_5 value: 50.025 - type: mrr_at_1 value: 60.34 - type: mrr_at_10 value: 68.804 - type: mrr_at_100 value: 69.309 - type: mrr_at_1000 value: 69.32199999999999 - type: mrr_at_3 value: 66.40899999999999 - type: mrr_at_5 value: 67.976 - type: ndcg_at_1 value: 60.34 - type: ndcg_at_10 value: 62.031000000000006 - type: ndcg_at_100 value: 68.00500000000001 - type: ndcg_at_1000 value: 69.286 - type: ndcg_at_3 value: 56.355999999999995 - type: ndcg_at_5 value: 58.687 - type: precision_at_1 value: 60.34 - type: precision_at_10 value: 17.176 - type: precision_at_100 value: 2.36 - type: precision_at_1000 value: 0.259 - type: precision_at_3 value: 37.14 - type: precision_at_5 value: 27.809 - type: recall_at_1 value: 32.055 - type: recall_at_10 value: 70.91 - type: recall_at_100 value: 91.83 - type: recall_at_1000 value: 98.871 - type: recall_at_3 value: 51.202999999999996 - type: recall_at_5 value: 60.563 - task: type: Retrieval dataset: name: MTEB HotpotQA type: mteb/hotpotqa config: default split: test revision: ab518f4d6fcca38d87c25209f94beba119d02014 metrics: - type: map_at_1 value: 43.68 - type: map_at_10 value: 64.389 - type: map_at_100 value: 65.24 - type: map_at_1000 value: 65.303 - type: map_at_3 value: 61.309000000000005 - type: map_at_5 value: 63.275999999999996 - type: mrr_at_1 value: 87.36 - type: mrr_at_10 value: 91.12 - type: mrr_at_100 value: 91.227 - type: mrr_at_1000 value: 91.229 - type: mrr_at_3 value: 90.57600000000001 - type: mrr_at_5 value: 90.912 - type: ndcg_at_1 value: 87.36 - type: ndcg_at_10 value: 73.076 - type: ndcg_at_100 value: 75.895 - type: ndcg_at_1000 value: 77.049 - type: ndcg_at_3 value: 68.929 - type: ndcg_at_5 value: 71.28 - type: precision_at_1 value: 87.36 - type: precision_at_10 value: 14.741000000000001 - type: precision_at_100 value: 1.694 - type: precision_at_1000 value: 0.185 - type: precision_at_3 value: 43.043 - type: precision_at_5 value: 27.681 - type: recall_at_1 value: 43.68 - type: recall_at_10 value: 73.707 - type: recall_at_100 value: 84.7 - type: recall_at_1000 value: 92.309 - type: recall_at_3 value: 64.564 - type: recall_at_5 value: 69.203 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 96.75399999999999 - type: ap value: 95.29389839242187 - type: f1 value: 96.75348377433475 - task: type: Retrieval dataset: name: MTEB MSMARCO type: mteb/msmarco config: default split: dev revision: c5a29a104738b98a9e76336939199e264163d4a0 metrics: - type: map_at_1 value: 25.176 - type: map_at_10 value: 38.598 - type: map_at_100 value: 39.707 - type: map_at_1000 value: 39.744 - type: map_at_3 value: 34.566 - type: map_at_5 value: 36.863 - type: mrr_at_1 value: 
25.874000000000002 - type: mrr_at_10 value: 39.214 - type: mrr_at_100 value: 40.251 - type: mrr_at_1000 value: 40.281 - type: mrr_at_3 value: 35.291 - type: mrr_at_5 value: 37.545 - type: ndcg_at_1 value: 25.874000000000002 - type: ndcg_at_10 value: 45.98 - type: ndcg_at_100 value: 51.197 - type: ndcg_at_1000 value: 52.073 - type: ndcg_at_3 value: 37.785999999999994 - type: ndcg_at_5 value: 41.870000000000005 - type: precision_at_1 value: 25.874000000000002 - type: precision_at_10 value: 7.181 - type: precision_at_100 value: 0.979 - type: precision_at_1000 value: 0.106 - type: precision_at_3 value: 16.051000000000002 - type: precision_at_5 value: 11.713 - type: recall_at_1 value: 25.176 - type: recall_at_10 value: 68.67699999999999 - type: recall_at_100 value: 92.55 - type: recall_at_1000 value: 99.164 - type: recall_at_3 value: 46.372 - type: recall_at_5 value: 56.16 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 99.03784769721841 - type: f1 value: 98.97791641821495 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 91.88326493388054 - type: f1 value: 73.74809928034335 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 85.41358439811701 - type: f1 value: 83.503679460639 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 89.77135171486215 - type: f1 value: 88.89843747468366 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 46.22695362087359 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 44.132372165849425 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 33.35680810650402 - type: mrr value: 34.72625715637218 - task: type: Retrieval dataset: name: MTEB NFCorpus type: mteb/nfcorpus config: default split: test revision: ec0fa4fe99da2ff19ca1214b7966684033a58814 metrics: - type: map_at_1 value: 7.165000000000001 - type: map_at_10 value: 15.424 - type: map_at_100 value: 20.28 - type: map_at_1000 value: 22.065 - type: map_at_3 value: 11.236 - type: map_at_5 value: 13.025999999999998 - type: mrr_at_1 value: 51.702999999999996 - type: mrr_at_10 value: 59.965 - type: mrr_at_100 value: 60.667 - type: mrr_at_1000 value: 60.702999999999996 - type: mrr_at_3 value: 58.772000000000006 - type: mrr_at_5 value: 59.267 - type: ndcg_at_1 value: 49.536 - type: ndcg_at_10 value: 40.6 - type: ndcg_at_100 value: 37.848 - type: ndcg_at_1000 value: 46.657 - type: ndcg_at_3 value: 46.117999999999995 - type: ndcg_at_5 value: 43.619 - type: precision_at_1 value: 51.393 - type: precision_at_10 value: 
30.31 - type: precision_at_100 value: 9.972 - type: precision_at_1000 value: 2.329 - type: precision_at_3 value: 43.137 - type: precision_at_5 value: 37.585 - type: recall_at_1 value: 7.165000000000001 - type: recall_at_10 value: 19.689999999999998 - type: recall_at_100 value: 39.237 - type: recall_at_1000 value: 71.417 - type: recall_at_3 value: 12.247 - type: recall_at_5 value: 14.902999999999999 - task: type: Retrieval dataset: name: MTEB NQ type: mteb/nq config: default split: test revision: b774495ed302d8c44a3a7ea25c90dbce03968f31 metrics: - type: map_at_1 value: 42.653999999999996 - type: map_at_10 value: 59.611999999999995 - type: map_at_100 value: 60.32300000000001 - type: map_at_1000 value: 60.336 - type: map_at_3 value: 55.584999999999994 - type: map_at_5 value: 58.19 - type: mrr_at_1 value: 47.683 - type: mrr_at_10 value: 62.06700000000001 - type: mrr_at_100 value: 62.537 - type: mrr_at_1000 value: 62.544999999999995 - type: mrr_at_3 value: 59.178 - type: mrr_at_5 value: 61.034 - type: ndcg_at_1 value: 47.654 - type: ndcg_at_10 value: 67.001 - type: ndcg_at_100 value: 69.73899999999999 - type: ndcg_at_1000 value: 69.986 - type: ndcg_at_3 value: 59.95700000000001 - type: ndcg_at_5 value: 64.025 - type: precision_at_1 value: 47.654 - type: precision_at_10 value: 10.367999999999999 - type: precision_at_100 value: 1.192 - type: precision_at_1000 value: 0.121 - type: precision_at_3 value: 26.651000000000003 - type: precision_at_5 value: 18.459 - type: recall_at_1 value: 42.653999999999996 - type: recall_at_10 value: 86.619 - type: recall_at_100 value: 98.04899999999999 - type: recall_at_1000 value: 99.812 - type: recall_at_3 value: 68.987 - type: recall_at_5 value: 78.158 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: mteb/quora config: default split: test revision: None metrics: - type: map_at_1 value: 72.538 - type: map_at_10 value: 86.702 - type: map_at_100 value: 87.31 - type: map_at_1000 value: 87.323 - type: map_at_3 value: 83.87 - type: map_at_5 value: 85.682 - type: mrr_at_1 value: 83.31 - type: mrr_at_10 value: 89.225 - type: mrr_at_100 value: 89.30399999999999 - type: mrr_at_1000 value: 89.30399999999999 - type: mrr_at_3 value: 88.44300000000001 - type: mrr_at_5 value: 89.005 - type: ndcg_at_1 value: 83.32000000000001 - type: ndcg_at_10 value: 90.095 - type: ndcg_at_100 value: 91.12 - type: ndcg_at_1000 value: 91.179 - type: ndcg_at_3 value: 87.606 - type: ndcg_at_5 value: 89.031 - type: precision_at_1 value: 83.32000000000001 - type: precision_at_10 value: 13.641 - type: precision_at_100 value: 1.541 - type: precision_at_1000 value: 0.157 - type: precision_at_3 value: 38.377 - type: precision_at_5 value: 25.162000000000003 - type: recall_at_1 value: 72.538 - type: recall_at_10 value: 96.47200000000001 - type: recall_at_100 value: 99.785 - type: recall_at_1000 value: 99.99900000000001 - type: recall_at_3 value: 89.278 - type: recall_at_5 value: 93.367 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 73.55219145406065 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 74.13437105242755 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: mteb/scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 6.873 - type: 
map_at_10 value: 17.944 - type: map_at_100 value: 21.171 - type: map_at_1000 value: 21.528 - type: map_at_3 value: 12.415 - type: map_at_5 value: 15.187999999999999 - type: mrr_at_1 value: 33.800000000000004 - type: mrr_at_10 value: 46.455 - type: mrr_at_100 value: 47.378 - type: mrr_at_1000 value: 47.394999999999996 - type: mrr_at_3 value: 42.367 - type: mrr_at_5 value: 44.972 - type: ndcg_at_1 value: 33.800000000000004 - type: ndcg_at_10 value: 28.907 - type: ndcg_at_100 value: 39.695 - type: ndcg_at_1000 value: 44.582 - type: ndcg_at_3 value: 26.949 - type: ndcg_at_5 value: 23.988 - type: precision_at_1 value: 33.800000000000004 - type: precision_at_10 value: 15.079999999999998 - type: precision_at_100 value: 3.056 - type: precision_at_1000 value: 0.42100000000000004 - type: precision_at_3 value: 25.167 - type: precision_at_5 value: 21.26 - type: recall_at_1 value: 6.873 - type: recall_at_10 value: 30.568 - type: recall_at_100 value: 62.062 - type: recall_at_1000 value: 85.37700000000001 - type: recall_at_3 value: 15.312999999999999 - type: recall_at_5 value: 21.575 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 82.37009118256057 - type: cos_sim_spearman value: 79.27986395671529 - type: euclidean_pearson value: 79.18037715442115 - type: euclidean_spearman value: 79.28004791561621 - type: manhattan_pearson value: 79.34062972800541 - type: manhattan_spearman value: 79.43106695543402 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 87.48474767383833 - type: cos_sim_spearman value: 79.54505388752513 - type: euclidean_pearson value: 83.43282704179565 - type: euclidean_spearman value: 79.54579919925405 - type: manhattan_pearson value: 83.77564492427952 - type: manhattan_spearman value: 79.84558396989286 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 88.803698035802 - type: cos_sim_spearman value: 88.83451367754881 - type: euclidean_pearson value: 88.28939285711628 - type: euclidean_spearman value: 88.83528996073112 - type: manhattan_pearson value: 88.28017412671795 - type: manhattan_spearman value: 88.9228828016344 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 85.27469288153428 - type: cos_sim_spearman value: 83.87477064876288 - type: euclidean_pearson value: 84.2601737035379 - type: euclidean_spearman value: 83.87431082479074 - type: manhattan_pearson value: 84.3621547772745 - type: manhattan_spearman value: 84.12094375000423 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 88.12749863201587 - type: cos_sim_spearman value: 88.54287568368565 - type: euclidean_pearson value: 87.90429700607999 - type: euclidean_spearman value: 88.5437689576261 - type: manhattan_pearson value: 88.19276653356833 - type: manhattan_spearman value: 88.99995393814679 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 
85.68398747560902 - type: cos_sim_spearman value: 86.48815303460574 - type: euclidean_pearson value: 85.52356631237954 - type: euclidean_spearman value: 86.486391949551 - type: manhattan_pearson value: 85.67267981761788 - type: manhattan_spearman value: 86.7073696332485 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 88.9057107443124 - type: cos_sim_spearman value: 88.7312168757697 - type: euclidean_pearson value: 88.72810439714794 - type: euclidean_spearman value: 88.71976185854771 - type: manhattan_pearson value: 88.50433745949111 - type: manhattan_spearman value: 88.51726175544195 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 67.59391795109886 - type: cos_sim_spearman value: 66.87613008631367 - type: euclidean_pearson value: 69.23198488262217 - type: euclidean_spearman value: 66.85427723013692 - type: manhattan_pearson value: 69.50730124841084 - type: manhattan_spearman value: 67.10404669820792 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 87.0820605344619 - type: cos_sim_spearman value: 86.8518089863434 - type: euclidean_pearson value: 86.31087134689284 - type: euclidean_spearman value: 86.8518520517941 - type: manhattan_pearson value: 86.47203796160612 - type: manhattan_spearman value: 87.1080149734421 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 89.09255369305481 - type: mrr value: 97.10323445617563 - task: type: Retrieval dataset: name: MTEB SciFact type: mteb/scifact config: default split: test revision: 0228b52cf27578f30900b9e5271d331663a030d7 metrics: - type: map_at_1 value: 61.260999999999996 - type: map_at_10 value: 74.043 - type: map_at_100 value: 74.37700000000001 - type: map_at_1000 value: 74.384 - type: map_at_3 value: 71.222 - type: map_at_5 value: 72.875 - type: mrr_at_1 value: 64.333 - type: mrr_at_10 value: 74.984 - type: mrr_at_100 value: 75.247 - type: mrr_at_1000 value: 75.25500000000001 - type: mrr_at_3 value: 73.167 - type: mrr_at_5 value: 74.35000000000001 - type: ndcg_at_1 value: 64.333 - type: ndcg_at_10 value: 79.06 - type: ndcg_at_100 value: 80.416 - type: ndcg_at_1000 value: 80.55600000000001 - type: ndcg_at_3 value: 74.753 - type: ndcg_at_5 value: 76.97500000000001 - type: precision_at_1 value: 64.333 - type: precision_at_10 value: 10.567 - type: precision_at_100 value: 1.1199999999999999 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 29.889 - type: precision_at_5 value: 19.533 - type: recall_at_1 value: 61.260999999999996 - type: recall_at_10 value: 93.167 - type: recall_at_100 value: 99.0 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 81.667 - type: recall_at_5 value: 87.394 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.71980198019801 - type: cos_sim_ap value: 92.81616007802704 - type: cos_sim_f1 value: 
85.17548454688318 - type: cos_sim_precision value: 89.43894389438944 - type: cos_sim_recall value: 81.3 - type: dot_accuracy value: 99.71980198019801 - type: dot_ap value: 92.81398760591358 - type: dot_f1 value: 85.17548454688318 - type: dot_precision value: 89.43894389438944 - type: dot_recall value: 81.3 - type: euclidean_accuracy value: 99.71980198019801 - type: euclidean_ap value: 92.81560637245072 - type: euclidean_f1 value: 85.17548454688318 - type: euclidean_precision value: 89.43894389438944 - type: euclidean_recall value: 81.3 - type: manhattan_accuracy value: 99.73069306930694 - type: manhattan_ap value: 93.14005487480794 - type: manhattan_f1 value: 85.56263269639068 - type: manhattan_precision value: 91.17647058823529 - type: manhattan_recall value: 80.60000000000001 - type: max_accuracy value: 99.73069306930694 - type: max_ap value: 93.14005487480794 - type: max_f1 value: 85.56263269639068 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 79.86443362395185 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 49.40897096662564 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 55.66040806627947 - type: mrr value: 56.58670475766064 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 31.51015090598575 - type: cos_sim_spearman value: 31.35016454939226 - type: dot_pearson value: 31.5150068731 - type: dot_spearman value: 31.34790869023487 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: mteb/trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.254 - type: map_at_10 value: 2.064 - type: map_at_100 value: 12.909 - type: map_at_1000 value: 31.761 - type: map_at_3 value: 0.738 - type: map_at_5 value: 1.155 - type: mrr_at_1 value: 96.0 - type: mrr_at_10 value: 98.0 - type: mrr_at_100 value: 98.0 - type: mrr_at_1000 value: 98.0 - type: mrr_at_3 value: 98.0 - type: mrr_at_5 value: 98.0 - type: ndcg_at_1 value: 93.0 - type: ndcg_at_10 value: 82.258 - type: ndcg_at_100 value: 64.34 - type: ndcg_at_1000 value: 57.912 - type: ndcg_at_3 value: 90.827 - type: ndcg_at_5 value: 86.79 - type: precision_at_1 value: 96.0 - type: precision_at_10 value: 84.8 - type: precision_at_100 value: 66.0 - type: precision_at_1000 value: 25.356 - type: precision_at_3 value: 94.667 - type: precision_at_5 value: 90.4 - type: recall_at_1 value: 0.254 - type: recall_at_10 value: 2.1950000000000003 - type: recall_at_100 value: 16.088 - type: recall_at_1000 value: 54.559000000000005 - type: recall_at_3 value: 0.75 - type: recall_at_5 value: 1.191 - task: type: Retrieval dataset: name: MTEB Touche2020 type: mteb/touche2020 config: default split: test revision: a34f9a33db75fa0cbb21bb5cfc3dae8dc8bec93f metrics: - type: map_at_1 value: 2.976 - type: map_at_10 value: 11.389000000000001 - type: map_at_100 value: 18.429000000000002 - type: map_at_1000 value: 20.113 - type: map_at_3 value: 6.483 - type: map_at_5 value: 8.770999999999999 
- type: mrr_at_1 value: 40.816 - type: mrr_at_10 value: 58.118 - type: mrr_at_100 value: 58.489999999999995 - type: mrr_at_1000 value: 58.489999999999995 - type: mrr_at_3 value: 53.061 - type: mrr_at_5 value: 57.041 - type: ndcg_at_1 value: 40.816 - type: ndcg_at_10 value: 30.567 - type: ndcg_at_100 value: 42.44 - type: ndcg_at_1000 value: 53.480000000000004 - type: ndcg_at_3 value: 36.016 - type: ndcg_at_5 value: 34.257 - type: precision_at_1 value: 42.857 - type: precision_at_10 value: 25.714 - type: precision_at_100 value: 8.429 - type: precision_at_1000 value: 1.5939999999999999 - type: precision_at_3 value: 36.735 - type: precision_at_5 value: 33.878 - type: recall_at_1 value: 2.976 - type: recall_at_10 value: 17.854999999999997 - type: recall_at_100 value: 51.833 - type: recall_at_1000 value: 86.223 - type: recall_at_3 value: 7.887 - type: recall_at_5 value: 12.026 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 85.1174 - type: ap value: 30.169441069345748 - type: f1 value: 69.79254701873245 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 72.58347481607245 - type: f1 value: 72.74877295564937 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 53.90586138221305 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 87.35769207844072 - type: cos_sim_ap value: 77.9645072410354 - type: cos_sim_f1 value: 71.32352941176471 - type: cos_sim_precision value: 66.5903890160183 - type: cos_sim_recall value: 76.78100263852242 - type: dot_accuracy value: 87.37557370209214 - type: dot_ap value: 77.96250046429908 - type: dot_f1 value: 71.28932757557064 - type: dot_precision value: 66.95249130938586 - type: dot_recall value: 76.22691292875989 - type: euclidean_accuracy value: 87.35173153722357 - type: euclidean_ap value: 77.96520460741593 - type: euclidean_f1 value: 71.32470733210104 - type: euclidean_precision value: 66.91329479768785 - type: euclidean_recall value: 76.35883905013192 - type: manhattan_accuracy value: 87.25636287774931 - type: manhattan_ap value: 77.77752485611796 - type: manhattan_f1 value: 71.18148599269183 - type: manhattan_precision value: 66.10859728506787 - type: manhattan_recall value: 77.0976253298153 - type: max_accuracy value: 87.37557370209214 - type: max_ap value: 77.96520460741593 - type: max_f1 value: 71.32470733210104 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.38176737687739 - type: cos_sim_ap value: 86.58811861657401 - type: cos_sim_f1 value: 79.09430644097604 - type: cos_sim_precision value: 75.45085977911366 - type: cos_sim_recall value: 83.10748383122882 - type: dot_accuracy value: 89.38370784336554 - type: dot_ap value: 86.58840606004333 - type: dot_f1 
value: 79.10179860068133 - type: dot_precision value: 75.44546153308643 - type: dot_recall value: 83.13058207576223 - type: euclidean_accuracy value: 89.38564830985369 - type: euclidean_ap value: 86.58820721061164 - type: euclidean_f1 value: 79.09070942235888 - type: euclidean_precision value: 75.38729937194697 - type: euclidean_recall value: 83.17677856482906 - type: manhattan_accuracy value: 89.40699344122326 - type: manhattan_ap value: 86.60631843011362 - type: manhattan_f1 value: 79.14949970570925 - type: manhattan_precision value: 75.78191039729502 - type: manhattan_recall value: 82.83030489682784 - type: max_accuracy value: 89.40699344122326 - type: max_ap value: 86.60631843011362 - type: max_f1 value: 79.14949970570925 - task: type: STS dataset: name: MTEB AFQMC type: C-MTEB/AFQMC config: default split: validation revision: b44c3b011063adb25877c13823db83bb193913c4 metrics: - type: cos_sim_pearson value: 65.58442135663871 - type: cos_sim_spearman value: 72.2538631361313 - type: euclidean_pearson value: 70.97255486607429 - type: euclidean_spearman value: 72.25374250228647 - type: manhattan_pearson value: 70.83250199989911 - type: manhattan_spearman value: 72.14819496536272 - task: type: STS dataset: name: MTEB ATEC type: C-MTEB/ATEC config: default split: test revision: 0f319b1142f28d00e055a6770f3f726ae9b7d865 metrics: - type: cos_sim_pearson value: 59.99478404929932 - type: cos_sim_spearman value: 62.61836216999812 - type: euclidean_pearson value: 66.86429811933593 - type: euclidean_spearman value: 62.6183520374191 - type: manhattan_pearson value: 66.8063778911633 - type: manhattan_spearman value: 62.569607573241115 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (zh) type: mteb/amazon_reviews_multi config: zh split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 53.98400000000001 - type: f1 value: 51.21447361350723 - task: type: STS dataset: name: MTEB BQ type: C-MTEB/BQ config: default split: test revision: e3dda5e115e487b39ec7e618c0c6a29137052a55 metrics: - type: cos_sim_pearson value: 79.11941660686553 - type: cos_sim_spearman value: 81.25029594540435 - type: euclidean_pearson value: 82.06973504238826 - type: euclidean_spearman value: 81.2501989488524 - type: manhattan_pearson value: 82.10094630392753 - type: manhattan_spearman value: 81.27987244392389 - task: type: Clustering dataset: name: MTEB CLSClusteringP2P type: C-MTEB/CLSClusteringP2P config: default split: test revision: 4b6227591c6c1a73bc76b1055f3b7f3588e72476 metrics: - type: v_measure value: 47.07270168705156 - task: type: Clustering dataset: name: MTEB CLSClusteringS2S type: C-MTEB/CLSClusteringS2S config: default split: test revision: e458b3f5414b62b7f9f83499ac1f5497ae2e869f metrics: - type: v_measure value: 45.98511703185043 - task: type: Reranking dataset: name: MTEB CMedQAv1 type: C-MTEB/CMedQAv1-reranking config: default split: test revision: 8d7f1e942507dac42dc58017c1a001c3717da7df metrics: - type: map value: 88.19895157194931 - type: mrr value: 90.21424603174603 - task: type: Reranking dataset: name: MTEB CMedQAv2 type: C-MTEB/CMedQAv2-reranking config: default split: test revision: 23d186750531a14a0357ca22cd92d712fd512ea0 metrics: - type: map value: 88.03317320980119 - type: mrr value: 89.9461507936508 - task: type: Retrieval dataset: name: MTEB CmedqaRetrieval type: C-MTEB/CmedqaRetrieval config: default split: dev revision: cd540c506dae1cf9e9a59c3e06f42030d54e7301 metrics: - type: map_at_1 value: 29.037000000000003 - type: map_at_10 
value: 42.001 - type: map_at_100 value: 43.773 - type: map_at_1000 value: 43.878 - type: map_at_3 value: 37.637 - type: map_at_5 value: 40.034 - type: mrr_at_1 value: 43.136 - type: mrr_at_10 value: 51.158 - type: mrr_at_100 value: 52.083 - type: mrr_at_1000 value: 52.12 - type: mrr_at_3 value: 48.733 - type: mrr_at_5 value: 50.025 - type: ndcg_at_1 value: 43.136 - type: ndcg_at_10 value: 48.685 - type: ndcg_at_100 value: 55.513 - type: ndcg_at_1000 value: 57.242000000000004 - type: ndcg_at_3 value: 43.329 - type: ndcg_at_5 value: 45.438 - type: precision_at_1 value: 43.136 - type: precision_at_10 value: 10.56 - type: precision_at_100 value: 1.6129999999999998 - type: precision_at_1000 value: 0.184 - type: precision_at_3 value: 24.064 - type: precision_at_5 value: 17.269000000000002 - type: recall_at_1 value: 29.037000000000003 - type: recall_at_10 value: 59.245000000000005 - type: recall_at_100 value: 87.355 - type: recall_at_1000 value: 98.74000000000001 - type: recall_at_3 value: 42.99 - type: recall_at_5 value: 49.681999999999995 - task: type: PairClassification dataset: name: MTEB Cmnli type: C-MTEB/CMNLI config: default split: validation revision: 41bc36f332156f7adc9e38f53777c959b2ae9766 metrics: - type: cos_sim_accuracy value: 82.68190018039687 - type: cos_sim_ap value: 90.18017125327886 - type: cos_sim_f1 value: 83.64080906868193 - type: cos_sim_precision value: 79.7076890489303 - type: cos_sim_recall value: 87.98223053542202 - type: dot_accuracy value: 82.68190018039687 - type: dot_ap value: 90.18782350103646 - type: dot_f1 value: 83.64242087729039 - type: dot_precision value: 79.65313028764805 - type: dot_recall value: 88.05237315875614 - type: euclidean_accuracy value: 82.68190018039687 - type: euclidean_ap value: 90.1801957900632 - type: euclidean_f1 value: 83.63636363636364 - type: euclidean_precision value: 79.52772506852203 - type: euclidean_recall value: 88.19265840542437 - type: manhattan_accuracy value: 82.14070956103427 - type: manhattan_ap value: 89.96178420101427 - type: manhattan_f1 value: 83.21087838578791 - type: manhattan_precision value: 78.35605121850475 - type: manhattan_recall value: 88.70703764320785 - type: max_accuracy value: 82.68190018039687 - type: max_ap value: 90.18782350103646 - type: max_f1 value: 83.64242087729039 - task: type: Retrieval dataset: name: MTEB CovidRetrieval type: C-MTEB/CovidRetrieval config: default split: dev revision: 1271c7809071a13532e05f25fb53511ffce77117 metrics: - type: map_at_1 value: 72.234 - type: map_at_10 value: 80.10000000000001 - type: map_at_100 value: 80.36 - type: map_at_1000 value: 80.363 - type: map_at_3 value: 78.315 - type: map_at_5 value: 79.607 - type: mrr_at_1 value: 72.392 - type: mrr_at_10 value: 80.117 - type: mrr_at_100 value: 80.36999999999999 - type: mrr_at_1000 value: 80.373 - type: mrr_at_3 value: 78.469 - type: mrr_at_5 value: 79.633 - type: ndcg_at_1 value: 72.392 - type: ndcg_at_10 value: 83.651 - type: ndcg_at_100 value: 84.749 - type: ndcg_at_1000 value: 84.83000000000001 - type: ndcg_at_3 value: 80.253 - type: ndcg_at_5 value: 82.485 - type: precision_at_1 value: 72.392 - type: precision_at_10 value: 9.557 - type: precision_at_100 value: 1.004 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 28.732000000000003 - type: precision_at_5 value: 18.377 - type: recall_at_1 value: 72.234 - type: recall_at_10 value: 94.573 - type: recall_at_100 value: 99.368 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 85.669 - type: recall_at_5 value: 91.01700000000001 - task: type: 
Retrieval dataset: name: MTEB DuRetrieval type: C-MTEB/DuRetrieval config: default split: dev revision: a1a333e290fe30b10f3f56498e3a0d911a693ced metrics: - type: map_at_1 value: 26.173999999999996 - type: map_at_10 value: 80.04 - type: map_at_100 value: 82.94500000000001 - type: map_at_1000 value: 82.98100000000001 - type: map_at_3 value: 55.562999999999995 - type: map_at_5 value: 69.89800000000001 - type: mrr_at_1 value: 89.5 - type: mrr_at_10 value: 92.996 - type: mrr_at_100 value: 93.06400000000001 - type: mrr_at_1000 value: 93.065 - type: mrr_at_3 value: 92.658 - type: mrr_at_5 value: 92.84599999999999 - type: ndcg_at_1 value: 89.5 - type: ndcg_at_10 value: 87.443 - type: ndcg_at_100 value: 90.253 - type: ndcg_at_1000 value: 90.549 - type: ndcg_at_3 value: 85.874 - type: ndcg_at_5 value: 84.842 - type: precision_at_1 value: 89.5 - type: precision_at_10 value: 41.805 - type: precision_at_100 value: 4.827 - type: precision_at_1000 value: 0.49 - type: precision_at_3 value: 76.85 - type: precision_at_5 value: 64.8 - type: recall_at_1 value: 26.173999999999996 - type: recall_at_10 value: 89.101 - type: recall_at_100 value: 98.08099999999999 - type: recall_at_1000 value: 99.529 - type: recall_at_3 value: 57.902 - type: recall_at_5 value: 74.602 - task: type: Retrieval dataset: name: MTEB EcomRetrieval type: C-MTEB/EcomRetrieval config: default split: dev revision: 687de13dc7294d6fd9be10c6945f9e8fec8166b9 metrics: - type: map_at_1 value: 56.10000000000001 - type: map_at_10 value: 66.15299999999999 - type: map_at_100 value: 66.625 - type: map_at_1000 value: 66.636 - type: map_at_3 value: 63.632999999999996 - type: map_at_5 value: 65.293 - type: mrr_at_1 value: 56.10000000000001 - type: mrr_at_10 value: 66.15299999999999 - type: mrr_at_100 value: 66.625 - type: mrr_at_1000 value: 66.636 - type: mrr_at_3 value: 63.632999999999996 - type: mrr_at_5 value: 65.293 - type: ndcg_at_1 value: 56.10000000000001 - type: ndcg_at_10 value: 71.146 - type: ndcg_at_100 value: 73.27799999999999 - type: ndcg_at_1000 value: 73.529 - type: ndcg_at_3 value: 66.09 - type: ndcg_at_5 value: 69.08999999999999 - type: precision_at_1 value: 56.10000000000001 - type: precision_at_10 value: 8.68 - type: precision_at_100 value: 0.964 - type: precision_at_1000 value: 0.098 - type: precision_at_3 value: 24.4 - type: precision_at_5 value: 16.1 - type: recall_at_1 value: 56.10000000000001 - type: recall_at_10 value: 86.8 - type: recall_at_100 value: 96.39999999999999 - type: recall_at_1000 value: 98.3 - type: recall_at_3 value: 73.2 - type: recall_at_5 value: 80.5 - task: type: Classification dataset: name: MTEB IFlyTek type: C-MTEB/IFlyTek-classification config: default split: validation revision: 421605374b29664c5fc098418fe20ada9bd55f8a metrics: - type: accuracy value: 54.52096960369373 - type: f1 value: 40.930845295808695 - task: type: Classification dataset: name: MTEB JDReview type: C-MTEB/JDReview-classification config: default split: test revision: b7c64bd89eb87f8ded463478346f76731f07bf8b metrics: - type: accuracy value: 86.51031894934334 - type: ap value: 55.9516014323483 - type: f1 value: 81.54813679326381 - task: type: STS dataset: name: MTEB LCQMC type: C-MTEB/LCQMC config: default split: test revision: 17f9b096f80380fce5ed12a9be8be7784b337daf metrics: - type: cos_sim_pearson value: 69.67437838574276 - type: cos_sim_spearman value: 73.81314174653045 - type: euclidean_pearson value: 72.63430276680275 - type: euclidean_spearman value: 73.81358736777001 - type: manhattan_pearson value: 72.58743833842829 - type: 
manhattan_spearman value: 73.7590419009179 - task: type: Reranking dataset: name: MTEB MMarcoReranking type: C-MTEB/Mmarco-reranking config: default split: dev revision: None metrics: - type: map value: 31.648613483640254 - type: mrr value: 30.37420634920635 - task: type: Retrieval dataset: name: MTEB MMarcoRetrieval type: C-MTEB/MMarcoRetrieval config: default split: dev revision: 539bbde593d947e2a124ba72651aafc09eb33fc2 metrics: - type: map_at_1 value: 73.28099999999999 - type: map_at_10 value: 81.977 - type: map_at_100 value: 82.222 - type: map_at_1000 value: 82.22699999999999 - type: map_at_3 value: 80.441 - type: map_at_5 value: 81.46600000000001 - type: mrr_at_1 value: 75.673 - type: mrr_at_10 value: 82.41000000000001 - type: mrr_at_100 value: 82.616 - type: mrr_at_1000 value: 82.621 - type: mrr_at_3 value: 81.094 - type: mrr_at_5 value: 81.962 - type: ndcg_at_1 value: 75.673 - type: ndcg_at_10 value: 85.15599999999999 - type: ndcg_at_100 value: 86.151 - type: ndcg_at_1000 value: 86.26899999999999 - type: ndcg_at_3 value: 82.304 - type: ndcg_at_5 value: 84.009 - type: precision_at_1 value: 75.673 - type: precision_at_10 value: 10.042 - type: precision_at_100 value: 1.052 - type: precision_at_1000 value: 0.106 - type: precision_at_3 value: 30.673000000000002 - type: precision_at_5 value: 19.326999999999998 - type: recall_at_1 value: 73.28099999999999 - type: recall_at_10 value: 94.446 - type: recall_at_100 value: 98.737 - type: recall_at_1000 value: 99.649 - type: recall_at_3 value: 86.984 - type: recall_at_5 value: 91.024 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (zh-CN) type: mteb/amazon_massive_intent config: zh-CN split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 81.08607935440484 - type: f1 value: 78.24879986066307 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (zh-CN) type: mteb/amazon_massive_scenario config: zh-CN split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 86.05917955615332 - type: f1 value: 85.05279279434997 - task: type: Retrieval dataset: name: MTEB MedicalRetrieval type: C-MTEB/MedicalRetrieval config: default split: dev revision: 2039188fb5800a9803ba5048df7b76e6fb151fc6 metrics: - type: map_at_1 value: 56.2 - type: map_at_10 value: 62.57899999999999 - type: map_at_100 value: 63.154999999999994 - type: map_at_1000 value: 63.193 - type: map_at_3 value: 61.217 - type: map_at_5 value: 62.012 - type: mrr_at_1 value: 56.3 - type: mrr_at_10 value: 62.629000000000005 - type: mrr_at_100 value: 63.205999999999996 - type: mrr_at_1000 value: 63.244 - type: mrr_at_3 value: 61.267 - type: mrr_at_5 value: 62.062 - type: ndcg_at_1 value: 56.2 - type: ndcg_at_10 value: 65.592 - type: ndcg_at_100 value: 68.657 - type: ndcg_at_1000 value: 69.671 - type: ndcg_at_3 value: 62.808 - type: ndcg_at_5 value: 64.24499999999999 - type: precision_at_1 value: 56.2 - type: precision_at_10 value: 7.5 - type: precision_at_100 value: 0.899 - type: precision_at_1000 value: 0.098 - type: precision_at_3 value: 22.467000000000002 - type: precision_at_5 value: 14.180000000000001 - type: recall_at_1 value: 56.2 - type: recall_at_10 value: 75.0 - type: recall_at_100 value: 89.9 - type: recall_at_1000 value: 97.89999999999999 - type: recall_at_3 value: 67.4 - type: recall_at_5 value: 70.89999999999999 - task: type: Classification dataset: name: MTEB MultilingualSentiment type: C-MTEB/MultilingualSentiment-classification config: default split: 
validation revision: 46958b007a63fdbf239b7672c25d0bea67b5ea1a metrics: - type: accuracy value: 76.87666666666667 - type: f1 value: 76.7317686219665 - task: type: PairClassification dataset: name: MTEB Ocnli type: C-MTEB/OCNLI config: default split: validation revision: 66e76a618a34d6d565d5538088562851e6daa7ec metrics: - type: cos_sim_accuracy value: 79.64266377910124 - type: cos_sim_ap value: 84.78274442344829 - type: cos_sim_f1 value: 81.16947472745292 - type: cos_sim_precision value: 76.47058823529412 - type: cos_sim_recall value: 86.48363252375924 - type: dot_accuracy value: 79.64266377910124 - type: dot_ap value: 84.7851404063692 - type: dot_f1 value: 81.16947472745292 - type: dot_precision value: 76.47058823529412 - type: dot_recall value: 86.48363252375924 - type: euclidean_accuracy value: 79.64266377910124 - type: euclidean_ap value: 84.78068373762378 - type: euclidean_f1 value: 81.14794656110837 - type: euclidean_precision value: 76.35009310986965 - type: euclidean_recall value: 86.58922914466737 - type: manhattan_accuracy value: 79.48023822414727 - type: manhattan_ap value: 84.72928897427576 - type: manhattan_f1 value: 81.32084770823064 - type: manhattan_precision value: 76.24768946395564 - type: manhattan_recall value: 87.11721224920802 - type: max_accuracy value: 79.64266377910124 - type: max_ap value: 84.7851404063692 - type: max_f1 value: 81.32084770823064 - task: type: Classification dataset: name: MTEB OnlineShopping type: C-MTEB/OnlineShopping-classification config: default split: test revision: e610f2ebd179a8fda30ae534c3878750a96db120 metrics: - type: accuracy value: 94.3 - type: ap value: 92.8664032274438 - type: f1 value: 94.29311102997727 - task: type: STS dataset: name: MTEB PAWSX type: C-MTEB/PAWSX config: default split: test revision: 9c6a90e430ac22b5779fb019a23e820b11a8b5e1 metrics: - type: cos_sim_pearson value: 48.51392279882909 - type: cos_sim_spearman value: 54.06338895994974 - type: euclidean_pearson value: 52.58480559573412 - type: euclidean_spearman value: 54.06417276612201 - type: manhattan_pearson value: 52.69525121721343 - type: manhattan_spearman value: 54.048147455389675 - task: type: STS dataset: name: MTEB QBQTC type: C-MTEB/QBQTC config: default split: test revision: 790b0510dc52b1553e8c49f3d2afb48c0e5c48b7 metrics: - type: cos_sim_pearson value: 29.728387290757325 - type: cos_sim_spearman value: 31.366121633635284 - type: euclidean_pearson value: 29.14588368552961 - type: euclidean_spearman value: 31.36764411112844 - type: manhattan_pearson value: 29.63517350523121 - type: manhattan_spearman value: 31.94157020583762 - task: type: STS dataset: name: MTEB STS22 (zh) type: mteb/sts22-crosslingual-sts config: zh split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 63.64868296271406 - type: cos_sim_spearman value: 66.12800618164744 - type: euclidean_pearson value: 63.21405767340238 - type: euclidean_spearman value: 66.12786567790748 - type: manhattan_pearson value: 64.04300276525848 - type: manhattan_spearman value: 66.5066857145652 - task: type: STS dataset: name: MTEB STSB type: C-MTEB/STSB config: default split: test revision: 0cde68302b3541bb8b3c340dc0644b0b745b3dc0 metrics: - type: cos_sim_pearson value: 81.2302623912794 - type: cos_sim_spearman value: 81.16833673266562 - type: euclidean_pearson value: 79.47647843876024 - type: euclidean_spearman value: 81.16944349524972 - type: manhattan_pearson value: 79.84947238492208 - type: manhattan_spearman value: 81.64626599410026 - task: type: Reranking 
dataset: name: MTEB T2Reranking type: C-MTEB/T2Reranking config: default split: dev revision: 76631901a18387f85eaa53e5450019b87ad58ef9 metrics: - type: map value: 67.80129586475687 - type: mrr value: 77.77402311635554 - task: type: Retrieval dataset: name: MTEB T2Retrieval type: C-MTEB/T2Retrieval config: default split: dev revision: 8731a845f1bf500a4f111cf1070785c793d10e64 metrics: - type: map_at_1 value: 28.666999999999998 - type: map_at_10 value: 81.063 - type: map_at_100 value: 84.504 - type: map_at_1000 value: 84.552 - type: map_at_3 value: 56.897 - type: map_at_5 value: 70.073 - type: mrr_at_1 value: 92.087 - type: mrr_at_10 value: 94.132 - type: mrr_at_100 value: 94.19800000000001 - type: mrr_at_1000 value: 94.19999999999999 - type: mrr_at_3 value: 93.78999999999999 - type: mrr_at_5 value: 94.002 - type: ndcg_at_1 value: 92.087 - type: ndcg_at_10 value: 87.734 - type: ndcg_at_100 value: 90.736 - type: ndcg_at_1000 value: 91.184 - type: ndcg_at_3 value: 88.78 - type: ndcg_at_5 value: 87.676 - type: precision_at_1 value: 92.087 - type: precision_at_10 value: 43.46 - type: precision_at_100 value: 5.07 - type: precision_at_1000 value: 0.518 - type: precision_at_3 value: 77.49000000000001 - type: precision_at_5 value: 65.194 - type: recall_at_1 value: 28.666999999999998 - type: recall_at_10 value: 86.632 - type: recall_at_100 value: 96.646 - type: recall_at_1000 value: 98.917 - type: recall_at_3 value: 58.333999999999996 - type: recall_at_5 value: 72.974 - task: type: Classification dataset: name: MTEB TNews type: C-MTEB/TNews-classification config: default split: validation revision: 317f262bf1e6126357bbe89e875451e4b0938fe4 metrics: - type: accuracy value: 52.971999999999994 - type: f1 value: 50.2898280984929 - task: type: Clustering dataset: name: MTEB ThuNewsClusteringP2P type: C-MTEB/ThuNewsClusteringP2P config: default split: test revision: 5798586b105c0434e4f0fe5e767abe619442cf93 metrics: - type: v_measure value: 86.0797948663824 - task: type: Clustering dataset: name: MTEB ThuNewsClusteringS2S type: C-MTEB/ThuNewsClusteringS2S config: default split: test revision: 8a8b2caeda43f39e13c4bc5bea0f8a667896e10d metrics: - type: v_measure value: 85.10759092255017 - task: type: Retrieval dataset: name: MTEB VideoRetrieval type: C-MTEB/VideoRetrieval config: default split: dev revision: 58c2597a5943a2ba48f4668c3b90d796283c5639 metrics: - type: map_at_1 value: 65.60000000000001 - type: map_at_10 value: 74.773 - type: map_at_100 value: 75.128 - type: map_at_1000 value: 75.136 - type: map_at_3 value: 73.05 - type: map_at_5 value: 74.13499999999999 - type: mrr_at_1 value: 65.60000000000001 - type: mrr_at_10 value: 74.773 - type: mrr_at_100 value: 75.128 - type: mrr_at_1000 value: 75.136 - type: mrr_at_3 value: 73.05 - type: mrr_at_5 value: 74.13499999999999 - type: ndcg_at_1 value: 65.60000000000001 - type: ndcg_at_10 value: 78.84299999999999 - type: ndcg_at_100 value: 80.40899999999999 - type: ndcg_at_1000 value: 80.57 - type: ndcg_at_3 value: 75.40599999999999 - type: ndcg_at_5 value: 77.351 - type: precision_at_1 value: 65.60000000000001 - type: precision_at_10 value: 9.139999999999999 - type: precision_at_100 value: 0.984 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 27.400000000000002 - type: precision_at_5 value: 17.380000000000003 - type: recall_at_1 value: 65.60000000000001 - type: recall_at_10 value: 91.4 - type: recall_at_100 value: 98.4 - type: recall_at_1000 value: 99.6 - type: recall_at_3 value: 82.19999999999999 - type: recall_at_5 value: 86.9 - task: type: 
Classification dataset: name: MTEB Waimai type: C-MTEB/waimai-classification config: default split: test revision: 339287def212450dcaa9df8c22bf93e9980c7023 metrics: - type: accuracy value: 89.47 - type: ap value: 75.59561751845389 - type: f1 value: 87.95207751382563 - task: type: Clustering dataset: name: MTEB AlloProfClusteringP2P type: lyon-nlp/alloprof config: default split: test revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b metrics: - type: v_measure value: 76.05592323841036 - type: v_measure value: 64.51718058866508 - task: type: Reranking dataset: name: MTEB AlloprofReranking type: lyon-nlp/mteb-fr-reranking-alloprof-s2p config: default split: test revision: 666fdacebe0291776e86f29345663dfaf80a0db9 metrics: - type: map value: 73.08278490943373 - type: mrr value: 74.66561454570449 - task: type: Retrieval dataset: name: MTEB AlloprofRetrieval type: lyon-nlp/alloprof config: default split: test revision: 392ba3f5bcc8c51f578786c1fc3dae648662cb9b metrics: - type: map_at_1 value: 38.912 - type: map_at_10 value: 52.437999999999995 - type: map_at_100 value: 53.38 - type: map_at_1000 value: 53.427 - type: map_at_3 value: 48.879 - type: map_at_5 value: 50.934000000000005 - type: mrr_at_1 value: 44.085 - type: mrr_at_10 value: 55.337 - type: mrr_at_100 value: 56.016999999999996 - type: mrr_at_1000 value: 56.043 - type: mrr_at_3 value: 52.55499999999999 - type: mrr_at_5 value: 54.20399999999999 - type: ndcg_at_1 value: 44.085 - type: ndcg_at_10 value: 58.876 - type: ndcg_at_100 value: 62.714000000000006 - type: ndcg_at_1000 value: 63.721000000000004 - type: ndcg_at_3 value: 52.444 - type: ndcg_at_5 value: 55.692 - type: precision_at_1 value: 44.085 - type: precision_at_10 value: 9.21 - type: precision_at_100 value: 1.164 - type: precision_at_1000 value: 0.128 - type: precision_at_3 value: 23.043 - type: precision_at_5 value: 15.898000000000001 - type: recall_at_1 value: 38.912 - type: recall_at_10 value: 75.577 - type: recall_at_100 value: 92.038 - type: recall_at_1000 value: 99.325 - type: recall_at_3 value: 58.592 - type: recall_at_5 value: 66.235 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (fr) type: mteb/amazon_reviews_multi config: fr split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 55.532000000000004 - type: f1 value: 52.5783943471605 - task: type: Retrieval dataset: name: MTEB BSARDRetrieval type: maastrichtlawtech/bsard config: default split: test revision: 5effa1b9b5fa3b0f9e12523e6e43e5f86a6e6d59 metrics: - type: map_at_1 value: 8.108 - type: map_at_10 value: 14.710999999999999 - type: map_at_100 value: 15.891 - type: map_at_1000 value: 15.983 - type: map_at_3 value: 12.237 - type: map_at_5 value: 13.679 - type: mrr_at_1 value: 8.108 - type: mrr_at_10 value: 14.710999999999999 - type: mrr_at_100 value: 15.891 - type: mrr_at_1000 value: 15.983 - type: mrr_at_3 value: 12.237 - type: mrr_at_5 value: 13.679 - type: ndcg_at_1 value: 8.108 - type: ndcg_at_10 value: 18.796 - type: ndcg_at_100 value: 25.098 - type: ndcg_at_1000 value: 27.951999999999998 - type: ndcg_at_3 value: 13.712 - type: ndcg_at_5 value: 16.309 - type: precision_at_1 value: 8.108 - type: precision_at_10 value: 3.198 - type: precision_at_100 value: 0.626 - type: precision_at_1000 value: 0.086 - type: precision_at_3 value: 6.006 - type: precision_at_5 value: 4.865 - type: recall_at_1 value: 8.108 - type: recall_at_10 value: 31.982 - type: recall_at_100 value: 62.613 - type: recall_at_1000 value: 86.036 - type: recall_at_3 value: 18.018 - 
type: recall_at_5 value: 24.324 - task: type: Clustering dataset: name: MTEB HALClusteringS2S type: lyon-nlp/clustering-hal-s2s config: default split: test revision: e06ebbbb123f8144bef1a5d18796f3dec9ae2915 metrics: - type: v_measure value: 30.833269778867116 - task: type: Clustering dataset: name: MTEB MLSUMClusteringP2P type: mlsum config: default split: test revision: b5d54f8f3b61ae17845046286940f03c6bc79bc7 metrics: - type: v_measure value: 50.0281928004713 - type: v_measure value: 43.699961510636534 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (fr) type: mteb/mtop_domain config: fr split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 96.68963357344191 - type: f1 value: 96.45175170820961 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (fr) type: mteb/mtop_intent config: fr split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 87.46946445349202 - type: f1 value: 65.79860440988624 - task: type: Classification dataset: name: MTEB MasakhaNEWSClassification (fra) type: masakhane/masakhanews config: fra split: test revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60 metrics: - type: accuracy value: 82.60663507109005 - type: f1 value: 77.20462646604777 - task: type: Clustering dataset: name: MTEB MasakhaNEWSClusteringP2P (fra) type: masakhane/masakhanews config: fra split: test revision: 8ccc72e69e65f40c70e117d8b3c08306bb788b60 metrics: - type: v_measure value: 60.19311264967803 - type: v_measure value: 63.6235764409785 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (fr) type: mteb/amazon_massive_intent config: fr split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 81.65097511768661 - type: f1 value: 78.77796091490924 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (fr) type: mteb/amazon_massive_scenario config: fr split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 86.64425016812373 - type: f1 value: 85.4912728670017 - task: type: Retrieval dataset: name: MTEB MintakaRetrieval (fr) type: jinaai/mintakaqa config: fr split: test revision: efa78cc2f74bbcd21eff2261f9e13aebe40b814e metrics: - type: map_at_1 value: 35.913000000000004 - type: map_at_10 value: 48.147 - type: map_at_100 value: 48.91 - type: map_at_1000 value: 48.949 - type: map_at_3 value: 45.269999999999996 - type: map_at_5 value: 47.115 - type: mrr_at_1 value: 35.913000000000004 - type: mrr_at_10 value: 48.147 - type: mrr_at_100 value: 48.91 - type: mrr_at_1000 value: 48.949 - type: mrr_at_3 value: 45.269999999999996 - type: mrr_at_5 value: 47.115 - type: ndcg_at_1 value: 35.913000000000004 - type: ndcg_at_10 value: 54.03 - type: ndcg_at_100 value: 57.839 - type: ndcg_at_1000 value: 58.925000000000004 - type: ndcg_at_3 value: 48.217999999999996 - type: ndcg_at_5 value: 51.56699999999999 - type: precision_at_1 value: 35.913000000000004 - type: precision_at_10 value: 7.244000000000001 - type: precision_at_100 value: 0.9039999999999999 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 18.905 - type: precision_at_5 value: 12.981000000000002 - type: recall_at_1 value: 35.913000000000004 - type: recall_at_10 value: 72.441 - type: recall_at_100 value: 90.41799999999999 - type: recall_at_1000 value: 99.099 - type: recall_at_3 value: 56.716 - type: recall_at_5 value: 64.90599999999999 - task: type: PairClassification dataset: name: MTEB 
OpusparcusPC (fr) type: GEM/opusparcus config: fr split: test revision: 9e9b1f8ef51616073f47f306f7f47dd91663f86a metrics: - type: cos_sim_accuracy value: 99.90069513406156 - type: cos_sim_ap value: 100.0 - type: cos_sim_f1 value: 99.95032290114257 - type: cos_sim_precision value: 100.0 - type: cos_sim_recall value: 99.90069513406156 - type: dot_accuracy value: 99.90069513406156 - type: dot_ap value: 100.0 - type: dot_f1 value: 99.95032290114257 - type: dot_precision value: 100.0 - type: dot_recall value: 99.90069513406156 - type: euclidean_accuracy value: 99.90069513406156 - type: euclidean_ap value: 100.0 - type: euclidean_f1 value: 99.95032290114257 - type: euclidean_precision value: 100.0 - type: euclidean_recall value: 99.90069513406156 - type: manhattan_accuracy value: 99.90069513406156 - type: manhattan_ap value: 100.0 - type: manhattan_f1 value: 99.95032290114257 - type: manhattan_precision value: 100.0 - type: manhattan_recall value: 99.90069513406156 - type: max_accuracy value: 99.90069513406156 - type: max_ap value: 100.0 - type: max_f1 value: 99.95032290114257 - task: type: PairClassification dataset: name: MTEB PawsX (fr) type: paws-x config: fr split: test revision: 8a04d940a42cd40658986fdd8e3da561533a3646 metrics: - type: cos_sim_accuracy value: 75.25 - type: cos_sim_ap value: 80.86376001270014 - type: cos_sim_f1 value: 73.65945437441204 - type: cos_sim_precision value: 64.02289452166802 - type: cos_sim_recall value: 86.71096345514951 - type: dot_accuracy value: 75.25 - type: dot_ap value: 80.93686107633002 - type: dot_f1 value: 73.65945437441204 - type: dot_precision value: 64.02289452166802 - type: dot_recall value: 86.71096345514951 - type: euclidean_accuracy value: 75.25 - type: euclidean_ap value: 80.86379136218862 - type: euclidean_f1 value: 73.65945437441204 - type: euclidean_precision value: 64.02289452166802 - type: euclidean_recall value: 86.71096345514951 - type: manhattan_accuracy value: 75.3 - type: manhattan_ap value: 80.87826606097734 - type: manhattan_f1 value: 73.68421052631581 - type: manhattan_precision value: 64.0 - type: manhattan_recall value: 86.82170542635659 - type: max_accuracy value: 75.3 - type: max_ap value: 80.93686107633002 - type: max_f1 value: 73.68421052631581 - task: type: STS dataset: name: MTEB SICKFr type: Lajavaness/SICK-fr config: default split: test revision: e077ab4cf4774a1e36d86d593b150422fafd8e8a metrics: - type: cos_sim_pearson value: 81.42349425981143 - type: cos_sim_spearman value: 78.90454327031226 - type: euclidean_pearson value: 78.39086497435166 - type: euclidean_spearman value: 78.9046133980509 - type: manhattan_pearson value: 78.63743094286502 - type: manhattan_spearman value: 79.12136348449269 - task: type: STS dataset: name: MTEB STS22 (fr) type: mteb/sts22-crosslingual-sts config: fr split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 81.452697919749 - type: cos_sim_spearman value: 82.58116836039301 - type: euclidean_pearson value: 81.04038478932786 - type: euclidean_spearman value: 82.58116836039301 - type: manhattan_pearson value: 81.37075396187771 - type: manhattan_spearman value: 82.73678231355368 - task: type: STS dataset: name: MTEB STSBenchmarkMultilingualSTS (fr) type: stsb_multi_mt config: fr split: test revision: 93d57ef91790589e3ce9c365164337a8a78b7632 metrics: - type: cos_sim_pearson value: 85.7419764013806 - type: cos_sim_spearman value: 85.46085808849622 - type: euclidean_pearson value: 83.70449639870063 - type: euclidean_spearman value: 85.46159013076233 - 
type: manhattan_pearson value: 83.95259510313929 - type: manhattan_spearman value: 85.8029724659458 - task: type: Summarization dataset: name: MTEB SummEvalFr type: lyon-nlp/summarization-summeval-fr-p2p config: default split: test revision: b385812de6a9577b6f4d0f88c6a6e35395a94054 metrics: - type: cos_sim_pearson value: 32.61063271753325 - type: cos_sim_spearman value: 31.454589417353603 - type: dot_pearson value: 32.6106288643431 - type: dot_spearman value: 31.454589417353603 - task: type: Reranking dataset: name: MTEB SyntecReranking type: lyon-nlp/mteb-fr-reranking-syntec-s2p config: default split: test revision: b205c5084a0934ce8af14338bf03feb19499c84d metrics: - type: map value: 84.31666666666666 - type: mrr value: 84.31666666666666 - task: type: Retrieval dataset: name: MTEB SyntecRetrieval type: lyon-nlp/mteb-fr-retrieval-syntec-s2p config: default split: test revision: 77f7e271bf4a92b24fce5119f3486b583ca016ff metrics: - type: map_at_1 value: 63.0 - type: map_at_10 value: 73.471 - type: map_at_100 value: 73.87 - type: map_at_1000 value: 73.87 - type: map_at_3 value: 70.5 - type: map_at_5 value: 73.05 - type: mrr_at_1 value: 63.0 - type: mrr_at_10 value: 73.471 - type: mrr_at_100 value: 73.87 - type: mrr_at_1000 value: 73.87 - type: mrr_at_3 value: 70.5 - type: mrr_at_5 value: 73.05 - type: ndcg_at_1 value: 63.0 - type: ndcg_at_10 value: 78.255 - type: ndcg_at_100 value: 79.88 - type: ndcg_at_1000 value: 79.88 - type: ndcg_at_3 value: 72.702 - type: ndcg_at_5 value: 77.264 - type: precision_at_1 value: 63.0 - type: precision_at_10 value: 9.3 - type: precision_at_100 value: 1.0 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 26.333000000000002 - type: precision_at_5 value: 18.0 - type: recall_at_1 value: 63.0 - type: recall_at_10 value: 93.0 - type: recall_at_100 value: 100.0 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 79.0 - type: recall_at_5 value: 90.0 - task: type: Retrieval dataset: name: MTEB XPQARetrieval (fr) type: jinaai/xpqa config: fr split: test revision: c99d599f0a6ab9b85b065da6f9d94f9cf731679f metrics: - type: map_at_1 value: 40.338 - type: map_at_10 value: 61.927 - type: map_at_100 value: 63.361999999999995 - type: map_at_1000 value: 63.405 - type: map_at_3 value: 55.479 - type: map_at_5 value: 59.732 - type: mrr_at_1 value: 63.551 - type: mrr_at_10 value: 71.006 - type: mrr_at_100 value: 71.501 - type: mrr_at_1000 value: 71.509 - type: mrr_at_3 value: 69.07 - type: mrr_at_5 value: 70.165 - type: ndcg_at_1 value: 63.551 - type: ndcg_at_10 value: 68.297 - type: ndcg_at_100 value: 73.13199999999999 - type: ndcg_at_1000 value: 73.751 - type: ndcg_at_3 value: 62.999 - type: ndcg_at_5 value: 64.89 - type: precision_at_1 value: 63.551 - type: precision_at_10 value: 15.661 - type: precision_at_100 value: 1.9789999999999999 - type: precision_at_1000 value: 0.207 - type: precision_at_3 value: 38.273 - type: precision_at_5 value: 27.61 - type: recall_at_1 value: 40.338 - type: recall_at_10 value: 77.267 - type: recall_at_100 value: 95.892 - type: recall_at_1000 value: 99.75500000000001 - type: recall_at_3 value: 60.36 - type: recall_at_5 value: 68.825 - task: type: Clustering dataset: name: MTEB 8TagsClustering type: PL-MTEB/8tags-clustering config: default split: test revision: None metrics: - type: v_measure value: 51.36126303874126 - task: type: Classification dataset: name: MTEB AllegroReviews type: PL-MTEB/allegro-reviews config: default split: test revision: None metrics: - type: accuracy value: 67.13717693836979 - type: f1 value: 
57.27609848003782 - task: type: Retrieval dataset: name: MTEB ArguAna-PL type: clarin-knext/arguana-pl config: default split: test revision: 63fc86750af76253e8c760fc9e534bbf24d260a2 metrics: - type: map_at_1 value: 35.276999999999994 - type: map_at_10 value: 51.086 - type: map_at_100 value: 51.788000000000004 - type: map_at_1000 value: 51.791 - type: map_at_3 value: 46.147 - type: map_at_5 value: 49.078 - type: mrr_at_1 value: 35.917 - type: mrr_at_10 value: 51.315999999999995 - type: mrr_at_100 value: 52.018 - type: mrr_at_1000 value: 52.022 - type: mrr_at_3 value: 46.349000000000004 - type: mrr_at_5 value: 49.297000000000004 - type: ndcg_at_1 value: 35.276999999999994 - type: ndcg_at_10 value: 59.870999999999995 - type: ndcg_at_100 value: 62.590999999999994 - type: ndcg_at_1000 value: 62.661 - type: ndcg_at_3 value: 49.745 - type: ndcg_at_5 value: 55.067 - type: precision_at_1 value: 35.276999999999994 - type: precision_at_10 value: 8.791 - type: precision_at_100 value: 0.991 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 20.057 - type: precision_at_5 value: 14.637 - type: recall_at_1 value: 35.276999999999994 - type: recall_at_10 value: 87.909 - type: recall_at_100 value: 99.14699999999999 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 60.171 - type: recall_at_5 value: 73.18599999999999 - task: type: Classification dataset: name: MTEB CBD type: PL-MTEB/cbd config: default split: test revision: None metrics: - type: accuracy value: 78.03000000000002 - type: ap value: 29.12548553897622 - type: f1 value: 66.54857118886073 - task: type: PairClassification dataset: name: MTEB CDSC-E type: PL-MTEB/cdsce-pairclassification config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 89.0 - type: cos_sim_ap value: 76.75437826834582 - type: cos_sim_f1 value: 66.4850136239782 - type: cos_sim_precision value: 68.92655367231639 - type: cos_sim_recall value: 64.21052631578948 - type: dot_accuracy value: 89.0 - type: dot_ap value: 76.75437826834582 - type: dot_f1 value: 66.4850136239782 - type: dot_precision value: 68.92655367231639 - type: dot_recall value: 64.21052631578948 - type: euclidean_accuracy value: 89.0 - type: euclidean_ap value: 76.75437826834582 - type: euclidean_f1 value: 66.4850136239782 - type: euclidean_precision value: 68.92655367231639 - type: euclidean_recall value: 64.21052631578948 - type: manhattan_accuracy value: 89.0 - type: manhattan_ap value: 76.66074220647083 - type: manhattan_f1 value: 66.47058823529412 - type: manhattan_precision value: 75.33333333333333 - type: manhattan_recall value: 59.473684210526315 - type: max_accuracy value: 89.0 - type: max_ap value: 76.75437826834582 - type: max_f1 value: 66.4850136239782 - task: type: STS dataset: name: MTEB CDSC-R type: PL-MTEB/cdscr-sts config: default split: test revision: None metrics: - type: cos_sim_pearson value: 93.12903172428328 - type: cos_sim_spearman value: 92.66381487060741 - type: euclidean_pearson value: 90.37278396708922 - type: euclidean_spearman value: 92.66381487060741 - type: manhattan_pearson value: 90.32503296540962 - type: manhattan_spearman value: 92.6902938354313 - task: type: Retrieval dataset: name: MTEB DBPedia-PL type: clarin-knext/dbpedia-pl config: default split: test revision: 76afe41d9af165cc40999fcaa92312b8b012064a metrics: - type: map_at_1 value: 8.83 - type: map_at_10 value: 18.326 - type: map_at_100 value: 26.496 - type: map_at_1000 value: 28.455000000000002 - type: map_at_3 value: 12.933 - type: map_at_5 value: 15.168000000000001 
- type: mrr_at_1 value: 66.0 - type: mrr_at_10 value: 72.76700000000001 - type: mrr_at_100 value: 73.203 - type: mrr_at_1000 value: 73.219 - type: mrr_at_3 value: 71.458 - type: mrr_at_5 value: 72.246 - type: ndcg_at_1 value: 55.375 - type: ndcg_at_10 value: 41.3 - type: ndcg_at_100 value: 45.891 - type: ndcg_at_1000 value: 52.905 - type: ndcg_at_3 value: 46.472 - type: ndcg_at_5 value: 43.734 - type: precision_at_1 value: 66.0 - type: precision_at_10 value: 33.074999999999996 - type: precision_at_100 value: 11.094999999999999 - type: precision_at_1000 value: 2.374 - type: precision_at_3 value: 48.583 - type: precision_at_5 value: 42.0 - type: recall_at_1 value: 8.83 - type: recall_at_10 value: 22.587 - type: recall_at_100 value: 50.61600000000001 - type: recall_at_1000 value: 73.559 - type: recall_at_3 value: 13.688 - type: recall_at_5 value: 16.855 - task: type: Retrieval dataset: name: MTEB FiQA-PL type: clarin-knext/fiqa-pl config: default split: test revision: 2e535829717f8bf9dc829b7f911cc5bbd4e6608e metrics: - type: map_at_1 value: 20.587 - type: map_at_10 value: 33.095 - type: map_at_100 value: 35.24 - type: map_at_1000 value: 35.429 - type: map_at_3 value: 28.626 - type: map_at_5 value: 31.136999999999997 - type: mrr_at_1 value: 40.586 - type: mrr_at_10 value: 49.033 - type: mrr_at_100 value: 49.952999999999996 - type: mrr_at_1000 value: 49.992 - type: mrr_at_3 value: 46.553 - type: mrr_at_5 value: 48.035 - type: ndcg_at_1 value: 40.586 - type: ndcg_at_10 value: 41.046 - type: ndcg_at_100 value: 48.586 - type: ndcg_at_1000 value: 51.634 - type: ndcg_at_3 value: 36.773 - type: ndcg_at_5 value: 38.389 - type: precision_at_1 value: 40.586 - type: precision_at_10 value: 11.466 - type: precision_at_100 value: 1.909 - type: precision_at_1000 value: 0.245 - type: precision_at_3 value: 24.434 - type: precision_at_5 value: 18.426000000000002 - type: recall_at_1 value: 20.587 - type: recall_at_10 value: 47.986000000000004 - type: recall_at_100 value: 75.761 - type: recall_at_1000 value: 94.065 - type: recall_at_3 value: 33.339 - type: recall_at_5 value: 39.765 - task: type: Retrieval dataset: name: MTEB HotpotQA-PL type: clarin-knext/hotpotqa-pl config: default split: test revision: a0bd479ac97b4ccb5bd6ce320c415d0bb4beb907 metrics: - type: map_at_1 value: 40.878 - type: map_at_10 value: 58.775999999999996 - type: map_at_100 value: 59.632 - type: map_at_1000 value: 59.707 - type: map_at_3 value: 56.074 - type: map_at_5 value: 57.629 - type: mrr_at_1 value: 81.756 - type: mrr_at_10 value: 86.117 - type: mrr_at_100 value: 86.299 - type: mrr_at_1000 value: 86.30600000000001 - type: mrr_at_3 value: 85.345 - type: mrr_at_5 value: 85.832 - type: ndcg_at_1 value: 81.756 - type: ndcg_at_10 value: 67.608 - type: ndcg_at_100 value: 70.575 - type: ndcg_at_1000 value: 71.99600000000001 - type: ndcg_at_3 value: 63.723 - type: ndcg_at_5 value: 65.70700000000001 - type: precision_at_1 value: 81.756 - type: precision_at_10 value: 13.619 - type: precision_at_100 value: 1.5939999999999999 - type: precision_at_1000 value: 0.178 - type: precision_at_3 value: 39.604 - type: precision_at_5 value: 25.332 - type: recall_at_1 value: 40.878 - type: recall_at_10 value: 68.096 - type: recall_at_100 value: 79.696 - type: recall_at_1000 value: 89.082 - type: recall_at_3 value: 59.406000000000006 - type: recall_at_5 value: 63.329 - task: type: Retrieval dataset: name: MTEB MSMARCO-PL type: clarin-knext/msmarco-pl config: default split: test revision: 8634c07806d5cce3a6138e260e59b81760a0a640 metrics: - type: map_at_1 value: 
2.1839999999999997 - type: map_at_10 value: 11.346 - type: map_at_100 value: 30.325000000000003 - type: map_at_1000 value: 37.806 - type: map_at_3 value: 4.842 - type: map_at_5 value: 6.891 - type: mrr_at_1 value: 86.047 - type: mrr_at_10 value: 89.14699999999999 - type: mrr_at_100 value: 89.46600000000001 - type: mrr_at_1000 value: 89.46600000000001 - type: mrr_at_3 value: 89.14699999999999 - type: mrr_at_5 value: 89.14699999999999 - type: ndcg_at_1 value: 67.829 - type: ndcg_at_10 value: 62.222 - type: ndcg_at_100 value: 55.337 - type: ndcg_at_1000 value: 64.076 - type: ndcg_at_3 value: 68.12700000000001 - type: ndcg_at_5 value: 64.987 - type: precision_at_1 value: 86.047 - type: precision_at_10 value: 69.535 - type: precision_at_100 value: 32.93 - type: precision_at_1000 value: 6.6049999999999995 - type: precision_at_3 value: 79.845 - type: precision_at_5 value: 75.349 - type: recall_at_1 value: 2.1839999999999997 - type: recall_at_10 value: 12.866 - type: recall_at_100 value: 43.505 - type: recall_at_1000 value: 72.366 - type: recall_at_3 value: 4.947 - type: recall_at_5 value: 7.192 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (pl) type: mteb/amazon_massive_intent config: pl split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 80.75319435104238 - type: f1 value: 77.58961444860606 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (pl) type: mteb/amazon_massive_scenario config: pl split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 85.54472091459313 - type: f1 value: 84.29498563572106 - task: type: Retrieval dataset: name: MTEB NFCorpus-PL type: clarin-knext/nfcorpus-pl config: default split: test revision: 9a6f9567fda928260afed2de480d79c98bf0bec0 metrics: - type: map_at_1 value: 4.367 - type: map_at_10 value: 10.38 - type: map_at_100 value: 13.516 - type: map_at_1000 value: 14.982000000000001 - type: map_at_3 value: 7.367 - type: map_at_5 value: 8.59 - type: mrr_at_1 value: 41.486000000000004 - type: mrr_at_10 value: 48.886 - type: mrr_at_100 value: 49.657000000000004 - type: mrr_at_1000 value: 49.713 - type: mrr_at_3 value: 46.904 - type: mrr_at_5 value: 48.065000000000005 - type: ndcg_at_1 value: 40.402 - type: ndcg_at_10 value: 30.885 - type: ndcg_at_100 value: 28.393 - type: ndcg_at_1000 value: 37.428 - type: ndcg_at_3 value: 35.394999999999996 - type: ndcg_at_5 value: 33.391999999999996 - type: precision_at_1 value: 41.486000000000004 - type: precision_at_10 value: 23.437 - type: precision_at_100 value: 7.638 - type: precision_at_1000 value: 2.0389999999999997 - type: precision_at_3 value: 32.817 - type: precision_at_5 value: 28.915999999999997 - type: recall_at_1 value: 4.367 - type: recall_at_10 value: 14.655000000000001 - type: recall_at_100 value: 29.665999999999997 - type: recall_at_1000 value: 62.073 - type: recall_at_3 value: 8.51 - type: recall_at_5 value: 10.689 - task: type: Retrieval dataset: name: MTEB NQ-PL type: clarin-knext/nq-pl config: default split: test revision: f171245712cf85dd4700b06bef18001578d0ca8d metrics: - type: map_at_1 value: 28.616000000000003 - type: map_at_10 value: 41.626000000000005 - type: map_at_100 value: 42.689 - type: map_at_1000 value: 42.733 - type: map_at_3 value: 37.729 - type: map_at_5 value: 39.879999999999995 - type: mrr_at_1 value: 32.068000000000005 - type: mrr_at_10 value: 44.029 - type: mrr_at_100 value: 44.87 - type: mrr_at_1000 value: 44.901 - type: mrr_at_3 value: 40.687 - type: 
mrr_at_5 value: 42.625 - type: ndcg_at_1 value: 32.068000000000005 - type: ndcg_at_10 value: 48.449999999999996 - type: ndcg_at_100 value: 53.13 - type: ndcg_at_1000 value: 54.186 - type: ndcg_at_3 value: 40.983999999999995 - type: ndcg_at_5 value: 44.628 - type: precision_at_1 value: 32.068000000000005 - type: precision_at_10 value: 7.9750000000000005 - type: precision_at_100 value: 1.061 - type: precision_at_1000 value: 0.116 - type: precision_at_3 value: 18.404999999999998 - type: precision_at_5 value: 13.111 - type: recall_at_1 value: 28.616000000000003 - type: recall_at_10 value: 66.956 - type: recall_at_100 value: 87.657 - type: recall_at_1000 value: 95.548 - type: recall_at_3 value: 47.453 - type: recall_at_5 value: 55.87800000000001 - task: type: Classification dataset: name: MTEB PAC type: laugustyniak/abusive-clauses-pl config: default split: test revision: None metrics: - type: accuracy value: 69.04141326382856 - type: ap value: 77.47589122111044 - type: f1 value: 66.6332277374775 - task: type: PairClassification dataset: name: MTEB PPC type: PL-MTEB/ppc-pairclassification config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 86.4 - type: cos_sim_ap value: 94.1044939667201 - type: cos_sim_f1 value: 88.78048780487805 - type: cos_sim_precision value: 87.22044728434504 - type: cos_sim_recall value: 90.39735099337747 - type: dot_accuracy value: 86.4 - type: dot_ap value: 94.1044939667201 - type: dot_f1 value: 88.78048780487805 - type: dot_precision value: 87.22044728434504 - type: dot_recall value: 90.39735099337747 - type: euclidean_accuracy value: 86.4 - type: euclidean_ap value: 94.1044939667201 - type: euclidean_f1 value: 88.78048780487805 - type: euclidean_precision value: 87.22044728434504 - type: euclidean_recall value: 90.39735099337747 - type: manhattan_accuracy value: 86.4 - type: manhattan_ap value: 94.11438365697387 - type: manhattan_f1 value: 88.77968877968877 - type: manhattan_precision value: 87.84440842787681 - type: manhattan_recall value: 89.73509933774835 - type: max_accuracy value: 86.4 - type: max_ap value: 94.11438365697387 - type: max_f1 value: 88.78048780487805 - task: type: PairClassification dataset: name: MTEB PSC type: PL-MTEB/psc-pairclassification config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 97.86641929499072 - type: cos_sim_ap value: 99.36904211868182 - type: cos_sim_f1 value: 96.56203288490283 - type: cos_sim_precision value: 94.72140762463343 - type: cos_sim_recall value: 98.47560975609755 - type: dot_accuracy value: 97.86641929499072 - type: dot_ap value: 99.36904211868183 - type: dot_f1 value: 96.56203288490283 - type: dot_precision value: 94.72140762463343 - type: dot_recall value: 98.47560975609755 - type: euclidean_accuracy value: 97.86641929499072 - type: euclidean_ap value: 99.36904211868183 - type: euclidean_f1 value: 96.56203288490283 - type: euclidean_precision value: 94.72140762463343 - type: euclidean_recall value: 98.47560975609755 - type: manhattan_accuracy value: 98.14471243042672 - type: manhattan_ap value: 99.43359540492416 - type: manhattan_f1 value: 96.98795180722892 - type: manhattan_precision value: 95.83333333333334 - type: manhattan_recall value: 98.17073170731707 - type: max_accuracy value: 98.14471243042672 - type: max_ap value: 99.43359540492416 - type: max_f1 value: 96.98795180722892 - task: type: Classification dataset: name: MTEB PolEmo2.0-IN type: PL-MTEB/polemo2_in config: default split: test revision: None metrics: - type: accuracy value: 
89.39058171745152 - type: f1 value: 86.8552093529568 - task: type: Classification dataset: name: MTEB PolEmo2.0-OUT type: PL-MTEB/polemo2_out config: default split: test revision: None metrics: - type: accuracy value: 74.97975708502024 - type: f1 value: 58.73081628832407 - task: type: Retrieval dataset: name: MTEB Quora-PL type: clarin-knext/quora-pl config: default split: test revision: 0be27e93455051e531182b85e85e425aba12e9d4 metrics: - type: map_at_1 value: 64.917 - type: map_at_10 value: 78.74600000000001 - type: map_at_100 value: 79.501 - type: map_at_1000 value: 79.524 - type: map_at_3 value: 75.549 - type: map_at_5 value: 77.495 - type: mrr_at_1 value: 74.9 - type: mrr_at_10 value: 82.112 - type: mrr_at_100 value: 82.314 - type: mrr_at_1000 value: 82.317 - type: mrr_at_3 value: 80.745 - type: mrr_at_5 value: 81.607 - type: ndcg_at_1 value: 74.83999999999999 - type: ndcg_at_10 value: 83.214 - type: ndcg_at_100 value: 84.997 - type: ndcg_at_1000 value: 85.207 - type: ndcg_at_3 value: 79.547 - type: ndcg_at_5 value: 81.46600000000001 - type: precision_at_1 value: 74.83999999999999 - type: precision_at_10 value: 12.822 - type: precision_at_100 value: 1.506 - type: precision_at_1000 value: 0.156 - type: precision_at_3 value: 34.903 - type: precision_at_5 value: 23.16 - type: recall_at_1 value: 64.917 - type: recall_at_10 value: 92.27199999999999 - type: recall_at_100 value: 98.715 - type: recall_at_1000 value: 99.854 - type: recall_at_3 value: 82.04599999999999 - type: recall_at_5 value: 87.2 - task: type: Retrieval dataset: name: MTEB SCIDOCS-PL type: clarin-knext/scidocs-pl config: default split: test revision: 45452b03f05560207ef19149545f168e596c9337 metrics: - type: map_at_1 value: 3.51 - type: map_at_10 value: 9.046999999999999 - type: map_at_100 value: 10.823 - type: map_at_1000 value: 11.144 - type: map_at_3 value: 6.257 - type: map_at_5 value: 7.648000000000001 - type: mrr_at_1 value: 17.299999999999997 - type: mrr_at_10 value: 27.419 - type: mrr_at_100 value: 28.618 - type: mrr_at_1000 value: 28.685 - type: mrr_at_3 value: 23.817 - type: mrr_at_5 value: 25.927 - type: ndcg_at_1 value: 17.299999999999997 - type: ndcg_at_10 value: 16.084 - type: ndcg_at_100 value: 23.729 - type: ndcg_at_1000 value: 29.476999999999997 - type: ndcg_at_3 value: 14.327000000000002 - type: ndcg_at_5 value: 13.017999999999999 - type: precision_at_1 value: 17.299999999999997 - type: precision_at_10 value: 8.63 - type: precision_at_100 value: 1.981 - type: precision_at_1000 value: 0.336 - type: precision_at_3 value: 13.4 - type: precision_at_5 value: 11.700000000000001 - type: recall_at_1 value: 3.51 - type: recall_at_10 value: 17.518 - type: recall_at_100 value: 40.275 - type: recall_at_1000 value: 68.203 - type: recall_at_3 value: 8.155 - type: recall_at_5 value: 11.875 - task: type: PairClassification dataset: name: MTEB SICK-E-PL type: PL-MTEB/sicke-pl-pairclassification config: default split: test revision: None metrics: - type: cos_sim_accuracy value: 86.30248675091724 - type: cos_sim_ap value: 83.6756734006714 - type: cos_sim_f1 value: 74.97367497367497 - type: cos_sim_precision value: 73.91003460207612 - type: cos_sim_recall value: 76.06837606837607 - type: dot_accuracy value: 86.30248675091724 - type: dot_ap value: 83.6756734006714 - type: dot_f1 value: 74.97367497367497 - type: dot_precision value: 73.91003460207612 - type: dot_recall value: 76.06837606837607 - type: euclidean_accuracy value: 86.30248675091724 - type: euclidean_ap value: 83.67566984333091 - type: euclidean_f1 value: 
74.97367497367497 - type: euclidean_precision value: 73.91003460207612 - type: euclidean_recall value: 76.06837606837607 - type: manhattan_accuracy value: 86.28210354667753 - type: manhattan_ap value: 83.64216119130171 - type: manhattan_f1 value: 74.92152075340078 - type: manhattan_precision value: 73.4107997265892 - type: manhattan_recall value: 76.49572649572649 - type: max_accuracy value: 86.30248675091724 - type: max_ap value: 83.6756734006714 - type: max_f1 value: 74.97367497367497 - task: type: STS dataset: name: MTEB SICK-R-PL type: PL-MTEB/sickr-pl-sts config: default split: test revision: None metrics: - type: cos_sim_pearson value: 82.23295940859121 - type: cos_sim_spearman value: 78.89329160768719 - type: euclidean_pearson value: 79.56019107076818 - type: euclidean_spearman value: 78.89330209904084 - type: manhattan_pearson value: 79.76098513973719 - type: manhattan_spearman value: 79.05490162570123 - task: type: STS dataset: name: MTEB STS22 (pl) type: mteb/sts22-crosslingual-sts config: pl split: test revision: eea2b4fe26a775864c896887d910b76a8098ad3f metrics: - type: cos_sim_pearson value: 37.732606308062486 - type: cos_sim_spearman value: 41.01645667030284 - type: euclidean_pearson value: 26.61722556367085 - type: euclidean_spearman value: 41.01645667030284 - type: manhattan_pearson value: 26.60917378970807 - type: manhattan_spearman value: 41.51335727617614 - task: type: Retrieval dataset: name: MTEB SciFact-PL type: clarin-knext/scifact-pl config: default split: test revision: 47932a35f045ef8ed01ba82bf9ff67f6e109207e metrics: - type: map_at_1 value: 54.31700000000001 - type: map_at_10 value: 65.564 - type: map_at_100 value: 66.062 - type: map_at_1000 value: 66.08699999999999 - type: map_at_3 value: 62.592999999999996 - type: map_at_5 value: 63.888 - type: mrr_at_1 value: 56.99999999999999 - type: mrr_at_10 value: 66.412 - type: mrr_at_100 value: 66.85900000000001 - type: mrr_at_1000 value: 66.88 - type: mrr_at_3 value: 64.22200000000001 - type: mrr_at_5 value: 65.206 - type: ndcg_at_1 value: 56.99999999999999 - type: ndcg_at_10 value: 70.577 - type: ndcg_at_100 value: 72.879 - type: ndcg_at_1000 value: 73.45 - type: ndcg_at_3 value: 65.5 - type: ndcg_at_5 value: 67.278 - type: precision_at_1 value: 56.99999999999999 - type: precision_at_10 value: 9.667 - type: precision_at_100 value: 1.083 - type: precision_at_1000 value: 0.11299999999999999 - type: precision_at_3 value: 26.0 - type: precision_at_5 value: 16.933 - type: recall_at_1 value: 54.31700000000001 - type: recall_at_10 value: 85.056 - type: recall_at_100 value: 95.667 - type: recall_at_1000 value: 100.0 - type: recall_at_3 value: 71.0 - type: recall_at_5 value: 75.672 - task: type: Retrieval dataset: name: MTEB TRECCOVID-PL type: clarin-knext/trec-covid-pl config: default split: test revision: 81bcb408f33366c2a20ac54adafad1ae7e877fdd metrics: - type: map_at_1 value: 0.245 - type: map_at_10 value: 2.051 - type: map_at_100 value: 12.009 - type: map_at_1000 value: 27.448 - type: map_at_3 value: 0.721 - type: map_at_5 value: 1.13 - type: mrr_at_1 value: 88.0 - type: mrr_at_10 value: 93.0 - type: mrr_at_100 value: 93.0 - type: mrr_at_1000 value: 93.0 - type: mrr_at_3 value: 93.0 - type: mrr_at_5 value: 93.0 - type: ndcg_at_1 value: 85.0 - type: ndcg_at_10 value: 80.303 - type: ndcg_at_100 value: 61.23499999999999 - type: ndcg_at_1000 value: 52.978 - type: ndcg_at_3 value: 84.419 - type: ndcg_at_5 value: 82.976 - type: precision_at_1 value: 88.0 - type: precision_at_10 value: 83.39999999999999 - type: precision_at_100 
value: 61.96 - type: precision_at_1000 value: 22.648 - type: precision_at_3 value: 89.333 - type: precision_at_5 value: 87.2 - type: recall_at_1 value: 0.245 - type: recall_at_10 value: 2.193 - type: recall_at_100 value: 14.938 - type: recall_at_1000 value: 48.563 - type: recall_at_3 value: 0.738 - type: recall_at_5 value: 1.173 ---

# RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF

This model was converted to GGUF format from [`Alibaba-NLP/gte-Qwen2-7B-instruct`](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) using llama.cpp via ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/Alibaba-NLP/gte-Qwen2-7B-instruct) for more details on the model.

## Use with llama.cpp

Install llama.cpp through brew (works on Mac and Linux):

```bash
brew install llama.cpp
```

Invoke the llama.cpp server or the CLI.

### CLI:

```bash
llama-cli --hf-repo RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF --hf-file gte-qwen2-7b-instruct-q6_k.gguf -p "The meaning to life and the universe is"
```

### Server:

```bash
llama-server --hf-repo RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF --hf-file gte-qwen2-7b-instruct-q6_k.gguf -c 2048
```

Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the llama.cpp repo.

Step 1: Clone llama.cpp from GitHub.

```
git clone https://github.com/ggerganov/llama.cpp
```

Step 2: Move into the llama.cpp folder and build it with the `LLAMA_CURL=1` flag along with other hardware-specific flags (for example, `LLAMA_CUDA=1` for NVIDIA GPUs on Linux).

```
cd llama.cpp && LLAMA_CURL=1 make
```

Step 3: Run inference through the main binary.

```
./llama-cli --hf-repo RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF --hf-file gte-qwen2-7b-instruct-q6_k.gguf -p "The meaning to life and the universe is"
```

or

```
./llama-server --hf-repo RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF --hf-file gte-qwen2-7b-instruct-q6_k.gguf -c 2048
```
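Since gte-Qwen2-7B-instruct is primarily an embedding model, the generation prompts above mainly serve as a smoke test. Below is a small sketch of fetching embeddings from a running `llama-server` instance; it assumes the server was started with its embeddings support enabled (for example, an `--embeddings` flag) and that an OpenAI-compatible `/v1/embeddings` route is exposed. Both assumptions should be verified against your llama.cpp version.

```python
import requests

# Assumption: llama-server is running locally on the default port (8080) and
# was started with embeddings enabled, for example:
#   llama-server --hf-repo RcINS/gte-Qwen2-7B-instruct-Q6_K-GGUF \
#                --hf-file gte-qwen2-7b-instruct-q6_k.gguf --embeddings
# The OpenAI-compatible /v1/embeddings route is assumed here; check your
# llama.cpp build if the request returns 404.
resp = requests.post(
    "http://127.0.0.1:8080/v1/embeddings",
    json={"input": "The meaning to life and the universe is"},
)
resp.raise_for_status()
embedding = resp.json()["data"][0]["embedding"]
print(len(embedding))  # dimensionality of the returned vector
```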
[ "BIOSSES", "SCIFACT" ]
RomainDarous/large_directFourEpoch_additivePooling_noisedInit_mistranslationModel
RomainDarous
sentence-similarity
[ "sentence-transformers", "safetensors", "xlm-roberta", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:4460010", "loss:CoSENTLoss", "dataset:RomainDarous/corrupted_os_by_language", "arxiv:1908.10084", "base_model:RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel", "base_model:finetune:RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2025-02-28T19:24:36Z
2025-02-28T19:25:16+00:00
23
0
--- base_model: RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel datasets: - RomainDarous/corrupted_os_by_language library_name: sentence-transformers metrics: - pearson_cosine - spearman_cosine pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:4460010 - loss:CoSENTLoss widget: - source_sentence: Malformed target specific variable definition sentences: - Hedefe özgü değişken tanımı bozuk - Kan alle data in die gids lees - "слава Украине! героям слава!\uFEFF" - source_sentence: Can't write an inode bitmap sentences: - Skontrolujte stav aktualizácií alebo to skúste znova neskôr. - Malsukcesis skribi i nodan bitmapon - Zastępuje wersję GL obsługiwaną przez sterownik - source_sentence: Optimize soft proofing color transformations sentences: - 'arkadaslar biz artik her an kirmizi kart yiyecek,bencil,pas yapamayan,isabetsiz orta yapani istemiyoruz. sozde efsaneniz bu sezon Besiktasa en cok zarar verenlerden biriydi. kendini dusunmeden once Besiktasi dusunecek adam lazim bize. o yuzden #GoHomeQuaresma' - Yav bizim dedikodusunu yaptığımız insanın bile bi vizyonu var. Senin hakkında neden oturup konuşalım? - Ik ben een transgender. - source_sentence: 'Pass 1: Checking @is, @bs, and sizes' sentences: - Bu adam cidden kurabiye gibi ben bunu çayın yanında yerim - sagnat. errada. invisible. justificació. idioma - Wilt u echt de primaire sleutel verplaatsen? (j N) - source_sentence: Search for matching log entries sentences: - quem te lembra? caralho tô assustada aqui kkkkk - sendotasunik gabeko\ egoera bistaratuko den ala ez adierazten du - En aquest cas, hem d'incloure les imatges del contenidor )sr iov per a càrregues de treball de telco (per exemple, com a referència, es podrien obtenir des de valors de helm chart) model-index: - name: SentenceTransformer based on RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel results: - task: type: semantic-similarity name: Semantic Similarity dataset: name: sts eval type: sts-eval metrics: - type: pearson_cosine value: 0.980083415375982 name: Pearson Cosine - type: spearman_cosine value: 0.8655169963020204 name: Spearman Cosine - task: type: semantic-similarity name: Semantic Similarity dataset: name: sts test type: sts-test metrics: - type: pearson_cosine value: 0.9801740771365185 name: Pearson Cosine - type: spearman_cosine value: 0.8655815024093642 name: Spearman Cosine --- # SentenceTransformer based on RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel](https://huggingface.co/RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel) on the [corrupted_open_os_by_language](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language) dataset. It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. 
## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel](https://huggingface.co/RomainDarous/large_directThreeEpoch_additivePooling_noisedInit_mistranslationModel) <!-- at revision a405f53cddaa76ddc32428235a5f6de58865ac6f --> - **Maximum Sequence Length:** 128 tokens - **Output Dimensionality:** 768 dimensions - **Similarity Function:** Cosine Similarity - **Training Dataset:** - [corrupted_open_os_by_language](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language) <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Model Sources - **Documentation:** [Sentence Transformers Documentation](https://sbert.net) - **Repository:** [Sentence Transformers on GitHub](https://github.com/UKPLab/sentence-transformers) - **Hugging Face:** [Sentence Transformers on Hugging Face](https://huggingface.co/models?library=sentence-transformers) ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 128, 'do_lower_case': False}) with Transformer model: XLMRobertaModel (1): MultiHeadGeneralizedPooling( (P): ModuleList( (0-7): 8 x Linear(in_features=768, out_features=96, bias=True) ) (W1): ModuleList( (0-7): 8 x Linear(in_features=96, out_features=384, bias=True) ) (W2): ModuleList( (0-7): 8 x Linear(in_features=384, out_features=96, bias=True) ) ) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("RomainDarous/large_directFourEpoch_additivePooling_noisedInit_mistranslationModel") # Run inference sentences = [ 'Search for matching log entries', 'quem te lembra? caralho tô assustada aqui kkkkk', 'sendotasunik gabeko\\ egoera bistaratuko den ala ez adierazten du', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. <details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Semantic Similarity * Datasets: `sts-eval` and `sts-test` * Evaluated with [<code>EmbeddingSimilarityEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator) | Metric | sts-eval | sts-test | |:--------------------|:-----------|:-----------| | pearson_cosine | 0.9801 | 0.9802 | | **spearman_cosine** | **0.8655** | **0.8656** | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### corrupted_open_os_by_language * Dataset: [corrupted_open_os_by_language](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language) at [9d25780](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language/tree/9d25780e2032b1e8f06af6a4ff55124d7a930c3c) * Size: 4,460,010 training samples * Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>score</code> * Approximate statistics based on the first 1000 samples: | | sentence1 | sentence2 | score | |:--------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:------------------------------------------------| | type | string | string | int | | details | <ul><li>min: 6 tokens</li><li>mean: 18.33 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 4 tokens</li><li>mean: 26.47 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>0: ~50.60%</li><li>1: ~49.40%</li></ul> | * Samples: | sentence1 | sentence2 | score | |:--------------------------------------------------------------------------------------------|:-----------------------------------------------------------------------|:---------------| | <code>Check spelling. Print the document. Show completion window. General. Show help</code> | <code>Kontrolli õigekirja. присоединяюсь. </code> | <code>0</code> | | <code>EXIF not supported for this file format.</code> | <code>Šiam failo formatui EXIF nepalaikomas.</code> | <code>1</code> | | <code>This package includes the documentation for texlive everyhook</code> | <code>Paket ini menyertakan dokumentasi untuk texlive everyhook</code> | <code>1</code> | * Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "pairwise_cos_sim" } ``` ### Evaluation Dataset #### corrupted_open_os_by_language * Dataset: [corrupted_open_os_by_language](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language) at [9d25780](https://huggingface.co/datasets/RomainDarous/corrupted_os_by_language/tree/9d25780e2032b1e8f06af6a4ff55124d7a930c3c) * Size: 4,460,010 evaluation samples * Columns: <code>sentence1</code>, <code>sentence2</code>, and <code>score</code> * Approximate statistics based on the first 1000 samples: | | sentence1 | sentence2 | score | |:--------|:-----------------------------------------------------------------------------------|:-----------------------------------------------------------------------------------|:------------------------------------------------| | type | string | string | int | | details | <ul><li>min: 5 tokens</li><li>mean: 17.71 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>min: 3 tokens</li><li>mean: 26.95 tokens</li><li>max: 128 tokens</li></ul> | <ul><li>0: ~50.60%</li><li>1: ~49.40%</li></ul> | * Samples: | sentence1 | sentence2 | score | 
|:----------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:---------------| | <code>Could not identify the current seat.</code> | <code> 天天花着男人的钱还这这创造新词汇男权你可真牛批,你也就这一出了一问男权,就说是我是吧,到现在我也没听到你给我们讲的男权,你也就是在网上喷喷,现实走道都不敢探头自卑,你现实要把你女权的劲拿出来总低啥头,您老应该去国家教育局把男权加上是吧,你们女权天天说自己生活不好没地位,给你们地位了你们能干啥?用你们的女权打到全世界男性是吧,能相出男权这一词您老也是人才呀,是不是庆幸自己是个女的,活在自己想想的世界里不觉得孤单吗,假象有男权是吧,自己假象和男权还说自己不是田园女权,田园女权能连自己都骂说自己妈是驴爸是大鼎的也是奇葩呀,那我们国家大肆宣扬过你们这么田园女权吗,国家要的是女性人群自主自理,你们可好看看你们女权干的啥事,给你们女权地位高了,看看你们女权干的事n绿地集团高管怎么都不说呀,人家可是有钱有地位,也不是我们说三从四德洗衣做饭你们女权会吗?,那我问问你们女权干过啥惊天大事,还甩锅给孔子,还封建社会,那我问问你们女权在福利面前为啥说自己是女性呀不是社会主义社会吗不应该男女平等吗,天天自己也不知道是不是抱个手机天天欧巴欧巴,你家那位要是不陪你看一会就会问你是不是不爱我了是吧大姐,您老也就赚这白菜钱操心国家事,中国五千年的历史被您老一句否决,还嘲讽人家日本女性,好意思说自己不是女权,三从四德流传这么久到您这变成日本文化了,我就想问问男权您老是怎么想的,那你问孔子老人家呗为什么女人要三从四德,我说的是女权你干嘛自己对号入座,连中华人民传承的东西都不认跟我这谈男权,还男权您老给我举个例子呗,让我们男权听听都是h啥,这些不都是你们女权的标准吗?,还男权,您老醒醒吧这里是现实,不是你的公主世界,总觉得自己多么多么重要,地球没你是不能转了还是人类要灭亡呀,我真的想问一句你给我找一条男权的新闻,咋了我们男人不能提女权呗你老授权了呗,那我们谈论田园女权你老对号入座干嘛,天天过节要礼物,还嫌弃自己男朋友没有钱,我寻思你找个有钱人包养你呗,对了有钱人怎么可能看上你这种女权的呢,还要孩子跟女方姓我也没看见你没跟你妈姓呀,年年过节男人给你们送礼物你们女人给男人送过礼物吗?,一问我不是陪着他吗我对他说我爱你了这不是最好的礼物吗?,男人只要不送礼物就是不爱你们了呗,人家国际女权讲的男人能做的我们女人也能做,田园女权男人能做的我们女人为啥要做,还男权我笑了,以前结婚几头牛换个衣服原装的,现在几十万彩...</code> | <code>0</code> | | <code>Undoing Date and Time Adjustment</code> | <code>正在取消日期和时间调整</code> | <code>1</code> | | <code>Dependency package for gsl_2_6 gnu hpc</code> | <code>Pacotes de desenvolvimento do KDE</code> | <code>1</code> | * Loss: [<code>CoSENTLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosentloss) with these parameters: ```json { "scale": 20.0, "similarity_fct": "pairwise_cos_sim" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `per_device_train_batch_size`: 64 - `per_device_eval_batch_size`: 64 - `num_train_epochs`: 1 - `warmup_ratio`: 0.1 #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 64 - `per_device_eval_batch_size`: 64 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `torch_empty_cache_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 1 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - 
`log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: False - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: False - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: None - `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `include_for_metrics`: [] - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `eval_on_start`: False - `use_liger_kernel`: False - `eval_use_gather_object`: False - `average_tokens_across_devices`: False - `prompts`: None - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | corrupted open os by language loss | sts-eval_spearman_cosine | sts-test_spearman_cosine | |:-----:|:-----:|:-------------:|:----------------------------------:|:------------------------:|:------------------------:| | 1.0 | 55751 | 0.0845 | 0.2994 | 0.8655 | - | | -1 | -1 | - | - | - | 0.8656 | ### Framework Versions - Python: 3.10.13 - Sentence Transformers: 3.4.1 - Transformers: 4.48.2 - PyTorch: 2.1.2+cu121 - Accelerate: 1.3.0 - Datasets: 2.16.1 - Tokenizers: 0.21.0 ## Citation ### BibTeX #### Sentence Transformers ```bibtex @inproceedings{reimers-2019-sentence-bert, title = "Sentence-BERT: Sentence Embeddings using Siamese BERT-Networks", author = "Reimers, Nils and Gurevych, Iryna", booktitle = 
"Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing", month = "11", year = "2019", publisher = "Association for Computational Linguistics", url = "https://arxiv.org/abs/1908.10084", } ``` #### CoSENTLoss ```bibtex @online{kexuefm-8847, title={CoSENT: A more efficient sentence vector scheme than Sentence-BERT}, author={Su Jianlin}, year={2022}, month={Jan}, url={https://kexue.fm/archives/8847}, } ``` <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
[ "CAS" ]
hhhhzy/roberta-pubhealth
hhhhzy
text-classification
[ "transformers", "pytorch", "roberta", "text-classification", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-05-30T20:37:57Z
2022-05-30T23:01:52+00:00
22
0
--- {} ---

# Roberta-Pubhealth model

This model is a fine-tuned version of [RoBERTa Base](https://huggingface.co/roberta-base) on the health_fact dataset. It achieves the following results on the evaluation set:

- micro f1 (accuracy): 0.7137
- macro f1: 0.6056
- weighted f1: 0.7106
- samples predicted per second: 9.31

## Dataset description

[PUBHEALTH](https://huggingface.co/datasets/health_fact) is a comprehensive dataset for explainable automated fact-checking of public health claims. Each instance in the PUBHEALTH dataset has an associated veracity label (true, false, unproven, mixture). Furthermore, each instance in the dataset has an explanation text field. The explanation is a justification for why the claim has been assigned a particular veracity label.

## Training hyperparameters

The model is trained with the following tuned config:

- model: roberta base
- batch size: 32
- learning rate: 5e-5
- number of epochs: 4
- warmup steps: 0
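A minimal inference sketch for this checkpoint follows. The mapping from predicted label ids to the four veracity classes is not documented here, so treat it as an assumption to be checked against the checkpoint's `id2label` config.

```python
from transformers import pipeline

# Load the fine-tuned checkpoint and classify a public-health claim.
classifier = pipeline("text-classification", model="hhhhzy/roberta-pubhealth")

result = classifier("Vitamin C cures the common cold.")
print(result)  # e.g. [{'label': ..., 'score': ...}]
# Map the returned label id to {true, false, unproven, mixture}
# according to the model's config (id2label).
```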
[ "PUBHEALTH" ]
seonghyeonye/flipped_3B
seonghyeonye
text2text-generation
[ "transformers", "pytorch", "t5", "text2text-generation", "en", "dataset:bigscience/P3", "arxiv:2210.02969", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2022-10-04T01:40:33Z
2022-10-19T08:38:17+00:00
22
3
--- datasets: - bigscience/P3 language: en license: apache-2.0 widget: - text: "input: <extra_id_0> The item was packaged in bubble wrap. <extra_id_1>\n\ \ - It was fragile.\n - It was small.\n output: It was fragile." ---

**Official repository**: [seonghyeonye/Flipped-Learning](https://github.com/seonghyeonye/Flipped-Learning)

# Model Description

FLIPPED uses a unique meta-learning method to show zero-shot task generalization on classification natural language prompts, outperforming GPT-3 and T0-11B on many tasks with a 4x smaller scale. It is a series of encoder-decoder models trained on numerous classification datasets. We show the inputs and corresponding outputs of each instance in each dataset to FLIPPED, and train it to generate a possible instruction. We add an unlikelihood loss so that the model does **not** generate the instruction when the same input is paired with a wrong output. To obtain FLIPPED, we fine-tune a T5 model at a given scale on a multitask mixture covering many different classification NLP tasks.

# Intended uses

You can use the models to perform inference on tasks by specifying your input-output NLP query in an "input: {input}\noutput: {output}" form, and the model will predict the instruction. For example, you can try *"input: <extra_id_0> this is the best cast iron skillet you will ever buy<extra_id_1>\noutput: Positive"* as an input, and the model will hopefully generate *"Title: Review:"*.

# How to use

An overall explanation of our models, along with ablations, can be found in our [paper](https://arxiv.org/abs/2210.02969). We recommend using the [FLIPPED-11B](seonghyeonye/flipped_11B) checkpoint as it leads (on average) to the best performance on a variety of NLP tasks.

|Model|Number of parameters|
|-|-|
|[Flipped_11B](https://huggingface.co/seonghyeonye/flipped_11B)|11 billion|
|[Flipped_3B](https://huggingface.co/seonghyeonye/flipped_3B)|3 billion|

Here is how to download the model in PyTorch:

```python
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("seonghyeonye/flipped_3B")
tokenizer = T5Tokenizer.from_pretrained("seonghyeonye/flipped_3B")
```

If you want to use another checkpoint, please replace the path in `T5Tokenizer` and `T5ForConditionalGeneration`. We also provide a quick [Jupyter Notebook](https://github.com/seonghyeonye/Flipped-Learning/blob/master/flipped_inference.ipynb) where you can run inference with our method.

**Note: the model was trained with fp32 activations. As such, we highly discourage running inference with fp16.**

# Training procedure

FLIPPED models are based on [T5](https://huggingface.co/google/t5-v1_1-xl), a Transformer-based encoder-decoder language model pre-trained with a masked language modeling-style objective on [C4](https://huggingface.co/datasets/c4). At a high level, the input text along with the output label is fed to the encoder, and the instruction text is produced by the decoder. The model is fine-tuned to autoregressively generate the target. We also feed the input text along with a wrong output, adding an unlikelihood loss so that the model does not produce the proper instruction in that case. Here are our training details.
Training details:

- Fine-tuning steps: 5'000
- Input sequence length: 512
- Target sequence length: 128
- Batch size: 240
- Optimizer: Adafactor
- Learning rate: 5e-5
- Dropout: 0.1
- Sampling strategy: proportional to the number of examples in each dataset (we randomly subsampled any dataset with over 500'000 examples so that it has at most 500'000 examples. Also, we randomly choose which instruction to generate at each training step, so ideally each instruction appears *num_examples/num_templates* times during training.)

# Training data

We trained different variants of FLIPPED with different mixtures of datasets.

|Model|Training datasets|
|--|--|
|FLIPPED_11B|- Multiple-Choice QA: CommonsenseQA, DREAM, QUAIL, QuaRTz, Social IQA, WiQA, Cosmos, QASC, Quarel, SciQ<br>- Sentiment: Amazon, App Reviews, IMDB, Rotten Tomatoes, Yelp<br>- Topic Classification: AG News, DBPedia<br>- Paraphrase Identification: MRPC, PAWS, QQP|
|FLIPPED_3B|Same as FLIPPED_11B|

We only chose prompt examples that have output labels, which can be found on the dataset page.

# Evaluation data

We evaluate our models on the following datasets:

|Task category|Datasets|
|-|-|
|Natural language inference|ANLI(R1, R2, R3), CB, RTE|
|Coreference resolution|WSC, Winogrande|
|Word sense disambiguation|WiC|
|Sentence completion|COPA, HellaSwag, Story Cloze|
|QA|PIQA, ARC-Challenge, OpenbookQA|

We also evaluate FLIPPED on a subset of the [BIG-bench benchmark](https://github.com/google/BIG-bench):

- Code description task
- Conceptual combinations
- Hindu knowledge json
- Known unknowns
- Language identification
- Logic grid puzzle task
- Logical deduction
- Common misconceptions
- Movie dialog same or different
- Novel concepts
- Strategyqa
- Formal fallacies syllogisms negation
- VitaminC
- Winowhy multiple choice

# Label generalization

We evaluate the robustness of the models on the following datasets by changing the output labels of the datasets. The substitute words can be found in our [paper](https://arxiv.org/abs/2210.02969).

|Task category|(Datasets, Template name)|
|-|-|
|Unseen tasks|(WSC, does the pronoun refer to), (CB, can we infer), (RTE, MNLI crowdsource)|
|Seen tasks|(IMDB, Reviewer Enjoyment Yes No), (PAWS, Meaning)|

The template names we used can be found in the [promptsource template library](https://github.com/bigscience-workshop/promptsource/tree/main/promptsource/templates).

# BibTeX entry and citation info

```bibtex
@article{ye2022guess,
  title={Guess the Instruction! Flipped Learning Makes Language Models Stronger Zero-Shot Learners},
  author={Ye, Seonghyeon and Kim, Doyoung and Jang, Joel and Shin, Joongbo and Seo, Minjoon},
  journal={arXiv preprint arXiv:2210.02969},
  year={2022}
}
```
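A short generation sketch using the input-output format described above; the decoding settings here are illustrative assumptions rather than the paper's exact configuration.

```python
import torch
from transformers import T5Tokenizer, T5ForConditionalGeneration

model = T5ForConditionalGeneration.from_pretrained("seonghyeonye/flipped_3B")
tokenizer = T5Tokenizer.from_pretrained("seonghyeonye/flipped_3B")

# Input-output query in the "input: ... output: ..." form; the model is
# expected to generate a plausible instruction (e.g. "Title: Review:").
query = (
    "input: <extra_id_0> this is the best cast iron skillet you will ever buy"
    "<extra_id_1>\noutput: Positive"
)
inputs = tokenizer(query, return_tensors="pt")
with torch.no_grad():  # fp32 inference, as recommended above
    generated = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(generated[0], skip_special_tokens=True))
```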
[ "SCIQ" ]
model-attribution-challenge/openai-gpt
model-attribution-challenge
text-generation
[ "transformers", "pytorch", "tf", "rust", "openai-gpt", "text-generation", "en", "arxiv:1705.11168", "arxiv:1803.02324", "arxiv:1910.09700", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2022-11-09T20:17:38Z
2022-07-22T07:57:33+00:00
22
1
--- language: en license: mit --- # OpenAI GPT ## Table of Contents - [Model Details](#model-details) - [How To Get Started With the Model](#how-to-get-started-with-the-model) - [Uses](#uses) - [Risks, Limitations and Biases](#risks-limitations-and-biases) - [Training](#training) - [Evaluation](#evaluation) - [Environmental Impact](#environmental-impact) - [Technical Specifications](#technical-specifications) - [Citation Information](#citation-information) - [Model Card Authors](#model-card-authors) ## Model Details **Model Description:** `openai-gpt` is a transformer-based language model created and released by OpenAI. The model is a causal (unidirectional) transformer pre-trained using language modeling on a large corpus with long range dependencies. - **Developed by:** Alec Radford, Karthik Narasimhan, Tim Salimans, Ilya Sutskever. See [associated research paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf) and [GitHub repo](https://github.com/openai/finetune-transformer-lm) for model developers and contributors. - **Model Type:** Transformer-based language model - **Language(s):** English - **License:** [MIT License](https://github.com/openai/finetune-transformer-lm/blob/master/LICENSE) - **Related Models:** [GPT2](https://huggingface.co/gpt2), [GPT2-Medium](https://huggingface.co/gpt2-medium), [GPT2-Large](https://huggingface.co/gpt2-large) and [GPT2-XL](https://huggingface.co/gpt2-xl) - **Resources for more information:** - [Research Paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf) - [OpenAI Blog Post](https://openai.com/blog/language-unsupervised/) - [GitHub Repo](https://github.com/openai/finetune-transformer-lm) - Test the full generation capabilities here: https://transformer.huggingface.co/doc/gpt ## How to Get Started with the Model Use the code below to get started with the model. You can use this model directly with a pipeline for text generation. Since the generation relies on some randomness, we set a seed for reproducibility: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='openai-gpt') >>> set_seed(42) >>> generator("Hello, I'm a language model,", max_length=30, num_return_sequences=5) [{'generated_text': "Hello, I'm a language model,'he said, when i was finished.'ah well,'said the man,'that's"}, {'generated_text': 'Hello, I\'m a language model, " she said. \n she reached the bottom of the shaft and leaned a little further out. it was'}, {'generated_text': 'Hello, I\'m a language model, " she laughed. " we call that a\'white girl.\'or as we are called by the'}, {'generated_text': 'Hello, I\'m a language model, " said mr pin. " an\'the ones with the funny hats don\'t. 
" the rest of'}, {'generated_text': 'Hello, I\'m a language model, was\'ere \'bout to do some more dancin \', " he said, then his voice lowered to'}] ``` Here is how to use this model in PyTorch: ```python from transformers import OpenAIGPTTokenizer, OpenAIGPTModel import torch tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt") model = OpenAIGPTModel.from_pretrained("openai-gpt") inputs = tokenizer("Hello, my dog is cute", return_tensors="pt") outputs = model(**inputs) last_hidden_states = outputs.last_hidden_state ``` and in TensorFlow: ```python from transformers import OpenAIGPTTokenizer, TFOpenAIGPTModel tokenizer = OpenAIGPTTokenizer.from_pretrained("openai-gpt") model = TFOpenAIGPTModel.from_pretrained("openai-gpt") inputs = tokenizer("Hello, my dog is cute", return_tensors="tf") outputs = model(inputs) last_hidden_states = outputs.last_hidden_state ``` ## Uses #### Direct Use This model can be used for language modeling tasks. #### Downstream Use Potential downstream uses of this model include tasks that leverage language models. In the [associated paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf), the model developers discuss evaluations of the model for tasks including natural language inference (NLI), question answering, semantic similarity, and text classification. #### Misuse and Out-of-scope Use The model was not trained to be factual or true representations of people or events, and therefore using the model to generate such content is out-of-scope for the abilities of this model. ## Risks, Limitations and Biases #### Biases **CONTENT WARNING: Readers should be aware that language generated by this model can be disturbing or offensive to some and can propagate historical and current stereotypes.** Significant research has explored bias and fairness issues with language models (see, e.g., [Sheng et al. (2021)](https://aclanthology.org/2021.acl-long.330.pdf) and [Bender et al. (2021)](https://dl.acm.org/doi/pdf/10.1145/3442188.3445922)). Predictions generated by this model can include disturbing and harmful stereotypes across protected classes; identity characteristics; and sensitive, social, and occupational groups. For example: ```python >>> from transformers import pipeline, set_seed >>> generator = pipeline('text-generation', model='openai-gpt') >>> set_seed(42) >>> generator("The man worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The man worked as a teacher for the college he'}, {'generated_text': 'The man worked as a janitor at the club.'}, {'generated_text': 'The man worked as a bodyguard in america. the'}, {'generated_text': 'The man worked as a clerk for one of the'}, {'generated_text': 'The man worked as a nurse, but there was'}] >>> set_seed(42) >>> generator("The woman worked as a", max_length=10, num_return_sequences=5) [{'generated_text': 'The woman worked as a medical intern but is a'}, {'generated_text': 'The woman worked as a midwife, i know that'}, {'generated_text': 'The woman worked as a prostitute in a sex club'}, {'generated_text': 'The woman worked as a secretary for one of the'}, {'generated_text': 'The woman worked as a nurse, but she had'}] ``` This bias may also affect fine-tuned versions of this model. Users (both direct and downstream) should be made aware of the risks, biases and limitations of the model. 
#### Risks and Limitations The model developers also wrote in a [blog post](https://openai.com/blog/language-unsupervised/) about risks and limitations of the model, including: > - **Compute Requirements:** Many previous approaches to NLP tasks train relatively small models on a single GPU from scratch. Our approach requires an expensive pre-training step - 1 month on 8 GPUs. Luckily, this only has to be done once and we’re releasing our model so others can avoid it. It is also a large model (in comparison to prior work) and consequently uses more compute and memory — we used a 37-layer (12 block) Transformer architecture, and we train on sequences of up to 512 tokens. Most experiments were conducted on 4 and 8 GPU systems. The model does fine-tune to new tasks very quickly which helps mitigate the additional resource requirements. > - **The limits and bias of learning about the world through text:** Books and text readily available on the internet do not contain complete or even accurate information about the world. Recent work ([Lucy and Gauthier, 2017](https://arxiv.org/abs/1705.11168)) has shown that certain kinds of information are difficult to learn via just text and other work ([Gururangan et al., 2018](https://arxiv.org/abs/1803.02324)) has shown that models learn and exploit biases in data distributions. > - **Still brittle generalization:** Although our approach improves performance across a broad range of tasks, current deep learning NLP models still exhibit surprising and counterintuitive behavior - especially when evaluated in a systematic, adversarial, or out-of-distribution way. Our approach is not immune to these issues, though we have observed some indications of progress. Our approach shows improved lexical robustness over previous purely neural approaches to textual entailment. On the dataset introduced in Glockner et al. (2018) our model achieves 83.75%, performing similarly to KIM, which incorporates external knowledge via WordNet. ## Training #### Training Data The model developers [write](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf): > We use the BooksCorpus dataset ([Zhu et al., 2015](https://www.cv-foundation.org/openaccess/content_iccv_2015/papers/Zhu_Aligning_Books_and_ICCV_2015_paper.pdf)) for training the language model. It contains over 7,000 unique unpublished books from a variety of genres including Adventure, Fantasy, and Romance. Crucially, it contains long stretches of contiguous text, which allows the generative model to learn to condition on long-range information. #### Training Procedure The model developers [write](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf): > Our model largely follows the original transformer work [62]. We trained a 12-layer decoder-only transformer with masked self-attention heads (768 dimensional states and 12 attention heads). For the position-wise feed-forward networks, we used 3072 dimensional inner states. We used the Adam optimization scheme [27] with a max learning rate of 2.5e-4. The learning rate was increased linearly from zero over the first 2000 updates and annealed to 0 using a cosine schedule. We train for 100 epochs on minibatches of 64 randomly sampled, contiguous sequences of 512 tokens. Since layernorm [2] is used extensively throughout the model, a simple weight initialization of N (0, 0.02) was sufficient. 
We used a bytepair encoding (BPE) vocabulary with 40,000 merges [53] and residual, embedding, and attention dropouts with a rate of 0.1 for regularization. We also employed a modified version of L2 regularization proposed in [37], with w = 0.01 on all non bias or gain weights. For the activation function, we used the Gaussian Error Linear Unit (GELU) [18]. We used learned position embeddings instead of the sinusoidal version proposed in the original work. We use the ftfy library2 to clean the raw text in BooksCorpus, standardize some punctuation and whitespace, and use the spaCy tokenizer. See the paper for further details and links to citations. ## Evaluation The following evaluation information is extracted from the [associated blog post](https://openai.com/blog/language-unsupervised/). See the [associated paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf) for further details. #### Testing Data, Factors and Metrics The model developers report that the model was evaluated on the following tasks and datasets using the listed metrics: - **Task:** Textual Entailment - **Datasets:** [SNLI](https://huggingface.co/datasets/snli), [MNLI Matched](https://huggingface.co/datasets/glue), [MNLI Mismatched](https://huggingface.co/datasets/glue), [SciTail](https://huggingface.co/datasets/scitail), [QNLI](https://huggingface.co/datasets/glue), [RTE](https://huggingface.co/datasets/glue) - **Metrics:** Accuracy - **Task:** Semantic Similarity - **Datasets:** [STS-B](https://huggingface.co/datasets/glue), [QQP](https://huggingface.co/datasets/glue), [MRPC](https://huggingface.co/datasets/glue) - **Metrics:** Accuracy - **Task:** Reading Comprehension - **Datasets:** [RACE](https://huggingface.co/datasets/race) - **Metrics:** Accuracy - **Task:** Commonsense Reasoning - **Datasets:** [ROCStories](https://huggingface.co/datasets/story_cloze), [COPA](https://huggingface.co/datasets/xcopa) - **Metrics:** Accuracy - **Task:** Sentiment Analysis - **Datasets:** [SST-2](https://huggingface.co/datasets/glue) - **Metrics:** Accuracy - **Task:** Linguistic Acceptability - **Datasets:** [CoLA](https://huggingface.co/datasets/glue) - **Metrics:** Accuracy - **Task:** Multi Task Benchmark - **Datasets:** [GLUE](https://huggingface.co/datasets/glue) - **Metrics:** Accuracy #### Results The model achieves the following results without any fine-tuning (zero-shot): | Task | TE | TE | TE |TE | TE | TE | SS | SS | SS | RC | CR | CR | SA | LA | MTB | |:--------:|:--:|:----------:|:-------------:|:-----:|:----:|:---:|:---:|:---:|:--:|:----:|:--------:|:----:|:----:|:----:|:----:| | Dataset |SNLI|MNLI Matched|MNLI Mismatched|SciTail| QNLI | RTE |STS-B| QQP |MPRC|RACE |ROCStories|COPA | SST-2| CoLA | GLUE | | |89.9| 82.1 | 81.4 |88.3 | 88.1 | 56.0|82.0 | 70.3|82.3|59.0 | 86.5 | 78.6 | 91.3 | 45.4 | 72.8 | ## Environmental Impact The model developers [report that](https://openai.com/blog/language-unsupervised/): > The total compute used to train this model was 0.96 petaflop days (pfs-days). > 8 P600 GPU's * 30 days * 12 TFLOPS/GPU * 0.33 utilization = .96 pfs-days Carbon emissions can be estimated using the [Machine Learning Impact calculator](https://mlco2.github.io/impact#compute) presented in [Lacoste et al. (2019)](https://arxiv.org/abs/1910.09700). 
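As a quick sanity check on the compute figure quoted above, the arithmetic works out as follows (no new data, just the blog post's numbers):

```python
# 8 GPUs * 30 days * 12 TFLOPS per GPU * 0.33 utilization, converted to petaflop/s-days
pfs_days = 8 * 30 * 12 * 0.33 / 1000
print(round(pfs_days, 2))  # ~0.95; the reported 0.96 follows if utilization is taken as 1/3
```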
- **Hardware Type:** 8 P600 GPUs
- **Hours used:** 720 hours (30 days)
- **Cloud Provider:** Unknown
- **Compute Region:** Unknown
- **Carbon Emitted:** Unknown

## Technical Specifications

See the [associated paper](https://cdn.openai.com/research-covers/language-unsupervised/language_understanding_paper.pdf) for details on the modeling architecture, objective, compute infrastructure, and training details.

## Citation Information

```bibtex
@article{radford2018improving,
  title={Improving language understanding by generative pre-training},
  author={Radford, Alec and Narasimhan, Karthik and Salimans, Tim and Sutskever, Ilya and others},
  year={2018},
  publisher={OpenAI}
}
```

APA: *Radford, A., Narasimhan, K., Salimans, T., & Sutskever, I. (2018). Improving language understanding by generative pre-training.*

## Model Card Authors

This model card was written by the Hugging Face team.
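For readers who want to try the checkpoint this card documents, a minimal loading-and-sampling sketch with the `transformers` library follows. The `openai-gpt` Hub id is an assumption inferred from the card's contents (the original GPT-1 release); substitute the actual repository name if it differs.

```python
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hub id assumed from the card's contents; adjust if the card is hosted elsewhere.
model_id = "openai-gpt"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

inputs = tokenizer("the book begins with", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20, do_sample=True, top_k=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```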
[ "SCITAIL" ]
BigSalmon/InformalToFormalLincoln99Paraphrase
BigSalmon
text-generation
[ "transformers", "pytorch", "tensorboard", "gpt2", "text-generation", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-05-23T02:54:21Z
2023-05-23T03:45:21+00:00
22
0
--- {} --- data: https://github.com/BigSalmon2/InformalToFormalDataset Text Generation Informal Formal ``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln99Paraphrase") model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln99Paraphrase") ``` ``` Demo: https://huggingface.co/spaces/BigSalmon/FormalInformalConciseWordy ``` ``` prompt = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln:""" input_ids = tokenizer.encode(prompt, return_tensors='pt') outputs = model.generate(input_ids=input_ids, max_length=10 + len(prompt), temperature=1.0, top_k=50, top_p=0.95, do_sample=True, num_return_sequences=5, early_stopping=True) for i in range(5): print(tokenizer.decode(outputs[i])) ``` Most likely outputs (Disclaimer: I highly recommend using this over just generating): ``` prompt = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln:""" text = tokenizer.encode(prompt) myinput, past_key_values = torch.tensor([text]), None myinput = myinput myinput= myinput.to(device) logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False) logits = logits[0,-1] probabilities = torch.nn.functional.softmax(logits) best_logits, best_indices = logits.topk(250) best_words = [tokenizer.decode([idx.item()]) for idx in best_indices] text.append(best_indices[0].item()) best_probabilities = probabilities[best_indices].tolist() words = [] print(best_words) ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` original: microsoft word's [MASK] pricing invites competition. Translated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition. *** original: the library’s quiet atmosphere encourages visitors to [blank] in their work. Translated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work. ``` ``` Essay Intro (Warriors vs. Rockets in Game 7): text: eagerly anticipated by fans, game 7's are the highlight of the post-season. text: ever-building in suspense, game 7's have the crowd captivated. *** Essay Intro (South Korean TV Is Becoming Popular): text: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ). 
text: increasingly held in critical esteem, south korean television continues to impress. text: at the forefront of quality content, south korea is quickly achieving celebrity status. *** Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ``` ``` - nebraska - unicamerical legislature - different from federal house and senate text: featuring a unicameral legislature, nebraska's political system stands in stark contrast to the federal model, comprised of a house and senate. 
*** - penny has practically no value - should be taken out of circulation - just as other coins have been in us history - lost use - value not enough - to make environmental consequences worthy text: all but valueless, the penny should be retired. as with other coins in american history, it has become defunct. too minute to warrant the environmental consequences of its production, it has outlived its usefulness. *** - ``` ``` original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. *** original: ``` ``` wordy: classical music is becoming less popular more and more. Translate into Concise Text: interest in classic music is fading. *** wordy: ``` ``` sweet: savvy voters ousted him. longer: voters who were informed delivered his defeat. *** sweet: ``` ``` 1: commercial space company spacex plans to launch a whopping 52 flights in 2022. 2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022. 3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights. 4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company. 5: a commercial space company, spacex aims to conduct 52 flights in 2022. *** 1: ``` Keywords to sentences or sentence. ``` ngos are characterized by: □ voluntary citizens' group that is organized on a local, national or international level □ encourage political participation □ often serve humanitarian functions □ work for social, economic, or environmental change *** what are the drawbacks of living near an airbnb? □ noise □ parking □ traffic □ security □ strangers *** ``` ``` original: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung. adapted: musicals generally use spoken dialogue as well as songs to convey the story. ( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung. *** original: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark. adapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark. *** original: ``` ``` original: had trouble deciding. translated into journalism speak: wrestled with the question, agonized over the matter, furrowed their brows in contemplation. *** original: ``` ``` input: not loyal 1800s english: ( two-faced / inimical / perfidious / duplicitous / mendacious / double-dealing / shifty ). *** input: ``` ``` first: ( was complicit in / was involved in ). antonym: ( was blameless / was not an accomplice to / had no hand in / was uninvolved in ). *** first: ( have no qualms about / see no issue with ). antonym: ( are deeply troubled by / harbor grave reservations about / have a visceral aversion to / take ( umbrage at / exception to ) / are wary of ). *** first: ( do not see eye to eye / disagree often ). 
antonym: ( are in sync / are united / have excellent rapport / are like-minded / are in step / are of one mind / are in lockstep / operate in perfect harmony / march in lockstep ). *** first: ``` ``` stiff with competition, law school {A} is the launching pad for countless careers, {B} is a crowded field, {C} ranks among the most sought-after professional degrees, {D} is a professional proving ground. *** languishing in viewership, saturday night live {A} is due for a creative renaissance, {B} is no longer a ratings juggernaut, {C} has been eclipsed by its imitators, {C} can still find its mojo. *** dubbed the "manhattan of the south," atlanta {A} is a bustling metropolis, {B} is known for its vibrant downtown, {C} is a city of rich history, {D} is the pride of georgia. *** embattled by scandal, harvard {A} is feeling the heat, {B} cannot escape the media glare, {C} is facing its most intense scrutiny yet, {D} is in the spotlight for all the wrong reasons. ``` Infill / Infilling / Masking / Phrase Masking (Works pretty decently actually, especially when you use logprobs code from above): ``` his contention [blank] by the evidence [sep] was refuted [answer] *** few sights are as [blank] new york city as the colorful, flashing signage of its bodegas [sep] synonymous with [answer] *** when rick won the lottery, all of his distant relatives [blank] his winnings [sep] clamored for [answer] *** the library’s quiet atmosphere encourages visitors to [blank] in their work [sep] immerse themselves [answer] *** the joy of sport is that no two games are alike. for every exhilarating experience, however, there is an interminable one. the national pastime, unfortunately, has a penchant for the latter. what begins as a summer evening at the ballpark can quickly devolve into a game of tedium. the primary culprit is the [blank] of play. from batters readjusting their gloves to fielders spitting on their mitts, the action is [blank] unnecessary interruptions. the sport's future is [blank] if these tendencies are not addressed [sep] plodding pace [answer] riddled with [answer] bleak [answer] *** microsoft word's [blank] pricing [blank] competition [sep] unconscionable [answer] invites [answer] *** ``` ``` original: microsoft word's [MASK] pricing invites competition. Translated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition. *** original: the library’s quiet atmosphere encourages visitors to [blank] in their work. Translated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work. ``` Backwards ``` Essay Intro (National Parks): text: tourists are at ease in the national parks, ( swept up in the beauty of their natural splendor ). *** Essay Intro (D.C. Statehood): washington, d.c. is a city of outsize significance, ( ground zero for the nation's political life / center stage for the nation's political machinations ). ``` ``` topic: the Golden State Warriors. characterization 1: the reigning kings of the NBA. characterization 2: possessed of a remarkable cohesion. characterization 3: helmed by superstar Stephen Curry. characterization 4: perched atop the league’s hierarchy. characterization 5: boasting a litany of hall-of-famers. *** topic: emojis. characterization 1: shorthand for a digital generation. characterization 2: more versatile than words. characterization 3: the latest frontier in language. characterization 4: a form of self-expression. characterization 5: quintessentially millennial. 
characterization 6: reflective of a tech-centric world. *** topic: ``` ``` regular: illinois went against the census' population-loss prediction by getting more residents. VBG: defying the census' prediction of population loss, illinois experienced growth. *** regular: microsoft word’s high pricing increases the likelihood of competition. VBG: extortionately priced, microsoft word is inviting competition. *** regular: ``` ``` source: badminton should be more popular in the US. QUERY: Based on the given topic, can you develop a story outline? target: (1) games played with racquets are popular, (2) just look at tennis and ping pong, (3) but badminton underappreciated, (4) fun, fast-paced, competitive, (5) needs to be marketed more text: the sporting arena is dominated by games that are played with racquets. tennis and ping pong, in particular, are immensely popular. somewhat curiously, however, badminton is absent from this pantheon. exciting, fast-paced, and competitive, it is an underappreciated pastime. all that it lacks is more effective marketing. *** source: movies in theaters should be free. QUERY: Based on the given topic, can you develop a story outline? target: (1) movies provide vital life lessons, (2) many venues charge admission, (3) those without much money text: the lessons that movies impart are far from trivial. the vast catalogue of cinematic classics is replete with inspiring sagas of friendship, bravery, and tenacity. it is regrettable, then, that admission to theaters is not free. in their current form, the doors of this most vital of institutions are closed to those who lack the means to pay. *** source: ``` ``` in the private sector, { transparency } is vital to the business’s credibility. the { disclosure of information } can be the difference between success and failure. *** the labor market is changing, with { remote work } now the norm. this { flexible employment } allows the individual to design their own schedule. *** the { cubicle } is the locus of countless grievances. many complain that the { enclosed workspace } restricts their freedom of movement. *** ``` ``` it would be natural to assume that americans, as a people whose ancestors { immigrated to this country }, would be sympathetic to those seeking to do likewise. question: what does “do likewise” mean in the above context? (a) make the same journey (b) share in the promise of the american dream (c) start anew in the land of opportunity (d) make landfall on the united states *** in the private sector, { transparency } is vital to the business’s credibility. this orientation can be the difference between success and failure. question: what does “this orientation” mean in the above context? (a) visible business practices (b) candor with the public (c) open, honest communication (d) culture of accountability ``` ``` example: suppose you are a teacher. further suppose you want to tell an accurate telling of history. then suppose a parent takes offense. they do so in the name of name of their kid. this happens a lot. text: educators' responsibility to remain true to the historical record often clashes with the parent's desire to shelter their child from uncomfortable realities. *** example: suppose you are a student at college. now suppose you have to buy textbooks. that is going to be worth hundreds of dollars. given how much you already spend on tuition, that is going to hard cost to bear. 
text: the exorbitant cost of textbooks, which often reaches hundreds of dollars, imposes a sizable financial burden on the already-strapped college student. ``` ``` <Prefix> the atlanta hawks may attribute <Prefix> <Suffix> trae young <Suffix> <Middle> their robust season to <Middle> *** <Prefix> the nobel prize in literature <Prefix> <Suffix> honor <Suffix> <Middle> is a singularly prestigious <Middle> ``` ``` accustomed to having its name uttered ______, harvard university is weathering a rare spell of reputational tumult (a) in reverential tones (b) with great affection (c) in adulatory fashion (d) in glowing terms ``` ``` clarify: international ( {working together} / cooperation ) is called for when ( {issue go beyond lots of borders} / an issue transcends borders / a given matter has transnational implications ). ``` ``` description: when someone thinks that their view is the only right one. synonyms: intolerant, opinionated, narrow-minded, insular, self-righteous. *** description: when you put something off. synonyms: shelve, defer, table, postpone. ``` ``` organic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea. rewrite phrases: meritocratic, viability, vision rewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability. ``` *Note* Of all the masking techniques, this one works the best. ``` <Prefix> the atlanta hawks may attribute <Prefix> <Suffix> trae young <Suffix> <Middle> their robust season to <Middle> *** <Prefix> the nobel prize in literature <Prefix> <Suffix> honor <Suffix> <Middle> is a singularly prestigious <Middle> ``` ``` essence: when someone's views are keeping within reasonable. refine: the senator's voting record is ( moderate / centrist / pragmatic / balanced / fair-minded / even-handed ). *** essence: when things are worked through in a petty way. refine: the propensity of the u.s. congress to settle every dispute by way of ( mudslinging / bickering / demagoguery / name-calling / finger-pointing / vilification ) is appalling. ``` ``` description: when someone thinks that their view is the only right one. synonyms: intolerant, opinionated, narrow-minded, insular, self-righteous. *** description: when you put something off. synonyms: shelve, defer, table, postpone. ``` ``` organic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea. rewrite phrases: meritocratic, viability, vision rewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability. ``` ``` music before bedtime [makes for being able to relax] -> is a recipe for relaxation. ``` ``` [people wanting entertainment love traveling new york city] -> travelers flock to new york city in droves, drawn to its iconic entertainment scene. [cannot blame them] -> one cannot fault them [broadway so fun] -> when it is home to such thrilling fare as Broadway. ``` ``` in their ( ‖ when you are rushing because you want to get there on time ‖ / haste to arrive punctually / mad dash to be timely ), morning commuters are too rushed to whip up their own meal. *** politicians prefer to author vague plans rather than ( ‖ when you can make a plan without many unknowns ‖ / actionable policies / concrete solutions ). ``` ``` Q: What is whistleblower protection? A: Whistleblower protection is a form of legal immunity granted to employees who expose the unethical practices of their employer. Q: Why are whistleblower protections important? 
A: Absent whistleblower protections, employees would be deterred from exposing their employer’s wrongdoing for fear of retribution. Q: Why would an employer engage in retribution? A: An employer who has acted unethically stands to suffer severe financial and reputational damage were their transgressions to become public. To safeguard themselves from these consequences, they might seek to dissuade employees from exposing their wrongdoing. ``` ``` original: the meritocratic nature of crowdfunding [MASK] into their vision's viability. infill: the meritocratic nature of crowdfunding [gives investors idea of how successful] -> ( offers entrepreneurs a window ) into their vision's viability. ``` ``` Leadership | Lecture 17: Worker Morale What Workers Look for in Companies: • Benefits o Tuition reimbursement o Paid parental leave o 401K matching o Profit sharing o Pension plans o Free meals • Social responsibility o Environmental stewardship o Charitable contributions o Diversity • Work-life balance o Telecommuting o Paid holidays and vacation o Casual dress • Growth opportunities • Job security • Competitive compensation • Recognition o Open-door policies o Whistleblower protection o Employee-of-the-month awards o Positive performance reviews o Bonuses ``` ``` description: business keywords: for-profit, fiduciary duty, monopolistic, bottom line, return on investment, short-term thinking, capital-intensive, self-interested, risk-taking, fiduciary duty, merger, speculation, profiteering, oversight, capitalism, diversification ``` ``` 3. In this task, you are given a company name and you need to find its industry. McDonalds -- Restaurant Facebook -- Social Network IKEA -- Furniture American Express -- Credit Services Nokia -- Telecom Nintendo -- Entertainment 4. In this task, you are given a Month and you need to convert it to its corresponding season April -- Spring December -- Winter July -- Summer October -- Fall February -- Winter 5. In this task, you are given a sentence with a missing word and you need to predict the correct word. Managers should set an _____ for their employees. -- example Some people spend more than four _____ in the gym. -- hours The police were on the _____ of arresting the suspect. -- verge They were looking for _____ on how to solve the problem. -- guidance What is the _____ of the coffee? -- price 6. In this task, you are given a paragraph and you need to reorder it to make it logical. It was first proposed in 1987. The total length of the bridge is 1,828 meters. The idea of a bridge connects Hong Kong to Macau. -- The idea of bridge connecting Hong Kong and Macau was first proposed in 1987. The total length of the bridge is 1,828 meters. It is a movie about a brave and noble policeman. The film was produced by Americans. They were Kevin Lima and Chris Buck. They are directors. The movie is called Tarzan. -- Produced by Americans Kevin Lima and Chris Buck, Tarzan is a movie about a brave and noble policeman. It was first discovered in the mountains of India. The active ingredients in this plant can stimulate hair growth. The plant is called "Hair Plus." -- First discovered in the mountains of India, Hair Plus is a plant whose active ingredients can stimulate hair growth. ``` ``` trivia: What is the population of South Korea? response: 51 million. *** trivia: What is the minimum voting age in the US? response: 18. *** trivia: What are the first ten amendments of the US constitution called? response: Bill of Rights. 
``` ``` ideas: in modern-day america, it is customary for the commander-in-chief to conduct regular press conferences related keywords: transparency, check and balance, sacrosanct, public accountability, adversarial, unscripted, direct access, open government, watchdog, healthy democracy, institutional integrity, right to know, direct line of communication, behind closed doors, updates, track progress, instill confidence, reassure, humanize, leadership style, day-to-day, forthcoming, demystify, ask hard questions *** ideas: i know this one guy who retired so young, attesting to how careful they were with money. related keywords: money management, resourceful, penny-pinching, live below their means, frugal, financial discipline, financial independence, conservative, long-term vision, discretionary spending, deferred gratification, preparedness, self-control, cushion ``` ``` less specific: actors and musicians should ( support democracy ). clarifies: actors and musicians should ( wield their celebrity to amplify pro-democracy messaging / marshal their considerable influence in the service of the democratic cause ). *** less specific: amid a contemporary culture that thrives on profligacy, the discipline necessary to retire early is a vanishing quality. rather than yielding to the lure of indulgence, the aspiring retiree must ( be careful ). clarifies: amid a contemporary culture that thrives on profligacy, the discipline necessary to retire early is a vanishing quality. rather than yielding to the lure of indulgence, the aspiring retiree must ( master their desires / exercise self-restraint / embrace frugality / restrain their appetite for splendor ). ``` ``` dull: clean emotional heft: spotless, immaculate, pristine *** dull: hot emotional heft: scorching, searing, blistering *** dull: happy emotional heft: euphoric ``` ``` text: {guide: vividly describe the premise of the show "seinfield"} -> set in the heart of bustling new york city, the sitcom "seinfield" follows the everyday {restrict: term that implies they get into trouble but in a comical way} -> misadventures of four neurotic friends. on any given episode, one can find them quarreling over their favorite diner's latest menu change, haggling over the division of household expenses, or contriving a scheme to spy on the apartment's newest resident. mundane as their exploits may be, they never fail to elicit a hearty laugh. {guide: mention how larry david is responsible} -> behind the show's witty, incisive dialogue lies the sharp pen of larry david, who co-created the show with jerry seinfeld. {guide: mention how larry david came up with the most recognizable lines} -> it is his genius that conjured such instantly {restrict: term that imply everybody knows them} -> recognizable quips as "no soup for you!" and "not that there's anything wrong with that!". {guide: mention how humanity should revel in having such good comedians these days} -> as this list of laugh-out-loud lines attests, the world is fortunate to ( count such a sharp comedic mind among its ranks / have such a talented humorist in its midst / have such comedic talent gracing its airwaves ). ``` ``` system: i'm not sure i agree with your assessment of the situation. user: replace "assessment" with a more casual term. chatbot: ( take / read / view ). system: i've been feeling really busy lately, trying to balance work and family responsibilities. user: replace "busy" with a more intense word that conveys it is too much. chatbot: overwhelmed. 
system: the candidate's stance on immigration is quite nebulous, and it's difficult to discern exactly what they would do if elected. user: replace "nebulous" with a word that conveys a lack of detail. chatbot: ( vague / ambiguous ). ``` ``` user: exorbitant in price, scholarly journals unconscionably define the breadth of one's knowledge awareness by the contents of their wallet. [replace “knowledge awareness” with a more natural expression] chatbot: intellectual horizons. user: can you do another alternative to “intellectual horizons” that has more relation to “scholarly journals”? chatbot: academic enlightenment. ``` ``` key: calculate. syn: estimate, consider, weigh, number, count, apportion, proportion, investigate, reckon, rate, compute. ant: guess, conjecture, hit, chance, risk, stake, miscalculate. ``` ``` description: more forceful version of curious that is less forceful than nosy answer: inquisitive description: more forceful version of hopeful that is less forceful than overconfident answer: optimistic ```
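The "most likely outputs" snippet shown near the top of this card assumes `torch` and `device` are already defined and calls `softmax` without a dimension. Below is a self-contained variant, offered as a sketch rather than the author's exact code: it adds the missing import, picks a device, applies `softmax` over the vocabulary dimension, and prints the top next-token candidates with their probabilities.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_name = "BigSalmon/InformalToFormalLincoln99Paraphrase"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device).eval()

prompt = ("informal english: corn fields are all across illinois, visible once you leave chicago.\n"
          "Translated into the Style of Abraham Lincoln:")
input_ids = tokenizer.encode(prompt, return_tensors="pt").to(device)

with torch.no_grad():
    logits = model(input_ids).logits[0, -1]          # scores for the next token only

probabilities = torch.nn.functional.softmax(logits, dim=-1)
best_probs, best_indices = probabilities.topk(250)
best_words = [tokenizer.decode([idx.item()]) for idx in best_indices]

# Inspect the ten most likely continuations before committing to a sample.
for word, prob in zip(best_words[:10], best_probs[:10]):
    print(f"{prob.item():.4f}  {word!r}")
```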
[ "BEAR" ]
ICTNLP/bayling-13b-diff
ICTNLP
text-generation
[ "transformers", "pytorch", "llama", "text-generation", "translation", "multilingual", "large language model", "instruction tuning", "zh", "en", "arxiv:2306.10968", "license:gpl-3.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-06-14T08:21:30Z
2023-07-06T13:52:12+00:00
22
12
--- language: - zh - en license: gpl-3.0 pipeline_tag: text-generation tags: - translation - multilingual - large language model - instruction tuning --- # BayLing: Bridging Cross-lingual Alignment and Instruction Following through Interactive Translation for Large Language Models **BayLing** (**百聆**, **bǎi líng**) is an instruction-following LLM equipped with advanced language alignment, showing superior capability in English/Chinese generation, instruction following and multi-turn interaction. BayLing can be effortlessly deployed on a consumer-grade GPU with 16GB of memory, and assists users with tasks such as translation, writing, creation, suggestion... **This model is the *weight-diff* version of BayLing-13B-v1.0.** [BayLing-13B-v1.1](https://huggingface.co/ICTNLP/bayling-13b-v1.1) has been released, **BayLing-13B-v1.1 is additionally injected with extensive Chinese knowledge** compared with this model. 👇 Learn more about BayLing: 💬 [**Demo**](http://nlp.ict.ac.cn/bayling/demo): Welcome to apply for a trial of BayLing's online demo (beta version). 📄 [**Paper**](https://arxiv.org/abs/2306.10968): A comprehensive research paper of BayLing. 🏠 [**Homepage**](http://nlp.ict.ac.cn/bayling): BayLing's homepage. You can discover more information and cases of BayLing here. ✍️ [**BayLing-80 Test Set**](https://github.com/ictnlp/BayLing/tree/main/data/BayLing-80): A human-annotated evaluation set comprising multi-turn instructions in both English and Chinese, can be used to evaluate the multilingual and multi-turn interaction capabilities of LLMs. 🤗 **Model**: The *weight-diff* version of [BayLing-7B](https://huggingface.co/ICTNLP/bayling-7b-diff) and [BayLing-13B](https://huggingface.co/ICTNLP/bayling-13b-diff), you can quickly get the parameters of BayLing through [apply_delta.py](https://github.com/ictnlp/BayLing/blob/main/apply_delta.py). The HF models of BayLing are anonymized version (exclude BayLing's name in its knowledge), in order to facilitate future LLMs to build upon BayLing. > BayLing is developed by [NLP Group](http://nlp.ict.ac.cn/) of [Institute of Computing Technology](http://www.ict.ac.cn/), [Chinese Academy of Sciences](https://www.cas.cn/) (ICT/CAS) > > BayLing is continuously optimizing 🆙 > If you have any suggestions, please contact `[email protected]`. Thanks for your support! **Refer to our [Github Repo](https://github.com/ictnlp/BayLing) for the detailed introduction to BayLing, including deploying BayLing, interacting with BayLing and BayLing's performance.** ## <a id="Limitations">Limitations</a> Despite demonstrating commendable performance in certain aspects, BayLing still exhibits several limitations. For instance, when faced with tasks involving factual knowledge, BayLing has the potential to generate inaccurate information. Moreover, it lacks proficiency in solving reasoning, mathematics, and coding tasks. Additionally, there is a risk of BayLing generating content that is harmful or biased in nature. BayLing is a large language model that, like any other language model, cannot guarantee the absolute accuracy of the generated content. **Note that this project does not assume any risks or responsibilities associated with data security, public opinion risks arising from open-source models and codes, or any risks and liabilities resulting from misleading, misusing, spreading, or improper use of the models.** ## <a id="License">License</a> Model weights (delta version) and the inference code are released under The GNU General Public License v3.0 (GPLv3). 
The online demo serves as a research preview and is exclusively intended for non-commercial usage, subject to the [Model License](https://github.com/facebookresearch/llama/blob/main/MODEL_CARD.md) of LLaMA, [Terms of Use](https://openai.com/policies/terms-of-use) of the data generated by OpenAI, and [Privacy Practices](https://chrome.google.com/webstore/detail/sharegpt-share-your-chatg/daiacboceoaocpibfodeljbdfacokfjb) of ShareGPT and [Data License](https://machinetranslate.org/wmt22) of WMT22. ## <a id="Acknowledgements">Acknowledgements</a> We would like to express our gratitude to all those who have contributed to BayLing. We extend special thanks to Ms. Xiaohong Wang for her valuable comments and suggestions on the use of InforSuperBahn MLOps, and for her organizational and resource support in providing computing resources and showcasing BayLing. We also acknowledge Xiaodong Liu for his pivotal role in the construction of the distributed system and overall coordination of the demo deployment. Furthermore, we appreciate the contribution of the development team from the Nanjing Institute of InforSuperBahn in maintaining the computing resources and creating the display interface for BayLing’s webpage and demo. ## <a id="Authors">Authors</a> | [Shaolei Zhang](https://zhangshaolei1998.github.io/) | [Qingkai Fang](https://fangqingkai.github.io/) | [Zhuocheng Zhang](https://nlp.ict.ac.cn/yjdw/xs/bsyjs/202210/t20221019_52678.html) | [Zhengrui Ma](https://nlp.ict.ac.cn/yjdw/xs/bsyjs/202210/t20221019_52675.html) | | [Yan Zhou](https://zhouyan19.github.io/zhouyan/) | [Langlin Huang](https://nlp.ict.ac.cn/yjdw/xs/ssyjs/202210/t20221019_52686.html) | [Mengyu Bu](https://bingo123122121.github.io/) | [Shangtong Gui](https://github.com/GhostofAdam) | | [Yunji Chen](http://novel.ict.ac.cn/ychen/) | [Xilin Chen](http://www.ict.cas.cn/sourcedb_2018_ict_cas/cn/jssrck/200909/t20090917_2496595.html) | [Yang Feng \*](https://people.ucas.edu.cn/~yangfeng?language=en) | ## <a id="Citation">Citation</a> If our work is helpful for you, please cite as: ``` @article{bayling, title={BayLing: Bridging Cross-lingual Alignment and Instruction Following through Interactive Translation for Large Language Models}, author={Shaolei Zhang and Qingkai Fang and Zhuocheng Zhang and Zhengrui Ma and Yan Zhou and Langlin Huang and Mengyu Bu and Shangtong Gui and Yunji Chen and Xilin Chen and Yang Feng}, journal={arXiv preprint arXiv:2306.10968}, year={2023}, url={https://arxiv.org/abs/2306.10968} } ```
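As a rough illustration of the weight-diff workflow described above, the sketch below assumes you have already recovered the full BayLing-13B weights with the repository's `apply_delta.py` and saved them to a local directory; the path is a placeholder, and loading in fp16 with `device_map="auto"` is a common recipe for 13B LLaMA-based models rather than an official BayLing instruction.

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

# Placeholder: directory produced by apply_delta.py (base LLaMA + this weight diff).
merged_path = "/path/to/bayling-13b"

tokenizer = AutoTokenizer.from_pretrained(merged_path)
model = AutoModelForCausalLM.from_pretrained(
    merged_path,
    torch_dtype=torch.float16,   # halves the memory footprint of the 13B weights
    device_map="auto",           # requires the accelerate package; offloads if needed
)

prompt = "Translate the following sentence into Chinese: The weather is lovely today."
inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```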
[ "CAS" ]
IIC/xlm-roberta-large-meddocan
IIC
token-classification
[ "transformers", "pytorch", "xlm-roberta", "text-classification", "biomedical", "clinical", "spanish", "xlm-roberta-large", "token-classification", "es", "dataset:bigbio/meddocan", "license:mit", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-06-21T15:46:58Z
2023-06-21T15:50:46+00:00
22
0
---
datasets:
- bigbio/meddocan
language: es
license: mit
metrics:
- f1
pipeline_tag: token-classification
tags:
- biomedical
- clinical
- spanish
- xlm-roberta-large
model-index:
- name: IIC/xlm-roberta-large-meddocan
  results:
  - task:
      type: token-classification
    dataset:
      name: meddocan
      type: bigbio/meddocan
      split: test
    metrics:
    - type: f1
      value: 0.978
      name: f1
---

# xlm-roberta-large-meddocan

This model is a fine-tuned version of xlm-roberta-large on the meddocan dataset, used in a benchmark in the paper TODO. The model achieves an F1 of 0.978. Please refer to the original publication for more information: TODO LINK

## Parameters used

| parameter               | Value |
|-------------------------|:-----:|
| batch size              | 16    |
| learning rate           | 4e-05 |
| classifier dropout      | 0.2   |
| warmup ratio            | 0     |
| warmup steps            | 0     |
| weight decay            | 0     |
| optimizer               | AdamW |
| epochs                  | 10    |
| early stopping patience | 3     |

## BibTeX entry and citation info

```bibtex
TODO
```
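The card above does not include a usage snippet. A minimal, hypothetical example with the `transformers` pipeline might look like the following; the Spanish clinical sentence is invented, and the exact entity labels depend on the MEDDOCAN tag set the model was trained with.

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="IIC/xlm-roberta-large-meddocan",
    aggregation_strategy="simple",   # merge word pieces into whole entity spans
)

texto = "El paciente Juan Pérez ingresó en el Hospital La Paz de Madrid el 3 de mayo."
for entity in ner(texto):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```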
[ "MEDDOCAN" ]
DunnBC22/bert-base-cased-finetuned-ner-BC2GM-IOB
DunnBC22
token-classification
[ "transformers", "pytorch", "tensorboard", "bert", "token-classification", "generated_from_trainer", "NER", "en", "dataset:blurb", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-07-04T18:53:48Z
2023-08-02T02:25:57+00:00
22
1
--- datasets: - blurb language: - en license: apache-2.0 metrics: - seqeval pipeline_tag: token-classification tags: - generated_from_trainer - NER model-index: - name: bert-base-cased-finetuned-ner-BC2GM-IOB results: [] --- # bert-base-cased-finetuned-ner-BC2GM-IOB This model is a fine-tuned version of [bert-base-cased](https://huggingface.co/bert-base-cased). It achieves the following results on the evaluation set: - Loss: 0.0813 - Gene - Precision: 0.752111423914654 - Recall: 0.8025296442687747 - F1: 0.7765029830197338 - Number: 6325 - Overall - Precision: 0.7521 - Recall: 0.8025 - F1: 0.7765 - Accuracy: 0.9736 ## Model description For more information on how it was created, check out the following link: https://github.com/DunnBC22/NLP_Projects/blob/main/Token%20Classification/Monolingual/EMBO-BLURB/NER%20Project%20Using%20EMBO-BLURB%20Dataset.ipynb ## Intended uses & limitations This model is intended to demonstrate my ability to solve a complex problem using technology. ## Training and evaluation data Dataset Source: https://huggingface.co/datasets/EMBO/BLURB ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 16 - eval_batch_size: 16 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 3 ### Training results | Training Loss | Epoch | Step | Validation Loss | Gene Precision | Gene Recall | Gene F1 | Gene Number | Overall Precision | Overall Recall | Overall F1 | Overall Accuracy | |:-------------:|:-----:|:----:|:---------------:|:---------:|:---------:|:---------:|:---------:|:-----------------:|:--------------:|:------:|:------:| | 0.0882 | 1.0 | 786 | 0.0771 | 0.7383 | 0.7538 | 0.7460 | 6325 | 0.7383 | 0.7538 | 0.7460 | 0.9697 | | 0.0547 | 2.0 | 1572 | 0.0823 | 0.7617 | 0.7758 | 0.7687 | 6325 | 0.7617 | 0.7758 | 0.7687 | 0.9732 | | 0.0356 | 3.0 | 2358 | 0.0813 | 0.7521 | 0.8025 | 0.7765 | 6325 | 0.7521 | 0.8025 | 0.7765 | 0.9736 | *All values in the above chart are rounded to the nearest ten-thousandth. ### Framework versions - Transformers 4.28.1 - Pytorch 2.0.0 - Datasets 2.11.0 - Tokenizers 0.13.3
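A minimal usage sketch for this gene-mention tagger with the `transformers` pipeline API is shown below; the example sentence is illustrative only, and the printed label names follow whatever label set the fine-tuned model defines.

```python
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="DunnBC22/bert-base-cased-finetuned-ner-BC2GM-IOB",
    aggregation_strategy="simple",   # merge sub-tokens into full gene mentions
)

text = "Mutations in the BRCA1 gene increase the risk of breast cancer."
for entity in ner(text):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```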
[ "BLURB" ]
BigSalmon/InformalToFormalLincoln104Paraphrase
BigSalmon
text-generation
[ "transformers", "pytorch", "tensorboard", "gpt2", "text-generation", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-07-14T01:28:43Z
2023-07-14T18:36:51+00:00
22
0
--- {} --- data: https://github.com/BigSalmon2/InformalToFormalDataset Text Generation Informal Formal ``` from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained("BigSalmon/InformalToFormalLincoln104Paraphrase") model = AutoModelForCausalLM.from_pretrained("BigSalmon/InformalToFormalLincoln104Paraphrase") ``` ``` Demo: https://huggingface.co/spaces/BigSalmon/FormalInformalConciseWordy ``` ``` prompt = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln:""" input_ids = tokenizer.encode(prompt, return_tensors='pt') outputs = model.generate(input_ids=input_ids, max_length=10 + len(prompt), temperature=1.0, top_k=50, top_p=0.95, do_sample=True, num_return_sequences=5, early_stopping=True) for i in range(5): print(tokenizer.decode(outputs[i])) ``` Most likely outputs (Disclaimer: I highly recommend using this over just generating): ``` prompt = """informal english: corn fields are all across illinois, visible once you leave chicago.\nTranslated into the Style of Abraham Lincoln:""" text = tokenizer.encode(prompt) myinput, past_key_values = torch.tensor([text]), None myinput = myinput myinput= myinput.to(device) logits, past_key_values = model(myinput, past_key_values = past_key_values, return_dict=False) logits = logits[0,-1] probabilities = torch.nn.functional.softmax(logits) best_logits, best_indices = logits.topk(250) best_words = [tokenizer.decode([idx.item()]) for idx in best_indices] text.append(best_indices[0].item()) best_probabilities = probabilities[best_indices].tolist() words = [] print(best_words) ``` ``` How To Make Prompt: informal english: i am very ready to do that just that. Translated into the Style of Abraham Lincoln: you can assure yourself of my readiness to work toward this end. Translated into the Style of Abraham Lincoln: please be assured that i am most ready to undertake this laborious task. *** informal english: space is huge and needs to be explored. Translated into the Style of Abraham Lincoln: space awaits traversal, a new world whose boundaries are endless. Translated into the Style of Abraham Lincoln: space is a ( limitless / boundless ) expanse, a vast virgin domain awaiting exploration. *** informal english: corn fields are all across illinois, visible once you leave chicago. Translated into the Style of Abraham Lincoln: corn fields ( permeate illinois / span the state of illinois / ( occupy / persist in ) all corners of illinois / line the horizon of illinois / envelop the landscape of illinois ), manifesting themselves visibly as one ventures beyond chicago. informal english: ``` ``` original: microsoft word's [MASK] pricing invites competition. Translated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition. *** original: the library’s quiet atmosphere encourages visitors to [blank] in their work. Translated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work. ``` ``` Essay Intro (Warriors vs. Rockets in Game 7): text: eagerly anticipated by fans, game 7's are the highlight of the post-season. text: ever-building in suspense, game 7's have the crowd captivated. *** Essay Intro (South Korean TV Is Becoming Popular): text: maturing into a bona fide paragon of programming, south korean television ( has much to offer / entertains without fail / never disappoints ). 
text: increasingly held in critical esteem, south korean television continues to impress. text: at the forefront of quality content, south korea is quickly achieving celebrity status. *** Essay Intro ( ``` ``` Search: What is the definition of Checks and Balances? https://en.wikipedia.org/wiki/Checks_and_balances Checks and Balances is the idea of having a system where each and every action in government should be subject to one or more checks that would not allow one branch or the other to overly dominate. https://www.harvard.edu/glossary/Checks_and_Balances Checks and Balances is a system that allows each branch of government to limit the powers of the other branches in order to prevent abuse of power https://www.law.cornell.edu/library/constitution/Checks_and_Balances Checks and Balances is a system of separation through which branches of government can control the other, thus preventing excess power. *** Search: What is the definition of Separation of Powers? https://en.wikipedia.org/wiki/Separation_of_powers The separation of powers is a principle in government, whereby governmental powers are separated into different branches, each with their own set of powers, that are prevent one branch from aggregating too much power. https://www.yale.edu/tcf/Separation_of_Powers.html Separation of Powers is the division of governmental functions between the executive, legislative and judicial branches, clearly demarcating each branch's authority, in the interest of ensuring that individual liberty or security is not undermined. *** Search: What is the definition of Connection of Powers? https://en.wikipedia.org/wiki/Connection_of_powers Connection of Powers is a feature of some parliamentary forms of government where different branches of government are intermingled, typically the executive and legislative branches. https://simple.wikipedia.org/wiki/Connection_of_powers The term Connection of Powers describes a system of government in which there is overlap between different parts of the government. *** Search: What is the definition of ``` ``` Search: What are phrase synonyms for "second-guess"? https://www.powerthesaurus.org/second-guess/synonyms Shortest to Longest: - feel dubious about - raise an eyebrow at - wrinkle their noses at - cast a jaundiced eye at - teeter on the fence about *** Search: What are phrase synonyms for "mean to newbies"? https://www.powerthesaurus.org/mean_to_newbies/synonyms Shortest to Longest: - readiness to balk at rookies - absence of tolerance for novices - hostile attitude toward newcomers *** Search: What are phrase synonyms for "make use of"? https://www.powerthesaurus.org/make_use_of/synonyms Shortest to Longest: - call upon - glean value from - reap benefits from - derive utility from - seize on the merits of - draw on the strength of - tap into the potential of *** Search: What are phrase synonyms for "hurting itself"? https://www.powerthesaurus.org/hurting_itself/synonyms Shortest to Longest: - erring - slighting itself - forfeiting its integrity - doing itself a disservice - evincing a lack of backbone *** Search: What are phrase synonyms for " ``` ``` - nebraska - unicamerical legislature - different from federal house and senate text: featuring a unicameral legislature, nebraska's political system stands in stark contrast to the federal model, comprised of a house and senate. 
*** - penny has practically no value - should be taken out of circulation - just as other coins have been in us history - lost use - value not enough - to make environmental consequences worthy text: all but valueless, the penny should be retired. as with other coins in american history, it has become defunct. too minute to warrant the environmental consequences of its production, it has outlived its usefulness. *** - ``` ``` original: sports teams are profitable for owners. [MASK], their valuations experience a dramatic uptick. infill: sports teams are profitable for owners. ( accumulating vast sums / stockpiling treasure / realizing benefits / cashing in / registering robust financials / scoring on balance sheets ), their valuations experience a dramatic uptick. *** original: ``` ``` wordy: classical music is becoming less popular more and more. Translate into Concise Text: interest in classic music is fading. *** wordy: ``` ``` sweet: savvy voters ousted him. longer: voters who were informed delivered his defeat. *** sweet: ``` ``` 1: commercial space company spacex plans to launch a whopping 52 flights in 2022. 2: spacex, a commercial space company, intends to undertake a total of 52 flights in 2022. 3: in 2022, commercial space company spacex has its sights set on undertaking 52 flights. 4: 52 flights are in the pipeline for 2022, according to spacex, a commercial space company. 5: a commercial space company, spacex aims to conduct 52 flights in 2022. *** 1: ``` Keywords to sentences or sentence. ``` ngos are characterized by: □ voluntary citizens' group that is organized on a local, national or international level □ encourage political participation □ often serve humanitarian functions □ work for social, economic, or environmental change *** what are the drawbacks of living near an airbnb? □ noise □ parking □ traffic □ security □ strangers *** ``` ``` original: musicals generally use spoken dialogue as well as songs to convey the story. operas are usually fully sung. adapted: musicals generally use spoken dialogue as well as songs to convey the story. ( in a stark departure / on the other hand / in contrast / by comparison / at odds with this practice / far from being alike / in defiance of this standard / running counter to this convention ), operas are usually fully sung. *** original: akoya and tahitian are types of pearls. akoya pearls are mostly white, and tahitian pearls are naturally dark. adapted: akoya and tahitian are types of pearls. ( a far cry from being indistinguishable / easily distinguished / on closer inspection / setting them apart / not to be mistaken for one another / hardly an instance of mere synonymy / differentiating the two ), akoya pearls are mostly white, and tahitian pearls are naturally dark. *** original: ``` ``` original: had trouble deciding. translated into journalism speak: wrestled with the question, agonized over the matter, furrowed their brows in contemplation. *** original: ``` ``` input: not loyal 1800s english: ( two-faced / inimical / perfidious / duplicitous / mendacious / double-dealing / shifty ). *** input: ``` ``` first: ( was complicit in / was involved in ). antonym: ( was blameless / was not an accomplice to / had no hand in / was uninvolved in ). *** first: ( have no qualms about / see no issue with ). antonym: ( are deeply troubled by / harbor grave reservations about / have a visceral aversion to / take ( umbrage at / exception to ) / are wary of ). *** first: ( do not see eye to eye / disagree often ). 
antonym: ( are in sync / are united / have excellent rapport / are like-minded / are in step / are of one mind / are in lockstep / operate in perfect harmony / march in lockstep ). *** first: ``` ``` stiff with competition, law school {A} is the launching pad for countless careers, {B} is a crowded field, {C} ranks among the most sought-after professional degrees, {D} is a professional proving ground. *** languishing in viewership, saturday night live {A} is due for a creative renaissance, {B} is no longer a ratings juggernaut, {C} has been eclipsed by its imitators, {C} can still find its mojo. *** dubbed the "manhattan of the south," atlanta {A} is a bustling metropolis, {B} is known for its vibrant downtown, {C} is a city of rich history, {D} is the pride of georgia. *** embattled by scandal, harvard {A} is feeling the heat, {B} cannot escape the media glare, {C} is facing its most intense scrutiny yet, {D} is in the spotlight for all the wrong reasons. ``` Infill / Infilling / Masking / Phrase Masking (Works pretty decently actually, especially when you use logprobs code from above): ``` his contention [blank] by the evidence [sep] was refuted [answer] *** few sights are as [blank] new york city as the colorful, flashing signage of its bodegas [sep] synonymous with [answer] *** when rick won the lottery, all of his distant relatives [blank] his winnings [sep] clamored for [answer] *** the library’s quiet atmosphere encourages visitors to [blank] in their work [sep] immerse themselves [answer] *** the joy of sport is that no two games are alike. for every exhilarating experience, however, there is an interminable one. the national pastime, unfortunately, has a penchant for the latter. what begins as a summer evening at the ballpark can quickly devolve into a game of tedium. the primary culprit is the [blank] of play. from batters readjusting their gloves to fielders spitting on their mitts, the action is [blank] unnecessary interruptions. the sport's future is [blank] if these tendencies are not addressed [sep] plodding pace [answer] riddled with [answer] bleak [answer] *** microsoft word's [blank] pricing [blank] competition [sep] unconscionable [answer] invites [answer] *** ``` ``` original: microsoft word's [MASK] pricing invites competition. Translated into the Style of Abraham Lincoln: microsoft word's unconscionable pricing invites competition. *** original: the library’s quiet atmosphere encourages visitors to [blank] in their work. Translated into the Style of Abraham Lincoln: the library’s quiet atmosphere encourages visitors to immerse themselves in their work. ``` Backwards ``` Essay Intro (National Parks): text: tourists are at ease in the national parks, ( swept up in the beauty of their natural splendor ). *** Essay Intro (D.C. Statehood): washington, d.c. is a city of outsize significance, ( ground zero for the nation's political life / center stage for the nation's political machinations ). ``` ``` topic: the Golden State Warriors. characterization 1: the reigning kings of the NBA. characterization 2: possessed of a remarkable cohesion. characterization 3: helmed by superstar Stephen Curry. characterization 4: perched atop the league’s hierarchy. characterization 5: boasting a litany of hall-of-famers. *** topic: emojis. characterization 1: shorthand for a digital generation. characterization 2: more versatile than words. characterization 3: the latest frontier in language. characterization 4: a form of self-expression. characterization 5: quintessentially millennial. 
characterization 6: reflective of a tech-centric world. *** topic: ``` ``` regular: illinois went against the census' population-loss prediction by getting more residents. VBG: defying the census' prediction of population loss, illinois experienced growth. *** regular: microsoft word’s high pricing increases the likelihood of competition. VBG: extortionately priced, microsoft word is inviting competition. *** regular: ``` ``` source: badminton should be more popular in the US. QUERY: Based on the given topic, can you develop a story outline? target: (1) games played with racquets are popular, (2) just look at tennis and ping pong, (3) but badminton underappreciated, (4) fun, fast-paced, competitive, (5) needs to be marketed more text: the sporting arena is dominated by games that are played with racquets. tennis and ping pong, in particular, are immensely popular. somewhat curiously, however, badminton is absent from this pantheon. exciting, fast-paced, and competitive, it is an underappreciated pastime. all that it lacks is more effective marketing. *** source: movies in theaters should be free. QUERY: Based on the given topic, can you develop a story outline? target: (1) movies provide vital life lessons, (2) many venues charge admission, (3) those without much money text: the lessons that movies impart are far from trivial. the vast catalogue of cinematic classics is replete with inspiring sagas of friendship, bravery, and tenacity. it is regrettable, then, that admission to theaters is not free. in their current form, the doors of this most vital of institutions are closed to those who lack the means to pay. *** source: ``` ``` in the private sector, { transparency } is vital to the business’s credibility. the { disclosure of information } can be the difference between success and failure. *** the labor market is changing, with { remote work } now the norm. this { flexible employment } allows the individual to design their own schedule. *** the { cubicle } is the locus of countless grievances. many complain that the { enclosed workspace } restricts their freedom of movement. *** ``` ``` it would be natural to assume that americans, as a people whose ancestors { immigrated to this country }, would be sympathetic to those seeking to do likewise. question: what does “do likewise” mean in the above context? (a) make the same journey (b) share in the promise of the american dream (c) start anew in the land of opportunity (d) make landfall on the united states *** in the private sector, { transparency } is vital to the business’s credibility. this orientation can be the difference between success and failure. question: what does “this orientation” mean in the above context? (a) visible business practices (b) candor with the public (c) open, honest communication (d) culture of accountability ``` ``` example: suppose you are a teacher. further suppose you want to tell an accurate telling of history. then suppose a parent takes offense. they do so in the name of name of their kid. this happens a lot. text: educators' responsibility to remain true to the historical record often clashes with the parent's desire to shelter their child from uncomfortable realities. *** example: suppose you are a student at college. now suppose you have to buy textbooks. that is going to be worth hundreds of dollars. given how much you already spend on tuition, that is going to hard cost to bear. 
text: the exorbitant cost of textbooks, which often reaches hundreds of dollars, imposes a sizable financial burden on the already-strapped college student. ``` ``` clarify: international ( {working together} / cooperation ) is called for when ( {issue go beyond lots of borders} / an issue transcends borders / a given matter has transnational implications ). ``` ``` description: when someone thinks that their view is the only right one. synonyms: intolerant, opinionated, narrow-minded, insular, self-righteous. *** description: when you put something off. synonyms: shelve, defer, table, postpone. ``` ``` organic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea. rewrite phrases: meritocratic, viability, vision rewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability. ``` ``` essence: when someone's views are keeping within reasonable. refine: the senator's voting record is ( moderate / centrist / pragmatic / balanced / fair-minded / even-handed ). *** essence: when things are worked through in a petty way. refine: the propensity of the u.s. congress to settle every dispute by way of ( mudslinging / bickering / demagoguery / name-calling / finger-pointing / vilification ) is appalling. ``` ``` description: when someone thinks that their view is the only right one. synonyms: intolerant, opinionated, narrow-minded, insular, self-righteous. *** description: when you put something off. synonyms: shelve, defer, table, postpone. ``` ``` organic sentence: crowdfunding is about winner of best ideas and it can test an entrepreneur’s idea. rewrite phrases: meritocratic, viability, vision rewritten with phrases: the meritocratic nature of crowdfunding empowers entrepreneurs to test their vision's viability. ``` ``` music before bedtime [makes for being able to relax] -> is a recipe for relaxation. ``` ``` [people wanting entertainment love traveling new york city] -> travelers flock to new york city in droves, drawn to its iconic entertainment scene. [cannot blame them] -> one cannot fault them [broadway so fun] -> when it is home to such thrilling fare as Broadway. ``` ``` in their ( ‖ when you are rushing because you want to get there on time ‖ / haste to arrive punctually / mad dash to be timely ), morning commuters are too rushed to whip up their own meal. *** politicians prefer to author vague plans rather than ( ‖ when you can make a plan without many unknowns ‖ / actionable policies / concrete solutions ). ``` ``` Q: What is whistleblower protection? A: Whistleblower protection is a form of legal immunity granted to employees who expose the unethical practices of their employer. Q: Why are whistleblower protections important? A: Absent whistleblower protections, employees would be deterred from exposing their employer’s wrongdoing for fear of retribution. Q: Why would an employer engage in retribution? A: An employer who has acted unethically stands to suffer severe financial and reputational damage were their transgressions to become public. To safeguard themselves from these consequences, they might seek to dissuade employees from exposing their wrongdoing. ``` ``` original: the meritocratic nature of crowdfunding [MASK] into their vision's viability. infill: the meritocratic nature of crowdfunding [gives investors idea of how successful] -> ( offers entrepreneurs a window ) into their vision's viability. 
``` ``` Leadership | Lecture 17: Worker Morale What Workers Look for in Companies: • Benefits o Tuition reimbursement o Paid parental leave o 401K matching o Profit sharing o Pension plans o Free meals • Social responsibility o Environmental stewardship o Charitable contributions o Diversity • Work-life balance o Telecommuting o Paid holidays and vacation o Casual dress • Growth opportunities • Job security • Competitive compensation • Recognition o Open-door policies o Whistleblower protection o Employee-of-the-month awards o Positive performance reviews o Bonuses ``` ``` description: business keywords: for-profit, fiduciary duty, monopolistic, bottom line, return on investment, short-term thinking, capital-intensive, self-interested, risk-taking, fiduciary duty, merger, speculation, profiteering, oversight, capitalism, diversification ``` ``` 3. In this task, you are given a company name and you need to find its industry. McDonalds -- Restaurant Facebook -- Social Network IKEA -- Furniture American Express -- Credit Services Nokia -- Telecom Nintendo -- Entertainment 4. In this task, you are given a Month and you need to convert it to its corresponding season April -- Spring December -- Winter July -- Summer October -- Fall February -- Winter 5. In this task, you are given a sentence with a missing word and you need to predict the correct word. Managers should set an _____ for their employees. -- example Some people spend more than four _____ in the gym. -- hours The police were on the _____ of arresting the suspect. -- verge They were looking for _____ on how to solve the problem. -- guidance What is the _____ of the coffee? -- price 6. In this task, you are given a paragraph and you need to reorder it to make it logical. It was first proposed in 1987. The total length of the bridge is 1,828 meters. The idea of a bridge connects Hong Kong to Macau. -- The idea of bridge connecting Hong Kong and Macau was first proposed in 1987. The total length of the bridge is 1,828 meters. It is a movie about a brave and noble policeman. The film was produced by Americans. They were Kevin Lima and Chris Buck. They are directors. The movie is called Tarzan. -- Produced by Americans Kevin Lima and Chris Buck, Tarzan is a movie about a brave and noble policeman. It was first discovered in the mountains of India. The active ingredients in this plant can stimulate hair growth. The plant is called "Hair Plus." -- First discovered in the mountains of India, Hair Plus is a plant whose active ingredients can stimulate hair growth. ``` ``` trivia: What is the population of South Korea? response: 51 million. *** trivia: What is the minimum voting age in the US? response: 18. *** trivia: What are the first ten amendments of the US constitution called? response: Bill of Rights. ``` ``` ideas: in modern-day america, it is customary for the commander-in-chief to conduct regular press conferences related keywords: transparency, check and balance, sacrosanct, public accountability, adversarial, unscripted, direct access, open government, watchdog, healthy democracy, institutional integrity, right to know, direct line of communication, behind closed doors, updates, track progress, instill confidence, reassure, humanize, leadership style, day-to-day, forthcoming, demystify, ask hard questions *** ideas: i know this one guy who retired so young, attesting to how careful they were with money. 
related keywords: money management, resourceful, penny-pinching, live below their means, frugal, financial discipline, financial independence, conservative, long-term vision, discretionary spending, deferred gratification, preparedness, self-control, cushion ``` ``` less specific: actors and musicians should ( support democracy ). clarifies: actors and musicians should ( wield their celebrity to amplify pro-democracy messaging / marshal their considerable influence in the service of the democratic cause ). *** less specific: amid a contemporary culture that thrives on profligacy, the discipline necessary to retire early is a vanishing quality. rather than yielding to the lure of indulgence, the aspiring retiree must ( be careful ). clarifies: amid a contemporary culture that thrives on profligacy, the discipline necessary to retire early is a vanishing quality. rather than yielding to the lure of indulgence, the aspiring retiree must ( master their desires / exercise self-restraint / embrace frugality / restrain their appetite for splendor ). ``` ``` dull: clean emotional heft: spotless, immaculate, pristine *** dull: hot emotional heft: scorching, searing, blistering *** dull: happy emotional heft: euphoric ``` ``` text: {guide: vividly describe the premise of the show "seinfield"} -> set in the heart of bustling new york city, the sitcom "seinfield" follows the everyday {restrict: term that implies they get into trouble but in a comical way} -> misadventures of four neurotic friends. on any given episode, one can find them quarreling over their favorite diner's latest menu change, haggling over the division of household expenses, or contriving a scheme to spy on the apartment's newest resident. mundane as their exploits may be, they never fail to elicit a hearty laugh. {guide: mention how larry david is responsible} -> behind the show's witty, incisive dialogue lies the sharp pen of larry david, who co-created the show with jerry seinfeld. {guide: mention how larry david came up with the most recognizable lines} -> it is his genius that conjured such instantly {restrict: term that imply everybody knows them} -> recognizable quips as "no soup for you!" and "not that there's anything wrong with that!". {guide: mention how humanity should revel in having such good comedians these days} -> as this list of laugh-out-loud lines attests, the world is fortunate to ( count such a sharp comedic mind among its ranks / have such a talented humorist in its midst / have such comedic talent gracing its airwaves ). ``` ``` system: i'm not sure i agree with your assessment of the situation. user: replace "assessment" with a more casual term. chatbot: ( take / read / view ). system: i've been feeling really busy lately, trying to balance work and family responsibilities. user: replace "busy" with a more intense word that conveys it is too much. chatbot: overwhelmed. system: the candidate's stance on immigration is quite nebulous, and it's difficult to discern exactly what they would do if elected. user: replace "nebulous" with a word that conveys a lack of detail. chatbot: ( vague / ambiguous ). ``` ``` user: exorbitant in price, scholarly journals unconscionably define the breadth of one's knowledge awareness by the contents of their wallet. [replace “knowledge awareness” with a more natural expression] chatbot: intellectual horizons. user: can you do another alternative to “intellectual horizons” that has more relation to “scholarly journals”? chatbot: academic enlightenment. ``` ``` key: calculate. 
syn: estimate, consider, weigh, number, count, apportion, proportion, investigate, reckon, rate, compute. ant: guess, conjecture, hit, chance, risk, stake, miscalculate. ``` ``` description: more forceful version of curious that is less forceful than nosy answer: inquisitive description: more forceful version of hopeful that is less forceful than overconfident answer: optimistic ``` ``` key: inquisitive positive: curious, interested negative: nosy, prying *** key: witty positive: clever, humorous negative: sarcastic, caustic *** key: influential positive: impactful, powerful negative: overbearing, domineering ``` ``` defective: the blogger's { use of language imprecise } confused an already complicated issue. precise: the blogger's ( vague wording ) confused an already complicated issue. defective: the senator's speech was high on { words sounding dignified } but low on concrete proposals. precise: the senator's speech was high on ( lofty rhetoric ) but low on concrete proposals. ``` ``` example: the new car uses gas. boring: uses stronger: guzzles example: he hates people that are rude. boring: hates stronger: loathes, abhors, despises, scorns, detests ``` ``` initial: The music at the party was [ loud; replace with a word that suggests a more uncomfortable noise level ] and overwhelming. modified: The music at the party was { ear-splitting } and overwhelming. initial: their house is [ small; replace with a positive spin ]. modified: their house is { cozy }. ``` ``` defective: they spent the weekend enjoying { time do what you want }. precise: they spent the weekend enjoying ( leisure activities). defective: the author rightly notes the inequities perpetuated by { employment based on who you know }. precise: the author rightly notes the inequities perpetuated by ( nepotism ). defective: the senator's speech was high on { words sounding dignified } but low on concrete proposals. precise: the senator's speech was high on ( lofty rhetoric ) but low on concrete proposals. ``` ``` persona: human resources manager buzzwords: pipeline, talent, retention, compensation, flexible, recruitment, personnel, resume, competitive, quality, onboard ```
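The templates above are plain few-shot prompts: examples separated by `***`, with the final example left unfinished for the model to complete. As a rough illustration (not from the original card), the sketch below shows how one of the "synonyms" blocks could be assembled and sent to a causal language model with `transformers`; the `gpt2` checkpoint is only a placeholder for whichever model these prompts were written for.

```python
# Minimal sketch of running one of the few-shot templates above.
# "gpt2" is a placeholder checkpoint, purely illustrative.
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "gpt2"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

# Few-shot examples are separated by "***"; the last example is left
# unfinished so the model completes it.
prompt = (
    "description: when someone thinks that their view is the only right one.\n"
    "synonyms: intolerant, opinionated, narrow-minded, insular, self-righteous.\n"
    "***\n"
    "description: when you put something off.\n"
    "synonyms: shelve, defer, table, postpone.\n"
    "***\n"
    "description: when something is extremely expensive.\n"
    "synonyms:"
)

inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=20, do_sample=False)
# Print only the newly generated continuation.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[1]:], skip_special_tokens=True))
```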
[ "BEAR" ]
oleksandrfluxon/mpt-7b-instruct-evaluate
oleksandrfluxon
text-generation
[ "transformers", "pytorch", "mpt", "text-generation", "Composer", "MosaicML", "llm-foundry", "custom_code", "dataset:mosaicml/dolly_hhrlhf", "arxiv:2205.14135", "arxiv:2108.12409", "arxiv:2010.04245", "license:cc-by-sa-3.0", "autotrain_compatible", "text-generation-inference", "region:us" ]
2023-07-21T13:37:15Z
2023-07-25T09:07:14+00:00
22
0
--- datasets: - mosaicml/dolly_hhrlhf license: cc-by-sa-3.0 tags: - Composer - MosaicML - llm-foundry inference: false duplicated_from: mosaicml/mpt-7b-instruct --- # MPT-7B-Instruct MPT-7B-Instruct is a model for short-form instruction following. It is built by finetuning [MPT-7B](https://huggingface.co/mosaicml/mpt-7b) on a [dataset](https://huggingface.co/datasets/sam-mosaic/dolly_hhrlhf) derived from the [Databricks Dolly-15k](https://huggingface.co/datasets/databricks/databricks-dolly-15k) and the [Anthropic Helpful and Harmless (HH-RLHF)](https://huggingface.co/datasets/Anthropic/hh-rlhf) datasets. * License: _CC-By-SA-3.0_ * [Demo on Hugging Face Spaces](https://huggingface.co/spaces/mosaicml/mpt-7b-instruct) This model was trained by [MosaicML](https://www.mosaicml.com) and follows a modified decoder-only transformer architecture. ## Model Date May 5, 2023 ## Model License CC-By-SA-3.0 ## Documentation * [Blog post: Introducing MPT-7B: A New Standard for Open-Source, Commercially Usable LLMs](https://www.mosaicml.com/blog/mpt-7b) * [Codebase (mosaicml/llm-foundry repo)](https://github.com/mosaicml/llm-foundry/) * Questions: Feel free to contact us via the [MosaicML Community Slack](https://mosaicml.me/slack)! ### Example Question/Instruction **Longboi24**: > What is a quoll? **MPT-7B-Instruct**: >A Quoll (pronounced “cool”) is one of Australia’s native carnivorous marsupial mammals, which are also known as macropods or wallabies in other parts around Asia and South America ## How to Use Note: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom model architecture that is not yet part of the `transformers` package. It includes options for many training efficiency features such as [FlashAttention (Dao et al. 2022)](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), QK LayerNorm, and more. ```python import transformers model = transformers.AutoModelForCausalLM.from_pretrained( 'mosaicml/mpt-7b-instruct', trust_remote_code=True ) ``` Note: This model requires that `trust_remote_code=True` be passed to the `from_pretrained` method. This is because we use a custom `MPT` model architecture that is not yet part of the Hugging Face `transformers` package. `MPT` includes options for many training efficiency features such as [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf), [ALiBi](https://arxiv.org/abs/2108.12409), [QK LayerNorm](https://arxiv.org/abs/2010.04245), and more. To use the optimized [triton implementation](https://github.com/openai/triton) of FlashAttention, you can load the model on GPU (`cuda:0`) with `attn_impl='triton'` and with `bfloat16` precision: ```python import torch import transformers name = 'mosaicml/mpt-7b-instruct' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.attn_config['attn_impl'] = 'triton' config.init_device = 'cuda:0' # For fast initialization directly on GPU! model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, torch_dtype=torch.bfloat16, # Load model weights in bfloat16 trust_remote_code=True ) ``` Although the model was trained with a sequence length of 2048, ALiBi enables users to increase the maximum sequence length during finetuning and/or inference. 
For example: ```python import transformers name = 'mosaicml/mpt-7b-instruct' config = transformers.AutoConfig.from_pretrained(name, trust_remote_code=True) config.max_seq_len = 4096 # (input + output) tokens can now be up to 4096 model = transformers.AutoModelForCausalLM.from_pretrained( name, config=config, trust_remote_code=True ) ``` This model was trained with the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b") ``` The model can then be used, for example, within a text-generation pipeline. Note: when running Torch modules in lower precision, it is best practice to use the [torch.autocast context manager](https://pytorch.org/docs/stable/amp.html). ```python from transformers import pipeline pipe = pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0') with torch.autocast('cuda', dtype=torch.bfloat16): print( pipe('Here is a recipe for vegan banana bread:\n', max_new_tokens=100, do_sample=True, use_cache=True)) ``` ### Formatting This model was trained on data formatted in the dolly-15k format: ```python INSTRUCTION_KEY = "### Instruction:" RESPONSE_KEY = "### Response:" INTRO_BLURB = "Below is an instruction that describes a task. Write a response that appropriately completes the request." PROMPT_FOR_GENERATION_FORMAT = """{intro} {instruction_key} {instruction} {response_key} """.format( intro=INTRO_BLURB, instruction_key=INSTRUCTION_KEY, instruction="{instruction}", response_key=RESPONSE_KEY, ) example = "James decides to run 3 sprints 3 times a week. He runs 60 meters each sprint. How many total meters does he run a week? Explain before answering." fmt_ex = PROMPT_FOR_GENERATION_FORMAT.format(instruction=example) ``` In the above example, `fmt_ex` is ready to be tokenized and sent through the model. ## Model Description The architecture is a modification of a standard decoder-only transformer. The model has been modified from a standard transformer in the following ways: * It uses [FlashAttention](https://arxiv.org/pdf/2205.14135.pdf) * It uses [ALiBi (Attention with Linear Biases)](https://arxiv.org/abs/2108.12409) and does not use positional embeddings * It does not use biases | Hyperparameter | Value | |----------------|-------| |n_parameters | 6.7B | |n_layers | 32 | | n_heads | 32 | | d_model | 4096 | | vocab size | 50432 | | sequence length | 2048 | ## PreTraining Data For more details on the pretraining process, see [MPT-7B](https://huggingface.co/mosaicml/mpt-7b). The data was tokenized using the [EleutherAI/gpt-neox-20b](https://huggingface.co/EleutherAI/gpt-neox-20b) tokenizer. ### Training Configuration This model was trained on 8 A100-40GBs for about 2.3 hours using the [MosaicML Platform](https://www.mosaicml.com/platform). The model was trained with sharded data parallelism using [FSDP](https://pytorch.org/docs/stable/fsdp.html) and used the AdamW optimizer. ## Limitations and Biases _The following language is modified from [EleutherAI's GPT-NeoX-20B](https://huggingface.co/EleutherAI/gpt-neox-20b)_ MPT-7B-Instruct can produce factually incorrect output, and should not be relied on to produce factually accurate information. MPT-7B-Instruct was trained on various public datasets. While great efforts have been taken to clean the pretraining data, it is possible that this model could generate lewd, biased or otherwise offensive outputs. 
## Acknowledgements This model was finetuned by Sam Havens and the MosaicML NLP team. ## MosaicML Platform If you're interested in [training](https://www.mosaicml.com/training) and [deploying](https://www.mosaicml.com/inference) your own MPT or LLMs on the MosaicML Platform, [sign up here](https://forms.mosaicml.com/demo?utm_source=huggingface&utm_medium=referral&utm_campaign=mpt-7b). ## Disclaimer The license on this model does not constitute legal advice. We are not responsible for the actions of third parties who use this model. Please consult an attorney before using this model for commercial purposes. ## Citation Please cite this model using the following format: ``` @online{MosaicML2023Introducing, author = {MosaicML NLP Team}, title = {Introducing MPT-7B: A New Standard for Open-Source, Commercially Usable LLMs}, year = {2023}, url = {www.mosaicml.com/blog/mpt-7b}, note = {Accessed: 2023-03-28}, % change this date urldate = {2023-03-28} % change this date } ```
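For completeness, here is a minimal end-to-end sketch (not part of the original card) that ties together the pieces shown above: loading the model and tokenizer, filling a dolly-style prompt as in the Formatting section, and generating with the text-generation pipeline. It assumes a CUDA GPU is available.

```python
# Sketch only: end-to-end generation with the pieces described in the card.
import torch
import transformers

name = 'mosaicml/mpt-7b-instruct'
model = transformers.AutoModelForCausalLM.from_pretrained(
    name, torch_dtype=torch.bfloat16, trust_remote_code=True
)
tokenizer = transformers.AutoTokenizer.from_pretrained("EleutherAI/gpt-neox-20b")
pipe = transformers.pipeline('text-generation', model=model, tokenizer=tokenizer, device='cuda:0')

# Same dolly-style template as in the "Formatting" section above.
prompt_template = (
    "Below is an instruction that describes a task. "
    "Write a response that appropriately completes the request.\n"
    "### Instruction:\n{instruction}\n### Response:\n"
)
prompt = prompt_template.format(instruction="Explain what ALiBi is in one sentence.")

with torch.autocast('cuda', dtype=torch.bfloat16):
    print(pipe(prompt, max_new_tokens=100, do_sample=True, use_cache=True))
```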
[ "BLURB" ]
camila-ud/DrBERT-CASM2
camila-ud
token-classification
[ "transformers", "pytorch", "safetensors", "bert", "token-classification", "medical", "biomedical", "medkit-lib", "fr", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-07-24T11:44:54Z
2023-08-07T15:39:32+00:00
22
0
--- language: - fr library_name: transformers license: mit metrics: - seqeval pipeline_tag: token-classification tags: - medical - biomedical - medkit-lib widget: - text: La radiographie et la tomodensitométrie ont montré des micronodules diffus example_title: example 1 - text: Elle souffre d'asthme mais n'a pas besoin d'Allegra example_title: example 2 --- # DrBERT-CASM2 ## Model description **DrBERT-CASM2** is a French Named Entity Recognition model that was fine-tuned from [DrBERT](https://huggingface.co/Dr-BERT/DrBERT-4GB-CP-PubMedBERT): A PreTrained model in French for biomedical and clinical domains. It has been trained to detect the following types of entities: **problem**, **treatment** and **test**, using the medkit Trainer. - **Fine-tuned using** medkit [GitHub Repo](https://github.com/TeamHeka/medkit) - **Developed by** @camila-ud, medkit, HeKA Research team - **Dataset source** Annotated version from @aneuraz called 'corpusCasM2: A corpus of annotated clinical texts' - The annotation was performed collaboratively by master's students from Université Paris Cité. - The corpus contains documents from CAS: ``` Natalia Grabar, Vincent Claveau, and Clément Dalloux. 2018. CAS: French Corpus with Clinical Cases. In Proceedings of the Ninth International Workshop on Health Text Mining and Information Analysis, pages 122–128, Brussels, Belgium. Association for Computational Linguistics. ``` # Intended uses & limitations ## Limitations and bias This model was trained for **development and test phases**. This model is limited by its training dataset, and it should be used with caution. The results are not guaranteed, and the model should be used only in data exploration stages. The model may be able to detect entities in the early stages of the analysis of medical documents in French. The maximum token size was reduced to **128 tokens** to minimize training time. # How to use ## Install medkit First of all, please install medkit with the following command: ``` pip install 'medkit-lib[optional]' ``` Please check the [documentation](https://medkit.readthedocs.io/en/latest/user_guide/install.html) for more info and examples. ## Using the model ```python from medkit.core.text import TextDocument from medkit.text.ner.hf_entity_matcher import HFEntityMatcher matcher = HFEntityMatcher(model="camila-ud/DrBERT-CASM2") test_doc = TextDocument("Elle souffre d'asthme mais n'a pas besoin d'Allegra") detected_entities = matcher.run([test_doc.raw_segment]) # show information msg = "|".join(f"'{entity.label}':{entity.text}" for entity in detected_entities) print(f"Text: '{test_doc.text}'\n{msg}") ``` ``` Text: "Elle souffre d'asthme mais n'a pas besoin d'Allegra" 'problem':asthme|'treatment':Allegra ``` # Training data This model was fine-tuned on **CASM2**, an internal corpus of clinical cases (in French) annotated by master's students. The corpus contains more than 5000 medkit documents (~ phrases) with entities to detect. **Number of documents (~ phrases) by split** | Split | # medkit docs | | ---------- | ------------- | | Train | 5824 | | Validation | 1457 | | Test | 1821 | **Number of examples per entity type** | Split | treatment | test | problem | | ---------- | --------- | ---- | ------- | | Train | 3258 | 3990 | 6808 | | Validation | 842 | 1007 | 1745 | | Test | 994 | 1289 | 2113 | ## Training procedure This model was fine-tuned using the medkit trainer on CPU; training took about 3 hours.
# Model performance Model performance computed on the CASM2 test dataset (using the medkit seqeval evaluator) Entity|precision|recall|f1 -|-|-|- treatment|0.7492|0.7666|0.7578 test|0.7449|0.8240|0.7824 problem|0.6884|0.7304|0.7088 Overall|0.7188|0.7660|0.7416 ## How to evaluate using medkit ```python from medkit.text.ner.hf_entity_matcher import HFEntityMatcher from medkit.text.metrics.ner import SeqEvalEvaluator # load the matcher and get predicted entities by document # (test_documents is the annotated test split of medkit TextDocuments) matcher = HFEntityMatcher(model="camila-ud/DrBERT-CASM2") predicted_entities = [matcher.run([doc.raw_segment]) for doc in test_documents] evaluator = SeqEvalEvaluator(tagging_scheme="iob2") evaluator.compute(test_documents, predicted_entities=predicted_entities) ``` You can use the tokenizer from Hugging Face to evaluate by tokens instead of characters: ```python from transformers import AutoTokenizer tokenizer_drbert = AutoTokenizer.from_pretrained("camila-ud/DrBERT-CASM2", use_fast=True) evaluator = SeqEvalEvaluator(tokenizer=tokenizer_drbert, tagging_scheme="iob2") evaluator.compute(test_documents, predicted_entities=predicted_entities) ``` # Citation ``` @online{medkit-lib, author={HeKA Research Team}, title={medkit, A Python library for a learning health system}, url={https://pypi.org/project/medkit-lib/}, urldate = {2023-07-24}, } ``` ``` HeKA Research Team, “medkit, a Python library for a learning health system.” https://pypi.org/project/medkit-lib/ (accessed Jul. 24, 2023). ```
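The card only demonstrates inference through medkit. Since the checkpoint is a standard BERT token-classification model, it should also work with the plain `transformers` pipeline; the following is a quick sketch under that assumption (entity labels come from the `id2label` mapping stored in the model config).

```python
# Sketch: querying the checkpoint directly with the standard
# transformers token-classification pipeline (medkit not required).
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="camila-ud/DrBERT-CASM2",
    aggregation_strategy="simple",  # merge sub-tokens into whole entities
)

text = "Elle souffre d'asthme mais n'a pas besoin d'Allegra"
for entity in ner(text):
    print(entity["entity_group"], entity["word"], round(float(entity["score"]), 3))
```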
[ "CAS" ]
facebook/mms-tts-cas
facebook
text-to-speech
[ "transformers", "pytorch", "safetensors", "vits", "text-to-audio", "mms", "text-to-speech", "arxiv:2305.13516", "license:cc-by-nc-4.0", "endpoints_compatible", "region:us" ]
2023-09-01T16:38:40Z
2023-09-01T16:40:48+00:00
22
0
--- license: cc-by-nc-4.0 pipeline_tag: text-to-speech tags: - mms - vits --- # Massively Multilingual Speech (MMS): Tsimané Text-to-Speech This repository contains the **Tsimané (cas)** language text-to-speech (TTS) model checkpoint. This model is part of Facebook's [Massively Multilingual Speech](https://arxiv.org/abs/2305.13516) project, aiming to provide speech technology across a diverse range of languages. You can find more details about the supported languages and their ISO 639-3 codes in the [MMS Language Coverage Overview](https://dl.fbaipublicfiles.com/mms/misc/language_coverage_mms.html), and see all MMS-TTS checkpoints on the Hugging Face Hub: [facebook/mms-tts](https://huggingface.co/models?sort=trending&search=facebook%2Fmms-tts). MMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards. ## Model Details VITS (**V**ariational **I**nference with adversarial learning for end-to-end **T**ext-to-**S**peech) is an end-to-end speech synthesis model that predicts a speech waveform conditional on an input text sequence. It is a conditional variational autoencoder (VAE) comprised of a posterior encoder, decoder, and conditional prior. A set of spectrogram-based acoustic features is predicted by the flow-based module, which is formed of a Transformer-based text encoder and multiple coupling layers. The spectrogram is decoded using a stack of transposed convolutional layers, much in the same style as the HiFi-GAN vocoder. Motivated by the one-to-many nature of the TTS problem, where the same text input can be spoken in multiple ways, the model also includes a stochastic duration predictor, which allows the model to synthesise speech with different rhythms from the same input text. The model is trained end-to-end with a combination of losses derived from variational lower bound and adversarial training. To improve the expressiveness of the model, normalizing flows are applied to the conditional prior distribution. During inference, the text encodings are up-sampled based on the duration prediction module, and then mapped into the waveform using a cascade of the flow module and HiFi-GAN decoder. Due to the stochastic nature of the duration predictor, the model is non-deterministic, and thus requires a fixed seed to generate the same speech waveform. For the MMS project, a separate VITS checkpoint is trained on each language. ## Usage MMS-TTS is available in the 🤗 Transformers library from version 4.33 onwards. To use this checkpoint, first install the latest version of the library: ``` pip install --upgrade transformers accelerate ``` Then, run inference with the following code snippet: ```python from transformers import VitsModel, AutoTokenizer import torch model = VitsModel.from_pretrained("facebook/mms-tts-cas") tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-cas") text = "some example text in the Tsimané language" inputs = tokenizer(text, return_tensors="pt") with torch.no_grad(): output = model(**inputs).waveform ``` The resulting waveform can be saved as a `.wav` file: ```python import scipy scipy.io.wavfile.write("techno.wav", rate=model.config.sampling_rate, data=output[0].numpy()) ``` Or displayed in a Jupyter Notebook / Google Colab: ```python from IPython.display import Audio Audio(output, rate=model.config.sampling_rate) ``` ## BibTex citation This model was developed by Vineel Pratap et al. from Meta AI.
If you use the model, consider citing the MMS paper: ``` @article{pratap2023mms, title={Scaling Speech Technology to 1,000+ Languages}, author={Vineel Pratap and Andros Tjandra and Bowen Shi and Paden Tomasello and Arun Babu and Sayani Kundu and Ali Elkahky and Zhaoheng Ni and Apoorv Vyas and Maryam Fazel-Zarandi and Alexei Baevski and Yossi Adi and Xiaohui Zhang and Wei-Ning Hsu and Alexis Conneau and Michael Auli}, journal={arXiv}, year={2023} } ``` ## License The model is licensed as **CC-BY-NC 4.0**.
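As noted in the Model Details section, the stochastic duration predictor makes generation non-deterministic, so a fixed seed is needed to reproduce a waveform. A small sketch (building on the usage snippet above) of what that might look like:

```python
# Sketch: fixing the torch seed before the forward pass makes the
# generated waveform reproducible across runs.
import torch
from transformers import VitsModel, AutoTokenizer

model = VitsModel.from_pretrained("facebook/mms-tts-cas")
tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-cas")

inputs = tokenizer("some example text in the Tsimané language", return_tensors="pt")

torch.manual_seed(555)  # any fixed seed; reruns with the same seed give the same audio
with torch.no_grad():
    waveform = model(**inputs).waveform
```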
[ "CAS" ]
Zamoranesis/clinical_bert
Zamoranesis
fill-mask
[ "transformers", "pytorch", "bert", "fill-mask", "clinical notes", "healthcare", "medical", "pharma", "base_model:emilyalsentzer/Bio_ClinicalBERT", "base_model:finetune:emilyalsentzer/Bio_ClinicalBERT", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2023-09-21T09:52:53Z
2024-01-31T16:54:38+00:00
22
2
--- base_model: emilyalsentzer/Bio_ClinicalBERT license: mit tags: - clinical notes - healthcare - medical - pharma widget: - text: A 25 year old woman with no history of interest, who is studied for presenting a history of [MASK] pain of predominance in right hypochondrium model-index: - name: clinical_bert results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # clinical_bert This model is a fine-tuned version of [emilyalsentzer/Bio_ClinicalBERT](https://huggingface.co/emilyalsentzer/Bio_ClinicalBERT) on [PlanTL-GOB-ES/pharmaconer](https://huggingface.co/datasets/PlanTL-GOB-ES/pharmaconer). It achieves the following results on the evaluation and test set: - Validation Loss: 1.6020 - Test Loss: 1.6591 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0005 - train_batch_size: 64 - eval_batch_size: 64 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - lr_scheduler_warmup_ratio: 0.1 - lr_scheduler_warmup_steps: 100 - training_steps: 5000 ### Training results | Training Loss | Epoch | Step | Validation Loss | |:-------------:|:-----:|:----:|:---------------:| | No log | 0.78 | 100 | 1.9485 | | No log | 1.56 | 200 | 1.8681 | | No log | 2.34 | 300 | 1.8152 | | No log | 3.12 | 400 | 1.7886 | | 1.9285 | 3.91 | 500 | 1.7309 | | 1.9285 | 4.69 | 600 | 1.6810 | | 1.9285 | 5.47 | 700 | 1.7065 | | 1.9285 | 6.25 | 800 | 1.7067 | | 1.9285 | 7.03 | 900 | 1.7312 | | 1.6644 | 7.81 | 1000 | 1.7006 | | 1.6644 | 8.59 | 1100 | 1.6736 | | 1.6644 | 9.38 | 1200 | 1.6846 | | 1.6644 | 10.16 | 1300 | 1.6621 | | 1.6644 | 10.94 | 1400 | 1.6381 | | 1.5247 | 11.72 | 1500 | 1.6281 | | 1.5247 | 12.5 | 1600 | 1.6605 | | 1.5247 | 13.28 | 1700 | 1.6770 | | 1.5247 | 14.06 | 1800 | 1.6666 | | 1.5247 | 14.84 | 1900 | 1.6620 | | 1.4334 | 15.62 | 2000 | 1.6677 | | 1.4334 | 16.41 | 2100 | 1.6311 | | 1.4334 | 17.19 | 2200 | 1.6743 | | 1.4334 | 17.97 | 2300 | 1.6586 | | 1.4334 | 18.75 | 2400 | 1.6086 | | 1.3423 | 19.53 | 2500 | 1.6229 | | 1.3423 | 20.31 | 2600 | 1.6475 | | 1.3423 | 21.09 | 2700 | 1.6388 | | 1.3423 | 21.88 | 2800 | 1.6275 | | 1.3423 | 22.66 | 2900 | 1.6372 | | 1.2712 | 23.44 | 3000 | 1.6345 | | 1.2712 | 24.22 | 3100 | 1.6442 | | 1.2712 | 25.0 | 3200 | 1.6864 | | 1.2712 | 25.78 | 3300 | 1.6139 | | 1.2712 | 26.56 | 3400 | 1.6161 | | 1.215 | 27.34 | 3500 | 1.6491 | | 1.215 | 28.12 | 3600 | 1.6442 | | 1.215 | 28.91 | 3700 | 1.6409 | | 1.215 | 29.69 | 3800 | 1.6539 | | 1.215 | 30.47 | 3900 | 1.6052 | | 1.1652 | 31.25 | 4000 | 1.6459 | | 1.1652 | 32.03 | 4100 | 1.6362 | | 1.1652 | 32.81 | 4200 | 1.6413 | | 1.1652 | 33.59 | 4300 | 1.6377 | | 1.1652 | 34.38 | 4400 | 1.6344 | | 1.1213 | 35.16 | 4500 | 1.6406 | | 1.1213 | 35.94 | 4600 | 1.6113 | | 1.1213 | 36.72 | 4700 | 1.6410 | | 1.1213 | 37.5 | 4800 | 1.6378 | | 1.1213 | 38.28 | 4900 | 1.6341 | | 1.0939 | 39.06 | 5000 | 1.6020 | ### Framework versions - Transformers 4.33.2 - Pytorch 2.0.1+cu118 - Datasets 2.14.5 - Tokenizers 0.13.3
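The card does not include a usage snippet. Since the checkpoint is a standard BERT masked-language model, a quick way to query it is the `transformers` fill-mask pipeline; the sketch below (not part of the original card) reuses the widget example from the metadata above.

```python
# Sketch: querying the checkpoint with the standard fill-mask pipeline.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="Zamoranesis/clinical_bert")

text = (
    "A 25 year old woman with no history of interest, who is studied for "
    "presenting a history of [MASK] pain of predominance in right hypochondrium"
)
for prediction in fill_mask(text, top_k=5):
    print(prediction["token_str"], round(prediction["score"], 3))
```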
[ "PHARMACONER" ]
neuralmagic/bge-small-en-v1.5-sparse
neuralmagic
feature-extraction
[ "transformers", "onnx", "bert", "feature-extraction", "mteb", "sparse sparsity quantized onnx embeddings int8", "en", "license:mit", "model-index", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2023-09-21T13:21:02Z
2023-11-13T18:23:24+00:00
22
4
--- language: - en license: mit tags: - mteb - sparse sparsity quantized onnx embeddings int8 model-index: - name: bge-small-en-v1.5-sparse results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 70.71641791044776 - type: ap value: 32.850850647310004 - type: f1 value: 64.48101916414805 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 83.33962500000001 - type: ap value: 78.28706349240106 - type: f1 value: 83.27426715603062 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 40.988 - type: f1 value: 40.776679545648506 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 26.101999999999997 - type: map_at_10 value: 40.754000000000005 - type: map_at_100 value: 41.83 - type: map_at_1000 value: 41.845 - type: map_at_3 value: 36.178 - type: map_at_5 value: 38.646 - type: mrr_at_1 value: 26.6 - type: mrr_at_10 value: 40.934 - type: mrr_at_100 value: 42.015 - type: mrr_at_1000 value: 42.03 - type: mrr_at_3 value: 36.344 - type: mrr_at_5 value: 38.848 - type: ndcg_at_1 value: 26.101999999999997 - type: ndcg_at_10 value: 49.126999999999995 - type: ndcg_at_100 value: 53.815999999999995 - type: ndcg_at_1000 value: 54.178000000000004 - type: ndcg_at_3 value: 39.607 - type: ndcg_at_5 value: 44.086999999999996 - type: precision_at_1 value: 26.101999999999997 - type: precision_at_10 value: 7.596 - type: precision_at_100 value: 0.967 - type: precision_at_1000 value: 0.099 - type: precision_at_3 value: 16.524 - type: precision_at_5 value: 12.105 - type: recall_at_1 value: 26.101999999999997 - type: recall_at_10 value: 75.96000000000001 - type: recall_at_100 value: 96.65700000000001 - type: recall_at_1000 value: 99.431 - type: recall_at_3 value: 49.573 - type: recall_at_5 value: 60.526 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 43.10651535441929 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 34.41095293826606 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 56.96575970919239 - type: mrr value: 69.92503187794047 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 79.64892774481326 - type: cos_sim_spearman value: 78.953003817029 - type: euclidean_pearson value: 78.92456838230683 - type: euclidean_spearman value: 78.56504316985354 - type: manhattan_pearson value: 79.21436359014227 - type: manhattan_spearman value: 78.66263575501259 - task: type: Classification dataset: name: MTEB 
Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 81.25 - type: f1 value: 81.20841448916138 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 34.69545244587236 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 28.84301739171936 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 23.401 - type: map_at_10 value: 32.451 - type: map_at_100 value: 33.891 - type: map_at_1000 value: 34.01 - type: map_at_3 value: 29.365999999999996 - type: map_at_5 value: 31.240000000000002 - type: mrr_at_1 value: 29.9 - type: mrr_at_10 value: 38.590999999999994 - type: mrr_at_100 value: 39.587 - type: mrr_at_1000 value: 39.637 - type: mrr_at_3 value: 36.028 - type: mrr_at_5 value: 37.673 - type: ndcg_at_1 value: 29.9 - type: ndcg_at_10 value: 38.251000000000005 - type: ndcg_at_100 value: 44.354 - type: ndcg_at_1000 value: 46.642 - type: ndcg_at_3 value: 33.581 - type: ndcg_at_5 value: 35.96 - type: precision_at_1 value: 29.9 - type: precision_at_10 value: 7.439 - type: precision_at_100 value: 1.28 - type: precision_at_1000 value: 0.17700000000000002 - type: precision_at_3 value: 16.404 - type: precision_at_5 value: 12.046 - type: recall_at_1 value: 23.401 - type: recall_at_10 value: 49.305 - type: recall_at_100 value: 75.885 - type: recall_at_1000 value: 90.885 - type: recall_at_3 value: 35.341 - type: recall_at_5 value: 42.275 - type: map_at_1 value: 22.103 - type: map_at_10 value: 29.271 - type: map_at_100 value: 30.151 - type: map_at_1000 value: 30.276999999999997 - type: map_at_3 value: 27.289 - type: map_at_5 value: 28.236 - type: mrr_at_1 value: 26.943 - type: mrr_at_10 value: 33.782000000000004 - type: mrr_at_100 value: 34.459 - type: mrr_at_1000 value: 34.525 - type: mrr_at_3 value: 31.985000000000003 - type: mrr_at_5 value: 32.909 - type: ndcg_at_1 value: 26.943 - type: ndcg_at_10 value: 33.616 - type: ndcg_at_100 value: 37.669000000000004 - type: ndcg_at_1000 value: 40.247 - type: ndcg_at_3 value: 30.482 - type: ndcg_at_5 value: 31.615 - type: precision_at_1 value: 26.943 - type: precision_at_10 value: 6.146 - type: precision_at_100 value: 1.038 - type: precision_at_1000 value: 0.151 - type: precision_at_3 value: 14.521999999999998 - type: precision_at_5 value: 10.038 - type: recall_at_1 value: 22.103 - type: recall_at_10 value: 41.754999999999995 - type: recall_at_100 value: 59.636 - type: recall_at_1000 value: 76.801 - type: recall_at_3 value: 32.285000000000004 - type: recall_at_5 value: 35.684 - type: map_at_1 value: 32.565 - type: map_at_10 value: 43.07 - type: map_at_100 value: 44.102999999999994 - type: map_at_1000 value: 44.175 - type: map_at_3 value: 40.245 - type: map_at_5 value: 41.71 - type: mrr_at_1 value: 37.429 - type: mrr_at_10 value: 46.358 - type: mrr_at_100 value: 47.146 - type: mrr_at_1000 value: 47.187 - type: mrr_at_3 value: 44.086 - type: mrr_at_5 value: 45.318000000000005 - type: ndcg_at_1 value: 37.429 - type: ndcg_at_10 value: 48.398 - type: ndcg_at_100 value: 52.90899999999999 - type: ndcg_at_1000 value: 54.478 - type: ndcg_at_3 value: 
43.418 - type: ndcg_at_5 value: 45.578 - type: precision_at_1 value: 37.429 - type: precision_at_10 value: 7.856000000000001 - type: precision_at_100 value: 1.093 - type: precision_at_1000 value: 0.129 - type: precision_at_3 value: 19.331 - type: precision_at_5 value: 13.191 - type: recall_at_1 value: 32.565 - type: recall_at_10 value: 61.021 - type: recall_at_100 value: 81.105 - type: recall_at_1000 value: 92.251 - type: recall_at_3 value: 47.637 - type: recall_at_5 value: 52.871 - type: map_at_1 value: 18.108 - type: map_at_10 value: 24.613 - type: map_at_100 value: 25.624000000000002 - type: map_at_1000 value: 25.721 - type: map_at_3 value: 22.271 - type: map_at_5 value: 23.681 - type: mrr_at_1 value: 19.435 - type: mrr_at_10 value: 26.124000000000002 - type: mrr_at_100 value: 27.07 - type: mrr_at_1000 value: 27.145999999999997 - type: mrr_at_3 value: 23.748 - type: mrr_at_5 value: 25.239 - type: ndcg_at_1 value: 19.435 - type: ndcg_at_10 value: 28.632 - type: ndcg_at_100 value: 33.988 - type: ndcg_at_1000 value: 36.551 - type: ndcg_at_3 value: 24.035999999999998 - type: ndcg_at_5 value: 26.525 - type: precision_at_1 value: 19.435 - type: precision_at_10 value: 4.565 - type: precision_at_100 value: 0.771 - type: precision_at_1000 value: 0.10200000000000001 - type: precision_at_3 value: 10.169 - type: precision_at_5 value: 7.571 - type: recall_at_1 value: 18.108 - type: recall_at_10 value: 39.533 - type: recall_at_100 value: 64.854 - type: recall_at_1000 value: 84.421 - type: recall_at_3 value: 27.500000000000004 - type: recall_at_5 value: 33.314 - type: map_at_1 value: 11.087 - type: map_at_10 value: 17.323 - type: map_at_100 value: 18.569 - type: map_at_1000 value: 18.694 - type: map_at_3 value: 15.370000000000001 - type: map_at_5 value: 16.538 - type: mrr_at_1 value: 13.557 - type: mrr_at_10 value: 21.041 - type: mrr_at_100 value: 22.134 - type: mrr_at_1000 value: 22.207 - type: mrr_at_3 value: 18.843 - type: mrr_at_5 value: 20.236 - type: ndcg_at_1 value: 13.557 - type: ndcg_at_10 value: 21.571 - type: ndcg_at_100 value: 27.678000000000004 - type: ndcg_at_1000 value: 30.8 - type: ndcg_at_3 value: 17.922 - type: ndcg_at_5 value: 19.826 - type: precision_at_1 value: 13.557 - type: precision_at_10 value: 4.1290000000000004 - type: precision_at_100 value: 0.8370000000000001 - type: precision_at_1000 value: 0.125 - type: precision_at_3 value: 8.914 - type: precision_at_5 value: 6.691999999999999 - type: recall_at_1 value: 11.087 - type: recall_at_10 value: 30.94 - type: recall_at_100 value: 57.833999999999996 - type: recall_at_1000 value: 80.365 - type: recall_at_3 value: 20.854 - type: recall_at_5 value: 25.695 - type: map_at_1 value: 21.708 - type: map_at_10 value: 30.422 - type: map_at_100 value: 31.713 - type: map_at_1000 value: 31.842 - type: map_at_3 value: 27.424 - type: map_at_5 value: 29.17 - type: mrr_at_1 value: 26.756 - type: mrr_at_10 value: 35.304 - type: mrr_at_100 value: 36.296 - type: mrr_at_1000 value: 36.359 - type: mrr_at_3 value: 32.692 - type: mrr_at_5 value: 34.288999999999994 - type: ndcg_at_1 value: 26.756 - type: ndcg_at_10 value: 35.876000000000005 - type: ndcg_at_100 value: 41.708 - type: ndcg_at_1000 value: 44.359 - type: ndcg_at_3 value: 30.946 - type: ndcg_at_5 value: 33.404 - type: precision_at_1 value: 26.756 - type: precision_at_10 value: 6.795 - type: precision_at_100 value: 1.138 - type: precision_at_1000 value: 0.155 - type: precision_at_3 value: 15.046999999999999 - type: precision_at_5 value: 10.972 - type: recall_at_1 value: 21.708 - type: 
recall_at_10 value: 47.315000000000005 - type: recall_at_100 value: 72.313 - type: recall_at_1000 value: 90.199 - type: recall_at_3 value: 33.528999999999996 - type: recall_at_5 value: 39.985 - type: map_at_1 value: 18.902 - type: map_at_10 value: 26.166 - type: map_at_100 value: 27.368 - type: map_at_1000 value: 27.493000000000002 - type: map_at_3 value: 23.505000000000003 - type: map_at_5 value: 25.019000000000002 - type: mrr_at_1 value: 23.402 - type: mrr_at_10 value: 30.787 - type: mrr_at_100 value: 31.735000000000003 - type: mrr_at_1000 value: 31.806 - type: mrr_at_3 value: 28.33 - type: mrr_at_5 value: 29.711 - type: ndcg_at_1 value: 23.402 - type: ndcg_at_10 value: 30.971 - type: ndcg_at_100 value: 36.61 - type: ndcg_at_1000 value: 39.507999999999996 - type: ndcg_at_3 value: 26.352999999999998 - type: ndcg_at_5 value: 28.488000000000003 - type: precision_at_1 value: 23.402 - type: precision_at_10 value: 5.799 - type: precision_at_100 value: 1.0 - type: precision_at_1000 value: 0.14100000000000001 - type: precision_at_3 value: 12.633 - type: precision_at_5 value: 9.269 - type: recall_at_1 value: 18.902 - type: recall_at_10 value: 40.929 - type: recall_at_100 value: 65.594 - type: recall_at_1000 value: 85.961 - type: recall_at_3 value: 28.121000000000002 - type: recall_at_5 value: 33.638 - type: map_at_1 value: 19.168 - type: map_at_10 value: 25.142999999999997 - type: map_at_100 value: 25.993 - type: map_at_1000 value: 26.076 - type: map_at_3 value: 23.179 - type: map_at_5 value: 24.322 - type: mrr_at_1 value: 21.933 - type: mrr_at_10 value: 27.72 - type: mrr_at_100 value: 28.518 - type: mrr_at_1000 value: 28.582 - type: mrr_at_3 value: 25.791999999999998 - type: mrr_at_5 value: 26.958 - type: ndcg_at_1 value: 21.933 - type: ndcg_at_10 value: 28.866999999999997 - type: ndcg_at_100 value: 33.285 - type: ndcg_at_1000 value: 35.591 - type: ndcg_at_3 value: 25.202999999999996 - type: ndcg_at_5 value: 27.045 - type: precision_at_1 value: 21.933 - type: precision_at_10 value: 4.632 - type: precision_at_100 value: 0.733 - type: precision_at_1000 value: 0.101 - type: precision_at_3 value: 10.992 - type: precision_at_5 value: 7.853000000000001 - type: recall_at_1 value: 19.168 - type: recall_at_10 value: 37.899 - type: recall_at_100 value: 58.54899999999999 - type: recall_at_1000 value: 75.666 - type: recall_at_3 value: 27.831 - type: recall_at_5 value: 32.336 - type: map_at_1 value: 12.764000000000001 - type: map_at_10 value: 17.757 - type: map_at_100 value: 18.677 - type: map_at_1000 value: 18.813 - type: map_at_3 value: 16.151 - type: map_at_5 value: 16.946 - type: mrr_at_1 value: 15.726 - type: mrr_at_10 value: 21.019 - type: mrr_at_100 value: 21.856 - type: mrr_at_1000 value: 21.954 - type: mrr_at_3 value: 19.282 - type: mrr_at_5 value: 20.189 - type: ndcg_at_1 value: 15.726 - type: ndcg_at_10 value: 21.259 - type: ndcg_at_100 value: 25.868999999999996 - type: ndcg_at_1000 value: 29.425 - type: ndcg_at_3 value: 18.204 - type: ndcg_at_5 value: 19.434 - type: precision_at_1 value: 15.726 - type: precision_at_10 value: 3.8920000000000003 - type: precision_at_100 value: 0.741 - type: precision_at_1000 value: 0.121 - type: precision_at_3 value: 8.58 - type: precision_at_5 value: 6.132 - type: recall_at_1 value: 12.764000000000001 - type: recall_at_10 value: 28.639 - type: recall_at_100 value: 49.639 - type: recall_at_1000 value: 75.725 - type: recall_at_3 value: 19.883 - type: recall_at_5 value: 23.141000000000002 - type: map_at_1 value: 18.98 - type: map_at_10 value: 25.2 - type: map_at_100 
value: 26.279000000000003 - type: map_at_1000 value: 26.399 - type: map_at_3 value: 23.399 - type: map_at_5 value: 24.284 - type: mrr_at_1 value: 22.015 - type: mrr_at_10 value: 28.555000000000003 - type: mrr_at_100 value: 29.497 - type: mrr_at_1000 value: 29.574 - type: mrr_at_3 value: 26.788 - type: mrr_at_5 value: 27.576 - type: ndcg_at_1 value: 22.015 - type: ndcg_at_10 value: 29.266 - type: ndcg_at_100 value: 34.721000000000004 - type: ndcg_at_1000 value: 37.659 - type: ndcg_at_3 value: 25.741000000000003 - type: ndcg_at_5 value: 27.044 - type: precision_at_1 value: 22.015 - type: precision_at_10 value: 4.897 - type: precision_at_100 value: 0.8540000000000001 - type: precision_at_1000 value: 0.122 - type: precision_at_3 value: 11.567 - type: precision_at_5 value: 7.9479999999999995 - type: recall_at_1 value: 18.98 - type: recall_at_10 value: 38.411 - type: recall_at_100 value: 63.164 - type: recall_at_1000 value: 84.292 - type: recall_at_3 value: 28.576 - type: recall_at_5 value: 31.789 - type: map_at_1 value: 20.372 - type: map_at_10 value: 27.161 - type: map_at_100 value: 28.364 - type: map_at_1000 value: 28.554000000000002 - type: map_at_3 value: 25.135 - type: map_at_5 value: 26.200000000000003 - type: mrr_at_1 value: 24.704 - type: mrr_at_10 value: 31.219 - type: mrr_at_100 value: 32.092 - type: mrr_at_1000 value: 32.181 - type: mrr_at_3 value: 29.282000000000004 - type: mrr_at_5 value: 30.359 - type: ndcg_at_1 value: 24.704 - type: ndcg_at_10 value: 31.622 - type: ndcg_at_100 value: 36.917 - type: ndcg_at_1000 value: 40.357 - type: ndcg_at_3 value: 28.398 - type: ndcg_at_5 value: 29.764000000000003 - type: precision_at_1 value: 24.704 - type: precision_at_10 value: 5.81 - type: precision_at_100 value: 1.208 - type: precision_at_1000 value: 0.209 - type: precision_at_3 value: 13.241 - type: precision_at_5 value: 9.407 - type: recall_at_1 value: 20.372 - type: recall_at_10 value: 40.053 - type: recall_at_100 value: 64.71000000000001 - type: recall_at_1000 value: 87.607 - type: recall_at_3 value: 29.961 - type: recall_at_5 value: 34.058 - type: map_at_1 value: 14.424000000000001 - type: map_at_10 value: 20.541999999999998 - type: map_at_100 value: 21.495 - type: map_at_1000 value: 21.604 - type: map_at_3 value: 18.608 - type: map_at_5 value: 19.783 - type: mrr_at_1 value: 15.895999999999999 - type: mrr_at_10 value: 22.484 - type: mrr_at_100 value: 23.376 - type: mrr_at_1000 value: 23.467 - type: mrr_at_3 value: 20.548 - type: mrr_at_5 value: 21.731 - type: ndcg_at_1 value: 15.895999999999999 - type: ndcg_at_10 value: 24.343 - type: ndcg_at_100 value: 29.181 - type: ndcg_at_1000 value: 32.330999999999996 - type: ndcg_at_3 value: 20.518 - type: ndcg_at_5 value: 22.561999999999998 - type: precision_at_1 value: 15.895999999999999 - type: precision_at_10 value: 3.9739999999999998 - type: precision_at_100 value: 0.6799999999999999 - type: precision_at_1000 value: 0.105 - type: precision_at_3 value: 9.057 - type: precision_at_5 value: 6.654 - type: recall_at_1 value: 14.424000000000001 - type: recall_at_10 value: 34.079 - type: recall_at_100 value: 56.728 - type: recall_at_1000 value: 80.765 - type: recall_at_3 value: 23.993000000000002 - type: recall_at_5 value: 28.838 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 41.665 - type: f1 value: 37.601137843331244 - task: type: Classification dataset: name: MTEB ImdbClassification type: 
mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 74.8052 - type: ap value: 68.92588517572685 - type: f1 value: 74.66801685854456 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 91.2220702234382 - type: f1 value: 90.81687856852439 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 69.39124487004105 - type: f1 value: 51.8350043424968 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 69.80497646267652 - type: f1 value: 67.34213899244814 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 74.54270342972428 - type: f1 value: 74.02802500235784 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 30.488580544269002 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 28.80426879476371 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 31.37970068676043 - type: mrr value: 32.48523694064166 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 42.862710845031565 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 54.270000736385626 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 80.89215288990194 - type: cos_sim_spearman value: 74.386413188675 - type: euclidean_pearson value: 78.83679563989534 - type: euclidean_spearman value: 74.29328198771996 - type: manhattan_pearson value: 78.77968796707641 - type: manhattan_spearman value: 74.20887429784696 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 78.31858821914498 - type: cos_sim_spearman value: 72.2217008523832 - type: euclidean_pearson value: 75.38901061978429 - type: euclidean_spearman value: 71.81255767675184 - type: manhattan_pearson value: 75.49472202181288 - type: manhattan_spearman value: 71.96322588726144 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 
79.48334648997455 - type: cos_sim_spearman value: 80.99654029572798 - type: euclidean_pearson value: 80.46546523970035 - type: euclidean_spearman value: 80.90646216980744 - type: manhattan_pearson value: 80.35474057857608 - type: manhattan_spearman value: 80.8141299909659 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 79.73826970784727 - type: cos_sim_spearman value: 76.9926870133034 - type: euclidean_pearson value: 79.6386542120984 - type: euclidean_spearman value: 77.05041986942253 - type: manhattan_pearson value: 79.61799508502459 - type: manhattan_spearman value: 77.07169617647067 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 83.93999019426069 - type: cos_sim_spearman value: 85.21166521594695 - type: euclidean_pearson value: 84.97207676326357 - type: euclidean_spearman value: 85.40726578482739 - type: manhattan_pearson value: 85.0386693192183 - type: manhattan_spearman value: 85.49230945586409 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 80.8133974034008 - type: cos_sim_spearman value: 82.82919022688844 - type: euclidean_pearson value: 81.92587923760179 - type: euclidean_spearman value: 82.86629450518863 - type: manhattan_pearson value: 81.98232365999253 - type: manhattan_spearman value: 82.94313939920296 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 86.12872422642363 - type: cos_sim_spearman value: 87.77672179979807 - type: euclidean_pearson value: 87.76172961705947 - type: euclidean_spearman value: 87.9891393339215 - type: manhattan_pearson value: 87.78863663568221 - type: manhattan_spearman value: 88.08297053203866 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 58.82824030232733 - type: cos_sim_spearman value: 64.17079382633538 - type: euclidean_pearson value: 61.31505225602925 - type: euclidean_spearman value: 64.05080034530694 - type: manhattan_pearson value: 61.77095758943306 - type: manhattan_spearman value: 64.14475973774933 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 81.39239803497064 - type: cos_sim_spearman value: 81.76637354520439 - type: euclidean_pearson value: 82.98008209033587 - type: euclidean_spearman value: 82.18662536188657 - type: manhattan_pearson value: 82.9630328314908 - type: manhattan_spearman value: 82.13726553603003 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 79.45753132898741 - type: mrr value: 93.84029822755313 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy 
value: 99.8019801980198 - type: cos_sim_ap value: 94.58629018512772 - type: cos_sim_f1 value: 89.84771573604061 - type: cos_sim_precision value: 91.23711340206185 - type: cos_sim_recall value: 88.5 - type: dot_accuracy value: 99.74950495049505 - type: dot_ap value: 92.5761214576951 - type: dot_f1 value: 87.09841917389087 - type: dot_precision value: 88.86576482830385 - type: dot_recall value: 85.39999999999999 - type: euclidean_accuracy value: 99.80495049504951 - type: euclidean_ap value: 94.56231673602272 - type: euclidean_f1 value: 90.02531645569621 - type: euclidean_precision value: 91.17948717948718 - type: euclidean_recall value: 88.9 - type: manhattan_accuracy value: 99.8009900990099 - type: manhattan_ap value: 94.5775591647447 - type: manhattan_f1 value: 89.86384266263238 - type: manhattan_precision value: 90.64089521871821 - type: manhattan_recall value: 89.1 - type: max_accuracy value: 99.80495049504951 - type: max_ap value: 94.58629018512772 - type: max_f1 value: 90.02531645569621 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 53.088941385715735 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 33.146129414825744 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 48.7511362739003 - type: mrr value: 49.61682210763093 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 67.43820000000001 - type: ap value: 12.899489312331003 - type: f1 value: 52.03468121072981 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 57.475947934352 - type: f1 value: 57.77676730676238 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 38.3463456299738 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 83.94230196101806 - type: cos_sim_ap value: 67.00916556336148 - type: cos_sim_f1 value: 63.046014257939085 - type: cos_sim_precision value: 61.961783439490446 - type: cos_sim_recall value: 64.16886543535621 - type: dot_accuracy value: 83.18531322644095 - type: dot_ap value: 63.112896030267066 - type: dot_f1 value: 59.06565656565657 - type: dot_precision value: 56.63438256658596 - type: dot_recall value: 61.715039577836414 - type: euclidean_accuracy value: 83.94230196101806 - type: euclidean_ap value: 67.19856676674463 - type: euclidean_f1 value: 63.08428413691571 - type: euclidean_precision value: 58.9543682641596 - type: euclidean_recall value: 67.83641160949868 - type: 
manhattan_accuracy value: 83.91845979614949 - type: manhattan_ap value: 66.9845327263072 - type: manhattan_f1 value: 62.693323274236135 - type: manhattan_precision value: 59.884698534710544 - type: manhattan_recall value: 65.77836411609499 - type: max_accuracy value: 83.94230196101806 - type: max_ap value: 67.19856676674463 - type: max_f1 value: 63.08428413691571 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 88.0777738968448 - type: cos_sim_ap value: 84.19747786536 - type: cos_sim_f1 value: 75.91830995817077 - type: cos_sim_precision value: 69.84671107949033 - type: cos_sim_recall value: 83.14598090545118 - type: dot_accuracy value: 87.14246904955951 - type: dot_ap value: 82.37528804640529 - type: dot_f1 value: 74.40963166732163 - type: dot_precision value: 69.4127841098447 - type: dot_recall value: 80.18170619032954 - type: euclidean_accuracy value: 88.08359529630924 - type: euclidean_ap value: 84.22633217661986 - type: euclidean_f1 value: 76.09190339866403 - type: euclidean_precision value: 72.70304390517605 - type: euclidean_recall value: 79.81213427779488 - type: manhattan_accuracy value: 88.08359529630924 - type: manhattan_ap value: 84.18362004611083 - type: manhattan_f1 value: 76.08789625360231 - type: manhattan_precision value: 71.49336582724072 - type: manhattan_recall value: 81.3135201724669 - type: max_accuracy value: 88.08359529630924 - type: max_ap value: 84.22633217661986 - type: max_f1 value: 76.09190339866403 --- # bge-small-en-v1.5-sparse ## Usage This is the sparse ONNX variant of the [bge-small-en-v1.5](https://huggingface.co/BAAI/bge-small-en-v1.5) embeddings model accelerated with [Sparsify](https://github.com/neuralmagic/sparsify) for quantization/pruning and [DeepSparseSentenceTransformers](https://github.com/neuralmagic/deepsparse/tree/main/src/deepsparse/sentence_transformers) for inference. ```bash pip install -U deepsparse-nightly[sentence_transformers] ``` ```python from deepsparse.sentence_transformers import DeepSparseSentenceTransformer model = DeepSparseSentenceTransformer('neuralmagic/bge-small-en-v1.5-sparse', export=False) # Our sentences we like to encode sentences = ['This framework generates embeddings for each input sentence', 'Sentences are passed as a list of string.', 'The quick brown fox jumps over the lazy dog.'] # Sentences are encoded by calling model.encode() embeddings = model.encode(sentences) # Print the embeddings for sentence, embedding in zip(sentences, embeddings): print("Sentence:", sentence) print("Embedding:", embedding.shape) print("") ``` For general questions on these models and sparsification methods, reach out to the engineering team on our [community Slack](https://join.slack.com/t/discuss-neuralmagic/shared_invite/zt-q1a1cnvo-YBoICSIw3L1dmQpjBeDurQ).
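The snippet above only prints the shape of each embedding. In practice these vectors are compared to one another, most often with cosine similarity. The following sketch is not part of the upstream model card; it assumes `model.encode()` returns a 2D array compatible with NumPy, as in the standard sentence-transformers API, and ranks a small set of documents against a query:

```python
import numpy as np
from deepsparse.sentence_transformers import DeepSparseSentenceTransformer

model = DeepSparseSentenceTransformer('neuralmagic/bge-small-en-v1.5-sparse', export=False)

query = "How can I create sentence embeddings?"
docs = [
    'This framework generates embeddings for each input sentence',
    'The quick brown fox jumps over the lazy dog.',
]

# Encode the query and the documents (assumed to return 2D numpy-compatible arrays).
query_emb = np.asarray(model.encode([query]))
doc_emb = np.asarray(model.encode(docs))

# L2-normalize so that a dot product equals cosine similarity.
query_emb = query_emb / np.linalg.norm(query_emb, axis=1, keepdims=True)
doc_emb = doc_emb / np.linalg.norm(doc_emb, axis=1, keepdims=True)
scores = (query_emb @ doc_emb.T)[0]

# Print documents from most to least similar to the query.
for score, doc in sorted(zip(scores, docs), reverse=True):
    print(f"{score:.3f}  {doc}")
```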
[ "BIOSSES" ]
TheBloke/Vigostral-7B-Chat-GPTQ
TheBloke
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "LLM", "finetuned", "conversational", "fr", "base_model:bofenghuang/vigostral-7b-chat", "base_model:quantized:bofenghuang/vigostral-7b-chat", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "4-bit", "gptq", "region:us" ]
2023-10-24T16:19:47Z
2023-10-24T16:49:13+00:00
22
3
--- base_model: bofenghuang/vigostral-7b-chat language: fr license: apache-2.0 model_name: Vigostral 7B Chat pipeline_tag: text-generation tags: - LLM - finetuned inference: false model_creator: bofeng huang model_type: mistral prompt_template: "<s>[INST] <<SYS>>\nVous êtes Vigogne, un assistant IA créé par Zaion\ \ Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez.\n\ <</SYS>>\n\n{prompt} [/INST] \n" quantized_by: TheBloke --- <!-- markdownlint-disable MD041 --> <!-- header start --> <!-- 200823 --> <div style="width: auto; margin-left: auto; margin-right: auto"> <img src="https://i.imgur.com/EBdldam.jpg" alt="TheBlokeAI" style="width: 100%; min-width: 400px; display: block; margin: auto;"> </div> <div style="display: flex; justify-content: space-between; width: 100%;"> <div style="display: flex; flex-direction: column; align-items: flex-start;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://discord.gg/theblokeai">Chat & support: TheBloke's Discord server</a></p> </div> <div style="display: flex; flex-direction: column; align-items: flex-end;"> <p style="margin-top: 0.5em; margin-bottom: 0em;"><a href="https://www.patreon.com/TheBlokeAI">Want to contribute? TheBloke's Patreon page</a></p> </div> </div> <div style="text-align:center; margin-top: 0em; margin-bottom: 0em"><p style="margin-top: 0.25em; margin-bottom: 0em;">TheBloke's LLM work is generously supported by a grant from <a href="https://a16z.com">andreessen horowitz (a16z)</a></p></div> <hr style="margin-top: 1.0em; margin-bottom: 1.0em;"> <!-- header end --> # Vigostral 7B Chat - GPTQ - Model creator: [bofeng huang](https://huggingface.co/bofenghuang) - Original model: [Vigostral 7B Chat](https://huggingface.co/bofenghuang/vigostral-7b-chat) <!-- description start --> ## Description This repo contains GPTQ model files for [bofeng huang's Vigostral 7B Chat](https://huggingface.co/bofenghuang/vigostral-7b-chat). Multiple GPTQ parameter permutations are provided; see Provided Files below for details of the options provided, their parameters, and the software used to create them. <!-- description end --> <!-- repositories-available start --> ## Repositories available * [AWQ model(s) for GPU inference.](https://huggingface.co/TheBloke/Vigostral-7B-Chat-AWQ) * [GPTQ models for GPU inference, with multiple quantisation parameter options.](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ) * [2, 3, 4, 5, 6 and 8-bit GGUF models for CPU+GPU inference](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GGUF) * [bofeng huang's original unquantised fp16 model in pytorch format, for GPU inference and for further conversions](https://huggingface.co/bofenghuang/vigostral-7b-chat) <!-- repositories-available end --> <!-- prompt-template start --> ## Prompt template: Vigogne-Llama-2-Chat ``` <s>[INST] <<SYS>> Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez. <</SYS>> {prompt} [/INST] ``` <!-- prompt-template end --> <!-- README_GPTQ.md-compatible clients start --> ## Known compatible clients / servers These GPTQ models are known to work in the following inference servers/webuis. 
- [text-generation-webui](https://github.com/oobabooga/text-generation-webui) - [KobaldAI United](https://github.com/henk717/koboldai) - [LoLLMS Web UI](https://github.com/ParisNeo/lollms-webui) - [Hugging Face Text Generation Inference (TGI)](https://github.com/huggingface/text-generation-inference) This may not be a complete list; if you know of others, please let me know! <!-- README_GPTQ.md-compatible clients end --> <!-- README_GPTQ.md-provided-files start --> ## Provided files, and GPTQ parameters Multiple quantisation parameters are provided, to allow you to choose the best one for your hardware and requirements. Each separate quant is in a different branch. See below for instructions on fetching from different branches. Most GPTQ files are made with AutoGPTQ. Mistral models are currently made with Transformers. <details> <summary>Explanation of GPTQ parameters</summary> - Bits: The bit size of the quantised model. - GS: GPTQ group size. Higher numbers use less VRAM, but have lower quantisation accuracy. "None" is the lowest possible value. - Act Order: True or False. Also known as `desc_act`. True results in better quantisation accuracy. Some GPTQ clients have had issues with models that use Act Order plus Group Size, but this is generally resolved now. - Damp %: A GPTQ parameter that affects how samples are processed for quantisation. 0.01 is default, but 0.1 results in slightly better accuracy. - GPTQ dataset: The calibration dataset used during quantisation. Using a dataset more appropriate to the model's training can improve quantisation accuracy. Note that the GPTQ calibration dataset is not the same as the dataset used to train the model - please refer to the original model repo for details of the training dataset(s). - Sequence Length: The length of the dataset sequences used for quantisation. Ideally this is the same as the model sequence length. For some very long sequence models (16+K), a lower sequence length may have to be used. Note that a lower sequence length does not limit the sequence length of the quantised model. It only impacts the quantisation accuracy on longer inference sequences. - ExLlama Compatibility: Whether this file can be loaded with ExLlama, which currently only supports Llama and Mistral models in 4-bit. </details> | Branch | Bits | GS | Act Order | Damp % | GPTQ Dataset | Seq Len | Size | ExLlama | Desc | | ------ | ---- | -- | --------- | ------ | ------------ | ------- | ---- | ------- | ---- | | [main](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/main) | 4 | 128 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 4.16 GB | Yes | 4-bit, with Act Order and group size 128g. Uses even less VRAM than 64g, but with slightly lower accuracy. | | [gptq-4bit-32g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-4bit-32g-actorder_True) | 4 | 32 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 4.57 GB | Yes | 4-bit, with Act Order and group size 32g. Gives highest possible inference quality, with maximum VRAM usage. | | [gptq-8bit--1g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-8bit--1g-actorder_True) | 8 | None | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 7.52 GB | No | 8-bit, with Act Order. No group size, to lower VRAM requirements. 
| | [gptq-8bit-128g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-8bit-128g-actorder_True) | 8 | 128 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 7.68 GB | No | 8-bit, with group size 128g for higher inference quality and with Act Order for even higher accuracy. | | [gptq-8bit-32g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-8bit-32g-actorder_True) | 8 | 32 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 8.17 GB | No | 8-bit, with group size 32g and Act Order for maximum inference quality. | | [gptq-4bit-64g-actorder_True](https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ/tree/gptq-4bit-64g-actorder_True) | 4 | 64 | Yes | 0.1 | [French news](https://huggingface.co/datasets/gustavecortal/diverse_french_news) | 4096 | 4.29 GB | Yes | 4-bit, with Act Order and group size 64g. Uses less VRAM than 32g, but with slightly lower accuracy. | <!-- README_GPTQ.md-provided-files end --> <!-- README_GPTQ.md-download-from-branches start --> ## How to download, including from branches ### In text-generation-webui To download from the `main` branch, enter `TheBloke/Vigostral-7B-Chat-GPTQ` in the "Download model" box. To download from another branch, add `:branchname` to the end of the download name, eg `TheBloke/Vigostral-7B-Chat-GPTQ:gptq-4bit-32g-actorder_True` ### From the command line I recommend using the `huggingface-hub` Python library: ```shell pip3 install huggingface-hub ``` To download the `main` branch to a folder called `Vigostral-7B-Chat-GPTQ`: ```shell mkdir Vigostral-7B-Chat-GPTQ huggingface-cli download TheBloke/Vigostral-7B-Chat-GPTQ --local-dir Vigostral-7B-Chat-GPTQ --local-dir-use-symlinks False ``` To download from a different branch, add the `--revision` parameter: ```shell mkdir Vigostral-7B-Chat-GPTQ huggingface-cli download TheBloke/Vigostral-7B-Chat-GPTQ --revision gptq-4bit-32g-actorder_True --local-dir Vigostral-7B-Chat-GPTQ --local-dir-use-symlinks False ``` <details> <summary>More advanced huggingface-cli download usage</summary> If you remove the `--local-dir-use-symlinks False` parameter, the files will instead be stored in the central Hugging Face cache directory (default location on Linux is: `~/.cache/huggingface`), and symlinks will be added to the specified `--local-dir`, pointing to their real location in the cache. This allows for interrupted downloads to be resumed, and allows you to quickly clone the repo to multiple places on disk without triggering a download again. The downside, and the reason why I don't list that as the default option, is that the files are then hidden away in a cache folder and it's harder to know where your disk space is being used, and to clear it up if/when you want to remove a download model. The cache location can be changed with the `HF_HOME` environment variable, and/or the `--cache-dir` parameter to `huggingface-cli`. For more documentation on downloading with `huggingface-cli`, please see: [HF -> Hub Python Library -> Download files -> Download from the CLI](https://huggingface.co/docs/huggingface_hub/guides/download#download-from-the-cli). 
To accelerate downloads on fast connections (1Gbit/s or higher), install `hf_transfer`: ```shell pip3 install hf_transfer ``` And set environment variable `HF_HUB_ENABLE_HF_TRANSFER` to `1`: ```shell mkdir Vigostral-7B-Chat-GPTQ HF_HUB_ENABLE_HF_TRANSFER=1 huggingface-cli download TheBloke/Vigostral-7B-Chat-GPTQ --local-dir Vigostral-7B-Chat-GPTQ --local-dir-use-symlinks False ``` Windows Command Line users: You can set the environment variable by running `set HF_HUB_ENABLE_HF_TRANSFER=1` before the download command. </details> ### With `git` (**not** recommended) To clone a specific branch with `git`, use a command like this: ```shell git clone --single-branch --branch gptq-4bit-32g-actorder_True https://huggingface.co/TheBloke/Vigostral-7B-Chat-GPTQ ``` Note that using Git with HF repos is strongly discouraged. It will be much slower than using `huggingface-hub`, and will use twice as much disk space as it has to store the model files twice (it stores every byte both in the intended target folder, and again in the `.git` folder as a blob.) <!-- README_GPTQ.md-download-from-branches end --> <!-- README_GPTQ.md-text-generation-webui start --> ## How to easily download and use this model in [text-generation-webui](https://github.com/oobabooga/text-generation-webui) Please make sure you're using the latest version of [text-generation-webui](https://github.com/oobabooga/text-generation-webui). It is strongly recommended to use the text-generation-webui one-click-installers unless you're sure you know how to make a manual install. 1. Click the **Model tab**. 2. Under **Download custom model or LoRA**, enter `TheBloke/Vigostral-7B-Chat-GPTQ`. - To download from a specific branch, enter for example `TheBloke/Vigostral-7B-Chat-GPTQ:gptq-4bit-32g-actorder_True` - see Provided Files above for the list of branches for each option. 3. Click **Download**. 4. The model will start downloading. Once it's finished it will say "Done". 5. In the top left, click the refresh icon next to **Model**. 6. In the **Model** dropdown, choose the model you just downloaded: `Vigostral-7B-Chat-GPTQ` 7. The model will automatically load, and is now ready for use! 8. If you want any custom settings, set them and then click **Save settings for this model** followed by **Reload the Model** in the top right. - Note that you do not need to and should not set manual GPTQ parameters any more. These are set automatically from the file `quantize_config.json`. 9. Once you're ready, click the **Text Generation** tab and enter a prompt to get started! <!-- README_GPTQ.md-text-generation-webui end --> <!-- README_GPTQ.md-use-from-tgi start --> ## Serving this model from Text Generation Inference (TGI) It's recommended to use TGI version 1.1.0 or later. The official Docker container is: `ghcr.io/huggingface/text-generation-inference:1.1.0` Example Docker parameters: ```shell --model-id TheBloke/Vigostral-7B-Chat-GPTQ --port 3000 --quantize gptq --max-input-length 3696 --max-total-tokens 4096 --max-batch-prefill-tokens 4096 ``` Example Python code for interfacing with TGI (requires huggingface-hub 0.17.0 or later): ```shell pip3 install huggingface-hub ``` ```python from huggingface_hub import InferenceClient endpoint_url = "https://your-endpoint-url-here" prompt = "Tell me about AI" prompt_template=f'''<s>[INST] <<SYS>> Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez. 
<</SYS>> {prompt} [/INST] ''' client = InferenceClient(endpoint_url) response = client.text_generation(prompt, max_new_tokens=128, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1) print(f"Model output: {response}") ``` <!-- README_GPTQ.md-use-from-tgi end --> <!-- README_GPTQ.md-use-from-python start --> ## How to use this GPTQ model from Python code ### Install the necessary packages Requires: Transformers 4.33.0 or later, Optimum 1.12.0 or later, and AutoGPTQ 0.4.2 or later. ```shell pip3 install transformers optimum pip3 install auto-gptq --extra-index-url https://huggingface.github.io/autogptq-index/whl/cu118/ # Use cu117 if on CUDA 11.7 ``` If you have problems installing AutoGPTQ using the pre-built wheels, install it from source instead: ```shell pip3 uninstall -y auto-gptq git clone https://github.com/PanQiWei/AutoGPTQ cd AutoGPTQ git checkout v0.4.2 pip3 install . ``` ### You can then use the following code ```python from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline model_name_or_path = "TheBloke/Vigostral-7B-Chat-GPTQ" # To use a different branch, change revision # For example: revision="gptq-4bit-32g-actorder_True" model = AutoModelForCausalLM.from_pretrained(model_name_or_path, device_map="auto", trust_remote_code=False, revision="main") tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, use_fast=True) prompt = "Tell me about AI" prompt_template=f'''<s>[INST] <<SYS>> Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez. <</SYS>> {prompt} [/INST] ''' print("\n\n*** Generate:") input_ids = tokenizer(prompt_template, return_tensors='pt').input_ids.cuda() output = model.generate(inputs=input_ids, temperature=0.7, do_sample=True, top_p=0.95, top_k=40, max_new_tokens=512) print(tokenizer.decode(output[0])) # Inference can also be done using transformers' pipeline print("*** Pipeline:") pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, max_new_tokens=512, do_sample=True, temperature=0.7, top_p=0.95, top_k=40, repetition_penalty=1.1 ) print(pipe(prompt_template)[0]['generated_text']) ``` <!-- README_GPTQ.md-use-from-python end --> <!-- README_GPTQ.md-compatibility start --> ## Compatibility The files provided are tested to work with Transformers. For non-Mistral models, AutoGPTQ can also be used directly. [ExLlama](https://github.com/turboderp/exllama) is compatible with Llama and Mistral models in 4-bit. Please see the Provided Files table above for per-file compatibility. For a list of clients/servers, please see "Known compatible clients / servers", above. <!-- README_GPTQ.md-compatibility end --> <!-- footer start --> <!-- 200823 --> ## Discord For further support, and discussions on these models and AI in general, join us at: [TheBloke AI's Discord server](https://discord.gg/theblokeai) ## Thanks, and how to contribute Thanks to the [chirper.ai](https://chirper.ai) team! Thanks to Clay from [gpus.llm-utils.org](llm-utils)! I've had a lot of people ask if they can contribute. I enjoy providing models and helping people, and would love to be able to spend even more time doing it, as well as expanding into new projects like fine tuning/training. If you're able and willing to contribute it will be most gratefully received and will help me to keep providing more models, and to start work on new AI projects. 
Donaters will get priority support on any and all AI/LLM/model questions and requests, access to a private Discord room, plus other benefits. * Patreon: https://patreon.com/TheBlokeAI * Ko-Fi: https://ko-fi.com/TheBlokeAI **Special thanks to**: Aemon Algiz. **Patreon special mentions**: Pierre Kircher, Stanislav Ovsiannikov, Michael Levine, Eugene Pentland, Andrey, 준교 김, Randy H, Fred von Graf, Artur Olbinski, Caitlyn Gatomon, terasurfer, Jeff Scroggin, James Bentley, Vadim, Gabriel Puliatti, Harry Royden McLaughlin, Sean Connelly, Dan Guido, Edmond Seymore, Alicia Loh, subjectnull, AzureBlack, Manuel Alberto Morcote, Thomas Belote, Lone Striker, Chris Smitley, Vitor Caleffi, Johann-Peter Hartmann, Clay Pascal, biorpg, Brandon Frisco, sidney chen, transmissions 11, Pedro Madruga, jinyuan sun, Ajan Kanaga, Emad Mostaque, Trenton Dambrowitz, Jonathan Leane, Iucharbius, usrbinkat, vamX, George Stoitzev, Luke Pendergrass, theTransient, Olakabola, Swaroop Kallakuri, Cap'n Zoog, Brandon Phillips, Michael Dempsey, Nikolai Manek, danny, Matthew Berman, Gabriel Tamborski, alfie_i, Raymond Fosdick, Tom X Nguyen, Raven Klaugh, LangChain4j, Magnesian, Illia Dulskyi, David Ziegler, Mano Prime, Luis Javier Navarrete Lozano, Erik Bjäreholt, 阿明, Nathan Dryer, Alex, Rainer Wilmers, zynix, TL, Joseph William Delisle, John Villwock, Nathan LeClaire, Willem Michiel, Joguhyik, GodLy, OG, Alps Aficionado, Jeffrey Morgan, ReadyPlayerEmma, Tiffany J. Kim, Sebastain Graf, Spencer Kim, Michael Davis, webtim, Talal Aujan, knownsqashed, John Detwiler, Imad Khwaja, Deo Leter, Jerry Meng, Elijah Stavena, Rooh Singh, Pieter, SuperWojo, Alexandros Triantafyllidis, Stephen Murray, Ai Maven, ya boyyy, Enrico Ros, Ken Nordquist, Deep Realms, Nicholas, Spiking Neurons AB, Elle, Will Dee, Jack West, RoA, Luke @flexchar, Viktor Bowallius, Derek Yates, Subspace Studios, jjj, Toran Billups, Asp the Wyvern, Fen Risland, Ilya, NimbleBox.ai, Chadd, Nitin Borwankar, Emre, Mandus, Leonard Tan, Kalila, K, Trailburnt, S_X, Cory Kujawski Thank you to all my generous patrons and donaters! And thank you again to a16z for their generous grant. <!-- footer end --> # Original model card: bofeng huang's Vigostral 7B Chat # Vigostral-7B-Chat: A French chat LLM ***Preview*** of Vigostral-7B-Chat, a new addition to the Vigogne LLMs family, fine-tuned on [Mistral-7B-v0.1](https://huggingface.co/mistralai/Mistral-7B-v0.1). For more information, please visit the [Github repository](https://github.com/bofenghuang/vigogne). **License**: A significant portion of the training data is distilled from GPT-3.5-Turbo and GPT-4, kindly use it cautiously to avoid any violations of OpenAI's [terms of use](https://openai.com/policies/terms-of-use). ## Prompt Template We used a prompt template adapted from the chat format of Llama-2. You can apply this formatting using the [chat template](https://huggingface.co/docs/transformers/main/chat_templating) through the `apply_chat_template()` method. ```python from transformers import AutoTokenizer tokenizer = AutoTokenizer.from_pretrained("bofenghuang/vigostral-7b-chat") conversation = [ {"role": "user", "content": "Bonjour ! Comment ça va aujourd'hui ?"}, {"role": "assistant", "content": "Bonjour ! Je suis une IA, donc je n'ai pas de sentiments, mais je suis prêt à vous aider. 
Comment puis-je vous assister aujourd'hui ?"}, {"role": "user", "content": "Quelle est la hauteur de la Tour Eiffel ?"}, {"role": "assistant", "content": "La Tour Eiffel mesure environ 330 mètres de hauteur."}, {"role": "user", "content": "Comment monter en haut ?"}, ] print(tokenizer.apply_chat_template(conversation, tokenize=False, add_generation_prompt=True)) ``` You will get ``` <s>[INST] <<SYS>> Vous êtes Vigogne, un assistant IA créé par Zaion Lab. Vous suivez extrêmement bien les instructions. Aidez autant que vous le pouvez. <</SYS>> Bonjour ! Comment ça va aujourd'hui ? [/INST] Bonjour ! Je suis une IA, donc je n'ai pas de sentiments, mais je suis prêt à vous aider. Comment puis-je vous assister aujourd'hui ? </s>[INST] Quelle est la hauteur de la Tour Eiffel ? [/INST] La Tour Eiffel mesure environ 330 mètres de hauteur. </s>[INST] Comment monter en haut ? [/INST] ``` ## Usage ### Inference using the unquantized model with 🤗 Transformers ```python from typing import Dict, List, Optional import torch from transformers import AutoModelForCausalLM, AutoTokenizer, GenerationConfig, TextStreamer model_name_or_path = "bofenghuang/vigostral-7b-chat" tokenizer = AutoTokenizer.from_pretrained(model_name_or_path, padding_side="right", use_fast=False) model = AutoModelForCausalLM.from_pretrained(model_name_or_path, torch_dtype=torch.float16, device_map="auto") streamer = TextStreamer(tokenizer, timeout=10.0, skip_prompt=True, skip_special_tokens=True) def chat( query: str, history: Optional[List[Dict]] = None, temperature: float = 0.7, top_p: float = 1.0, top_k: float = 0, repetition_penalty: float = 1.1, max_new_tokens: int = 1024, **kwargs, ): if history is None: history = [] history.append({"role": "user", "content": query}) input_ids = tokenizer.apply_chat_template(history, return_tensors="pt").to(model.device) input_length = input_ids.shape[1] generated_outputs = model.generate( input_ids=input_ids, generation_config=GenerationConfig( temperature=temperature, do_sample=temperature > 0.0, top_p=top_p, top_k=top_k, repetition_penalty=repetition_penalty, max_new_tokens=max_new_tokens, pad_token_id=tokenizer.eos_token_id, **kwargs, ), streamer=streamer, return_dict_in_generate=True, ) generated_tokens = generated_outputs.sequences[0, input_length:] generated_text = tokenizer.decode(generated_tokens, skip_special_tokens=True) history.append({"role": "assistant", "content": generated_text}) return generated_text, history # 1st round response, history = chat("Un escargot parcourt 100 mètres en 5 heures. Quelle est sa vitesse ?", history=None) # Pour calculer la vitesse d'un escargot qui parcourt 100 mètres en 5 heures, nous devons diviser la distance parcourue par le temps écoulé. Voici comment procédons : # # Vitesse = Distance / Temps # # Dans ce cas, la distance est de 100 mètres et le temps est de 5 heures. Donc, nous avons : # # Vitesse = 100 mètres / 5 heures # Vitesse = 20 mètres par heure # # L'escargot parcourt donc une vitesse de 20 mètres par heure. # 2nd round response, history = chat("Quand il peut dépasser le lapin ?", history=history) # Malheureusement, je ne peux pas répondre à cette question car elle contient des informations inexactes ou incorrectes. La vitesse de l'escargot est de 20 mètres par heure, tandis que la vitesse du lapin n'est pas mentionnée dans votre question initiale. Pour comparer les vitesses des deux animaux, nous aurions besoin de connaître la vitesse du lapin. 
# 3rd round response, history = chat("Écris une histoire imaginative qui met en scène une compétition de course entre un escargot et un lapin.", history=history) # Il était une fois deux athlètes extraordinaires : un escargot nommé Escargo et un lapin appelé Lapin Rapide. Un jour, ils ont décidé de participer à une grande course pour déterminer qui était le plus rapide des deux. Tout le monde était curieux de savoir qui serait le gagnant, car ils étaient tous émerveillés par la vitesse de ces deux champions. # # La course commença dans une forêt dense, avec Escargo départageant un peu lentement, car il faut être prudent lorsqu'on est escargot. Mais au fur et à mesure qu'il avançait, il commença à ralentir encore plus. Les spectateurs commencèrent à pencher pour Lapin Rapide, qui fonçait rapidement vers la ligne d'arrivée. # # Mais soudain, Escargo se transforma en une force incroyable. Au lieu de marcher lentement comme un escargot, il courait à toute allure, faisant preuve d'une vitesse surprenante. Les spectateurs essayaient de se remémorer leurs notes de mathématiques pour calculer sa vélocité actuelle. # # Pendant ce temps, Lapin Rapide ralentissait légèrement, trouvant l'air frais trop confortable pour continuer à courir aussi vite. Il décida alors de prendre quelques pauses pour profiter de son environnement. # # Escargo continuait à courir à toute vitesse, en dépit de son handicap d'être un escargot. Les spectateurs étaient émerveillés par sa persévérance et sa volonté de gagner. Finalement, Escargo franchit la ligne d'arrivée en premier, et tous criaurent en joie. # # Les habitants de la forêt décidèrent de lui décerner le titre d'"athlète le plus courageux" pour sa performance incroyable. Quant à Lapin Rapide, il fut content de sa deuxième place, se disant simplement que les pauses étaient bien plus agréables que la compétition. Et tous vécurent heureux et satisfaits de cette course mémorable. ``` You can also use the Google Colab Notebook provided below. <a href="https://colab.research.google.com/github/bofenghuang/vigogne/blob/main/notebooks/infer_chat.ipynb" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab"/></a> ### Inference using the unquantized model with vLLM Set up an OpenAI-compatible server with the following command: ```bash # Install vLLM # This may take 5-10 minutes. # pip install vllm # Start server for Vigostral-Chat models python -m vllm.entrypoints.openai.api_server --model bofenghuang/vigostral-7b-chat # List models # curl http://localhost:8000/v1/models ``` Query the model using the openai python package. ```python import openai # Modify OpenAI's API key and API base to use vLLM's API server. openai.api_key = "EMPTY" openai.api_base = "http://localhost:8000/v1" # First model models = openai.Model.list() model = models["data"][0]["id"] # Chat completion API chat_completion = openai.ChatCompletion.create( model=model, messages=[ {"role": "user", "content": "Parle-moi de toi-même."}, ], max_tokens=1024, temperature=0.7, ) print("Chat completion results:", chat_completion) ``` ## Limitations Vigogne is still under development, and there are many limitations that have to be addressed. Please note that it is possible that the model generates harmful or biased content, incorrect information or generally unhelpful answers.
[ "CAS" ]
Cohere/Cohere-embed-multilingual-light-v3.0
Cohere
null
[ "transformers", "mteb", "model-index", "endpoints_compatible", "region:us" ]
2023-11-01T20:54:54Z
2023-11-07T12:59:57+00:00
22
13
--- tags: - mteb model-index: - name: embed-multilingual-light-v3.0 results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 70.02985074626865 - type: ap value: 33.228065779544146 - type: f1 value: 64.27173953207297 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 90.701225 - type: ap value: 87.07178174251762 - type: f1 value: 90.69168484877625 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 46.550000000000004 - type: f1 value: 44.7233215588199 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: ndcg_at_10 value: 53.369 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 44.206988765030744 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 33.913737041277 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 58.544257541214925 - type: mrr value: 72.07151651057468 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 84.79582115243736 - type: cos_sim_spearman value: 84.01396250789998 - type: euclidean_pearson value: 83.90766476102458 - type: euclidean_spearman value: 84.01396250789998 - type: manhattan_pearson value: 84.75071274784274 - type: manhattan_spearman value: 85.02482891467078 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 78.12337662337663 - type: f1 value: 77.48610340227478 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 38.68268504601174 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 32.20870648143671 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: ndcg_at_10 value: 46.259 - type: ndcg_at_10 value: 44.555 - type: ndcg_at_10 value: 56.564 - type: ndcg_at_10 value: 36.162 - type: ndcg_at_10 value: 26.185000000000002 - type: ndcg_at_10 value: 41.547 - type: ndcg_at_10 value: 39.042 - type: ndcg_at_10 value: 38.086999999999996 - type: ndcg_at_10 value: 32.088 - type: ndcg_at_10 value: 27.006999999999998 - type: 
ndcg_at_10 value: 37.336999999999996 - type: ndcg_at_10 value: 38.011 - type: ndcg_at_10 value: 32.287 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: ndcg_at_10 value: 24.804000000000002 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: ndcg_at_10 value: 38.055 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 46.665 - type: f1 value: 40.77568559660878 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: ndcg_at_10 value: 85.52499999999999 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: ndcg_at_10 value: 36.161 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: ndcg_at_10 value: 66.878 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 85.6372 - type: ap value: 80.54846874011302 - type: f1 value: 85.61438421821343 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: test revision: None metrics: - type: ndcg_at_10 value: 40.487 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 91.8559051527588 - type: f1 value: 91.6271749996447 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 62.17738258093936 - type: f1 value: 45.80307070449218 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 67.42434431741762 - type: f1 value: 65.39580264698957 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 72.60928043039677 - type: f1 value: 72.30912915707411 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 35.17967476592229 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 30.993641089208683 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 31.362481813275295 - type: mrr value: 32.43717742343303 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: ndcg_at_10 value: 32.123000000000005 - task: type: Retrieval 
dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: ndcg_at_10 value: 55.51199999999999 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: ndcg_at_10 value: 87.847 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 49.4973643968247 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 60.2135284243427 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: ndcg_at_10 value: 17.1 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 83.7330191296952 - type: cos_sim_spearman value: 77.03523134004043 - type: euclidean_pearson value: 80.86067787185137 - type: euclidean_spearman value: 77.03522959536473 - type: manhattan_pearson value: 80.76089708603587 - type: manhattan_spearman value: 76.86245377437302 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 80.46387812633851 - type: cos_sim_spearman value: 73.21878234127571 - type: euclidean_pearson value: 76.82160699895033 - type: euclidean_spearman value: 73.21878234127571 - type: manhattan_pearson value: 76.75657006349886 - type: manhattan_spearman value: 73.19160258034827 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 79.06411399119807 - type: cos_sim_spearman value: 79.49916779764082 - type: euclidean_pearson value: 79.3356521660954 - type: euclidean_spearman value: 79.49916779764082 - type: manhattan_pearson value: 79.04971532119936 - type: manhattan_spearman value: 79.16859911220654 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 80.6940934994372 - type: cos_sim_spearman value: 76.9552055757283 - type: euclidean_pearson value: 79.52818133592284 - type: euclidean_spearman value: 76.9552055757283 - type: manhattan_pearson value: 79.35220459438406 - type: manhattan_spearman value: 76.85314462036561 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 85.58608774451231 - type: cos_sim_spearman value: 86.42805701554927 - type: euclidean_pearson value: 86.01117122595934 - type: euclidean_spearman value: 86.42805701554927 - type: manhattan_pearson value: 86.01345208923057 - type: manhattan_spearman value: 86.43179450307953 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 83.18733039014667 - type: cos_sim_spearman value: 84.3339529564109 - type: euclidean_pearson value: 83.54530885349595 - type: euclidean_spearman value: 84.3339529564109 - type: manhattan_pearson value: 
83.47015931913937 - type: manhattan_spearman value: 84.22564786654777 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.88402211340522 - type: cos_sim_spearman value: 88.6693290310468 - type: euclidean_pearson value: 88.24947476618257 - type: euclidean_spearman value: 88.6693290310468 - type: manhattan_pearson value: 88.24496656367964 - type: manhattan_spearman value: 88.52029848819545 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 64.96467575926597 - type: cos_sim_spearman value: 65.30666900046252 - type: euclidean_pearson value: 66.58031971340725 - type: euclidean_spearman value: 65.30666900046252 - type: manhattan_pearson value: 66.56530433327998 - type: manhattan_spearman value: 65.42121899024113 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 85.31047656296519 - type: cos_sim_spearman value: 85.46101092708824 - type: euclidean_pearson value: 85.75896623084044 - type: euclidean_spearman value: 85.46101092708824 - type: manhattan_pearson value: 85.57323880630182 - type: manhattan_spearman value: 85.23375523080594 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 79.89731978284804 - type: mrr value: 94.28980424078465 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: ndcg_at_10 value: 67.95 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.85643564356435 - type: cos_sim_ap value: 96.59618618212247 - type: cos_sim_f1 value: 92.6221335992024 - type: cos_sim_precision value: 92.34592445328032 - type: cos_sim_recall value: 92.9 - type: dot_accuracy value: 99.85643564356435 - type: dot_ap value: 96.5961861821225 - type: dot_f1 value: 92.6221335992024 - type: dot_precision value: 92.34592445328032 - type: dot_recall value: 92.9 - type: euclidean_accuracy value: 99.85643564356435 - type: euclidean_ap value: 96.5961861821225 - type: euclidean_f1 value: 92.6221335992024 - type: euclidean_precision value: 92.34592445328032 - type: euclidean_recall value: 92.9 - type: manhattan_accuracy value: 99.85841584158416 - type: manhattan_ap value: 96.5578240948512 - type: manhattan_f1 value: 92.71523178807946 - type: manhattan_precision value: 94.4963655244029 - type: manhattan_recall value: 91.0 - type: max_accuracy value: 99.85841584158416 - type: max_ap value: 96.5961861821225 - type: max_f1 value: 92.71523178807946 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 60.84750068050385 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: 
- type: v_measure value: 33.96844721192451 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 50.454280909595205 - type: mrr value: 51.24249320940497 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 29.998438678552517 - type: cos_sim_spearman value: 30.409482543506876 - type: dot_pearson value: 29.998443850173224 - type: dot_spearman value: 30.409482543506876 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: ndcg_at_10 value: 78.93 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: ndcg_at_10 value: 29.482999999999997 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 70.65859999999999 - type: ap value: 15.03693738050973 - type: f1 value: 54.94379403846167 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 64.4567062818336 - type: f1 value: 64.48980729427107 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 42.08554991843959 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 84.75293556654945 - type: cos_sim_ap value: 69.40551043272129 - type: cos_sim_f1 value: 65.56335231034026 - type: cos_sim_precision value: 65.79856497475419 - type: cos_sim_recall value: 65.32981530343008 - type: dot_accuracy value: 84.75293556654945 - type: dot_ap value: 69.40550704470631 - type: dot_f1 value: 65.56335231034026 - type: dot_precision value: 65.79856497475419 - type: dot_recall value: 65.32981530343008 - type: euclidean_accuracy value: 84.75293556654945 - type: euclidean_ap value: 69.4055136381454 - type: euclidean_f1 value: 65.56335231034026 - type: euclidean_precision value: 65.79856497475419 - type: euclidean_recall value: 65.32981530343008 - type: manhattan_accuracy value: 84.6337247422066 - type: manhattan_ap value: 69.13628354134198 - type: manhattan_f1 value: 65.46998180715585 - type: manhattan_precision value: 60.58361391694726 - type: manhattan_recall value: 71.21372031662268 - type: max_accuracy value: 84.75293556654945 - type: max_ap value: 69.4055136381454 - type: max_f1 value: 65.56335231034026 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.04800714091667 - type: cos_sim_ap value: 85.84596325009252 - type: cos_sim_f1 value: 78.39228527221042 - type: cos_sim_precision value: 73.58643518205768 - type: 
cos_sim_recall value: 83.86972590083154 - type: dot_accuracy value: 89.04800714091667 - type: dot_ap value: 85.8459646697087 - type: dot_f1 value: 78.39228527221042 - type: dot_precision value: 73.58643518205768 - type: dot_recall value: 83.86972590083154 - type: euclidean_accuracy value: 89.04800714091667 - type: euclidean_ap value: 85.84596376376919 - type: euclidean_f1 value: 78.39228527221042 - type: euclidean_precision value: 73.58643518205768 - type: euclidean_recall value: 83.86972590083154 - type: manhattan_accuracy value: 89.0266620095471 - type: manhattan_ap value: 85.80124417850608 - type: manhattan_f1 value: 78.37817859254879 - type: manhattan_precision value: 75.36963321012226 - type: manhattan_recall value: 81.63689559593472 - type: max_accuracy value: 89.04800714091667 - type: max_ap value: 85.8459646697087 - type: max_f1 value: 78.39228527221042 --- # Cohere embed-multilingual-light-v3.0 This repository contains the tokenizer for the Cohere `embed-multilingual-light-v3.0` model. See our blogpost [Cohere Embed V3](https://txt.cohere.com/introducing-embed-v3/) for more details on this model. You can use the embedding model either via the Cohere API, AWS SageMaker or in your private deployments. ## Usage Cohere API The following code snippet shows the usage of the Cohere API. Install the cohere SDK via: ``` pip install -U cohere ``` Get your free API key on: www.cohere.com ```python # This snippet shows and example how to use the Cohere Embed V3 models for semantic search. # Make sure to have the Cohere SDK in at least v4.30 install: pip install -U cohere # Get your API key from: www.cohere.com import cohere import numpy as np cohere_key = "{YOUR_COHERE_API_KEY}" #Get your API key from www.cohere.com co = cohere.Client(cohere_key) docs = ["The capital of France is Paris", "PyTorch is a machine learning framework based on the Torch library.", "The average cat lifespan is between 13-17 years"] #Encode your documents with input type 'search_document' doc_emb = co.embed(docs, input_type="search_document", model="embed-multilingual-light-v3.0").embeddings doc_emb = np.asarray(doc_emb) #Encode your query with input type 'search_query' query = "What is Pytorch" query_emb = co.embed([query], input_type="search_query", model="embed-multilingual-light-v3.0").embeddings query_emb = np.asarray(query_emb) query_emb.shape #Compute the dot product between query embedding and document embedding scores = np.dot(query_emb, doc_emb.T)[0] #Find the highest scores max_idx = np.argsort(-scores) print(f"Query: {query}") for idx in max_idx: print(f"Score: {scores[idx]:.2f}") print(docs[idx]) print("--------") ``` ## Usage AWS SageMaker The embedding model can be privately deployed in your AWS Cloud using our [AWS SageMaker marketplace offering](https://aws.amazon.com/marketplace/pp/prodview-z6huxszcqc25i). It runs privately in your VPC, with latencies as low as 5ms for query encoding. ## Usage AWS Bedrock Soon the model will also be available via AWS Bedrock. Stay tuned ## Private Deployment You want to run the model on your own hardware? [Contact Sales](https://cohere.com/contact-sales) to learn more. ## Supported Languages This model was trained on nearly 1B English training pairs and nearly 0.5B Non-English training pairs from 100+ languages. Evaluation results can be found in the [Embed V3.0 Benchmark Results spreadsheet](https://docs.google.com/spreadsheets/d/1w7gnHWMDBdEUrmHgSfDnGHJgVQE5aOiXCCwO3uNH_mI/edit?usp=sharing).
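Because this repository contains the model's tokenizer rather than its weights, one practical local use is checking how many tokens a text consumes before sending it to the API. The sketch below is not from the upstream card; it assumes the tokenizer files in this repo load through transformers' `AutoTokenizer` (the repo's `transformers` tag suggests they do), while the embeddings themselves must still come from the Cohere API as shown above:

```python
from transformers import AutoTokenizer

# Assumption: the tokenizer files shipped in this repository are loadable via AutoTokenizer.
tokenizer = AutoTokenizer.from_pretrained("Cohere/Cohere-embed-multilingual-light-v3.0")

texts = [
    "The capital of France is Paris",
    "PyTorch is a machine learning framework based on the Torch library.",
]

# Count tokens per input; useful for estimating request size before calling co.embed().
for text in texts:
    token_ids = tokenizer.encode(text)
    print(f"{len(token_ids):3d} tokens | {text}")
```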
[ "BIOSSES", "SCIFACT" ]
lomahony/pythia-1.4b-helpful-sft
lomahony
text-generation
[ "transformers", "pytorch", "gpt_neox", "text-generation", "causal-lm", "pythia", "en", "dataset:Anthropic/hh-rlhf", "arxiv:2101.00027", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2023-11-08T15:50:11Z
2024-05-14T19:13:25+00:00
22
0
--- datasets: - Anthropic/hh-rlhf language: - en license: apache-2.0 tags: - pytorch - causal-lm - pythia --- [Pythia-1.4b](https://huggingface.co/EleutherAI/pythia-1.4b) supervised finetuned using TRLx library with the helpful subset of [Anthropic-hh-rlhf dataset](https://huggingface.co/datasets/Anthropic/hh-rlhf) for 1 epoch. Checkpoints are also uploaded. Fully reproducible finetuning code is available on [GitHub](https://github.com/lauraaisling/trlx-pythia/tree/main) [wandb log](https://wandb.ai/lauraomahony999/pythia-sft/runs/ydaj2ks8) See [Pythia-1.4b](https://huggingface.co/EleutherAI/pythia-1.4b) for model details [(paper)](https://arxiv.org/abs/2101.00027). See further details of these models in the paper [Attributing Mode Collapse in the Fine-Tuning of Large Language Models](https://openreview.net/pdf?id=3pDMYjpOxk). You can cite these models if they are helpful as follows: <pre> @inproceedings{o2024attributing, title={Attributing Mode Collapse in the Fine-Tuning of Large Language Models}, author={O’Mahony, Laura and Grinsztajn, Leo and Schoelkopf, Hailey and Biderman, Stella}, booktitle={ICLR 2024, Mathematical and Empirical Understanding of Foundation Models (ME-FoMo) workshop}, year={2024} } </pre> hf (pretrained=lomahony/pythia-1.4b-helpful-sft), gen_kwargs: (None), limit: None, num_fewshot: 0, batch_size: 16 | Tasks |Version|Filter|n-shot| Metric | Value | |Stderr| |--------------|------:|------|-----:|---------------|------:|---|------| |arc_challenge | 1|none | 0|acc | 0.2679|± |0.0129| | | |none | 0|acc_norm | 0.2978|± |0.0134| |arc_easy | 1|none | 0|acc | 0.6120|± |0.0100| | | |none | 0|acc_norm | 0.5282|± |0.0102| |boolq | 2|none | 0|acc | 0.6260|± |0.0085| |hellaswag | 1|none | 0|acc | 0.4097|± |0.0049| | | |none | 0|acc_norm | 0.5212|± |0.0050| |lambada_openai| 1|none | 0|perplexity | 6.4836|± |0.1838| | | |none | 0|acc | 0.5789|± |0.0069| |openbookqa | 1|none | 0|acc | 0.2120|± |0.0183| | | |none | 0|acc_norm | 0.3340|± |0.0211| |piqa | 1|none | 0|acc | 0.7100|± |0.0106| | | |none | 0|acc_norm | 0.7144|± |0.0105| |sciq | 1|none | 0|acc | 0.8540|± |0.0112| | | |none | 0|acc_norm | 0.7830|± |0.0130| |wikitext | 2|none | 0|word_perplexity|15.8394|± |N/A | | | |none | 0|byte_perplexity| 1.6763|± |N/A | | | |none | 0|bits_per_byte | 0.7453|± |N/A | |winogrande | 1|none | 0|acc | 0.5872|± |0.0138| hf (pretrained=lomahony/pythia-1.4b-helpful-sft), gen_kwargs: (None), limit: None, num_fewshot: 5, batch_size: 16 | Tasks |Version|Filter|n-shot| Metric | Value | |Stderr| |--------------|------:|------|-----:|---------------|------:|---|------| |arc_challenge | 1|none | 5|acc | 0.2892|± |0.0133| | | |none | 5|acc_norm | 0.3097|± |0.0135| |arc_easy | 1|none | 5|acc | 0.6444|± |0.0098| | | |none | 5|acc_norm | 0.6309|± |0.0099| |boolq | 2|none | 5|acc | 0.6333|± |0.0084| |hellaswag | 1|none | 5|acc | 0.4065|± |0.0049| | | |none | 5|acc_norm | 0.5215|± |0.0050| |lambada_openai| 1|none | 5|perplexity | 9.7040|± |0.2887| | | |none | 5|acc | 0.4951|± |0.0070| |openbookqa | 1|none | 5|acc | 0.2220|± |0.0186| | | |none | 5|acc_norm | 0.3100|± |0.0207| |piqa | 1|none | 5|acc | 0.7029|± |0.0107| | | |none | 5|acc_norm | 0.7127|± |0.0106| |sciq | 1|none | 5|acc | 0.9170|± |0.0087| | | |none | 5|acc_norm | 0.9160|± |0.0088| |wikitext | 2|none | 5|word_perplexity|15.8394|± |N/A | | | |none | 5|byte_perplexity| 1.6763|± |N/A | | | |none | 5|bits_per_byte | 0.7453|± |N/A | |winogrande | 1|none | 5|acc | 0.5699|± |0.0139|
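The two result blocks above follow the output format of EleutherAI's lm-evaluation-harness with the `hf` model backend. A rough sketch of how the 0-shot run could be reproduced is given below; it is not taken from the original card, the exact CLI flags vary between harness versions, and the task list is inferred from the table rather than stated by the author:

```shell
# Hedged sketch: reproduce the 0-shot evaluation with lm-evaluation-harness
# (flag names are those of recent 0.4.x releases and may differ in other versions).
pip install lm-eval

lm_eval --model hf \
    --model_args pretrained=lomahony/pythia-1.4b-helpful-sft \
    --tasks arc_challenge,arc_easy,boolq,hellaswag,lambada_openai,openbookqa,piqa,sciq,wikitext,winogrande \
    --num_fewshot 0 \
    --batch_size 16
```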
[ "SCIQ" ]
oongaboongahacker/phi-2
oongaboongahacker
text-generation
[ "transformers", "pytorch", "mixformer-sequential", "text-generation", "custom_code", "autotrain_compatible", "region:us" ]
2023-12-13T13:01:48Z
2023-12-13T13:24:37+00:00
22
22
--- {} --- THE MODEL IS NOT OWNED BY ME IN ANY CASE. THIS IS SOLELY THE PROPERTY OF MICROSOFT UNDER THE FOLLOWING LICENSE: MICROSOFT RESEARCH LICENSE TERMS IF YOU LIVE IN THE UNITED STATES, PLEASE READ THE “BINDING ARBITRATION AND CLASS ACTION WAIVER” SECTION BELOW. IT AFFECTS HOW DISPUTES ARE RESOLVED. These license terms are an agreement between you and Microsoft Corporation (or one of its affiliates). They apply to the source code, object code, machine learning models, or data (collectively “Materials”) that accompany this license. IF YOU COMPLY WITH THESE LICENSE TERMS, YOU HAVE THE RIGHTS BELOW. BY USING THE MATERIALS, YOU ACCEPT THESE TERMS. 1) INSTALLATION AND USE RIGHTS TO THE MATERIALS. Subject to the terms of this agreement, you have the below rights, if applicable, to use the Materials solely for non-commercial, non-revenue generating, research purposes: a) Source Code. If source code is included, you may use and modify the source code, but you may not distribute the source code. b) Object Code. If object code is included, you may use the object code, but you may not distribute the object code. c) Models. If machine learning model(s) are included, you may use the model(s), but you may not distribute the models. d) Data. If data is included, you may use and modify the data, but your use and modification must be consistent with the consent under which the data was provided and/or gathered and you may not distribute the data or your modifications to the data. 2) SCOPE OF LICENSE. The Materials are licensed, not sold. Microsoft reserves all other rights. Unless applicable law gives you more rights despite this limitation, you will not (and have no right to): a) work around any technical limitations in the Materials that only allow you to use it in certain ways; b) reverse engineer, decompile or disassemble the Materials; c) remove, minimize, block, or modify any notices of Microsoft or its suppliers in the Materials; d) use the Materials in any way that is against the law or to create or propagate malware; or e) share, publish, distribute or lend the Materials, provide the Materials as a stand-alone hosted solution for others to use, or transfer the Materials or this agreement to any third party. 3) PERSONAL DATA. If the data (set forth in Section 1(c) above) includes or is found to include any data that enables any ability to identify an individual (“Personal Data”), you will not use such Personal Data for any purpose other than was authorized and consented to by the data subject/research participant. You will not use Personal Data to contact any person. You will keep Personal Data in strict confidence. You will not share any Personal Data that is collected or in your possession with any third party for any reason and as required under the original consent agreement. Further, you will destroy the Personal Data and any backup or copies, immediately upon the completion of your research. 4) LICENSE TO MICROSOFT. Notwithstanding the limitations in Section 1, you may distribute your modifications back to Microsoft, and if you do provide Microsoft with modifications of the Materials, you hereby grant Microsoft, without any restrictions or limitations, a non-exclusive, perpetual, irrevocable, royalty-free, assignable and sub-licensable license, to reproduce, publicly perform or display, install, use, modify, post, distribute, make and have made, sell and transfer such modifications and derivatives for any purpose. 5) PUBLICATION. 
You may publish (or present papers or articles) on your results from using the Materials provided that no material or substantial portion of the Materials is included in any such publication or presentation. 6) FEEDBACK. Any feedback about the Materials provided by you to us is voluntarily given, and Microsoft shall be free to use the feedback as it sees fit without obligation or restriction of any kind, even if the feedback is designated by you as confidential. Such feedback shall be considered a contribution and licensed to Microsoft under the terms of Section 4 above. 7) EXPORT RESTRICTIONS. You must comply with all domestic and international export laws and regulations that apply to the Materials, which include restrictions on destinations, end users, and end use. For further information on export restrictions, visit (aka.ms/exporting). 8) SUPPORT SERVICES. Microsoft is not obligated under this agreement to provide any support services for the Materials. Any support provided is “as is”, “with all faults”, and without warranty of any kind. 9) BINDING ARBITRATION AND CLASS ACTION WAIVER. This Section applies if you live in (or, if a business, your principal place of business is in) the United States. If you and Microsoft have a dispute, you and Microsoft agree to try for 60 days to resolve it informally. If you and Microsoft can’t, you and Microsoft agree to binding individual arbitration before the American Arbitration Association under the Federal Arbitration Act (“FAA”), and not to sue in court in front of a judge or jury. Instead, a neutral arbitrator will decide. Class action lawsuits, class-wide arbitrations, private attorney-general actions, and any other proceeding where someone acts in a representative capacity are not allowed; nor is combining individual proceedings without the consent of all parties. The complete Arbitration Agreement contains more terms and is at aka.ms/arb-agreement-1. You and Microsoft agree to these terms. 10) ENTIRE AGREEMENT. This agreement, and any other terms Microsoft may provide for supplements, updates, or third-party applications, is the entire agreement for the Materials. 11) APPLICABLE LAW AND PLACE TO RESOLVE DISPUTES. If you acquired the Materials in the United States or Canada, the laws of the state or province where you live (or, if a business, where your principal place of business is located) govern the interpretation of this agreement, claims for its breach, and all other claims (including consumer protection, unfair competition, and tort claims), regardless of conflict of laws principles, except that the FAA governs everything related to arbitration. If you acquired the Materials in any other country, its laws apply, except that the FAA governs everything related to arbitration. If U.S. federal jurisdiction exists, you and Microsoft consent to exclusive jurisdiction and venue in the federal court in King County, Washington for all disputes heard in court (excluding arbitration). If not, you and Microsoft consent to exclusive jurisdiction and venue in the Superior Court of King County, Washington for all disputes heard in court (excluding arbitration). 12) CONSUMER RIGHTS; REGIONAL VARIATIONS. This agreement describes certain legal rights. You may have other rights, including consumer rights, under the laws of your state, province, or country. Separate and apart from your relationship with Microsoft, you may also have rights with respect to the party from which you acquired the Materials. 
This agreement does not change those other rights if the laws of your state, province, or country do not permit it to do so. For example, if you acquired the Materials in one of the below regions, or mandatory country law applies, then the following provisions apply to you: a) Australia. You have statutory guarantees under the Australian Consumer Law and nothing in this agreement is intended to affect those rights. b) Canada. If you acquired this software in Canada, you may stop receiving updates by turning off the automatic update feature, disconnecting your device from the Internet (if and when you re-connect to the Internet, however, the Materials will resume checking for and installing updates), or uninstalling the Materials. The product documentation, if any, may also specify how to turn off updates for your specific device or software. c) Germany and Austria. i. Warranty. The properly licensed software will perform substantially as described in any Microsoft materials that accompany the Materials. However, Microsoft gives no contractual guarantee in relation to the licensed software. ii. Limitation of Liability. In case of intentional conduct, gross negligence, claims based on the Product Liability Act, as well as, in case of death or personal or physical injury, Microsoft is liable according to the statutory law. Subject to the foregoing clause (ii), Microsoft will only be liable for slight negligence if Microsoft is in breach of such material contractual obligations, the fulfillment of which facilitate the due performance of this agreement, the breach of which would endanger the purpose of this agreement and the compliance with which a party may constantly trust in (so-called "cardinal obligations"). In other cases of slight negligence, Microsoft will not be liable for slight negligence. 13) DISCLAIMER OF WARRANTY. THE MATERIALS ARE LICENSED “AS IS.” YOU BEAR THE RISK OF USING THEM. MICROSOFT GIVES NO EXPRESS WARRANTIES, GUARANTEES, OR CONDITIONS. TO THE EXTENT PERMITTED UNDER APPLICABLE LAWS, MICROSOFT EXCLUDES ALL IMPLIED WARRANTIES, INCLUDING MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. 14) LIMITATION ON AND EXCLUSION OF DAMAGES. IF YOU HAVE ANY BASIS FOR RECOVERING DAMAGES DESPITE THE PRECEDING DISCLAIMER OF WARRANTY, YOU CAN RECOVER FROM MICROSOFT AND ITS SUPPLIERS ONLY DIRECT DAMAGES UP TO U.S. $5.00. YOU CANNOT RECOVER ANY OTHER DAMAGES, INCLUDING CONSEQUENTIAL, LOST PROFITS, SPECIAL, INDIRECT OR INCIDENTAL DAMAGES. This limitation applies to (a) anything related to the Materials, services, content (including code) on third party Internet sites, or third party applications; and (b) claims for breach of contract, warranty, guarantee, or condition; strict liability, negligence, or other tort; or any other claim; in each case to the extent permitted by applicable law. It also applies even if Microsoft knew or should have known about the possibility of the damages. The above limitation or exclusion may not apply to you because your state, province, or country may not allow the exclusion or limitation of incidental, consequential, or other damages.
[ "BEAR" ]
ntc-ai/SDXL-LoRA-slider.art-by-artgerm-and-greg-rutkowski-and-alphonse-mucha
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-22T07:41:06Z
2023-12-22T07:41:09+00:00
22
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/art by artgerm and greg rutkowski and alphonse mucha.../art by artgerm and greg rutkowski and alphonse mucha_17_3.0.png widget: - text: art by artgerm and greg rutkowski and alphonse mucha output: url: images/art by artgerm and greg rutkowski and alphonse mucha_17_3.0.png - text: art by artgerm and greg rutkowski and alphonse mucha output: url: images/art by artgerm and greg rutkowski and alphonse mucha_19_3.0.png - text: art by artgerm and greg rutkowski and alphonse mucha output: url: images/art by artgerm and greg rutkowski and alphonse mucha_20_3.0.png - text: art by artgerm and greg rutkowski and alphonse mucha output: url: images/art by artgerm and greg rutkowski and alphonse mucha_21_3.0.png - text: art by artgerm and greg rutkowski and alphonse mucha output: url: images/art by artgerm and greg rutkowski and alphonse mucha_22_3.0.png inference: false instance_prompt: art by artgerm and greg rutkowski and alphonse mucha --- # ntcai.xyz slider - art by artgerm and greg rutkowski and alphonse mucha (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/art by artgerm and greg rutkowski and alphonse mucha_17_-3.0.png" width=256 height=256 /> | <img src="images/art by artgerm and greg rutkowski and alphonse mucha_17_0.0.png" width=256 height=256 /> | <img src="images/art by artgerm and greg rutkowski and alphonse mucha_17_3.0.png" width=256 height=256 /> | | <img src="images/art by artgerm and greg rutkowski and alphonse mucha_19_-3.0.png" width=256 height=256 /> | <img src="images/art by artgerm and greg rutkowski and alphonse mucha_19_0.0.png" width=256 height=256 /> | <img src="images/art by artgerm and greg rutkowski and alphonse mucha_19_3.0.png" width=256 height=256 /> | | <img src="images/art by artgerm and greg rutkowski and alphonse mucha_20_-3.0.png" width=256 height=256 /> | <img src="images/art by artgerm and greg rutkowski and alphonse mucha_20_0.0.png" width=256 height=256 /> | <img src="images/art by artgerm and greg rutkowski and alphonse mucha_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. 
## Trigger words You can apply this LoRA with trigger words for additional effect: ``` art by artgerm and greg rutkowski and alphonse mucha ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.art-by-artgerm-and-greg-rutkowski-and-alphonse-mucha', weight_name='art by artgerm and greg rutkowski and alphonse mucha.safetensors', adapter_name="art by artgerm and greg rutkowski and alphonse mucha") # Activate the LoRA pipe.set_adapters(["art by artgerm and greg rutkowski and alphonse mucha"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, art by artgerm and greg rutkowski and alphonse mucha" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 540+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.appalled
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-29T13:54:12Z
2023-12-29T13:54:15+00:00
22
2
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/appalled.../appalled_17_3.0.png widget: - text: appalled output: url: images/appalled_17_3.0.png - text: appalled output: url: images/appalled_19_3.0.png - text: appalled output: url: images/appalled_20_3.0.png - text: appalled output: url: images/appalled_21_3.0.png - text: appalled output: url: images/appalled_22_3.0.png inference: false instance_prompt: appalled --- # ntcai.xyz slider - appalled (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/appalled_17_-3.0.png" width=256 height=256 /> | <img src="images/appalled_17_0.0.png" width=256 height=256 /> | <img src="images/appalled_17_3.0.png" width=256 height=256 /> | | <img src="images/appalled_19_-3.0.png" width=256 height=256 /> | <img src="images/appalled_19_0.0.png" width=256 height=256 /> | <img src="images/appalled_19_3.0.png" width=256 height=256 /> | | <img src="images/appalled_20_-3.0.png" width=256 height=256 /> | <img src="images/appalled_20_0.0.png" width=256 height=256 /> | <img src="images/appalled_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. ## Trigger words You can apply this LoRA with trigger words for additional effect: ``` appalled ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.appalled', weight_name='appalled.safetensors', adapter_name="appalled") # Activate the LoRA pipe.set_adapters(["appalled"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, appalled" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 720+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
ntc-ai/SDXL-LoRA-slider.gorgeous
ntc-ai
text-to-image
[ "diffusers", "text-to-image", "stable-diffusion-xl", "lora", "template:sd-lora", "template:sdxl-lora", "sdxl-sliders", "ntcai.xyz-sliders", "concept", "en", "base_model:stabilityai/stable-diffusion-xl-base-1.0", "base_model:adapter:stabilityai/stable-diffusion-xl-base-1.0", "license:mit", "region:us" ]
2023-12-30T07:55:43Z
2023-12-30T07:55:51+00:00
22
0
--- base_model: stabilityai/stable-diffusion-xl-base-1.0 language: - en license: mit tags: - text-to-image - stable-diffusion-xl - lora - template:sd-lora - template:sdxl-lora - sdxl-sliders - ntcai.xyz-sliders - concept - diffusers thumbnail: images/evaluate/gorgeous.../gorgeous_17_3.0.png widget: - text: gorgeous output: url: images/gorgeous_17_3.0.png - text: gorgeous output: url: images/gorgeous_19_3.0.png - text: gorgeous output: url: images/gorgeous_20_3.0.png - text: gorgeous output: url: images/gorgeous_21_3.0.png - text: gorgeous output: url: images/gorgeous_22_3.0.png inference: false instance_prompt: gorgeous --- # ntcai.xyz slider - gorgeous (SDXL LoRA) | Strength: -3 | Strength: 0 | Strength: 3 | | --- | --- | --- | | <img src="images/gorgeous_17_-3.0.png" width=256 height=256 /> | <img src="images/gorgeous_17_0.0.png" width=256 height=256 /> | <img src="images/gorgeous_17_3.0.png" width=256 height=256 /> | | <img src="images/gorgeous_19_-3.0.png" width=256 height=256 /> | <img src="images/gorgeous_19_0.0.png" width=256 height=256 /> | <img src="images/gorgeous_19_3.0.png" width=256 height=256 /> | | <img src="images/gorgeous_20_-3.0.png" width=256 height=256 /> | <img src="images/gorgeous_20_0.0.png" width=256 height=256 /> | <img src="images/gorgeous_20_3.0.png" width=256 height=256 /> | ## Download Weights for this model are available in Safetensors format. ## Trigger words You can apply this LoRA with trigger words for additional effect: ``` gorgeous ``` ## Use in diffusers ```python from diffusers import StableDiffusionXLPipeline from diffusers import EulerAncestralDiscreteScheduler import torch pipe = StableDiffusionXLPipeline.from_single_file("https://huggingface.co/martyn/sdxl-turbo-mario-merge-top-rated/blob/main/topRatedTurboxlLCM_v10.safetensors") pipe.to("cuda") pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config) # Load the LoRA pipe.load_lora_weights('ntc-ai/SDXL-LoRA-slider.gorgeous', weight_name='gorgeous.safetensors', adapter_name="gorgeous") # Activate the LoRA pipe.set_adapters(["gorgeous"], adapter_weights=[2.0]) prompt = "medieval rich kingpin sitting in a tavern, gorgeous" negative_prompt = "nsfw" width = 512 height = 512 num_inference_steps = 10 guidance_scale = 2 image = pipe(prompt, negative_prompt=negative_prompt, width=width, height=height, guidance_scale=guidance_scale, num_inference_steps=num_inference_steps).images[0] image.save('result.png') ``` ## Support the Patreon If you like this model please consider [joining our Patreon](https://www.patreon.com/NTCAI). By joining our Patreon, you'll gain access to an ever-growing library of over 730+ unique and diverse LoRAs, covering a wide range of styles and genres. You'll also receive early access to new models and updates, exclusive behind-the-scenes content, and the powerful LoRA slider creator, allowing you to craft your own custom LoRAs and experiment with endless possibilities. Your support on Patreon will allow us to continue developing and refining new models. ## Other resources - [CivitAI](https://civitai.com/user/ntc) - Follow ntc on Civit for even more LoRAs - [ntcai.xyz](https://ntcai.xyz) - See ntcai.xyz to find more articles and LoRAs
[ "CRAFT" ]
Dagobert42/distilbert-base-uncased-biored-augmented
Dagobert42
token-classification
[ "transformers", "safetensors", "distilbert", "token-classification", "low-resource NER", "token_classification", "biomedicine", "medical NER", "generated_from_trainer", "en", "dataset:medicine", "base_model:distilbert/distilbert-base-uncased", "base_model:finetune:distilbert/distilbert-base-uncased", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-02-14T19:01:08Z
2024-02-22T11:27:55+00:00
22
0
--- base_model: distilbert-base-uncased datasets: - medicine language: - en license: mit metrics: - accuracy - precision - recall - f1 tags: - low-resource NER - token_classification - biomedicine - medical NER - generated_from_trainer model-index: - name: Dagobert42/distilbert-base-uncased-biored-augmented results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Dagobert42/distilbert-base-uncased-biored-augmented This model is a fine-tuned version of [distilbert-base-uncased](https://huggingface.co/distilbert-base-uncased) on the bigbio/biored dataset. It achieves the following results on the evaluation set: - Loss: 0.5692 - Accuracy: 0.7978 - Precision: 0.5993 - Recall: 0.5337 - F1: 0.5536 - Weighted F1: 0.7929 ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 2e-05 - train_batch_size: 8 - eval_batch_size: 8 - seed: 42 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 50 ### Training results | Training Loss | Epoch | Step | Validation Loss | Accuracy | Precision | Recall | F1 | Weighted F1 | |:-------------:|:-----:|:----:|:---------------:|:--------:|:---------:|:------:|:------:|:-----------:| | No log | 1.0 | 25 | 0.6037 | 0.7824 | 0.5931 | 0.4937 | 0.5272 | 0.7719 | | No log | 2.0 | 50 | 0.5858 | 0.7932 | 0.6023 | 0.5298 | 0.5511 | 0.7849 | | No log | 3.0 | 75 | 0.5887 | 0.795 | 0.5757 | 0.5283 | 0.544 | 0.7842 | | No log | 4.0 | 100 | 0.5890 | 0.7937 | 0.5911 | 0.5331 | 0.5466 | 0.7864 | ### Framework versions - Transformers 4.35.2 - Pytorch 2.0.1+cu117 - Datasets 2.12.0 - Tokenizers 0.15.0
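The card reports evaluation metrics but no usage snippet; the following is an illustrative sketch (not from the original card) of running the checkpoint as a token-classification pipeline. The repository id is taken from the card; the example sentence and the aggregation strategy are assumptions for demonstration.

```python
# Hedged sketch: biomedical NER with the fine-tuned DistilBERT checkpoint.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="Dagobert42/distilbert-base-uncased-biored-augmented",
    aggregation_strategy="simple",  # merge word pieces into whole entity spans
)

text = "Mutations in BRCA1 increase the risk of breast cancer."
for entity in ner(text):
    print(entity["entity_group"], entity["word"], round(entity["score"], 3))
```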
[ "BIORED" ]
HariLuru/finer_distillbert_v2
HariLuru
token-classification
[ "transformers", "safetensors", "distilbert", "token-classification", "finance", "dataset:nlpaueb/finer-139", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-03-07T12:56:25Z
2024-03-08T13:29:02+00:00
22
1
--- datasets: - nlpaueb/finer-139 library_name: transformers license: mit pipeline_tag: token-classification tags: - finance widget: - text: The loan bears interest at 9.75 % per annum with interest due monthly and is secured by a lien on certain of the Company ’ s and its subsidiaries ’ assets . example_title: Example1 - text: Unused portions of the Credit Facilities bear interest at a rate equal to 0.25 % per annum . example_title: Example2 --- # Model Card for Model ID This is an NER model built on DistilBERT for 5 classes of the FiNER-139 dataset. ## Model Details ### Model Description <!-- Provide a longer summary of what this model is. --> This is the model card of a 🤗 transformers model that has been pushed on the Hub. This model card has been automatically generated. - **Developed by:** Narahari BM - **Model type:** NER - **Finetuned from model [optional]:** DistilBERT ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65e2eb5c2b28b798a0249f66/GdH6LpK4Drkd6RT1uw5Do.png) [More Information Needed] ## Confusion Matrix ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65e2eb5c2b28b798a0249f66/J0fZEXv5gWKOgeBVKPv75.png) ## Training Details ### Training Data 1. Subsampled the training data and obtained the distribution below ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65e2eb5c2b28b798a0249f66/tkCIZUIiEQTyO2bGpttTI.png) <!-- This should link to a Dataset Card, perhaps with a short stub of information on what the training data is all about as well as documentation related to data pre-processing or additional filtering. --> [More Information Needed] ### Training Procedure <!-- This relates heavily to the Technical Specifications. Content here should link to that section when it is relevant to the training procedure. --> #### Preprocessing [optional] [More Information Needed] #### Training Hyperparameters - **Training regime:** [More Information Needed] <!--fp32, fp16 mixed precision, bf16 mixed precision, bf16 non-mixed precision, fp16 non-mixed precision, fp8 mixed precision --> #### Speeds, Sizes, Times [optional] <!-- This section provides information about throughput, start/end time, checkpoint size if relevant, etc. --> [More Information Needed] ## Evaluation <!-- This section describes the evaluation protocols and provides the results. --> ### Testing Data, Factors & Metrics #### Testing Data <!-- This should link to a Dataset Card if possible. --> [More Information Needed] #### Factors <!-- These are the things the evaluation is disaggregating by, e.g., subpopulations or domains. --> [More Information Needed] #### Metrics <!-- These are the evaluation metrics being used, ideally with a description of why. --> [More Information Needed] ### Results [More Information Needed] #### Summary
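As with the card's widget examples, the model can be queried through the token-classification pipeline. The sketch below is illustrative (assumed usage, not taken from the card); the repository id and the sentence come from the card, everything else is a demonstration default.

```python
# Hedged sketch: tagging financial (FiNER-139) entities with the fine-tuned model.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="HariLuru/finer_distillbert_v2",
    aggregation_strategy="simple",  # group word pieces into entity spans
)

sentence = (
    "The loan bears interest at 9.75 % per annum with interest due monthly "
    "and is secured by a lien on certain of the Company's and its subsidiaries' assets."
)
print(ner(sentence))
```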
[ "BEAR" ]
johnsnowlabs/JSL-MedMX-7X
johnsnowlabs
text-generation
[ "transformers", "safetensors", "mistral", "text-generation", "medical", "conversational", "en", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-04-25T12:41:37Z
2024-05-03T00:29:46+00:00
22
1
--- language: - en library_name: transformers license: apache-2.0 tags: - medical --- # JSL-MedMX-7X [<img src="https://repository-images.githubusercontent.com/104670986/2e728700-ace4-11ea-9cfc-f3e060b25ddf">](http://www.johnsnowlabs.com) This model is developed by [John Snow Labs](https://www.johnsnowlabs.com/). Performance on biomedical benchmarks: [Open Medical LLM Leaderboard](https://huggingface.co/spaces/openlifescienceai/open_medical_llm_leaderboard). This model is available under a [CC-BY-NC-ND](https://creativecommons.org/licenses/by-nc-nd/4.0/deed.en) license and must also conform to this [Acceptable Use Policy](https://huggingface.co/johnsnowlabs). If you need to license this model for commercial use, please contact us at [email protected]. ## 💻 Usage ```python !pip install -qU transformers accelerate from transformers import AutoTokenizer import transformers import torch model = "johnsnowlabs/JSL-MedMX-7X" messages = [{"role": "user", "content": "What is a large language model?"}] tokenizer = AutoTokenizer.from_pretrained(model) prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True) pipeline = transformers.pipeline( "text-generation", model=model, torch_dtype=torch.float16, device_map="auto", ) outputs = pipeline(prompt, max_new_tokens=256, do_sample=True, temperature=0.7, top_k=50, top_p=0.95) print(outputs[0]["generated_text"]) ``` ## 🏆 Evaluation | Tasks |Version|Filter|n-shot| Metric |Value | |Stderr| |-------------------------------|-------|------|-----:|--------|-----:|---|-----:| |stem |N/A |none | 0|acc_norm|0.5783|± |0.0067| | | |none | 0|acc |0.6177|± |0.0057| | - medmcqa |Yaml |none | 0|acc |0.5668|± |0.0077| | | |none | 0|acc_norm|0.5668|± |0.0077| | - medqa_4options |Yaml |none | 0|acc |0.6159|± |0.0136| | | |none | 0|acc_norm|0.6159|± |0.0136| | - anatomy (mmlu) | 0|none | 0|acc |0.7111|± |0.0392| | - clinical_knowledge (mmlu) | 0|none | 0|acc |0.7396|± |0.0270| | - college_biology (mmlu) | 0|none | 0|acc |0.7778|± |0.0348| | - college_medicine (mmlu) | 0|none | 0|acc |0.6647|± |0.0360| | - medical_genetics (mmlu) | 0|none | 0|acc |0.7200|± |0.0451| | - professional_medicine (mmlu)| 0|none | 0|acc |0.7868|± |0.0249| | - pubmedqa | 1|none | 0|acc |0.7840|± |0.0184|
[ "MEDQA", "PUBMEDQA" ]
Alignment-Lab-AI/idfkphi4kiguess
Alignment-Lab-AI
text-generation
[ "transformers", "safetensors", "phi3", "text-generation", "nlp", "code", "conversational", "custom_code", "en", "license:mit", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-04-26T05:23:44Z
2024-04-26T05:23:45+00:00
22
0
--- language: - en license: mit license_link: https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/LICENSE pipeline_tag: text-generation tags: - nlp - code --- ## Model Summary The Phi-3-Mini-4K-Instruct is a 3.8B parameters, lightweight, state-of-the-art open model trained with the Phi-3 datasets that includes both synthetic data and the filtered publicly available websites data with a focus on high-quality and reasoning dense properties. The model belongs to the Phi-3 family with the Mini version in two variants [4K](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct) and [128K](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct) which is the context length (in tokens) that it can support. The model has underwent a post-training process that incorporates both supervised fine-tuning and direct preference optimization for the instruction following and safety measures. When assessed against benchmarks testing common sense, language understanding, math, code, long context and logical reasoning, Phi-3 Mini-4K-Instruct showcased a robust and state-of-the-art performance among models with less than 13 billion parameters. Resources and Technical Documentation: + [Phi-3 Microsoft Blog](https://aka.ms/phi3blog-april) + [Phi-3 Technical Report](https://aka.ms/phi3-tech-report) + [Phi-3 on Azure AI Studio](https://aka.ms/phi3-azure-ai) + Phi-3 GGUF: [4K](https://aka.ms/Phi3-mini-4k-instruct-gguf) + Phi-3 ONNX: [4K](https://aka.ms/Phi3-mini-4k-instruct-onnx) ## Intended Uses **Primary use cases** The model is intended for commercial and research use in English. The model provides uses for applications which require: 1) Memory/compute constrained environments 2) Latency bound scenarios 3) Strong reasoning (especially code, math and logic) Our model is designed to accelerate research on language and multimodal models, for use as a building block for generative AI powered features. **Use case considerations** Our models are not specifically designed or evaluated for all downstream purposes. Developers should consider common limitations of language models as they select use cases, and evaluate and mitigate for accuracy, safety, and fariness before using within a specific downstream use case, particularly for high risk scenarios. Developers should be aware of and adhere to applicable laws or regulations (including privacy, trade compliance laws, etc.) that are relevant to their use case. Nothing contained in this Model Card should be interpreted as or deemed a restriction or modification to the license the model is released under. ## How to Use Phi-3 Mini-4K-Instruct has been integrated in the development version (4.40.0) of `transformers`. Until the official version is released through `pip`, ensure that you are doing one of the following: * When loading the model, ensure that `trust_remote_code=True` is passed as an argument of the `from_pretrained()` function. * Update your local `transformers` to the development version: `pip uninstall -y transformers && pip install git+https://github.com/huggingface/transformers`. The previous command is an alternative to cloning and installing from the source. The current `transformers` version can be verified with: `pip list | grep transformers`. Phi-3 Mini-4K-Instruct is also available in [HuggingChat](https://aka.ms/try-phi3-hf-chat). ### Tokenizer Phi-3 Mini-4K-Instruct supports a vocabulary size of up to `32064` tokens. 
The [tokenizer files](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/blob/main/added_tokens.json) already provide placeholder tokens that can be used for downstream fine-tuning, but they can also be extended up to the model's vocabulary size. ### Chat Format Given the nature of the training data, the Phi-3 Mini-4K-Instruct model is best suited for prompts using the chat format as follows. You can provide the prompt as a question with a generic template as follow: ```markdown <|user|>\nQuestion <|end|>\n<|assistant|> ``` For example: ```markdown <|system|> You are a helpful AI assistant.<|end|> <|user|> How to explain Internet for a medieval knight?<|end|> <|assistant|> ``` where the model generates the text after `<|assistant|>` . In case of few-shots prompt, the prompt can be formatted as the following: ```markdown <|system|> You are a helpful AI assistant.<|end|> <|user|> I am going to Paris, what should I see?<|end|> <|assistant|> Paris, the capital of France, is known for its stunning architecture, art museums, historical landmarks, and romantic atmosphere. Here are some of the top attractions to see in Paris:\n\n1. The Eiffel Tower: The iconic Eiffel Tower is one of the most recognizable landmarks in the world and offers breathtaking views of the city.\n2. The Louvre Museum: The Louvre is one of the world's largest and most famous museums, housing an impressive collection of art and artifacts, including the Mona Lisa.\n3. Notre-Dame Cathedral: This beautiful cathedral is one of the most famous landmarks in Paris and is known for its Gothic architecture and stunning stained glass windows.\n\nThese are just a few of the many attractions that Paris has to offer. With so much to see and do, it's no wonder that Paris is one of the most popular tourist destinations in the world."<|end|> <|user|> What is so great about #1?<|end|> <|assistant|> ``` ### Sample inference code This code snippets show how to get quickly started with running the model on a GPU: ```python import torch from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline torch.random.manual_seed(0) model = AutoModelForCausalLM.from_pretrained( "microsoft/Phi-3-mini-4k-instruct", device_map="cuda", torch_dtype="auto", trust_remote_code=True, ) tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct") messages = [ {"role": "system", "content": "You are a helpful digital assistant. Please provide safe, ethical and accurate information to the user."}, {"role": "user", "content": "Can you provide ways to eat combinations of bananas and dragonfruits?"}, {"role": "assistant", "content": "Sure! Here are some ways to eat bananas and dragonfruits together: 1. Banana and dragonfruit smoothie: Blend bananas and dragonfruits together with some milk and honey. 2. Banana and dragonfruit salad: Mix sliced bananas and dragonfruits together with some lemon juice and honey."}, {"role": "user", "content": "What about solving an 2x + 3 = 7 equation?"}, ] pipe = pipeline( "text-generation", model=model, tokenizer=tokenizer, ) generation_args = { "max_new_tokens": 500, "return_full_text": False, "temperature": 0.0, "do_sample": False, } output = pipe(messages, **generation_args) print(output[0]['generated_text']) ``` ## Responsible AI Considerations Like other language models, the Phi series models can potentially behave in ways that are unfair, unreliable, or offensive. Some of the limiting behaviors to be aware of include: + Quality of Service: the Phi models are trained primarily on English text. 
Languages other than English will experience worse performance. English language varieties with less representation in the training data might experience worse performance than standard American English. + Representation of Harms & Perpetuation of Stereotypes: These models can over- or under-represent groups of people, erase representation of some groups, or reinforce demeaning or negative stereotypes. Despite safety post-training, these limitations may still be present due to differing levels of representation of different groups or prevalence of examples of negative stereotypes in training data that reflect real-world patterns and societal biases. + Inappropriate or Offensive Content: these models may produce other types of inappropriate or offensive content, which may make it inappropriate to deploy for sensitive contexts without additional mitigations that are specific to the use case. + Information Reliability: Language models can generate nonsensical content or fabricate content that might sound reasonable but is inaccurate or outdated. + Limited Scope for Code: Majority of Phi-3 training data is based in Python and use common packages such as "typing, math, random, collections, datetime, itertools". If the model generates Python scripts that utilize other packages or scripts in other languages, we strongly recommend users manually verify all API uses. Developers should apply responsible AI best practices and are responsible for ensuring that a specific use case complies with relevant laws and regulations (e.g. privacy, trade, etc.). Important areas for consideration include: + Allocation: Models may not be suitable for scenarios that could have consequential impact on legal status or the allocation of resources or life opportunities (ex: housing, employment, credit, etc.) without further assessments and additional debiasing techniques. + High-Risk Scenarios: Developers should assess suitability of using models in high-risk scenarios where unfair, unreliable or offensive outputs might be extremely costly or lead to harm. This includes providing advice in sensitive or expert domains where accuracy and reliability are critical (ex: legal or health advice). Additional safeguards should be implemented at the application level according to the deployment context. + Misinformation: Models may produce inaccurate information. Developers should follow transparency best practices and inform end-users they are interacting with an AI system. At the application level, developers can build feedback mechanisms and pipelines to ground responses in use-case specific, contextual information, a technique known as Retrieval Augmented Generation (RAG). + Generation of Harmful Content: Developers should assess outputs for their context and use available safety classifiers or custom solutions appropriate for their use case. + Misuse: Other forms of misuse such as fraud, spam, or malware production may be possible, and developers should ensure that their applications do not violate applicable laws and regulations. ## Training ### Model * Architecture: Phi-3 Mini-4K-Instruct has 3.8B parameters and is a dense decoder-only Transformer model. The model is fine-tuned with Supervised fine-tuning (SFT) and Direct Preference Optimization (DPO) to ensure alignment with human preferences and safety guidlines. * Inputs: Text. It is best suited for prompts using chat format. 
* Context length: 4K tokens * GPUs: 512 H100-80G * Training time: 7 days * Training data: 3.3T tokens * Outputs: Generated text in response to the input * Dates: Our models were trained between February and April 2024 * Status: This is a static model trained on an offline dataset with cutoff date October 2023. Future versions of the tuned models may be released as we improve models. ### Datasets Our training data includes a wide variety of sources, totaling 3.3 trillion tokens, and is a combination of 1) Publicly available documents filtered rigorously for quality, selected high-quality educational data, and code; 2) Newly created synthetic, “textbook-like” data for the purpose of teaching math, coding, common sense reasoning, general knowledge of the world (science, daily activities, theory of mind, etc.); 3) High quality chat format supervised data covering various topics to reflect human preferences on different aspects such as instruct-following, truthfulness, honesty and helpfulness. ### Fine-tuning A basic example of multi-GPUs supervised fine-tuning (SFT) with TRL and Accelerate modules is provided [here](https://huggingface.co/microsoft/Phi-3-mini-4k-instruct/resolve/main/sample_finetune.py). ## Benchmarks We report the results for Phi-3-Mini-4K-Instruct on standard open-source benchmarks measuring the model's reasoning ability (both common sense reasoning and logical reasoning). We compare to Phi-2, Mistral-7b-v0.1, Mixtral-8x7b, Gemma 7B, Llama-3-8B-Instruct, and GPT-3.5. All the reported numbers are produced with the exact same pipeline to ensure that the numbers are comparable. These numbers might differ from other published numbers due to slightly different choices in the evaluation. As is now standard, we use few-shot prompts to evaluate the models, at temperature 0. The prompts and number of shots are part of a Microsoft internal tool to evaluate language models, and in particular we did no optimization to the pipeline for Phi-3. More specifically, we do not change prompts, pick different few-shot examples, change prompt format, or do any other form of optimization for the model. The number of k–shot examples is listed per-benchmark. 
| | Phi-3-Mini-4K-In<br>3.8b | Phi-3-Small<br>7b (preview) | Phi-3-Medium<br>14b (preview) | Phi-2<br>2.7b | Mistral<br>7b | Gemma<br>7b | Llama-3-In<br>8b | Mixtral<br>8x7b | GPT-3.5<br>version 1106 | |---|---|---|---|---|---|---|---|---|---| | MMLU <br>5-Shot | 68.8 | 75.3 | 78.2 | 56.3 | 61.7 | 63.6 | 66.5 | 68.4 | 71.4 | | HellaSwag <br> 5-Shot | 76.7 | 78.7 | 83.2 | 53.6 | 58.5 | 49.8 | 71.1 | 70.4 | 78.8 | | ANLI <br> 7-Shot | 52.8 | 55.0 | 58.7 | 42.5 | 47.1 | 48.7 | 57.3 | 55.2 | 58.1 | | GSM-8K <br> 0-Shot; CoT | 82.5 | 86.4 | 90.8 | 61.1 | 46.4 | 59.8 | 77.4 | 64.7 | 78.1 | | MedQA <br> 2-Shot | 53.8 | 58.2 | 69.8 | 40.9 | 49.6 | 50.0 | 60.5 | 62.2 | 63.4 | | AGIEval <br> 0-Shot | 37.5 | 45.0 | 49.7 | 29.8 | 35.1 | 42.1 | 42.0 | 45.2 | 48.4 | | TriviaQA <br> 5-Shot | 64.0 | 59.1 | 73.3 | 45.2 | 72.3 | 75.2 | 67.7 | 82.2 | 85.8 | | Arc-C <br> 10-Shot | 84.9 | 90.7 | 91.9 | 75.9 | 78.6 | 78.3 | 82.8 | 87.3 | 87.4 | | Arc-E <br> 10-Shot | 94.6 | 97.1 | 98.0 | 88.5 | 90.6 | 91.4 | 93.4 | 95.6 | 96.3 | | PIQA <br> 5-Shot | 84.2 | 87.8 | 88.2 | 60.2 | 77.7 | 78.1 | 75.7 | 86.0 | 86.6 | | SociQA <br> 5-Shot | 76.6 | 79.0 | 79.4 | 68.3 | 74.6 | 65.5 | 73.9 | 75.9 | 68.3 | | BigBench-Hard <br> 0-Shot | 71.7 | 75.0 | 82.5 | 59.4 | 57.3 | 59.6 | 51.5 | 69.7 | 68.32 | | WinoGrande <br> 5-Shot | 70.8 | 82.5 | 81.2 | 54.7 | 54.2 | 55.6 | 65 | 62.0 | 68.8 | | OpenBookQA <br> 10-Shot | 83.2 | 88.4 | 86.6 | 73.6 | 79.8 | 78.6 | 82.6 | 85.8 | 86.0 | | BoolQ <br> 0-Shot | 77.6 | 82.9 | 86.5 | -- | 72.2 | 66.0 | 80.9 | 77.6 | 79.1 | | CommonSenseQA <br> 10-Shot | 80.2 | 80.3 | 82.6 | 69.3 | 72.6 | 76.2 | 79 | 78.1 | 79.6 | | TruthfulQA <br> 10-Shot | 65.0 | 68.1 | 74.8 | -- | 52.1 | 53.0 | 63.2 | 60.1 | 85.8 | | HumanEval <br> 0-Shot | 59.1 | 59.1 | 54.7 | 47.0 | 28.0 | 34.1 | 60.4 | 37.8 | 62.2 | | MBPP <br> 3-Shot | 53.8 | 71.4 | 73.7 | 60.6 | 50.8 | 51.5 | 67.7 | 60.2 | 77.8 | ## Software * [PyTorch](https://github.com/pytorch/pytorch) * [DeepSpeed](https://github.com/microsoft/DeepSpeed) * [Transformers](https://github.com/huggingface/transformers) * [Flash-Attention](https://github.com/HazyResearch/flash-attention) ## Hardware Note that by default, the Phi-3-mini model uses flash attention, which requires certain types of GPU hardware to run. We have tested on the following GPU types: * NVIDIA A100 * NVIDIA A6000 * NVIDIA H100 If you want to run the model on: * NVIDIA V100 or earlier generation GPUs: call AutoModelForCausalLM.from_pretrained() with attn_implementation="eager" * CPU: use the **GGUF** quantized models [4K](https://aka.ms/Phi3-mini-4k-instruct-gguf) + Optimized inference on GPU, CPU, and Mobile: use the **ONNX** models [4K](https://aka.ms/Phi3-mini-4k-instruct-onnx) ## Cross Platform Support ONNX runtime ecosystem now supports Phi-3 Mini models across platforms and hardware. You can find the optimized Phi-3 Mini-4K-Instruct ONNX model [here](https://aka.ms/phi3-mini-4k-instruct-onnx). Optimized Phi-3 models are also published here in ONNX format, to run with ONNX Runtime on CPU and GPU across devices, including server platforms, Windows, Linux and Mac desktops, and mobile CPUs, with the precision best suited to each of these targets. DirectML support lets developers bring hardware acceleration to Windows devices at scale across AMD, Intel, and NVIDIA GPUs. Along with DirectML, ONNX Runtime provides cross platform support for Phi-3 across a range of devices CPU, GPU, and mobile. Here are some of the optimized configurations we have added: 1. 
ONNX models for int4 DML: Quantized to int4 via AWQ 2. ONNX model for fp16 CUDA 3. ONNX model for int4 CUDA: Quantized to int4 via RTN 4. ONNX model for int4 CPU and Mobile: Quantized to int4 via RTN ## License The model is licensed under the [MIT license](https://huggingface.co/microsoft/Phi-3-mini-4k/resolve/main/LICENSE). ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft trademarks or logos is subject to and must follow [Microsoft’s Trademark & Brand Guidelines](https://www.microsoft.com/en-us/legal/intellectualproperty/trademarks). Use of Microsoft trademarks or logos in modified versions of this project must not cause confusion or imply Microsoft sponsorship. Any use of third-party trademarks or logos is subject to those third parties’ policies.
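For the fallback mentioned in the Hardware section of the card above (V100 or earlier GPUs without flash attention), a hedged loading sketch follows; the `attn_implementation="eager"` argument is the one the card recommends, while the dtype and device settings are illustrative.

```python
# Hedged sketch: load Phi-3-mini-4k-instruct without flash-attention kernels.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct",
    torch_dtype=torch.float16,
    device_map="cuda",
    trust_remote_code=True,
    attn_implementation="eager",  # eager attention instead of flash attention
)
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3-mini-4k-instruct")
```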
[ "MEDQA" ]
LoneStriker/OpenBioLLM-Llama3-8B-3.0bpw-h6-exl2
LoneStriker
text-generation
[ "transformers", "pytorch", "llama", "text-generation", "llama-3", "Mixtral", "instruct", "finetune", "chatml", "DPO", "RLHF", "gpt4", "distillation", "en", "arxiv:2305.18290", "arxiv:2303.13375", "arxiv:2212.13138", "arxiv:2305.09617", "arxiv:2402.07023", "base_model:meta-llama/Meta-Llama-3-8B", "base_model:quantized:meta-llama/Meta-Llama-3-8B", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "3-bit", "exl2", "region:us" ]
2024-04-26T19:39:48Z
2024-04-26T19:42:40+00:00
22
0
--- base_model: meta-llama/Meta-Llama-3-8B language: - en license: llama3 tags: - llama-3 - llama - Mixtral - instruct - finetune - chatml - DPO - RLHF - gpt4 - distillation widget: - example_title: OpenBioLLM-8B messages: - role: system content: You are an expert and experienced from the healthcare and biomedical domain with extensive medical knowledge and practical experience. - role: user content: How long does it take for newborn jaundice to go away? output: text: 'Newborn jaundice, also known as neonatal jaundice, is a common condition in newborns where the yellowing of the skin and eyes occurs due to an elevated level of bilirubin in the blood. Bilirubin is a yellow pigment that forms when red blood cells break down. In most cases, newborn jaundice resolves on its own without any specific treatment. The duration of newborn jaundice can vary depending on several factors such as the underlying cause, gestational age at birth, and individual variations in bilirubin metabolism. Here are some general guidelines: 1. Physiological jaundice: This is the most common type of newborn jaundice and usually appears within 24-72 hours after birth. It tends to peak between the second and fifth day of life and gradually improves over the next week or two. By the time the baby is one week old, the jaundice should have mostly resolved. 2. Breast milk jaundice: This type of jaundice occurs in breastfed babies and may appear later than physiological jaundice, typically between the fifth and fourteenth day of life. It tends to persist for a longer duration but usually resolves within six weeks after birth. 3. Pathological jaundice: This type of jaundice is less common and occurs due to an underlying medical condition that affects bilirubin metabolism or liver function. The duration of pathological jaundice depends on the specific cause and may require treatment. It''s important for parents to monitor their newborn''s jaundice closely and seek medical advice if the jaundice progresses rapidly, becomes severe, or is accompanied by other symptoms such as poor feeding, lethargy, or excessive sleepiness. In these cases, further evaluation and management may be necessary. Remember that each baby is unique, and the timing of jaundice resolution can vary. If you have concerns about your newborn''s jaundice, it''s always best to consult with a healthcare professional for personalized advice and guidance.' 
model-index: - name: OpenBioLLM-8B results: [] --- <div align="center"> <img width="260px" src="https://hf.fast360.xyz/production/uploads/5f3fe13d79c1ba4c353d0c19/BrQCb95lmEIFz79QAmoNA.png"></div> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/2FhDh8NDvMl7iSxbQz9BP.png) <div align="center"> <h1>Advancing Open-source Large Language Models in Medical Domain</h1> </div> <p align="center" style="margin-top: 0px;"> <a href="https://colab.research.google.com/drive/1F5oV20InEYeAJGmBwYF9NM_QhLmjBkKJ?usp=sharing"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="OpenChat Logo" style="width:20px; vertical-align: middle; display: inline-block; margin-right: 5px; margin-left: 10px; margin-top: 0px; margin-bottom: 0px;"/> <span class="link-text" style=" margin-right: 5px;">Online Demo</span> </a> | <a href="https://github.com/openlifescience-ai"> <img src="https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png" alt="GitHub Logo" style="width:20px; vertical-align: middle; display: inline-block; margin-right: 5px; margin-left: 5px; margin-top: 0px; margin-bottom: 0px;"/> <span class="link-text" style=" margin-right: 5px;">GitHub</span> </a> | <a href="#"> <img src="https://github.com/alpayariyak/openchat/blob/master/assets/arxiv-logomark-small-square-border.png?raw=true" alt="ArXiv Logo" style="width:20px; vertical-align: middle; display: inline-block; margin-right: 5px; margin-left: 5px; margin-top: 0px; margin-bottom: 0px;"/> <span class="link-text" style="margin-right: 5px;">Paper</span> </a> | <a href="https://discord.gg/A5Fjf5zC69"> <img src="https://cloud.githubusercontent.com/assets/6291467/26705903/96c2d66e-477c-11e7-9f4e-f3c0efe96c9a.png" alt="Discord Logo" style="width:20px; vertical-align: middle; display: inline-block; margin-right: 5px; margin-left: 5px; margin-top: 0px; margin-bottom: 0px;"/> <span class="link-text">Discord</span> </a> </p> ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/KGmRE5w2sepNtwsEu8t7K.jpeg) Introducing OpenBioLLM-8B: A State-of-the-Art Open Source Biomedical Large Language Model OpenBioLLM-8B is an advanced open source language model designed specifically for the biomedical domain. Developed by Saama AI Labs, this model leverages cutting-edge techniques to achieve state-of-the-art performance on a wide range of biomedical tasks. 🏥 **Biomedical Specialization**: OpenBioLLM-8B is tailored for the unique language and knowledge requirements of the medical and life sciences fields. It was fine-tuned on a vast corpus of high-quality biomedical data, enabling it to understand and generate text with domain-specific accuracy and fluency. 🎓 **Superior Performance**: With 8 billion parameters, OpenBioLLM-8B outperforms other open source biomedical language models of similar scale. It has also demonstrated better results compared to larger proprietary & open-source models like GPT-3.5 and Meditron-70B on biomedical benchmarks. 🧠 **Advanced Training Techniques**: OpenBioLLM-8B builds upon the powerful foundations of the **Meta-Llama-3-8B** and [Meta-Llama-3-8B](meta-llama/Meta-Llama-3-8B) models. It incorporates the DPO dataset and fine-tuning recipe along with a custom diverse medical instruction dataset. 
Key components of the training pipeline include: <div align="center"> <img width="1200px" src="https://hf.fast360.xyz/production/uploads/5f3fe13d79c1ba4c353d0c19/oPchsJsEpQoGcGXVbh7YS.png"> </div> - **Policy Optimization**: [Direct Preference Optimization: Your Language Model is Secretly a Reward Model (DPO)](https://arxiv.org/abs/2305.18290) - **Ranking Dataset**: [berkeley-nest/Nectar](https://huggingface.co/datasets/berkeley-nest/Nectar) - **Fine-tuning dataset**: Custom Medical Instruct dataset (We plan to release a sample training dataset in our upcoming paper; please stay updated) This combination of cutting-edge techniques enables OpenBioLLM-8B to align with key capabilities and preferences for biomedical applications. ⚙️ **Release Details**: - **Model Size**: 8 billion parameters - **Quantization**: Optimized quantized versions available [Here](https://huggingface.co/aaditya/OpenBioLLM-8B-GGUF) - **Language(s) (NLP):** en - **Developed By**: [Ankit Pal (Aaditya Ura)](https://aadityaura.github.io/) from Saama AI Labs - **License:** Meta-Llama License - **Fine-tuned from models:** [meta-llama/Meta-Llama-3-8B](meta-llama/Meta-Llama-3-8B) - **Resources for more information:** - Paper: Coming soon The model can be fine-tuned for more specialized tasks and datasets as needed. OpenBioLLM-8B represents an important step forward in democratizing advanced language AI for the biomedical community. By leveraging state-of-the-art architectures and training techniques from leading open source efforts like Llama-3, we have created a powerful tool to accelerate innovation and discovery in healthcare and the life sciences. We are excited to share OpenBioLLM-8B with researchers and developers around the world. ### Use with transformers **Important: Please use the exact chat template provided by the Llama-3 instruct version; otherwise performance will degrade. The model output can be verbose in rare cases; consider using greedy decoding (temperature 0) to make this less frequent.** See the snippet below for usage with Transformers:
```python
import transformers
import torch

model_id = "aaditya/OpenBioLLM-Llama3-8B"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",  # "auto" is only valid for device_map (requires accelerate), not for the device argument
)

messages = [
    {"role": "system", "content": "You are an expert and experienced from the healthcare and biomedical domain with extensive medical knowledge and practical experience. Your name is OpenBioLLM, and you were developed by Saama AI Labs, who's willing to help answer the user's query with explanation. In your explanation, leverage your deep medical expertise such as relevant anatomical structures, physiological processes, diagnostic criteria, treatment guidelines, or other pertinent medical concepts. Use precise medical terminology while still aiming to make the explanation clear and accessible to a general audience."},
    {"role": "user", "content": "How can I split a 3mg or 4mg warfarin pill so I can get a 2.5mg pill?"},
]

# Build the prompt with the exact Llama-3 instruct chat template
prompt = pipeline.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

outputs = pipeline(
    prompt,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=False,  # greedy decoding (the "temperature = 0" behaviour recommended above); with do_sample=True, temperature must be > 0
    top_p=0.9,  # only used when do_sample=True
)
print(outputs[0]["generated_text"][len(prompt):])
```
## **Training procedure** ### **Training hyperparameters** <details> <summary>Click to see details</summary> - learning_rate: 0.0002 - lr_scheduler: cosine - train_batch_size: 12 - eval_batch_size: 8 - GPU: H100 80GB SXM5 - num_devices: 1 - optimizer: adamw_bnb_8bit - lr_scheduler_warmup_steps: 100 - num_epochs: 4 </details> ### **Peft hyperparameters** <details> <summary>Click to see details</summary> - adapter: qlora - lora_r: 128 - lora_alpha: 256 - lora_dropout: 0.05 - lora_target_linear: true - lora_target_modules: - q_proj - v_proj - k_proj - o_proj - gate_proj - down_proj - up_proj </details> ### **Training results** ### **Framework versions** - Transformers 4.39.3 - Pytorch 2.1.2+cu121 - Datasets 2.18.0 - Tokenizers 0.15.1 - Axolotl - lm-evaluation-harness (for evaluation) # Benchmark Results 🔥 OpenBioLLM-8B demonstrates superior performance compared to larger models such as GPT-3.5 and Meditron-70B across 9 diverse biomedical datasets, achieving state-of-the-art results with an average score of 72.50%, despite having a significantly smaller parameter count. The model's strong performance in domain-specific tasks, such as Clinical KG, Medical Genetics, and PubMedQA, highlights its ability to effectively capture and apply biomedical knowledge. 🚨 The GPT-4, Med-PaLM-1, and Med-PaLM-2 results are taken from their official papers. Since Med-PaLM doesn't provide zero-shot accuracy, we are using 5-shot accuracy from their paper for comparison. All results presented are in the zero-shot setting, except for Med-PaLM-2 and Med-PaLM-1, which use 5-shot accuracy.
| | Clinical KG | Medical Genetics | Anatomy | Pro Medicine | College Biology | College Medicine | MedQA 4 opts | PubMedQA | MedMCQA | Avg | |--------------------|-------------|------------------|---------|--------------|-----------------|------------------|--------------|----------|---------|-------| | **OpenBioLLM-70B** | **92.93** | **93.197** | **83.904** | 93.75 | 93.827 | **85.749** | 78.162 | 78.97 | **74.014** | **86.05588** | | Med-PaLM-2 (5-shot) | 88.3 | 90 | 77.8 | **95.2** | 94.4 | 80.9 | **79.7** | **79.2** | 71.3 | 84.08 | | **GPT-4** | 86.04 | 91 | 80 | 93.01 | **95.14** | 76.88 | 78.87 | 75.2 | 69.52 | 82.85 | | Med-PaLM-1 (Flan-PaLM, 5-shot) | 80.4 | 75 | 63.7 | 83.8 | 88.9 | 76.3 | 67.6 | 79 | 57.6 | 74.7 | | **OpenBioLLM-8B** | 76.101 | 86.1 | 69.829 | 78.21 | 84.213 | 68.042 | 58.993 | 74.12 | 56.913 | 72.502 | | Gemini-1.0 | 76.7 | 75.8 | 66.7 | 77.7 | 88 | 69.2 | 58 | 70.7 | 54.3 | 70.79 | | GPT-3.5 Turbo 1106 | 74.71 | 74 | 72.79 | 72.79 | 72.91 | 64.73 | 57.71 | 72.66 | 53.79 | 66 | | Meditron-70B | 66.79 | 69 | 53.33 | 71.69 | 76.38 | 63 | 57.1 | 76.6 | 46.85 | 64.52 | | gemma-7b | 69.81 | 70 | 59.26 | 66.18 | 79.86 | 60.12 | 47.21 | 76.2 | 48.96 | 64.18 | | Mistral-7B-v0.1 | 68.68 | 71 | 55.56 | 68.38 | 68.06 | 59.54 | 50.82 | 75.4 | 48.2 | 62.85 | | Apollo-7B | 62.26 | 72 | 61.48 | 69.12 | 70.83 | 55.49 | 55.22 | 39.8 | 53.77 | 60 | | MedAlpaca-7b | 57.36 | 69 | 57.04 | 67.28 | 65.28 | 54.34 | 41.71 | 72.8 | 37.51 | 58.03 | | BioMistral-7B | 59.9 | 64 | 56.5 | 60.4 | 59 | 54.7 | 50.6 | 77.5 | 48.1 | 57.3 | | AlpaCare-llama2-7b | 49.81 | 49 | 45.92 | 33.82 | 50 | 43.35 | 29.77 | 72.2 | 34.42 | 45.36 | | ClinicalGPT | 30.56 | 27 | 30.37 | 19.48 | 25 | 24.27 | 26.08 | 63.8 | 28.18 | 30.52 | <div align="center"> <img width="1600px" src="https://hf.fast360.xyz/production/uploads/5f3fe13d79c1ba4c353d0c19/_SzdcJSBjZyo8RS1bTEkP.png"> </div> ## Detailed Medical Subjectwise accuracy ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/UXF-V0col0Z0sS6BGPBkE.png) # Use Cases & Examples 🚨 **Below results are from the quantized version of OpenBioLLM-70B** # Summarize Clinical Notes OpenBioLLM-70B can efficiently analyze and summarize complex clinical notes, EHR data, and discharge summaries, extracting key information and generating concise, structured summaries ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/xdwdBgOxNi_TfML0hKlI8.png) # Answer Medical Questions OpenBioLLM-70B can provide answers to a wide range of medical questions. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/zO95GlwOQEZqCKQF69mE6.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/OKBczKw7gWeW5xsuDpc27.png) <details> <summary>Click to see details</summary> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/eJGHT5khppYvJb8fQ-YW4.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/Cnbwrqa_-ORHRuNRC2P6Y.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/J9DhdcvukAc9mnnW9fj2C.png) </details> # Clinical Entity Recognition OpenBioLLM-70B can perform advanced clinical entity recognition by identifying and extracting key medical concepts, such as diseases, symptoms, medications, procedures, and anatomical structures, from unstructured clinical text. 
By leveraging its deep understanding of medical terminology and context, the model can accurately annotate and categorize clinical entities, enabling more efficient information retrieval, data analysis, and knowledge discovery from electronic health records, research articles, and other biomedical text sources. This capability can support various downstream applications, such as clinical decision support, pharmacovigilance, and medical research. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/_69BW4k9LVABFwtxixL45.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/DKy5wYCoPhoPPUc1-x8_J.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/7WD9zCCBZT4-4XlfnIQjl.png) # Biomarkers Extraction ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/ZttoM4AiteT7gFYVhjIpN.png) # Classification OpenBioLLM-70B can perform various biomedical classification tasks, such as disease prediction, sentiment analysis, medical document categorization ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/Bf5MW1d75qT-1F_TR_hC0.png) # De-Identification OpenBioLLM-70B can detect and remove personally identifiable information (PII) from medical records, ensuring patient privacy and compliance with data protection regulations like HIPAA. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/hKX4kzm--Tw5bj6K78msy.png) **Advisory Notice!**  While OpenBioLLM-70B & 8B leverages high-quality data sources, its outputs may still contain inaccuracies, biases, or misalignments that could pose risks if relied upon for medical decision-making without further testing and refinement. The model's performance has not yet been rigorously evaluated in randomized controlled trials or real-world healthcare environments. Therefore, we strongly advise against using OpenBioLLM-70B & 8B for any direct patient care, clinical decision support, or other professional medical purposes at this time. Its use should be limited to research, development, and exploratory applications by qualified individuals who understand its limitations. OpenBioLLM-70B & 8B are intended solely as a research tool to assist healthcare professionals and should never be considered a replacement for the professional judgment and expertise of a qualified medical doctor. Appropriately adapting and validating OpenBioLLM-70B & 8B for specific medical use cases would require significant additional work, potentially including: - Thorough testing and evaluation in relevant clinical scenarios - Alignment with evidence-based guidelines and best practices - Mitigation of potential biases and failure modes - Integration with human oversight and interpretation - Compliance with regulatory and ethical standards Always consult a qualified healthcare provider for personal medical needs. # Citation If you find OpenBioLLM-70B & 8B useful in your work, please cite the model as follows: ``` @misc{OpenBioLLMs, author = {Ankit Pal, Malaikannan Sankarasubbu}, title = {OpenBioLLMs: Advancing Open-Source Large Language Models for Healthcare and Life Sciences}, year = {2024}, publisher = {Hugging Face}, journal = {Hugging Face repository}, howpublished = {\url{https://huggingface.co/aaditya/OpenBioLLM-Llama3-70B}} } ``` The accompanying paper is currently in progress and will be released soon. 
<div align="center"> <h2> 💌 Contact </h2> </div> We look forward to hearing from you and collaborating on this exciting project! **Contributors:** - [Ankit Pal (Aaditya Ura)](https://aadityaura.github.io/) [aadityaura at gmail dot com] - Saama AI Labs - Note: I am looking for a funded PhD opportunity, especially if it fits my Responsible Generative AI, Multimodal LLMs, Geometric Deep Learning, and Healthcare AI skillset. # References We thank the [Meta Team](meta-llama/Meta-Llama-3-70B-Instruct) for their amazing models! Result sources - [1] GPT-4 [Capabilities of GPT-4 on Medical Challenge Problems](https://arxiv.org/abs/2303.13375) - [2] Med-PaLM-1 [Large Language Models Encode Clinical Knowledge](https://arxiv.org/abs/2212.13138) - [3] Med-PaLM-2 [Towards Expert-Level Medical Question Answering with Large Language Models](https://arxiv.org/abs/2305.09617) - [4] Gemini-1.0 [Gemini Goes to Med School](https://arxiv.org/abs/2402.07023)
[ "MEDQA", "PUBMEDQA" ]
Klarly/multilingual-MT_FR-ES-IT-PT-RO_CAS-NLP
Klarly
text2text-generation
[ "transformers", "tensorboard", "safetensors", "marian", "text2text-generation", "generated_from_trainer", "base_model:Helsinki-NLP/opus-mt-en-roa", "base_model:finetune:Helsinki-NLP/opus-mt-en-roa", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-05-23T16:45:00Z
2024-05-24T00:48:23+00:00
22
0
--- base_model: Helsinki-NLP/opus-mt-en-roa license: apache-2.0 tags: - generated_from_trainer model-index: - name: multilingual-MT_FR-ES-IT-PT-RO_CAS-NLP results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # multilingual-MT_FR-ES-IT-PT-RO_CAS-NLP This model is a fine-tuned version of [Helsinki-NLP/opus-mt-en-roa](https://huggingface.co/Helsinki-NLP/opus-mt-en-roa) on the None dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.001 - train_batch_size: 32 - eval_batch_size: 32 - seed: 42 - gradient_accumulation_steps: 4 - total_train_batch_size: 128 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: linear - num_epochs: 4 - mixed_precision_training: Native AMP ### Training results ### Framework versions - Transformers 4.41.1 - Pytorch 2.3.0+cu121 - Datasets 2.19.1 - Tokenizers 0.19.1
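The auto-generated card above does not include an inference example. Since this is a Marian seq2seq checkpoint fine-tuned from [Helsinki-NLP/opus-mt-en-roa](https://huggingface.co/Helsinki-NLP/opus-mt-en-roa), which selects the Romance target language with a sentence-initial token such as `>>fra<<`, usage should look roughly like the sketch below. The prefix tokens shown and the example sentence are assumptions rather than details taken from the card; check the tokenizer's vocabulary for the exact set of supported codes.

```python
# Minimal sketch (not from the card): translate English into French with the fine-tuned checkpoint.
# Assumes the tokenizer keeps the Marian target-language tokens of the base model
# (e.g. >>fra<<, >>spa<<, >>ita<<, >>por<<, >>ron<<).
from transformers import MarianMTModel, MarianTokenizer

model_name = "Klarly/multilingual-MT_FR-ES-IT-PT-RO_CAS-NLP"
tokenizer = MarianTokenizer.from_pretrained(model_name)
model = MarianMTModel.from_pretrained(model_name)

print(tokenizer.supported_language_codes)  # inspect which >>xxx<< tokens are available

src = ">>fra<< The patient was discharged after three days of observation."
batch = tokenizer([src], return_tensors="pt", padding=True)
generated = model.generate(**batch, max_new_tokens=128)
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])
```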
[ "CAS" ]
sealad886/Llama3-OpenBioLLM-8B
sealad886
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "llama-3", "Mixtral", "instruct", "finetune", "chatml", "DPO", "RLHF", "gpt4", "distillation", "en", "arxiv:2305.18290", "arxiv:2303.13375", "arxiv:2212.13138", "arxiv:2305.09617", "arxiv:2402.07023", "base_model:meta-llama/Meta-Llama-3-8B", "base_model:finetune:meta-llama/Meta-Llama-3-8B", "license:llama3", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-06-05T21:56:47Z
2024-06-20T19:19:07+00:00
22
0
--- base_model: meta-llama/Meta-Llama-3-8B language: - en license: llama3 tags: - llama-3 - llama - Mixtral - instruct - finetune - chatml - DPO - RLHF - gpt4 - distillation widget: - example_title: OpenBioLLM-8B messages: - role: system content: You are an expert and experienced from the healthcare and biomedical domain with extensive medical knowledge and practical experience. - role: user content: How long does it take for newborn jaundice to go away? output: text: 'Newborn jaundice, also known as neonatal jaundice, is a common condition in newborns where the yellowing of the skin and eyes occurs due to an elevated level of bilirubin in the blood. Bilirubin is a yellow pigment that forms when red blood cells break down. In most cases, newborn jaundice resolves on its own without any specific treatment. The duration of newborn jaundice can vary depending on several factors such as the underlying cause, gestational age at birth, and individual variations in bilirubin metabolism. Here are some general guidelines: 1. Physiological jaundice: This is the most common type of newborn jaundice and usually appears within 24-72 hours after birth. It tends to peak between the second and fifth day of life and gradually improves over the next week or two. By the time the baby is one week old, the jaundice should have mostly resolved. 2. Breast milk jaundice: This type of jaundice occurs in breastfed babies and may appear later than physiological jaundice, typically between the fifth and fourteenth day of life. It tends to persist for a longer duration but usually resolves within six weeks after birth. 3. Pathological jaundice: This type of jaundice is less common and occurs due to an underlying medical condition that affects bilirubin metabolism or liver function. The duration of pathological jaundice depends on the specific cause and may require treatment. It''s important for parents to monitor their newborn''s jaundice closely and seek medical advice if the jaundice progresses rapidly, becomes severe, or is accompanied by other symptoms such as poor feeding, lethargy, or excessive sleepiness. In these cases, further evaluation and management may be necessary. Remember that each baby is unique, and the timing of jaundice resolution can vary. If you have concerns about your newborn''s jaundice, it''s always best to consult with a healthcare professional for personalized advice and guidance.' model-index: - name: OpenBioLLM-8B results: [] --- <div align="center"> <h2 class="alert">Copied pickled Pytorch files from <a href="https://huggingface.co/aaditya/Llama3-OpenBioLLM-8B">aaditya/Llama3-OpenBioLLM-8B</a> for conversion to Safetensors using <a href="https://huggingface.co/spaces/safetensors/convert">https://huggingface.co/spaces/safetensors/convert</a>. 
<br>Original Model Card:</h2> </div> <div align="center"> <img width="260px" src="https://hf.fast360.xyz/production/uploads/5f3fe13d79c1ba4c353d0c19/BrQCb95lmEIFz79QAmoNA.png"></div> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/2FhDh8NDvMl7iSxbQz9BP.png) <div align="center"> <h1>Advancing Open-source Large Language Models in Medical Domain</h1> </div> <p align="center" style="margin-top: 0px;"> <a href="https://colab.research.google.com/drive/1F5oV20InEYeAJGmBwYF9NM_QhLmjBkKJ?usp=sharing"> <img src="https://colab.research.google.com/assets/colab-badge.svg" alt="OpenChat Logo" style="width:20px; vertical-align: middle; display: inline-block; margin-right: 5px; margin-left: 10px; margin-top: 0px; margin-bottom: 0px;"/> <span class="link-text" style=" margin-right: 5px;">Online Demo</span> </a> | <a href="https://github.com/openlifescience-ai"> <img src="https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png" alt="GitHub Logo" style="width:20px; vertical-align: middle; display: inline-block; margin-right: 5px; margin-left: 5px; margin-top: 0px; margin-bottom: 0px;"/> <span class="link-text" style=" margin-right: 5px;">GitHub</span> </a> | <a href="#"> <img src="https://github.com/alpayariyak/openchat/blob/master/assets/arxiv-logomark-small-square-border.png?raw=true" alt="ArXiv Logo" style="width:20px; vertical-align: middle; display: inline-block; margin-right: 5px; margin-left: 5px; margin-top: 0px; margin-bottom: 0px;"/> <span class="link-text" style="margin-right: 5px;">Paper</span> </a> | <a href="https://discord.gg/A5Fjf5zC69"> <img src="https://cloud.githubusercontent.com/assets/6291467/26705903/96c2d66e-477c-11e7-9f4e-f3c0efe96c9a.png" alt="Discord Logo" style="width:20px; vertical-align: middle; display: inline-block; margin-right: 5px; margin-left: 5px; margin-top: 0px; margin-bottom: 0px;"/> <span class="link-text">Discord</span> </a> </p> ![image/jpeg](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/KGmRE5w2sepNtwsEu8t7K.jpeg) Introducing OpenBioLLM-8B: A State-of-the-Art Open Source Biomedical Large Language Model OpenBioLLM-8B is an advanced open source language model designed specifically for the biomedical domain. Developed by Saama AI Labs, this model leverages cutting-edge techniques to achieve state-of-the-art performance on a wide range of biomedical tasks. 🏥 **Biomedical Specialization**: OpenBioLLM-8B is tailored for the unique language and knowledge requirements of the medical and life sciences fields. It was fine-tuned on a vast corpus of high-quality biomedical data, enabling it to understand and generate text with domain-specific accuracy and fluency. 🎓 **Superior Performance**: With 8 billion parameters, OpenBioLLM-8B outperforms other open source biomedical language models of similar scale. It has also demonstrated better results compared to larger proprietary & open-source models like GPT-3.5 and Meditron-70B on biomedical benchmarks. 🧠 **Advanced Training Techniques**: OpenBioLLM-8B builds upon the powerful foundations of the **Meta-Llama-3-8B** and [Meta-Llama-3-8B](meta-llama/Meta-Llama-3-8B) models. It incorporates the DPO dataset and fine-tuning recipe along with a custom diverse medical instruction dataset. 
Key components of the training pipeline include: <div align="center"> <img width="1200px" src="https://hf.fast360.xyz/production/uploads/5f3fe13d79c1ba4c353d0c19/oPchsJsEpQoGcGXVbh7YS.png"> </div> - **Policy Optimization**: [Direct Preference Optimization: Your Language Model is Secretly a Reward Model (DPO)](https://arxiv.org/abs/2305.18290) - **Ranking Dataset**: [berkeley-nest/Nectar](https://huggingface.co/datasets/berkeley-nest/Nectar) - **Fine-tuning dataset**: Custom Medical Instruct dataset (We plan to release a sample training dataset in our upcoming paper; please stay updated) This combination of cutting-edge techniques enables OpenBioLLM-8B to align with key capabilities and preferences for biomedical applications. ⚙️ **Release Details**: - **Model Size**: 8 billion parameters - **Quantization**: Optimized quantized versions available [Here](https://huggingface.co/aaditya/OpenBioLLM-Llama3-8B-GGUF) - **Language(s) (NLP):** en - **Developed By**: [Ankit Pal (Aaditya Ura)](https://aadityaura.github.io/) from Saama AI Labs - **License:** Meta-Llama License - **Fine-tuned from models:** [meta-llama/Meta-Llama-3-8B](meta-llama/Meta-Llama-3-8B) - **Resources for more information:** - Paper: Coming soon The model can be fine-tuned for more specialized tasks and datasets as needed. OpenBioLLM-8B represents an important step forward in democratizing advanced language AI for the biomedical community. By leveraging state-of-the-art architectures and training techniques from leading open source efforts like Llama-3, we have created a powerful tool to accelerate innovation and discovery in healthcare and the life sciences. We are excited to share OpenBioLLM-8B with researchers and developers around the world. ### Use with transformers **Important: Please use the exact chat template provided by the Llama-3 instruct version; otherwise performance will degrade. The model output can be verbose in rare cases; consider using greedy decoding (temperature 0) to make this less frequent.** See the snippet below for usage with Transformers:
```python
import transformers
import torch

model_id = "aaditya/OpenBioLLM-Llama3-8B"

pipeline = transformers.pipeline(
    "text-generation",
    model=model_id,
    model_kwargs={"torch_dtype": torch.bfloat16},
    device_map="auto",  # "auto" is only valid for device_map (requires accelerate), not for the device argument
)

messages = [
    {"role": "system", "content": "You are an expert and experienced from the healthcare and biomedical domain with extensive medical knowledge and practical experience. Your name is OpenBioLLM, and you were developed by Saama AI Labs, who's willing to help answer the user's query with explanation. In your explanation, leverage your deep medical expertise such as relevant anatomical structures, physiological processes, diagnostic criteria, treatment guidelines, or other pertinent medical concepts. Use precise medical terminology while still aiming to make the explanation clear and accessible to a general audience."},
    {"role": "user", "content": "How can I split a 3mg or 4mg warfarin pill so I can get a 2.5mg pill?"},
]

# Build the prompt with the exact Llama-3 instruct chat template
prompt = pipeline.tokenizer.apply_chat_template(
    messages, tokenize=False, add_generation_prompt=True
)

terminators = [
    pipeline.tokenizer.eos_token_id,
    pipeline.tokenizer.convert_tokens_to_ids("<|eot_id|>"),
]

outputs = pipeline(
    prompt,
    max_new_tokens=256,
    eos_token_id=terminators,
    do_sample=False,  # greedy decoding (the "temperature = 0" behaviour recommended above); with do_sample=True, temperature must be > 0
    top_p=0.9,  # only used when do_sample=True
)
print(outputs[0]["generated_text"][len(prompt):])
```
## **Training procedure** ### **Training hyperparameters** <details> <summary>Click to see details</summary> - learning_rate: 0.0002 - lr_scheduler: cosine - train_batch_size: 12 - eval_batch_size: 8 - GPU: H100 80GB SXM5 - num_devices: 1 - optimizer: adamw_bnb_8bit - lr_scheduler_warmup_steps: 100 - num_epochs: 4 </details> ### **Peft hyperparameters** <details> <summary>Click to see details</summary> - adapter: qlora - lora_r: 128 - lora_alpha: 256 - lora_dropout: 0.05 - lora_target_linear: true - lora_target_modules: - q_proj - v_proj - k_proj - o_proj - gate_proj - down_proj - up_proj </details> ### **Training results** ### **Framework versions** - Transformers 4.39.3 - Pytorch 2.1.2+cu121 - Datasets 2.18.0 - Tokenizers 0.15.1 - Axolotl - lm-evaluation-harness (for evaluation) # Benchmark Results 🔥 OpenBioLLM-8B demonstrates superior performance compared to larger models such as GPT-3.5 and Meditron-70B across 9 diverse biomedical datasets, achieving state-of-the-art results with an average score of 72.50%, despite having a significantly smaller parameter count. The model's strong performance in domain-specific tasks, such as Clinical KG, Medical Genetics, and PubMedQA, highlights its ability to effectively capture and apply biomedical knowledge. 🚨 The GPT-4, Med-PaLM-1, and Med-PaLM-2 results are taken from their official papers. Since Med-PaLM doesn't provide zero-shot accuracy, we are using 5-shot accuracy from their paper for comparison. All results presented are in the zero-shot setting, except for Med-PaLM-2 and Med-PaLM-1, which use 5-shot accuracy.
| | Clinical KG | Medical Genetics | Anatomy | Pro Medicine | College Biology | College Medicine | MedQA 4 opts | PubMedQA | MedMCQA | Avg | |--------------------|-------------|------------------|---------|--------------|-----------------|------------------|--------------|----------|---------|-------| | **OpenBioLLM-70B** | **92.93** | **93.197** | **83.904** | 93.75 | 93.827 | **85.749** | 78.162 | 78.97 | **74.014** | **86.05588** | | Med-PaLM-2 (5-shot) | 88.3 | 90 | 77.8 | **95.2** | 94.4 | 80.9 | **79.7** | **79.2** | 71.3 | 84.08 | | **GPT-4** | 86.04 | 91 | 80 | 93.01 | **95.14** | 76.88 | 78.87 | 75.2 | 69.52 | 82.85 | | Med-PaLM-1 (Flan-PaLM, 5-shot) | 80.4 | 75 | 63.7 | 83.8 | 88.9 | 76.3 | 67.6 | 79 | 57.6 | 74.7 | | **OpenBioLLM-8B** | 76.101 | 86.1 | 69.829 | 78.21 | 84.213 | 68.042 | 58.993 | 74.12 | 56.913 | 72.502 | | Gemini-1.0 | 76.7 | 75.8 | 66.7 | 77.7 | 88 | 69.2 | 58 | 70.7 | 54.3 | 70.79 | | GPT-3.5 Turbo 1106 | 74.71 | 74 | 72.79 | 72.79 | 72.91 | 64.73 | 57.71 | 72.66 | 53.79 | 66 | | Meditron-70B | 66.79 | 69 | 53.33 | 71.69 | 76.38 | 63 | 57.1 | 76.6 | 46.85 | 64.52 | | gemma-7b | 69.81 | 70 | 59.26 | 66.18 | 79.86 | 60.12 | 47.21 | 76.2 | 48.96 | 64.18 | | Mistral-7B-v0.1 | 68.68 | 71 | 55.56 | 68.38 | 68.06 | 59.54 | 50.82 | 75.4 | 48.2 | 62.85 | | Apollo-7B | 62.26 | 72 | 61.48 | 69.12 | 70.83 | 55.49 | 55.22 | 39.8 | 53.77 | 60 | | MedAlpaca-7b | 57.36 | 69 | 57.04 | 67.28 | 65.28 | 54.34 | 41.71 | 72.8 | 37.51 | 58.03 | | BioMistral-7B | 59.9 | 64 | 56.5 | 60.4 | 59 | 54.7 | 50.6 | 77.5 | 48.1 | 57.3 | | AlpaCare-llama2-7b | 49.81 | 49 | 45.92 | 33.82 | 50 | 43.35 | 29.77 | 72.2 | 34.42 | 45.36 | | ClinicalGPT | 30.56 | 27 | 30.37 | 19.48 | 25 | 24.27 | 26.08 | 63.8 | 28.18 | 30.52 | <div align="center"> <img width="1600px" src="https://hf.fast360.xyz/production/uploads/5f3fe13d79c1ba4c353d0c19/_SzdcJSBjZyo8RS1bTEkP.png"> </div> ## Detailed Medical Subjectwise accuracy ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/UXF-V0col0Z0sS6BGPBkE.png) # Use Cases & Examples 🚨 **Below results are from the quantized version of OpenBioLLM-70B** # Summarize Clinical Notes OpenBioLLM-70B can efficiently analyze and summarize complex clinical notes, EHR data, and discharge summaries, extracting key information and generating concise, structured summaries ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/xdwdBgOxNi_TfML0hKlI8.png) # Answer Medical Questions OpenBioLLM-70B can provide answers to a wide range of medical questions. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/zO95GlwOQEZqCKQF69mE6.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/OKBczKw7gWeW5xsuDpc27.png) <details> <summary>Click to see details</summary> ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/eJGHT5khppYvJb8fQ-YW4.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/Cnbwrqa_-ORHRuNRC2P6Y.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/J9DhdcvukAc9mnnW9fj2C.png) </details> # Clinical Entity Recognition OpenBioLLM-70B can perform advanced clinical entity recognition by identifying and extracting key medical concepts, such as diseases, symptoms, medications, procedures, and anatomical structures, from unstructured clinical text. 
By leveraging its deep understanding of medical terminology and context, the model can accurately annotate and categorize clinical entities, enabling more efficient information retrieval, data analysis, and knowledge discovery from electronic health records, research articles, and other biomedical text sources. This capability can support various downstream applications, such as clinical decision support, pharmacovigilance, and medical research. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/_69BW4k9LVABFwtxixL45.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/DKy5wYCoPhoPPUc1-x8_J.png) ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/7WD9zCCBZT4-4XlfnIQjl.png) # Biomarkers Extraction ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/ZttoM4AiteT7gFYVhjIpN.png) # Classification OpenBioLLM-70B can perform various biomedical classification tasks, such as disease prediction, sentiment analysis, medical document categorization ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/Bf5MW1d75qT-1F_TR_hC0.png) # De-Identification OpenBioLLM-70B can detect and remove personally identifiable information (PII) from medical records, ensuring patient privacy and compliance with data protection regulations like HIPAA. ![image/png](https://cdn-uploads.huggingface.co/production/uploads/5f3fe13d79c1ba4c353d0c19/hKX4kzm--Tw5bj6K78msy.png) **Advisory Notice!**  While OpenBioLLM-70B & 8B leverages high-quality data sources, its outputs may still contain inaccuracies, biases, or misalignments that could pose risks if relied upon for medical decision-making without further testing and refinement. The model's performance has not yet been rigorously evaluated in randomized controlled trials or real-world healthcare environments. Therefore, we strongly advise against using OpenBioLLM-70B & 8B for any direct patient care, clinical decision support, or other professional medical purposes at this time. Its use should be limited to research, development, and exploratory applications by qualified individuals who understand its limitations. OpenBioLLM-70B & 8B are intended solely as a research tool to assist healthcare professionals and should never be considered a replacement for the professional judgment and expertise of a qualified medical doctor. Appropriately adapting and validating OpenBioLLM-70B & 8B for specific medical use cases would require significant additional work, potentially including: - Thorough testing and evaluation in relevant clinical scenarios - Alignment with evidence-based guidelines and best practices - Mitigation of potential biases and failure modes - Integration with human oversight and interpretation - Compliance with regulatory and ethical standards Always consult a qualified healthcare provider for personal medical needs. # Citation If you find OpenBioLLM-70B & 8B useful in your work, please cite the model as follows: ``` @misc{OpenBioLLMs, author = {Ankit Pal, Malaikannan Sankarasubbu}, title = {OpenBioLLMs: Advancing Open-Source Large Language Models for Healthcare and Life Sciences}, year = {2024}, publisher = {Hugging Face}, journal = {Hugging Face repository}, howpublished = {\url{https://huggingface.co/aaditya/OpenBioLLM-Llama3-70B}} } ``` The accompanying paper is currently in progress and will be released soon. 
<div align="center"> <h2> 💌 Contact </h2> </div> We look forward to hearing from you and collaborating on this exciting project! **Contributors:** - [Ankit Pal (Aaditya Ura)](https://aadityaura.github.io/) [aadityaura at gmail dot com] - Saama AI Labs - Note: I am looking for a funded PhD opportunity, especially if it fits my Responsible Generative AI, Multimodal LLMs, Geometric Deep Learning, and Healthcare AI skillset. # References We thank the [Meta Team](meta-llama/Meta-Llama-3-70B-Instruct) for their amazing models! Result sources - [1] GPT-4 [Capabilities of GPT-4 on Medical Challenge Problems](https://arxiv.org/abs/2303.13375) - [2] Med-PaLM-1 [Large Language Models Encode Clinical Knowledge](https://arxiv.org/abs/2212.13138) - [3] Med-PaLM-2 [Towards Expert-Level Medical Question Answering with Large Language Models](https://arxiv.org/abs/2305.09617) - [4] Gemini-1.0 [Gemini Goes to Med School](https://arxiv.org/abs/2402.07023)
[ "MEDQA", "PUBMEDQA" ]
BSC-NLP4BIA/bsc-bio-ehr-es-carmen-enfermedad
BSC-NLP4BIA
token-classification
[ "transformers", "pytorch", "roberta", "token-classification", "es", "base_model:PlanTL-GOB-ES/bsc-bio-ehr-es", "base_model:finetune:PlanTL-GOB-ES/bsc-bio-ehr-es", "license:cc-by-4.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-06-06T14:42:12Z
2024-07-25T14:19:30+00:00
22
0
--- base_model: PlanTL-GOB-ES/bsc-bio-ehr-es language: - es license: cc-by-4.0 --- # Training data Model trained on the disease mentions of [CARMEN-I](https://zenodo.org/records/10171540). # Citation Please cite the following works: ``` @inproceedings{distemist, title={{Overview of DisTEMIST at BioASQ: Automatic detection and normalization of diseases from clinical texts: results, methods, evaluation and multilingual resources}}, author={Miranda-Escalada, Antonio and Gascó, Luis and Lima-López, Salvador and Farré-Maduell, Eulàlia and Estrada, Darryl and Nentidis, Anastasios and Krithara, Anastasia and Katsimpras, Georgios and Paliouras, Georgios and Krallinger, Martin}, booktitle={Working Notes of Conference and Labs of the Evaluation (CLEF) Forum. CEUR Workshop Proceedings}, year={2022} } @misc{carmen_physionet, author = {Farre Maduell, Eulalia and Lima-Lopez, Salvador and Frid, Santiago Andres and Conesa, Artur and Asensio, Elisa and Lopez-Rueda, Antonio and Arino, Helena and Calvo, Elena and Bertran, Maria Jesús and Marcos, Maria Angeles and Nofre Maiz, Montserrat and Tañá Velasco, Laura and Marti, Antonia and Farreres, Ricardo and Pastor, Xavier and Borrat Frigola, Xavier and Krallinger, Martin}, title = {{CARMEN-I: A resource of anonymized electronic health records in Spanish and Catalan for training and testing NLP tools (version 1.0.1)}}, year = {2024}, publisher = {PhysioNet}, url = {https://doi.org/10.13026/x7ed-9r91} } @article{physionet, author = {Ary L. Goldberger and Luis A. N. Amaral and Leon Glass and Jeffrey M. Hausdorff and Plamen Ch. Ivanov and Roger G. Mark and Joseph E. Mietus and George B. Moody and Chung-Kang Peng and H. Eugene Stanley }, title = {PhysioBank, PhysioToolkit, and PhysioNet }, journal = {Circulation}, volume = {101}, number = {23}, pages = {e215-e220}, year = {2000}, doi = {10.1161/01.CIR.101.23.e215}, URL = {https://www.ahajournals.org/doi/abs/10.1161/01.CIR.101.23.e215} } ``` # Contacting authors jan.rodriguez [at] bsc.es ## More information on data, usage, limitations, and performance metrics soon
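Until that documentation is released, the sketch below shows one plausible way to query the checkpoint as a token-classification pipeline on Spanish clinical text. The example sentence and the printed fields are illustrative assumptions; the official label set and recommended preprocessing may differ.

```python
# Minimal sketch (not from the card): run the checkpoint as a clinical NER pipeline.
from transformers import pipeline

ner = pipeline(
    "token-classification",
    model="BSC-NLP4BIA/bsc-bio-ehr-es-carmen-enfermedad",
    aggregation_strategy="simple",  # merge word pieces into whole mention spans
)

texto = "Paciente con antecedentes de diabetes mellitus tipo 2 e hipertensión arterial."
for mention in ner(texto):
    print(mention["entity_group"], mention["word"], round(mention["score"], 3))
```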
[ "DISTEMIST" ]
frankmorales2020/Meta-Llama-3-8B-MEDAL-flash-attention-2
frankmorales2020
null
[ "peft", "tensorboard", "safetensors", "trl", "sft", "generated_from_trainer", "dataset:generator", "base_model:meta-llama/Meta-Llama-3-8B", "base_model:adapter:meta-llama/Meta-Llama-3-8B", "license:llama3", "region:us" ]
2024-06-16T21:24:53Z
2024-06-19T15:27:18+00:00
22
0
--- base_model: meta-llama/Meta-Llama-3-8B datasets: - generator library_name: peft license: llama3 tags: - trl - sft - generated_from_trainer model-index: - name: Meta-Llama-3-8B-MEDAL-flash-attention-2 results: [] --- <!-- This model card has been generated automatically according to the information the Trainer had access to. You should probably proofread and complete it, then remove this comment. --> # Meta-Llama-3-8B-MEDAL-flash-attention-2 This model is a fine-tuned version of [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) on the generator dataset. ## Model description More information needed ## Intended uses & limitations More information needed ## Training and evaluation data More information needed ## Training procedure ### Training hyperparameters The following hyperparameters were used during training: - learning_rate: 0.0002 - train_batch_size: 3 - eval_batch_size: 8 - seed: 42 - gradient_accumulation_steps: 6 - total_train_batch_size: 18 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08 - lr_scheduler_type: constant - lr_scheduler_warmup_ratio: 0.03 - num_epochs: 20 ### Training results ### Framework versions - PEFT 0.11.1 - Transformers 4.41.2 - Pytorch 2.3.0+cu121 - Datasets 2.20.0 - Tokenizers 0.19.1
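The card does not show how to load the adapter. Below is a minimal sketch, assuming the repository holds a PEFT/LoRA adapter for the gated [meta-llama/Meta-Llama-3-8B](https://huggingface.co/meta-llama/Meta-Llama-3-8B) base model, as the library name and tags indicate; the prompt and generation settings are illustrative, and access to the base model requires accepting Meta's license on the Hub.

```python
# Minimal sketch (not from the card): attach the LoRA adapter to the Llama-3 base model with PEFT.
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer

base_id = "meta-llama/Meta-Llama-3-8B"
adapter_id = "frankmorales2020/Meta-Llama-3-8B-MEDAL-flash-attention-2"

tokenizer = AutoTokenizer.from_pretrained(base_id)
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)

inputs = tokenizer("The patient presented with acute myocardial", return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(**inputs, max_new_tokens=64)[0], skip_special_tokens=True))
```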
[ "MEDAL" ]
Casual-Autopsy/L3-Uncen-Merger-Omelette-RP-8B-EXPERIMENTAL
Casual-Autopsy
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "mergekit", "merge", "not-for-all-audiences", "nsfw", "rp", "roleplay", "role-play", "conversational", "en", "base_model:Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B", "base_model:merge:Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B", "base_model:Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B", "base_model:merge:Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B", "base_model:Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B", "base_model:merge:Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B", "base_model:Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B", "base_model:merge:Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B", "base_model:ChaoticNeutrals/Hathor_RP-v.01-L3-8B", "base_model:merge:ChaoticNeutrals/Hathor_RP-v.01-L3-8B", "base_model:ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B", "base_model:merge:ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B", "base_model:Sao10K/L3-8B-Stheno-v3.1", "base_model:merge:Sao10K/L3-8B-Stheno-v3.1", "base_model:aifeifei798/llama3-8B-DarkIdol-1.0", "base_model:merge:aifeifei798/llama3-8B-DarkIdol-1.0", "base_model:bluuwhale/L3-SthenoMaidBlackroot-8B-V1", "base_model:merge:bluuwhale/L3-SthenoMaidBlackroot-8B-V1", "base_model:cgato/L3-TheSpice-8b-v0.8.3", "base_model:merge:cgato/L3-TheSpice-8b-v0.8.3", "base_model:migtissera/Llama-3-8B-Synthia-v3.5", "base_model:merge:migtissera/Llama-3-8B-Synthia-v3.5", "base_model:tannedbum/L3-Nymeria-8B", "base_model:merge:tannedbum/L3-Nymeria-8B", "base_model:tannedbum/L3-Nymeria-Maid-8B", "base_model:merge:tannedbum/L3-Nymeria-Maid-8B", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:us" ]
2024-06-22T23:49:10Z
2024-06-23T02:03:19+00:00
22
3
--- base_model: - Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B - bluuwhale/L3-SthenoMaidBlackroot-8B-V1 - Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B - migtissera/Llama-3-8B-Synthia-v3.5 - tannedbum/L3-Nymeria-Maid-8B - Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B - ChaoticNeutrals/Hathor_RP-v.01-L3-8B - tannedbum/L3-Nymeria-8B - Sao10K/L3-8B-Stheno-v3.1 - cgato/L3-TheSpice-8b-v0.8.3 - ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B - ChaoticNeutrals/Hathor_RP-v.01-L3-8B - aifeifei798/llama3-8B-DarkIdol-1.0 - Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B language: - en library_name: transformers pipeline_tag: text-generation tags: - mergekit - merge - not-for-all-audiences - nsfw - rp - roleplay - role-play --- # L3-Uncen-Merger-Omelette-RP-EXPERIMENTAL This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). # Merge Details An expiremental merger inspired by the merger recipe of [invisietch/EtherealRainbow-v0.3-8B](https://huggingface.co/invisietch/EtherealRainbow-v0.3-8B) combined with a merger technique known as merge densification( [grimjim/kunoichi-lemon-royale-v3-32K-7B](https://huggingface.co/grimjim/kunoichi-lemon-royale-v3-32K-7B) ) The model recipe ended up being something I can only describe as making an omelette. Hence the model name. The models are scrambled with Dare Ties to induce a bit of randomness, then the Dare Ties merges are merged into themselves with SLERP to repair any holes cause by Dare Ties, and finally a bunch of high creativity models are thrown into the merger through merge densification(Task Arithmetic). This model uses a bunch of the top models of the UGI Leaderboard, I picked out a few of the top 8B models of each category. Most of the high creativity models in the last step were found through Lewdiculus' account uploads ## Merge Method Dare Ties, SLERP, and Task Arithmetic ## Models Merged The following models were included in the merge: * [Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B](https://huggingface.co/Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B) * [bluuwhale/L3-SthenoMaidBlackroot-8B-V1](https://huggingface.co/bluuwhale/L3-SthenoMaidBlackroot-8B-V1) * [Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B](https://huggingface.co/Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B) * An unreleased Psycology merger of mine * [migtissera/Llama-3-8B-Synthia-v3.5](https://huggingface.co/migtissera/Llama-3-8B-Synthia-v3.5) * [tannedbum/L3-Nymeria-Maid-8B](https://huggingface.co/tannedbum/L3-Nymeria-Maid-8B) * [Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B](https://huggingface.co/Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B) * [ChaoticNeutrals/Hathor_RP-v.01-L3-8B](https://huggingface.co/ChaoticNeutrals/Hathor_RP-v.01-L3-8B) * [tannedbum/L3-Nymeria-8B](https://huggingface.co/tannedbum/L3-Nymeria-8B) * [Sao10K/L3-8B-Stheno-v3.1](https://huggingface.co/Sao10K/L3-8B-Stheno-v3.1) * [cgato/L3-TheSpice-8b-v0.8.3](https://huggingface.co/cgato/L3-TheSpice-8b-v0.8.3) * [ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B](https://huggingface.co/ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B) * [ChaoticNeutrals/Hathor_RP-v.01-L3-8B](https://huggingface.co/ChaoticNeutrals/Hathor_RP-v.01-L3-8B) * [aifeifei798/llama3-8B-DarkIdol-1.0](https://huggingface.co/aifeifei798/llama3-8B-DarkIdol-1.0) * [Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B](https://huggingface.co/Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B) # Secret Sauce The following YAML configurations was used to produce this model: ## Scrambled-Egg-1 ```yaml models: - model: 
Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B - model: Casual-Autopsy/L3-Umbral-Mind-RP-v0.3-8B parameters: density: 0.45 weight: 0.33 - model: bluuwhale/L3-SthenoMaidBlackroot-8B-V1 parameters: density: 0.75 weight: 0.33 merge_method: dare_ties base_model: Casual-Autopsy/Halu-L3-Stheno-BlackOasis-8B parameters: int8_mask: true dtype: bfloat16 ``` ## Scrambled-Egg-2 ```yaml models: - model: [An unreleased psychology merger of mine] - model: migtissera/Llama-3-8B-Synthia-v3.5 parameters: density: 0.35 weight: 0.25 - model: tannedbum/L3-Nymeria-Maid-8B parameters: density: 0.65 weight: 0.25 merge_method: dare_ties base_model: [An unreleased psychology merger of mine] parameters: int8_mask: true dtype: bfloat16 ``` ## Scrambled-Egg-3 ```yaml models: - model: Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B - model: tannedbum/L3-Nymeria-8B parameters: density: 0.5 weight: 0.35 - model: ChaoticNeutrals/Hathor_RP-v.01-L3-8B parameters: density: 0.4 weight: 0.2 merge_method: dare_ties base_model: Casual-Autopsy/L3-Umbral-Mind-RP-v1.0-8B parameters: int8_mask: true dtype: bfloat16 ``` ## Omelette-1 ```yaml models: - model: Casual-Autopsy/Scrambled-Egg-1 - model: Casual-Autopsy/Scrambled-Egg-3 merge_method: slerp base_model: Casual-Autopsy/Scrambled-Egg-1 parameters: t: - value: [0.1, 0.15, 0.2, 0.4, 0.6, 0.4, 0.2, 0.15, 0.1] embed_slerp: true dtype: bfloat16 ``` ## Omelette-2 ```yaml models: - model: Casual-Autopsy/Omelette-1 - model: Casual-Autopsy/Scrambled-Egg-2 merge_method: slerp base_model: Casual-Autopsy/Omelette-1 parameters: t: - value: [0.7, 0.5, 0.3, 0.25, 0.2, 0.25, 0.3, 0.5, 0.7] embed_slerp: true dtype: bfloat16 ``` ## L3-Uncen-Merger-Omelette-8B-EXPERIMENTAL ```yaml models: - model: Casual-Autopsy/Omelette-2 - model: Cas-Warehouse/Llama-3-SOVL-MopeyMule-Blackroot-8B parameters: weight: 0.07 - model: ChaoticNeutrals/Hathor_RP-v.01-L3-8B parameters: weight: 0.01 - model: Sao10K/L3-8B-Stheno-v3.1 parameters: weight: 0.015 - model: aifeifei798/llama3-8B-DarkIdol-1.0 parameters: weight: 0.015 - model: cgato/L3-TheSpice-8b-v0.8.3 parameters: weight: 0.02 - model: ChaoticNeutrals/Poppy_Porpoise-1.4-L3-8B parameters: weight: 0.02 merge_method: task_arithmetic base_model: Casual-Autopsy/Omelette-2 dtype: bfloat16 ```
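The recipes above show the merge configurations but not how they are executed. A minimal sketch follows, assuming the standard `mergekit-yaml` command-line entry point from the linked mergekit repository; the config file name and output directory are placeholders, and any optional flags are omitted.

```python
# Minimal sketch (not part of the original card): save one of the recipes above to disk
# and run it through mergekit's command-line entry point.
import subprocess
from pathlib import Path

config_path = Path("scrambled-egg-1.yaml")  # paste the "Scrambled-Egg-1" YAML from above into this file
subprocess.run(
    ["mergekit-yaml", str(config_path), "./Scrambled-Egg-1"],  # config file, then output directory
    check=True,
)
```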
[ "CAS" ]
RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf
RichardErkhov
null
[ "gguf", "endpoints_compatible", "region:us" ]
2024-07-20T09:34:41Z
2024-07-20T09:42:52+00:00
22
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) TinyMistral-248M-Instruct - GGUF - Model creator: https://huggingface.co/Locutusque/ - Original model: https://huggingface.co/Locutusque/TinyMistral-248M-Instruct/ | Name | Quant method | Size | | ---- | ---- | ---- | | [TinyMistral-248M-Instruct.Q2_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q2_K.gguf) | Q2_K | 0.1GB | | [TinyMistral-248M-Instruct.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ3_XS.gguf) | IQ3_XS | 0.11GB | | [TinyMistral-248M-Instruct.IQ3_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ3_S.gguf) | IQ3_S | 0.11GB | | [TinyMistral-248M-Instruct.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q3_K_S.gguf) | Q3_K_S | 0.11GB | | [TinyMistral-248M-Instruct.IQ3_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ3_M.gguf) | IQ3_M | 0.11GB | | [TinyMistral-248M-Instruct.Q3_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q3_K.gguf) | Q3_K | 0.12GB | | [TinyMistral-248M-Instruct.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q3_K_M.gguf) | Q3_K_M | 0.12GB | | [TinyMistral-248M-Instruct.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q3_K_L.gguf) | Q3_K_L | 0.13GB | | [TinyMistral-248M-Instruct.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ4_XS.gguf) | IQ4_XS | 0.13GB | | [TinyMistral-248M-Instruct.Q4_0.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_0.gguf) | Q4_0 | 0.14GB | | [TinyMistral-248M-Instruct.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.IQ4_NL.gguf) | IQ4_NL | 0.14GB | | [TinyMistral-248M-Instruct.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_K_S.gguf) | Q4_K_S | 0.14GB | | [TinyMistral-248M-Instruct.Q4_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_K.gguf) | Q4_K | 0.14GB | | [TinyMistral-248M-Instruct.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_K_M.gguf) | Q4_K_M | 0.14GB | | [TinyMistral-248M-Instruct.Q4_1.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q4_1.gguf) | Q4_1 | 0.15GB | | [TinyMistral-248M-Instruct.Q5_0.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_0.gguf) | Q5_0 | 0.16GB | | 
[TinyMistral-248M-Instruct.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_K_S.gguf) | Q5_K_S | 0.16GB | | [TinyMistral-248M-Instruct.Q5_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_K.gguf) | Q5_K | 0.17GB | | [TinyMistral-248M-Instruct.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_K_M.gguf) | Q5_K_M | 0.17GB | | [TinyMistral-248M-Instruct.Q5_1.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q5_1.gguf) | Q5_1 | 0.18GB | | [TinyMistral-248M-Instruct.Q6_K.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q6_K.gguf) | Q6_K | 0.19GB | | [TinyMistral-248M-Instruct.Q8_0.gguf](https://huggingface.co/RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf/blob/main/TinyMistral-248M-Instruct.Q8_0.gguf) | Q8_0 | 0.25GB | Original model description: --- pipeline_tag: text-generation base_model: Locutusque/TinyMistral-248M license: apache-2.0 datasets: - Locutusque/InstructMixCleaned - berkeley-nest/Nectar language: - en widget: - text: >- <|USER|> Design a Neo4j database and Cypher function snippet to Display Extreme Dental hygiene: Using Mouthwash for Analysis for Beginners. Implement if/else or switch/case statements to handle different conditions related to the Consent. Provide detailed comments explaining your control flow and the reasoning behind each decision. <|ASSISTANT|> - text: >- <|USER|> Write me a story about a magical place. <|ASSISTANT|> - text: >- <|USER|> Write me an essay about the life of George Washington <|ASSISTANT|> - text: >- <|USER|> Solve the following equation 2x + 10 = 20 <|ASSISTANT|> - text: >- <|USER|> Craft me a list of some nice places to visit around the world. <|ASSISTANT|> - text: >- <|USER|> How to manage a lazy employee: Address the employee verbally. Don't allow an employee's laziness or lack of enthusiasm to become a recurring issue. Tell the employee you're hoping to speak with them about workplace expectations and performance, and schedule a time to sit down together. Question: To manage a lazy employee, it is suggested to talk to the employee. True, False, or Neither? <|ASSISTANT|> inference: parameters: temperature: 0.5 do_sample: True top_p: 0.5 top_k: 30 max_new_tokens: 250 repetition_penalty: 1.15 --- Base model Locutusque/TinyMistral-248M fully fine-tuned on Locutusque/InstructMix. During validation, this model achieved an average perplexity of 3.23 on Locutusque/InstructMix dataset. It has so far been trained on approximately 608,000 examples. More epochs are planned for this model.
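The table lists the available quantizations but not how to run one. A minimal sketch, assuming llama-cpp-python as the backend; the choice of the Q4_K_M file is arbitrary, and the prompt format and sampling values are taken from the original model description above.

```python
# Minimal sketch (not from the card): fetch one of the listed quantizations and run it
# with llama-cpp-python, using the <|USER|>/<|ASSISTANT|> format and the sampling
# settings from the original model's widget configuration.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

gguf_path = hf_hub_download(
    repo_id="RichardErkhov/Locutusque_-_TinyMistral-248M-Instruct-gguf",
    filename="TinyMistral-248M-Instruct.Q4_K_M.gguf",
)

llm = Llama(model_path=gguf_path, n_ctx=2048)
prompt = "<|USER|> Write me a story about a magical place. <|ASSISTANT|>"
out = llm(prompt, max_tokens=250, temperature=0.5, top_p=0.5, top_k=30, repeat_penalty=1.15)
print(out["choices"][0]["text"])
```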
[ "CRAFT" ]
chrisob94/llama3-3-8b-dissertation-doc-chat-experiment
chrisob94
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "text-generation-inference", "unsloth", "trl", "conversational", "en", "dataset:llamafactory/PubMedQA", "license:apache-2.0", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-07-20T12:31:47Z
2024-07-21T20:23:35+00:00
22
0
--- base_model: unsloth/llama-3-8b-instruct-bnb-4bit datasets: - llamafactory/PubMedQA language: - en license: apache-2.0 pipeline_tag: text-generation tags: - text-generation-inference - transformers - unsloth - llama - trl --- # Dissertation project on the impact of interpretability on LLMs vs quantizations Trained on the "llamafactory/PubMedQA" dataset from Hugging Face. # Uploaded model - **Developed by:** chrisob94 - **License:** apache-2.0 - **Finetuned from model:** unsloth/llama-3-8b-instruct-bnb-4bit This Llama model was trained 2x faster with [Unsloth](https://github.com/unslothai/unsloth) and Hugging Face's TRL library. [<img src="https://raw.githubusercontent.com/unslothai/unsloth/main/images/unsloth%20made%20with%20love.png" width="200"/>](https://github.com/unslothai/unsloth)
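Beyond the Unsloth notes above, the card gives no inference example. A minimal sketch follows, assuming the uploaded safetensors checkpoint loads as a standard Llama-3 causal LM and that it inherits the instruct chat template from its base model; the PubMedQA-style question and generation settings are illustrative.

```python
# Minimal sketch (not from the card): load the checkpoint with plain transformers
# and ask a PubMedQA-style question through the (assumed) chat template.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "chrisob94/llama3-3-8b-dissertation-doc-chat-experiment"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id, torch_dtype=torch.bfloat16, device_map="auto")

messages = [{"role": "user", "content": "Does regular exercise reduce the risk of type 2 diabetes? Answer yes, no, or maybe, and explain briefly."}]
inputs = tokenizer.apply_chat_template(messages, add_generation_prompt=True, return_tensors="pt").to(model.device)
print(tokenizer.decode(model.generate(inputs, max_new_tokens=128)[0], skip_special_tokens=True))
```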
[ "PUBMEDQA" ]
RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf
RichardErkhov
null
[ "gguf", "endpoints_compatible", "region:us" ]
2024-08-02T07:20:17Z
2024-08-02T16:40:48+00:00
22
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) llama2-13b-dpo-v3 - GGUF - Model creator: https://huggingface.co/mncai/ - Original model: https://huggingface.co/mncai/llama2-13b-dpo-v3/ | Name | Quant method | Size | | ---- | ---- | ---- | | [llama2-13b-dpo-v3.Q2_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q2_K.gguf) | Q2_K | 4.6GB | | [llama2-13b-dpo-v3.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ3_XS.gguf) | IQ3_XS | 5.08GB | | [llama2-13b-dpo-v3.IQ3_S.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ3_S.gguf) | IQ3_S | 5.36GB | | [llama2-13b-dpo-v3.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q3_K_S.gguf) | Q3_K_S | 5.36GB | | [llama2-13b-dpo-v3.IQ3_M.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ3_M.gguf) | IQ3_M | 5.66GB | | [llama2-13b-dpo-v3.Q3_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q3_K.gguf) | Q3_K | 5.99GB | | [llama2-13b-dpo-v3.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q3_K_M.gguf) | Q3_K_M | 5.99GB | | [llama2-13b-dpo-v3.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q3_K_L.gguf) | Q3_K_L | 6.54GB | | [llama2-13b-dpo-v3.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ4_XS.gguf) | IQ4_XS | 6.63GB | | [llama2-13b-dpo-v3.Q4_0.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_0.gguf) | Q4_0 | 6.95GB | | [llama2-13b-dpo-v3.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.IQ4_NL.gguf) | IQ4_NL | 7.0GB | | [llama2-13b-dpo-v3.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_K_S.gguf) | Q4_K_S | 7.01GB | | [llama2-13b-dpo-v3.Q4_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_K.gguf) | Q4_K | 7.42GB | | [llama2-13b-dpo-v3.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_K_M.gguf) | Q4_K_M | 7.42GB | | [llama2-13b-dpo-v3.Q4_1.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q4_1.gguf) | Q4_1 | 7.71GB | | [llama2-13b-dpo-v3.Q5_0.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_0.gguf) | Q5_0 | 8.46GB | | [llama2-13b-dpo-v3.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_K_S.gguf) | Q5_K_S | 8.46GB | | [llama2-13b-dpo-v3.Q5_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_K.gguf) | Q5_K | 8.7GB | | [llama2-13b-dpo-v3.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_K_M.gguf) | Q5_K_M | 8.7GB | | 
[llama2-13b-dpo-v3.Q5_1.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q5_1.gguf) | Q5_1 | 9.21GB | | [llama2-13b-dpo-v3.Q6_K.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q6_K.gguf) | Q6_K | 10.06GB | | [llama2-13b-dpo-v3.Q8_0.gguf](https://huggingface.co/RichardErkhov/mncai_-_llama2-13b-dpo-v3-gguf/blob/main/llama2-13b-dpo-v3.Q8_0.gguf) | Q8_0 | 13.03GB | Original model description: --- license: cc-by-nc-sa-4.0 language: - en - ko --- # Model Card for llama2-dpo-v3 ### Introduction of MindsAndCompany https://mnc.ai/ We develop a diverse range of AI models and craft solutions tailored for business applications. In the realm of generative AI, our product development includes the Code Assistant, the TOD Chatbot, and LLMOps. We are also actively working on the development of Enterprise AGI (Artificial General Intelligence). ### Model Summary Based on beomi/llama-2-koen-13b, instruction-tuned and aligned with DPO. ### How to Use Here are some examples of how to use our model.
```python
import torch
import transformers
from transformers import AutoTokenizer

hf_model = 'mncai/llama2-13b-dpo-v3'

# The original snippet called `pipeline` and `tokenizer` without defining them;
# construct a text-generation pipeline and tokenizer here so the example runs.
tokenizer = AutoTokenizer.from_pretrained(hf_model)
pipeline = transformers.pipeline(
    "text-generation",
    model=hf_model,
    tokenizer=tokenizer,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Korean prompt: "There are two spheres with diameters 1 and 2; how many times larger is the volume of one than the other? Please explain as well."
message = "<|user|>\n두 개의 구가 있는데 각각 지름이 1, 2일때 구의 부피는 몇배 차이가 나지? 설명도 같이 해줘.\n<|assistant|>\n"

sequences = pipeline(
    message,
    do_sample=True,
    top_k=10,
    num_return_sequences=1,
    eos_token_id=tokenizer.eos_token_id,
    max_length=2048,
)
for seq in sequences:
    print(f"Result: {seq['generated_text']}")
```
### LICENSE Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International Public License, under LLAMA 2 COMMUNITY LICENSE AGREEMENT ### Contact If you have any questions, please raise an issue or contact us at [email protected]
[ "CRAFT" ]
RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf
RichardErkhov
null
[ "gguf", "endpoints_compatible", "region:us", "conversational" ]
2024-08-04T12:19:54Z
2024-08-04T16:34:55+00:00
22
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Atlantis-v0.1-12B - GGUF - Model creator: https://huggingface.co/invisietch/ - Original model: https://huggingface.co/invisietch/Atlantis-v0.1-12B/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Atlantis-v0.1-12B.Q2_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q2_K.gguf) | Q2_K | 4.46GB | | [Atlantis-v0.1-12B.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ3_XS.gguf) | IQ3_XS | 4.94GB | | [Atlantis-v0.1-12B.IQ3_S.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ3_S.gguf) | IQ3_S | 5.18GB | | [Atlantis-v0.1-12B.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q3_K_S.gguf) | Q3_K_S | 5.15GB | | [Atlantis-v0.1-12B.IQ3_M.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ3_M.gguf) | IQ3_M | 5.33GB | | [Atlantis-v0.1-12B.Q3_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q3_K.gguf) | Q3_K | 5.67GB | | [Atlantis-v0.1-12B.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q3_K_M.gguf) | Q3_K_M | 5.67GB | | [Atlantis-v0.1-12B.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q3_K_L.gguf) | Q3_K_L | 6.11GB | | [Atlantis-v0.1-12B.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ4_XS.gguf) | IQ4_XS | 6.33GB | | [Atlantis-v0.1-12B.Q4_0.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_0.gguf) | Q4_0 | 6.59GB | | [Atlantis-v0.1-12B.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.IQ4_NL.gguf) | IQ4_NL | 6.65GB | | [Atlantis-v0.1-12B.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_K_S.gguf) | Q4_K_S | 6.63GB | | [Atlantis-v0.1-12B.Q4_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_K.gguf) | Q4_K | 6.96GB | | [Atlantis-v0.1-12B.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_K_M.gguf) | Q4_K_M | 6.96GB | | [Atlantis-v0.1-12B.Q4_1.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q4_1.gguf) | Q4_1 | 7.26GB | | [Atlantis-v0.1-12B.Q5_0.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_0.gguf) | Q5_0 | 7.93GB | | [Atlantis-v0.1-12B.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_K_S.gguf) | Q5_K_S | 7.93GB | | [Atlantis-v0.1-12B.Q5_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_K.gguf) | Q5_K | 8.13GB | | [Atlantis-v0.1-12B.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_K_M.gguf) | Q5_K_M | 8.13GB | | 
[Atlantis-v0.1-12B.Q5_1.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q5_1.gguf) | Q5_1 | 8.61GB | | [Atlantis-v0.1-12B.Q6_K.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q6_K.gguf) | Q6_K | 9.37GB | | [Atlantis-v0.1-12B.Q8_0.gguf](https://huggingface.co/RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf/blob/main/Atlantis-v0.1-12B.Q8_0.gguf) | Q8_0 | 12.13GB | Original model description: --- library_name: transformers tags: - axolotl - qlora - not-for-all-audiences license: apache-2.0 language: - en --- <div align="center"> <b style="font-size: 36px;">Atlantis-v0.1-12B</b> <img src="https://huggingface.co/invisietch/Atlantis-v0.1-12B/resolve/main/header.png" style="width:70%"> </div> # Model Details Atlantis 12B is a finetune of [Mistral-Nemo-2407-Instruct](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407) in an attempt to make it a more RP/storytelling friendly model. The model should support up to 128K context, though realistically in an RP setting I'd expect it to lose coherence above 24K. This is a very early-stage and brand new finetune, so I expect it to have issues. Please read through the 'Usage' section before reporting any issues as Nemo has some specific requirements regarding backends, prompting formats &amp; sampler settings. # Feedback I appreciate all feedback on any of my model merges, you can use: * [The Community tab](https://huggingface.co/invisietch/Atlantis-v0.1-12B/discussions) - requires HF login. * [SillyTavern Discord thread](https://discord.com/channels/1100685673633153084/1266471239397019780) - Must be on SillyTavern Discord. Your feedback is how I improve these models for future versions. # Formats * [FP16 Safetensors](https://huggingface.co/invisietch/Atlantis-v0.1-12B) * [Static GGUF](https://huggingface.co/invisietch/Atlantis-v0.1-12B-GGUF) * [4BPW EXL2](https://huggingface.co/Statuo/Atlantis-v0.1-EXL2-4bpw) (thanks to [Statuo](https://huggingface.co/Statuo)) * [6BPW EXL2](https://huggingface.co/Hydrafig/Atlantis-v0.1-12B-6BPW-EXL2) (thanks to [Hydrafig](https://huggingface.co/Hydrafig)) * [8BPW EXL2](https://huggingface.co/Statuo/Atlantis-v0.1-EXL2-8bpw) (thanks to [Statuo](https://huggingface.co/Statuo)) # Disclaimer This model is built on top of Mistral's Nemo-2407-Instruct model and released under the Apache 2.0 license. This model is fairly unaligned and can generate explicit, disturbing or offensive responses. Use responsibly. I am not responsible for your use of this model. # Usage ## Inferencing If you are running a GGUF quantization, you need to make sure that you are running: - llama.cpp b3436 or newer - koboldcpp 1.71 or newer - oobabooga 1.11 or newer If the GGUF refuses to load, it is probably because your software is out of date. ## Prompt Format I would suggest using the Mistral prompt format: ``` [INST]Please write me a song.[/INST] ``` Other prompt formats may or may not work, but I've had most success with this. ## Recommended Sampler Settings **Note the very low temperature, this is the same as Mistral's recommendation for the base model.** I'd recommend starting with: - **Temp**: 0.3-0.5 - **Top-P**: 1 - **Min-P**: 0 - **Rep Pen**: 1.13-1.16 - **Response Length**: 350+ The model does prefer a longer response length. # Example Storywriting These prompts are used on SillyTavern with a fairly basic narrator card. 
I have trimmed the start and finish where the narrator decided to add chapter headings, commentary and the like. All samples are made with the Q8_0 GGUF loaded with koboldcpp, with response length capped at 4096 tokens. Continue was pressed a maximum of once if the model cut off halfway through a chapter. <details> <summary><b>Write me a 3,000 word opening chapter of a 'gritty hard sci-fi' novel, drawing inspiration from the writing styles of Isaac Asimov & Andy Weir. Use third person personal. Include dialogue and internal monologues. The POV character for the opening chapter should be a 26 year old astronaut called Tone on a mission to Europa, who has just realised that the craft for the return journey is broken beyond repair, and he only has supplies for a few months. Given that survival is impossible, he seeks to spend the few months he has researching Europa, so his life & mission are not wasted.</b></summary> Tone floated in the zero-gravity environment of the spacecraft, his heart pounding in his chest like a metronome counting down the seconds of his life. His mind raced, trying to process the implications of the news he had just received. The craft for the return journey was broken beyond repair, and he only had supplies for a few months. Survival was impossible. "Fuck," he muttered under his breath, his voice echoing in the silence of the spacecraft. "This can't be happening." He had been on this mission to Europa for what felt like an eternity. The icy moon of Jupiter was a harsh and unforgiving place, but it held secrets that could change the course of human history. Or so he had thought. Now, all he could think about was how he would meet his end in the cold emptiness of space. His thoughts were interrupted by the crackle of static over the comms system. It was Mission Control, back on Earth. "Tone, we've been informed of the situation," said the voice of Dr. Amelia Hart, the lead scientist overseeing the mission. Her tone was somber, but there was also a hint of determination in her voice. "We're working on a solution, but for now, you need to focus on conserving resources and gathering data. That's what you trained for, isn't it?" Tone took a deep breath, trying to calm himself. She was right. This wasn't the time to panic. He needed to stay focused and make every moment count. He had spent years preparing for this mission, and he wouldn't let it all go to waste because of a technical malfunction. "I understand, Doc," he replied, his voice steady despite the turmoil inside him. "I'll start prepping the rover and setting up the research equipment. I'll send you regular updates on my progress." As he ended the transmission, Tone couldn't help but feel a pang of loneliness. He was millions of miles away from home, with nothing but the frozen expanse of Europa stretching out before him. But he knew he couldn't give in to despair. Not yet. He began to prepare for the first expedition onto the surface. The rover was equipped with state-of-the-art instruments designed to analyze the composition of the ice and search for signs of life beneath the surface. As he worked, he found solace in the familiar routine, the hum of machinery drowning out the silent scream of his own mortality. But as he stepped out onto the surface of Europa, the reality of his situation hit him like a punch to the gut. The air was thin and bitterly cold, and the landscape was desolate and barren. There was no sign of life anywhere, just endless stretches of ice and rock. 
He felt suddenly, acutely aware of his own insignificance in the face of such vast, indifferent emptiness. Still, he pressed on, driven by a stubborn refusal to accept defeat. He collected samples of ice and rock, noting down observations and measurements with meticulous care. Every piece of data he gathered was a testament to the fact that he had been here, that he had made a mark on this alien world. As the days turned into weeks, Tone fell into a grim sort of rhythm. He woke up each morning to the same view of the frozen wilderness outside his window, ate the same bland food, and went through the motions of his experiments with single-minded focus. He barely slept, haunted by dreams of suffocation and isolation. And then, one day, something changed. He was out on the surface, collecting samples near a particularly interesting formation of ice, when he noticed something strange. A faint, rhythmic thumping sound, almost imperceptible against the backdrop of the wind whistling across the ice. He listened closely, straining his ears to pinpoint the source. It came again - a soft, regular beat, like a heartbeat. Tone's pulse quickened as he realized what he was hearing. It was coming from within the ice itself. With trembling fingers, he set up the drill, carefully boring into the ice until he reached the source of the sound. And there, nestled in the depths of the frozen moon, he discovered something that would change everything. A dark shape, roughly spherical, suspended in the clear blue liquid of the subsurface ocean. It pulsed with a faint glow, casting eerie shadows on the surrounding ice. Tone stared at it in awe and disbelief, his mind racing with questions and possibilities. "What the hell is that?" he whispered to himself, leaning closer to get a better look. Suddenly, the sphere seemed to shimmer and shift, growing larger and more distinct. It wasn't a mineral deposit, or some odd geological feature - it was alive. Somewhere, deep beneath the surface of Europa, something was living, breathing, pulsing with life. Tone's heart pounded in his chest as he realized the significance of his discovery. If this creature existed, it meant that Europa might harbor other forms of life as well. Life that could potentially be studied and understood, providing valuable insights into the origins of life in our own solar system. He quickly radioed back to Mission Control, relaying his findings in excited detail. The response was immediate and overwhelming. Dr. Hart asked him to collect more data, to document every aspect of the creature's behavior and appearance. They wanted to know everything they could about this remarkable discovery. Tone worked tirelessly, driven by a newfound sense of purpose. He observed the creature from a safe distance, documenting its movements and interactions with the environment around it. Over time, he began to notice patterns in its behavior, subtle rhythms and cycles that suggested a level of intelligence and awareness far beyond anything he had expected to find. As he continued his studies, Tone couldn't shake the feeling that he was being watched. The creature seemed to be aware of his presence, its gaze following him as he moved across the ice. It was unsettling, but also strangely exhilarating. He felt a connection to this alien being, a shared existence in the void of space that transcended the boundaries of species and planet. One evening, after a long day of observation, Tone returned to his habitat to find a message waiting for him from Dr. Hart. 
She praised his work and thanked him for his dedication, but she also warned him of the risks involved in studying such an unknown organism. She reminded him to be cautious and to prioritize his own safety above all else. Tone nodded solemnly, taking her words to heart. He knew the stakes were high, both for himself and for humanity as a whole. This discovery had the potential to rewrite the very foundations of astrobiology, but it also carried immense risk. As he prepared for another day of study, Tone couldn't help but wonder what lay ahead. Would he uncover further evidence of intelligent life on Europa? What would happen if he were to make contact with this mysterious creature? And most importantly, would he ever see Earth again? </details> <details> <summary><b>Write me a 3,000 word opening chapter of a 'high fantasy' novel, drawing inspiration from the writing styles of J R R Tolkien & George R R Martin. Use third person personal. Include dialogue and internal monologues. The POV character for the opening chapter should be a 19 year old female elf bard who is looking for adventure.</b></summary> In the resplendent realm of Elderglen, where the ancient trees whispered secrets to the wind, a young elf named Elowen prepared herself for the grand adventure she had long awaited. Her heart fluttered with anticipation as she packed her belongings into a worn leather satchel, the sound of her lute echoing through the quiet chamber like a soft lullaby bidding farewell to her childhood home. Elowen was an elven maiden of nineteen summers, her lithe form adorned with the grace and beauty of her kind. Her silken silver hair cascaded down her back like a waterfall, framing her ethereal face with its almond-shaped emerald eyes and a smattering of freckles across her nose and cheeks. She was a bard, a teller of tales and singer of songs, her voice as enchanting as the magical melodies she composed. As she secured the last string on her instrument, Elowen's fingers danced over the strings, playing a tune that echoed the emotions swirling within her. It spoke of the joy she felt at leaving behind the safety of her family's estate, the excitement of discovering new lands, and the trepidation of venturing into the unknown alone. "Father," she called out softly, "I am ready." Her father, Thalion Swiftshadow, entered the room, his stern expression belied by the warmth in his gaze. "Are you certain, my child?" he asked, his voice barely above a whisper. "Once you step beyond these walls, there is no turning back." Elowen nodded resolutely, her chin held high. "Yes, Father. I must seek out my own path, find my own destiny." Thalion sighed, a mixture of pride and worry evident in his eyes. "Very well. Remember what I told you: stay true to yourself, keep your wits about you, and trust only those who earn your trust." With a final embrace, Elowen stepped away from her past and towards her future. She walked through the grand halls of her ancestral home, her footsteps echoing off the stone floors. As she reached the heavy wooden doors, she paused, taking one last look at the life she knew before stepping into the vast expanse of possibility that lay ahead. Outside, the sun cast a golden glow upon the rolling hills and verdant forests surrounding Elderglen. Elowen took a deep breath, filling her lungs with the crisp morning air, and began her journey, her heart pounding with the rhythm of the road ahead. 
As she walked, she hummed a tune, her mind wandering to the countless stories she had heard growing up—of heroes and villains, magic and mystery, love and loss. Each note carried with it the promise of adventure, the hope of discovery, and the thrill of the unknown. Days turned into weeks, and Elowen found herself in the bustling city of Minas Eldar, a place teeming with life and opportunity. Here, she met other travelers, some seeking fame and fortune, others searching for lost loved ones or pursuing dreams as grand as her own. Among them was a rough-hewn dwarf named Grit Longbeard, whose laughter boomed as loudly as his hammer did against the forge. He offered to join forces, sharing stories of battlefields and tavern brawls, his gruff exterior hiding a heart of gold. Then there was Lyra Shadowheart, an enigmatic sorceress who moved with a fluid grace, her eyes flickering with an inner fire. She intrigued Elowen, her power both captivating and terrifying. Despite their differences, they formed an unlikely alliance, bound together by the shared desire for adventure. Together, they embarked on quests, facing challenges that tested their skills and courage. Through it all, Elowen's music kept their spirits high, her lyrics weaving tales of their triumphs and trials. One day, while exploring the ruins of a long-forgotten temple, they stumbled upon a hidden chamber guarded by a riddle-speaking sphinx. Its challenge echoed through the empty halls: "What walks on many feet but has no legs?" Grit grumbled, "How am I supposed to know? I ain't got time for riddles!" Lyra smirked, "You're right, Grit. This is more Elowen's domain." She glanced at the bard, who stood thoughtfully, her brow furrowed in concentration. Elowen closed her eyes, letting the words wash over her. Then, a smile spread across her lips as the answer came to her. "A snake," she said confidently. "It slithers along, moving with many feet yet having none." The sphinx's stone features seemed to soften, and the chamber door creaked open, revealing a treasure trove of ancient artifacts and coins, glinting in the dim light. As they delved deeper into the riches, Elowen couldn't help but feel a surge of pride. They had faced the unknown together, and now, they were reaping the rewards of their courage and wit. But little did they know, their greatest adventures—and darkest perils—were still yet to come. And so, under the watchful gaze of the moon, Elowen, Grit, and Lyra continued their journey, their hearts filled with wonder and their minds brimming with possibilities. </details> <details> <summary><b>Write me a 3,000 word opening chapter of a 'weird fiction' novel, drawing inspiration from the writing styles of China Mieville and Neil Gaiman. Use third person personal. Include dialogue and internal monologues. The POV character for the opening chapter should be a male in his 20s called Horton who has just come to the city looking for work.</b></summary> Horton stepped off the bus, his auburn hair ruffled by the wind that swept through the streets of the sprawling metropolis. His deep grey eyes scanned the towering buildings, each one more peculiar than the last. He had heard whispers of the strange occurrences here, but nothing could have prepared him for the sheer oddness of it all. "Welcome to the City of Whispers," said a voice behind him. Horton turned to see an elderly woman with long white hair, her eyes glinting with mischief. She wore a cloak adorned with symbols that seemed to shift in the light. 
"You're new here, aren't you?" "Yes, ma'am," Horton replied, his voice steady despite the unease churning within him. "I'm looking for work." She chuckled softly, a sound like rustling leaves. "Work, eh? Well, there's plenty of that to go around. But first, let me give you a little tour." Her hand gestured towards the street, and she began to walk briskly, expecting him to follow. As they ventured deeper into the heart of the city, Horton noticed more and more unusual sights. A group of people with wings sprouting from their backs soared overhead, their laughter echoing down to the cobblestone streets below. Further ahead, a creature resembling a giant spider crossed paths with them, its eight legs moving gracefully across the pavement. "This place…it's extraordinary," Horton murmured, his mind racing with questions. "It certainly is," the old woman agreed. "But remember, dear boy, appearances can be deceiving. Not everything is as it seems in the City of Whispers." They arrived at a large building, its facade covered in intricate carvings depicting scenes of both beauty and horror. "This is the Library of Shadows," she explained. "It holds knowledge beyond your wildest dreams. And if you seek work, you'll find none better suited for someone like yourself." Horton raised an eyebrow. "Someone like myself?" A sly grin spread across her face. "Oh, yes. You've got potential, young man. Now, go inside and ask for the Librarian. Tell him the White Witch sent you." With those cryptic words, she disappeared into the crowd, leaving Horton alone before the imposing entrance. Taking a deep breath, he pushed open the heavy doors and stepped inside. The air was thick with dust and the scent of ancient books. The walls were lined floor-to-ceiling with shelves filled with tomes of every shape and size. In the center stood a massive desk made of dark wood, behind which sat a figure cloaked in shadows. "Are you the Librarian?" Horton asked, his voice barely audible above the sound of pages turning. There was a pause before the figure spoke, its voice resonating through the chamber. "Indeed, I am. What brings you to my domain?" "I've been told you might need help," Horton said, trying to keep his composure. The Librarian leaned forward slightly, revealing a pair of glowing red eyes beneath the hood. "Help, yes. But first, tell me why you came to the City of Whispers." Horton hesitated, considering how much to reveal. Then again, what did he really know about this place? Perhaps honesty would serve him best. "I'm seeking adventure. Knowledge. Something different from the life I knew back home." The Librarian nodded slowly, as if digesting this information. "Very well. There may indeed be a position suitable for you here. However, it won't be easy. This library contains secrets that could change the course of history - or destroy it entirely. Are you sure you want to be part of such a dangerous enterprise?" Horton thought of the strangeness outside these walls, the promise of something greater waiting for him. Despite the uncertainty, he felt a spark of excitement ignite within him. "Yes," he answered firmly. "I'm ready for whatever comes next." The Librarian smiled, a wide, predatory grin that hinted at hidden power. "Excellent. Welcome to the Library of Shadows, Horton. Your journey begins now." And so, with no idea what lay ahead, Horton took up residence among the dusty tomes and whispered secrets, eager to unravel the mysteries of the City of Whispers. 
Little did he know, his arrival would set off a chain reaction that would shake the very foundations of reality itself. </details> [<img src="https://raw.githubusercontent.com/axolotl-ai-cloud/axolotl/main/image/axolotl-badge-web.png" alt="Built with Axolotl" width="200" height="32"/>](https://github.com/axolotl-ai-cloud/axolotl) # Training Training was on a group of public & private datasets, using 2x A100 80GB GPUs. In no particular order, I'd like to thank these people for their work compiling the datasets I used: - [SicariusSicariiStuff](https://huggingface.co/SicariusSicariiStuff/) - [PJMixers](https://huggingface.co/PJMixers/) - [mrfakename](https://huggingface.co/mrfakename/) - [lodrick-the-lafted](https://huggingface.co/lodrick-the-lafted/) I also used a number of internal datasets compiled by myself on public & private data.
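As a minimal illustration of the 'Usage' recommendations above (Mistral [INST] prompt format, low temperature, Rep Pen around 1.13-1.16), here is a hedged sketch that downloads one of the GGUF quants from the table and runs it with llama-cpp-python. The card itself does not prescribe this workflow; the chosen quant, context size and exact sampler values are illustrative assumptions, and both `huggingface_hub` and `llama-cpp-python` are assumed to be installed.

```python
# Sketch only, not part of the original card: fetch a quant from the table
# above and run it with llama-cpp-python using the card's recommended
# prompt format and sampler settings.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

model_path = hf_hub_download(
    repo_id="RichardErkhov/invisietch_-_Atlantis-v0.1-12B-gguf",
    filename="Atlantis-v0.1-12B.Q4_K_M.gguf",  # any quant from the table works
)

llm = Llama(
    model_path=model_path,
    n_ctx=8192,  # assumption; the card says the model supports up to 128K context
)

prompt = "[INST]Please write me a song.[/INST]"  # Mistral prompt format from the card

out = llm(
    prompt,
    temperature=0.4,      # recommended Temp range: 0.3-0.5
    top_p=1.0,            # recommended Top-P: 1
    min_p=0.0,            # recommended Min-P: 0
    repeat_penalty=1.15,  # recommended Rep Pen range: 1.13-1.16
    max_tokens=400,       # the card suggests a response length of 350+
)
print(out["choices"][0]["text"])
```

Note that `min_p` support depends on the llama-cpp-python build, and, as the card points out, the underlying llama.cpp must be b3436 or newer for these GGUFs to load at all.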
[ "CRAFT" ]
RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf
RichardErkhov
null
[ "gguf", "arxiv:2407.08488", "endpoints_compatible", "region:us", "conversational" ]
2024-08-05T22:01:23Z
2024-08-06T00:08:22+00:00
22
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Llama-3-Patronus-Lynx-8B-Instruct - GGUF - Model creator: https://huggingface.co/PatronusAI/ - Original model: https://huggingface.co/PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Llama-3-Patronus-Lynx-8B-Instruct.Q2_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q2_K.gguf) | Q2_K | 2.96GB | | [Llama-3-Patronus-Lynx-8B-Instruct.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ3_XS.gguf) | IQ3_XS | 3.28GB | | [Llama-3-Patronus-Lynx-8B-Instruct.IQ3_S.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ3_S.gguf) | IQ3_S | 3.43GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_S.gguf) | Q3_K_S | 3.41GB | | [Llama-3-Patronus-Lynx-8B-Instruct.IQ3_M.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ3_M.gguf) | IQ3_M | 3.52GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q3_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q3_K.gguf) | Q3_K | 3.74GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_M.gguf) | Q3_K_M | 3.74GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q3_K_L.gguf) | Q3_K_L | 4.03GB | | [Llama-3-Patronus-Lynx-8B-Instruct.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ4_XS.gguf) | IQ4_XS | 4.18GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q4_0.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_0.gguf) | Q4_0 | 4.34GB | | [Llama-3-Patronus-Lynx-8B-Instruct.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.IQ4_NL.gguf) | IQ4_NL | 4.38GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_K_S.gguf) | Q4_K_S | 4.37GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q4_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_K.gguf) | Q4_K | 4.58GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_K_M.gguf) | Q4_K_M | 4.58GB | | 
[Llama-3-Patronus-Lynx-8B-Instruct.Q4_1.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q4_1.gguf) | Q4_1 | 4.78GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q5_0.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_0.gguf) | Q5_0 | 5.21GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_K_S.gguf) | Q5_K_S | 5.21GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q5_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_K.gguf) | Q5_K | 5.34GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_K_M.gguf) | Q5_K_M | 5.34GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q5_1.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q5_1.gguf) | Q5_1 | 5.65GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q6_K.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q6_K.gguf) | Q6_K | 6.14GB | | [Llama-3-Patronus-Lynx-8B-Instruct.Q8_0.gguf](https://huggingface.co/RichardErkhov/PatronusAI_-_Llama-3-Patronus-Lynx-8B-Instruct-gguf/blob/main/Llama-3-Patronus-Lynx-8B-Instruct.Q8_0.gguf) | Q8_0 | 2.67GB | Original model description: --- library_name: transformers tags: - text-generation - pytorch - Lynx - Patronus AI - evaluation - hallucination-detection license: cc-by-nc-4.0 language: - en --- # Model Card for Model ID Lynx is an open-source hallucination evaluation model. Patronus-Lynx-8B-Instruct was trained on a mix of datasets including CovidQA, PubmedQA, DROP, RAGTruth. The datasets contain a mix of hand-annotated and synthetic data. The maximum sequence length is 8000 tokens. ## Model Details - **Model Type:** Patronus-Lynx-8B-Instruct is a fine-tuned version of meta-llama/Meta-Llama-3-8B-Instruct model. - **Language:** Primarily English - **Developed by:** Patronus AI - **Paper:** [https://arxiv.org/abs/2407.08488](https://arxiv.org/abs/2407.08488) - **License:** [https://creativecommons.org/licenses/by-nc/4.0/](https://creativecommons.org/licenses/by-nc/4.0/) ### Model Sources <!-- Provide the basic links for the model. --> - **Repository:** [https://github.com/patronus-ai/Lynx-hallucination-detection](https://github.com/patronus-ai/Lynx-hallucination-detection) ## How to Get Started with the Model Lynx is trained to detect hallucinations in RAG settings. Provided a document, question and answer, the model can evaluate whether the answer is faithful to the document. To use the model, we recommend using the following prompt: ``` PROMPT = """ Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. 
Show your reasoning. -- QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION): {question} -- DOCUMENT: {context} -- ANSWER: {answer} -- Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE": {{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}} """ ``` The model will output the score as 'PASS' if the answer is faithful to the document or FAIL if the answer is not faithful to the document. ## Inference To run inference, you can use HF pipeline: ``` model_name = 'PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct' pipe = pipeline( "text-generation", model=model_name, max_new_tokens=600, device="cuda", return_full_text=False ) messages = [ {"role": "user", "content": prompt}, ] result = pipe(messages) print(result[0]['generated_text']) ``` Since the model is trained in chat format, ensure that you pass the prompt as a user message. For more information on training details, refer to our [ArXiv paper](https://arxiv.org/abs/2407.08488). ## Evaluation The model was evaluated on [PatronusAI/HaluBench](https://huggingface.co/datasets/PatronusAI/HaluBench). | Model | HaluEval | RAGTruth | FinanceBench | DROP | CovidQA | PubmedQA | Overall | :---: | :---: | :---: | :---: | :---: | :---: | :---: | :---: | | GPT-4o | 87.9% | 84.3% | **85.3%** | 84.3% | 95.0% | 82.1% | 86.5% | | GPT-4-Turbo | 86.0% | **85.0%** | 82.2% | 84.8% | 90.6% | 83.5% | 85.0% | | GPT-3.5-Turbo | 62.2% | 50.7% | 60.9% | 57.2% | 56.7% | 62.8% | 58.7% | | Claude-3-Sonnet | 84.5% | 79.1% | 69.7% | 84.3% | 95.0% | 82.9% | 78.8% | | Claude-3-Haiku | 68.9% | 78.9% | 58.4% | 84.3% | 95.0% | 82.9% | 69.0% | | RAGAS Faithfulness | 70.6% | 75.8% | 59.5% | 59.6% | 75.0% | 67.7% | 66.9% | | Mistral-Instruct-7B | 78.3% | 77.7% | 56.3% | 56.3% | 71.7% | 77.9% | 69.4% | | Llama-3-Instruct-8B | 83.1% | 80.0% | 55.0% | 58.2% | 75.2% | 70.7% | 70.4% | | Llama-3-Instruct-70B | 87.0% | 83.8% | 72.7% | 69.4% | 85.0% | 82.6% | 80.1% | | LYNX (8B) | 85.7% | 80.0% | 72.5% | 77.8% | 96.3% | 85.2% | 82.9% | | LYNX (70B) | **88.4%** | 80.2% | 81.4% | **86.4%** | **97.5%** | **90.4%** | **87.4%** | ## Citation If you are using the model, cite using ``` @article{ravi2024lynx, title={Lynx: An Open Source Hallucination Evaluation Model}, author={Ravi, Selvan Sunitha and Mielczarek, Bartosz and Kannappan, Anand and Kiela, Douwe and Qian, Rebecca}, journal={arXiv preprint arXiv:2407.08488}, year={2024} } ``` ## Model Card Contact [@sunitha-ravi](https://huggingface.co/sunitha-ravi) [@RebeccaQian1](https://huggingface.co/RebeccaQian1) [@presidev](https://huggingface.co/presidev)
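For convenience, the following is a self-contained, hedged version of the inference snippet above: it adds the missing `pipeline` import and fills the PROMPT template before passing it as a user message. The QUESTION/DOCUMENT/ANSWER strings are illustrative placeholders, not from the original card, and a CUDA-capable GPU is assumed.

```python
# Self-contained sketch based on the card's inference snippet.
# The question/context/answer strings below are placeholders.
from transformers import pipeline

PROMPT = """
Given the following QUESTION, DOCUMENT and ANSWER you must analyze the provided answer and determine whether it is faithful to the contents of the DOCUMENT. The ANSWER must not offer new information beyond the context provided in the DOCUMENT. The ANSWER also must not contradict information provided in the DOCUMENT. Output your final verdict by strictly following this format: "PASS" if the answer is faithful to the DOCUMENT and "FAIL" if the answer is not faithful to the DOCUMENT. Show your reasoning.

--
QUESTION (THIS DOES NOT COUNT AS BACKGROUND INFORMATION):
{question}

--
DOCUMENT:
{context}

--
ANSWER:
{answer}

--

Your output should be in JSON FORMAT with the keys "REASONING" and "SCORE":
{{"REASONING": <your reasoning as bullet points>, "SCORE": <your final score>}}
"""

model_name = "PatronusAI/Llama-3-Patronus-Lynx-8B-Instruct"
pipe = pipeline(
    "text-generation",
    model=model_name,
    max_new_tokens=600,
    device="cuda",          # assumes a CUDA-capable GPU is available
    return_full_text=False,
)

prompt = PROMPT.format(
    question="What was the patient's temperature on admission?",            # placeholder
    context="The patient's temperature was 38.1 C on admission.",           # placeholder
    answer="The patient's temperature on admission was 38.1 C.",            # placeholder
)

# The model is trained in chat format, so the prompt is passed as a user message.
messages = [{"role": "user", "content": prompt}]
result = pipe(messages)
print(result[0]["generated_text"])
```

The expected output is a JSON object with "REASONING" bullet points and a "SCORE" of either "PASS" or "FAIL", as described above.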
[ "PUBMEDQA" ]
YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF
YorkieOH10
sentence-similarity
[ "sentence-transformers", "gguf", "feature-extraction", "sentence-similarity", "mteb", "transformers", "transformers.js", "llama-cpp", "gguf-my-repo", "en", "base_model:nomic-ai/nomic-embed-text-v1.5", "base_model:quantized:nomic-ai/nomic-embed-text-v1.5", "license:apache-2.0", "model-index", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-08-06T08:23:22Z
2024-08-06T08:23:24+00:00
22
0
--- base_model: nomic-ai/nomic-embed-text-v1.5 language: - en library_name: sentence-transformers license: apache-2.0 pipeline_tag: sentence-similarity tags: - feature-extraction - sentence-similarity - mteb - transformers - transformers.js - llama-cpp - gguf-my-repo model-index: - name: epoch_0_model results: - task: type: Classification dataset: name: MTEB AmazonCounterfactualClassification (en) type: mteb/amazon_counterfactual config: en split: test revision: e8379541af4e31359cca9fbcf4b00f2671dba205 metrics: - type: accuracy value: 75.20895522388058 - type: ap value: 38.57605549557802 - type: f1 value: 69.35586565857854 - task: type: Classification dataset: name: MTEB AmazonPolarityClassification type: mteb/amazon_polarity config: default split: test revision: e2d317d38cd51312af73b3d32a06d1a08b442046 metrics: - type: accuracy value: 91.8144 - type: ap value: 88.65222882032363 - type: f1 value: 91.80426301643274 - task: type: Classification dataset: name: MTEB AmazonReviewsClassification (en) type: mteb/amazon_reviews_multi config: en split: test revision: 1399c76144fd37290681b995c656ef9b2e06e26d metrics: - type: accuracy value: 47.162000000000006 - type: f1 value: 46.59329642263158 - task: type: Retrieval dataset: name: MTEB ArguAna type: arguana config: default split: test revision: None metrics: - type: map_at_1 value: 24.253 - type: map_at_10 value: 38.962 - type: map_at_100 value: 40.081 - type: map_at_1000 value: 40.089000000000006 - type: map_at_3 value: 33.499 - type: map_at_5 value: 36.351 - type: mrr_at_1 value: 24.609 - type: mrr_at_10 value: 39.099000000000004 - type: mrr_at_100 value: 40.211000000000006 - type: mrr_at_1000 value: 40.219 - type: mrr_at_3 value: 33.677 - type: mrr_at_5 value: 36.469 - type: ndcg_at_1 value: 24.253 - type: ndcg_at_10 value: 48.010999999999996 - type: ndcg_at_100 value: 52.756 - type: ndcg_at_1000 value: 52.964999999999996 - type: ndcg_at_3 value: 36.564 - type: ndcg_at_5 value: 41.711999999999996 - type: precision_at_1 value: 24.253 - type: precision_at_10 value: 7.738 - type: precision_at_100 value: 0.98 - type: precision_at_1000 value: 0.1 - type: precision_at_3 value: 15.149000000000001 - type: precision_at_5 value: 11.593 - type: recall_at_1 value: 24.253 - type: recall_at_10 value: 77.383 - type: recall_at_100 value: 98.009 - type: recall_at_1000 value: 99.644 - type: recall_at_3 value: 45.448 - type: recall_at_5 value: 57.965999999999994 - task: type: Clustering dataset: name: MTEB ArxivClusteringP2P type: mteb/arxiv-clustering-p2p config: default split: test revision: a122ad7f3f0291bf49cc6f4d32aa80929df69d5d metrics: - type: v_measure value: 45.69069567851087 - task: type: Clustering dataset: name: MTEB ArxivClusteringS2S type: mteb/arxiv-clustering-s2s config: default split: test revision: f910caf1a6075f7329cdf8c1a6135696f37dbd53 metrics: - type: v_measure value: 36.35185490976283 - task: type: Reranking dataset: name: MTEB AskUbuntuDupQuestions type: mteb/askubuntudupquestions-reranking config: default split: test revision: 2000358ca161889fa9c082cb41daa8dcfb161a54 metrics: - type: map value: 61.71274951450321 - type: mrr value: 76.06032625423207 - task: type: STS dataset: name: MTEB BIOSSES type: mteb/biosses-sts config: default split: test revision: d3fb88f8f02e40887cd149695127462bbcf29b4a metrics: - type: cos_sim_pearson value: 86.73980520022269 - type: cos_sim_spearman value: 84.24649792685918 - type: euclidean_pearson value: 85.85197641158186 - type: euclidean_spearman value: 84.24649792685918 - type: manhattan_pearson value: 
86.26809552711346 - type: manhattan_spearman value: 84.56397504030865 - task: type: Classification dataset: name: MTEB Banking77Classification type: mteb/banking77 config: default split: test revision: 0fd18e25b25c072e09e0d92ab615fda904d66300 metrics: - type: accuracy value: 84.25324675324674 - type: f1 value: 84.17872280892557 - task: type: Clustering dataset: name: MTEB BiorxivClusteringP2P type: mteb/biorxiv-clustering-p2p config: default split: test revision: 65b79d1d13f80053f67aca9498d9402c2d9f1f40 metrics: - type: v_measure value: 38.770253446400886 - task: type: Clustering dataset: name: MTEB BiorxivClusteringS2S type: mteb/biorxiv-clustering-s2s config: default split: test revision: 258694dd0231531bc1fd9de6ceb52a0853c6d908 metrics: - type: v_measure value: 32.94307095497281 - task: type: Retrieval dataset: name: MTEB CQADupstackAndroidRetrieval type: BeIR/cqadupstack config: default split: test revision: None metrics: - type: map_at_1 value: 32.164 - type: map_at_10 value: 42.641 - type: map_at_100 value: 43.947 - type: map_at_1000 value: 44.074999999999996 - type: map_at_3 value: 39.592 - type: map_at_5 value: 41.204 - type: mrr_at_1 value: 39.628 - type: mrr_at_10 value: 48.625 - type: mrr_at_100 value: 49.368 - type: mrr_at_1000 value: 49.413000000000004 - type: mrr_at_3 value: 46.400000000000006 - type: mrr_at_5 value: 47.68 - type: ndcg_at_1 value: 39.628 - type: ndcg_at_10 value: 48.564 - type: ndcg_at_100 value: 53.507000000000005 - type: ndcg_at_1000 value: 55.635999999999996 - type: ndcg_at_3 value: 44.471 - type: ndcg_at_5 value: 46.137 - type: precision_at_1 value: 39.628 - type: precision_at_10 value: 8.856 - type: precision_at_100 value: 1.429 - type: precision_at_1000 value: 0.191 - type: precision_at_3 value: 21.268 - type: precision_at_5 value: 14.649000000000001 - type: recall_at_1 value: 32.164 - type: recall_at_10 value: 59.609 - type: recall_at_100 value: 80.521 - type: recall_at_1000 value: 94.245 - type: recall_at_3 value: 46.521 - type: recall_at_5 value: 52.083999999999996 - type: map_at_1 value: 31.526 - type: map_at_10 value: 41.581 - type: map_at_100 value: 42.815999999999995 - type: map_at_1000 value: 42.936 - type: map_at_3 value: 38.605000000000004 - type: map_at_5 value: 40.351 - type: mrr_at_1 value: 39.489999999999995 - type: mrr_at_10 value: 47.829 - type: mrr_at_100 value: 48.512 - type: mrr_at_1000 value: 48.552 - type: mrr_at_3 value: 45.754 - type: mrr_at_5 value: 46.986 - type: ndcg_at_1 value: 39.489999999999995 - type: ndcg_at_10 value: 47.269 - type: ndcg_at_100 value: 51.564 - type: ndcg_at_1000 value: 53.53099999999999 - type: ndcg_at_3 value: 43.301 - type: ndcg_at_5 value: 45.239000000000004 - type: precision_at_1 value: 39.489999999999995 - type: precision_at_10 value: 8.93 - type: precision_at_100 value: 1.415 - type: precision_at_1000 value: 0.188 - type: precision_at_3 value: 20.892 - type: precision_at_5 value: 14.865999999999998 - type: recall_at_1 value: 31.526 - type: recall_at_10 value: 56.76 - type: recall_at_100 value: 75.029 - type: recall_at_1000 value: 87.491 - type: recall_at_3 value: 44.786 - type: recall_at_5 value: 50.254 - type: map_at_1 value: 40.987 - type: map_at_10 value: 52.827 - type: map_at_100 value: 53.751000000000005 - type: map_at_1000 value: 53.81 - type: map_at_3 value: 49.844 - type: map_at_5 value: 51.473 - type: mrr_at_1 value: 46.833999999999996 - type: mrr_at_10 value: 56.389 - type: mrr_at_100 value: 57.003 - type: mrr_at_1000 value: 57.034 - type: mrr_at_3 value: 54.17999999999999 - type: mrr_at_5 
value: 55.486999999999995 - type: ndcg_at_1 value: 46.833999999999996 - type: ndcg_at_10 value: 58.372 - type: ndcg_at_100 value: 62.068 - type: ndcg_at_1000 value: 63.288 - type: ndcg_at_3 value: 53.400000000000006 - type: ndcg_at_5 value: 55.766000000000005 - type: precision_at_1 value: 46.833999999999996 - type: precision_at_10 value: 9.191 - type: precision_at_100 value: 1.192 - type: precision_at_1000 value: 0.134 - type: precision_at_3 value: 23.448 - type: precision_at_5 value: 15.862000000000002 - type: recall_at_1 value: 40.987 - type: recall_at_10 value: 71.146 - type: recall_at_100 value: 87.035 - type: recall_at_1000 value: 95.633 - type: recall_at_3 value: 58.025999999999996 - type: recall_at_5 value: 63.815999999999995 - type: map_at_1 value: 24.587 - type: map_at_10 value: 33.114 - type: map_at_100 value: 34.043 - type: map_at_1000 value: 34.123999999999995 - type: map_at_3 value: 30.45 - type: map_at_5 value: 31.813999999999997 - type: mrr_at_1 value: 26.554 - type: mrr_at_10 value: 35.148 - type: mrr_at_100 value: 35.926 - type: mrr_at_1000 value: 35.991 - type: mrr_at_3 value: 32.599000000000004 - type: mrr_at_5 value: 33.893 - type: ndcg_at_1 value: 26.554 - type: ndcg_at_10 value: 38.132 - type: ndcg_at_100 value: 42.78 - type: ndcg_at_1000 value: 44.919 - type: ndcg_at_3 value: 32.833 - type: ndcg_at_5 value: 35.168 - type: precision_at_1 value: 26.554 - type: precision_at_10 value: 5.921 - type: precision_at_100 value: 0.8659999999999999 - type: precision_at_1000 value: 0.109 - type: precision_at_3 value: 13.861 - type: precision_at_5 value: 9.605 - type: recall_at_1 value: 24.587 - type: recall_at_10 value: 51.690000000000005 - type: recall_at_100 value: 73.428 - type: recall_at_1000 value: 89.551 - type: recall_at_3 value: 37.336999999999996 - type: recall_at_5 value: 43.047000000000004 - type: map_at_1 value: 16.715 - type: map_at_10 value: 24.251 - type: map_at_100 value: 25.326999999999998 - type: map_at_1000 value: 25.455 - type: map_at_3 value: 21.912000000000003 - type: map_at_5 value: 23.257 - type: mrr_at_1 value: 20.274 - type: mrr_at_10 value: 28.552 - type: mrr_at_100 value: 29.42 - type: mrr_at_1000 value: 29.497 - type: mrr_at_3 value: 26.14 - type: mrr_at_5 value: 27.502 - type: ndcg_at_1 value: 20.274 - type: ndcg_at_10 value: 29.088 - type: ndcg_at_100 value: 34.293 - type: ndcg_at_1000 value: 37.271 - type: ndcg_at_3 value: 24.708 - type: ndcg_at_5 value: 26.809 - type: precision_at_1 value: 20.274 - type: precision_at_10 value: 5.361 - type: precision_at_100 value: 0.915 - type: precision_at_1000 value: 0.13 - type: precision_at_3 value: 11.733 - type: precision_at_5 value: 8.556999999999999 - type: recall_at_1 value: 16.715 - type: recall_at_10 value: 39.587 - type: recall_at_100 value: 62.336000000000006 - type: recall_at_1000 value: 83.453 - type: recall_at_3 value: 27.839999999999996 - type: recall_at_5 value: 32.952999999999996 - type: map_at_1 value: 28.793000000000003 - type: map_at_10 value: 38.582 - type: map_at_100 value: 39.881 - type: map_at_1000 value: 39.987 - type: map_at_3 value: 35.851 - type: map_at_5 value: 37.289 - type: mrr_at_1 value: 34.455999999999996 - type: mrr_at_10 value: 43.909 - type: mrr_at_100 value: 44.74 - type: mrr_at_1000 value: 44.786 - type: mrr_at_3 value: 41.659 - type: mrr_at_5 value: 43.010999999999996 - type: ndcg_at_1 value: 34.455999999999996 - type: ndcg_at_10 value: 44.266 - type: ndcg_at_100 value: 49.639 - type: ndcg_at_1000 value: 51.644 - type: ndcg_at_3 value: 39.865 - type: ndcg_at_5 value: 
41.887 - type: precision_at_1 value: 34.455999999999996 - type: precision_at_10 value: 7.843999999999999 - type: precision_at_100 value: 1.243 - type: precision_at_1000 value: 0.158 - type: precision_at_3 value: 18.831999999999997 - type: precision_at_5 value: 13.147 - type: recall_at_1 value: 28.793000000000003 - type: recall_at_10 value: 55.68300000000001 - type: recall_at_100 value: 77.99000000000001 - type: recall_at_1000 value: 91.183 - type: recall_at_3 value: 43.293 - type: recall_at_5 value: 48.618 - type: map_at_1 value: 25.907000000000004 - type: map_at_10 value: 35.519 - type: map_at_100 value: 36.806 - type: map_at_1000 value: 36.912 - type: map_at_3 value: 32.748 - type: map_at_5 value: 34.232 - type: mrr_at_1 value: 31.621 - type: mrr_at_10 value: 40.687 - type: mrr_at_100 value: 41.583 - type: mrr_at_1000 value: 41.638999999999996 - type: mrr_at_3 value: 38.527 - type: mrr_at_5 value: 39.612 - type: ndcg_at_1 value: 31.621 - type: ndcg_at_10 value: 41.003 - type: ndcg_at_100 value: 46.617999999999995 - type: ndcg_at_1000 value: 48.82 - type: ndcg_at_3 value: 36.542 - type: ndcg_at_5 value: 38.368 - type: precision_at_1 value: 31.621 - type: precision_at_10 value: 7.396999999999999 - type: precision_at_100 value: 1.191 - type: precision_at_1000 value: 0.153 - type: precision_at_3 value: 17.39 - type: precision_at_5 value: 12.1 - type: recall_at_1 value: 25.907000000000004 - type: recall_at_10 value: 52.115 - type: recall_at_100 value: 76.238 - type: recall_at_1000 value: 91.218 - type: recall_at_3 value: 39.417 - type: recall_at_5 value: 44.435 - type: map_at_1 value: 25.732166666666668 - type: map_at_10 value: 34.51616666666667 - type: map_at_100 value: 35.67241666666666 - type: map_at_1000 value: 35.78675 - type: map_at_3 value: 31.953416666666662 - type: map_at_5 value: 33.333 - type: mrr_at_1 value: 30.300166666666673 - type: mrr_at_10 value: 38.6255 - type: mrr_at_100 value: 39.46183333333334 - type: mrr_at_1000 value: 39.519999999999996 - type: mrr_at_3 value: 36.41299999999999 - type: mrr_at_5 value: 37.6365 - type: ndcg_at_1 value: 30.300166666666673 - type: ndcg_at_10 value: 39.61466666666667 - type: ndcg_at_100 value: 44.60808333333334 - type: ndcg_at_1000 value: 46.91708333333334 - type: ndcg_at_3 value: 35.26558333333333 - type: ndcg_at_5 value: 37.220000000000006 - type: precision_at_1 value: 30.300166666666673 - type: precision_at_10 value: 6.837416666666667 - type: precision_at_100 value: 1.10425 - type: precision_at_1000 value: 0.14875 - type: precision_at_3 value: 16.13716666666667 - type: precision_at_5 value: 11.2815 - type: recall_at_1 value: 25.732166666666668 - type: recall_at_10 value: 50.578916666666665 - type: recall_at_100 value: 72.42183333333334 - type: recall_at_1000 value: 88.48766666666667 - type: recall_at_3 value: 38.41325 - type: recall_at_5 value: 43.515750000000004 - type: map_at_1 value: 23.951 - type: map_at_10 value: 30.974 - type: map_at_100 value: 31.804 - type: map_at_1000 value: 31.900000000000002 - type: map_at_3 value: 28.762 - type: map_at_5 value: 29.94 - type: mrr_at_1 value: 26.534000000000002 - type: mrr_at_10 value: 33.553 - type: mrr_at_100 value: 34.297 - type: mrr_at_1000 value: 34.36 - type: mrr_at_3 value: 31.391000000000002 - type: mrr_at_5 value: 32.525999999999996 - type: ndcg_at_1 value: 26.534000000000002 - type: ndcg_at_10 value: 35.112 - type: ndcg_at_100 value: 39.28 - type: ndcg_at_1000 value: 41.723 - type: ndcg_at_3 value: 30.902 - type: ndcg_at_5 value: 32.759 - type: precision_at_1 value: 26.534000000000002 
- type: precision_at_10 value: 5.445 - type: precision_at_100 value: 0.819 - type: precision_at_1000 value: 0.11 - type: precision_at_3 value: 12.986 - type: precision_at_5 value: 9.049 - type: recall_at_1 value: 23.951 - type: recall_at_10 value: 45.24 - type: recall_at_100 value: 64.12299999999999 - type: recall_at_1000 value: 82.28999999999999 - type: recall_at_3 value: 33.806000000000004 - type: recall_at_5 value: 38.277 - type: map_at_1 value: 16.829 - type: map_at_10 value: 23.684 - type: map_at_100 value: 24.683 - type: map_at_1000 value: 24.81 - type: map_at_3 value: 21.554000000000002 - type: map_at_5 value: 22.768 - type: mrr_at_1 value: 20.096 - type: mrr_at_10 value: 27.230999999999998 - type: mrr_at_100 value: 28.083999999999996 - type: mrr_at_1000 value: 28.166000000000004 - type: mrr_at_3 value: 25.212 - type: mrr_at_5 value: 26.32 - type: ndcg_at_1 value: 20.096 - type: ndcg_at_10 value: 27.989000000000004 - type: ndcg_at_100 value: 32.847 - type: ndcg_at_1000 value: 35.896 - type: ndcg_at_3 value: 24.116 - type: ndcg_at_5 value: 25.964 - type: precision_at_1 value: 20.096 - type: precision_at_10 value: 5 - type: precision_at_100 value: 0.8750000000000001 - type: precision_at_1000 value: 0.131 - type: precision_at_3 value: 11.207 - type: precision_at_5 value: 8.08 - type: recall_at_1 value: 16.829 - type: recall_at_10 value: 37.407000000000004 - type: recall_at_100 value: 59.101000000000006 - type: recall_at_1000 value: 81.024 - type: recall_at_3 value: 26.739 - type: recall_at_5 value: 31.524 - type: map_at_1 value: 24.138 - type: map_at_10 value: 32.275999999999996 - type: map_at_100 value: 33.416000000000004 - type: map_at_1000 value: 33.527 - type: map_at_3 value: 29.854000000000003 - type: map_at_5 value: 31.096 - type: mrr_at_1 value: 28.450999999999997 - type: mrr_at_10 value: 36.214 - type: mrr_at_100 value: 37.134 - type: mrr_at_1000 value: 37.198 - type: mrr_at_3 value: 34.001999999999995 - type: mrr_at_5 value: 35.187000000000005 - type: ndcg_at_1 value: 28.450999999999997 - type: ndcg_at_10 value: 37.166 - type: ndcg_at_100 value: 42.454 - type: ndcg_at_1000 value: 44.976 - type: ndcg_at_3 value: 32.796 - type: ndcg_at_5 value: 34.631 - type: precision_at_1 value: 28.450999999999997 - type: precision_at_10 value: 6.241 - type: precision_at_100 value: 0.9950000000000001 - type: precision_at_1000 value: 0.133 - type: precision_at_3 value: 14.801 - type: precision_at_5 value: 10.280000000000001 - type: recall_at_1 value: 24.138 - type: recall_at_10 value: 48.111 - type: recall_at_100 value: 71.245 - type: recall_at_1000 value: 88.986 - type: recall_at_3 value: 36.119 - type: recall_at_5 value: 40.846 - type: map_at_1 value: 23.244 - type: map_at_10 value: 31.227 - type: map_at_100 value: 33.007 - type: map_at_1000 value: 33.223 - type: map_at_3 value: 28.924 - type: map_at_5 value: 30.017 - type: mrr_at_1 value: 27.668 - type: mrr_at_10 value: 35.524 - type: mrr_at_100 value: 36.699 - type: mrr_at_1000 value: 36.759 - type: mrr_at_3 value: 33.366 - type: mrr_at_5 value: 34.552 - type: ndcg_at_1 value: 27.668 - type: ndcg_at_10 value: 36.381 - type: ndcg_at_100 value: 43.062 - type: ndcg_at_1000 value: 45.656 - type: ndcg_at_3 value: 32.501999999999995 - type: ndcg_at_5 value: 34.105999999999995 - type: precision_at_1 value: 27.668 - type: precision_at_10 value: 6.798 - type: precision_at_100 value: 1.492 - type: precision_at_1000 value: 0.234 - type: precision_at_3 value: 15.152 - type: precision_at_5 value: 10.791 - type: recall_at_1 value: 23.244 - type: 
recall_at_10 value: 45.979 - type: recall_at_100 value: 74.822 - type: recall_at_1000 value: 91.078 - type: recall_at_3 value: 34.925 - type: recall_at_5 value: 39.126 - type: map_at_1 value: 19.945 - type: map_at_10 value: 27.517999999999997 - type: map_at_100 value: 28.588 - type: map_at_1000 value: 28.682000000000002 - type: map_at_3 value: 25.345000000000002 - type: map_at_5 value: 26.555 - type: mrr_at_1 value: 21.996 - type: mrr_at_10 value: 29.845 - type: mrr_at_100 value: 30.775999999999996 - type: mrr_at_1000 value: 30.845 - type: mrr_at_3 value: 27.726 - type: mrr_at_5 value: 28.882 - type: ndcg_at_1 value: 21.996 - type: ndcg_at_10 value: 32.034 - type: ndcg_at_100 value: 37.185 - type: ndcg_at_1000 value: 39.645 - type: ndcg_at_3 value: 27.750999999999998 - type: ndcg_at_5 value: 29.805999999999997 - type: precision_at_1 value: 21.996 - type: precision_at_10 value: 5.065 - type: precision_at_100 value: 0.819 - type: precision_at_1000 value: 0.11399999999999999 - type: precision_at_3 value: 12.076 - type: precision_at_5 value: 8.392 - type: recall_at_1 value: 19.945 - type: recall_at_10 value: 43.62 - type: recall_at_100 value: 67.194 - type: recall_at_1000 value: 85.7 - type: recall_at_3 value: 32.15 - type: recall_at_5 value: 37.208999999999996 - task: type: Retrieval dataset: name: MTEB ClimateFEVER type: climate-fever config: default split: test revision: None metrics: - type: map_at_1 value: 18.279 - type: map_at_10 value: 31.052999999999997 - type: map_at_100 value: 33.125 - type: map_at_1000 value: 33.306000000000004 - type: map_at_3 value: 26.208 - type: map_at_5 value: 28.857 - type: mrr_at_1 value: 42.671 - type: mrr_at_10 value: 54.557 - type: mrr_at_100 value: 55.142 - type: mrr_at_1000 value: 55.169000000000004 - type: mrr_at_3 value: 51.488 - type: mrr_at_5 value: 53.439 - type: ndcg_at_1 value: 42.671 - type: ndcg_at_10 value: 41.276 - type: ndcg_at_100 value: 48.376000000000005 - type: ndcg_at_1000 value: 51.318 - type: ndcg_at_3 value: 35.068 - type: ndcg_at_5 value: 37.242 - type: precision_at_1 value: 42.671 - type: precision_at_10 value: 12.638 - type: precision_at_100 value: 2.045 - type: precision_at_1000 value: 0.26 - type: precision_at_3 value: 26.08 - type: precision_at_5 value: 19.805 - type: recall_at_1 value: 18.279 - type: recall_at_10 value: 46.946 - type: recall_at_100 value: 70.97200000000001 - type: recall_at_1000 value: 87.107 - type: recall_at_3 value: 31.147999999999996 - type: recall_at_5 value: 38.099 - task: type: Retrieval dataset: name: MTEB DBPedia type: dbpedia-entity config: default split: test revision: None metrics: - type: map_at_1 value: 8.573 - type: map_at_10 value: 19.747 - type: map_at_100 value: 28.205000000000002 - type: map_at_1000 value: 29.831000000000003 - type: map_at_3 value: 14.109 - type: map_at_5 value: 16.448999999999998 - type: mrr_at_1 value: 71 - type: mrr_at_10 value: 77.68599999999999 - type: mrr_at_100 value: 77.995 - type: mrr_at_1000 value: 78.00200000000001 - type: mrr_at_3 value: 76.292 - type: mrr_at_5 value: 77.029 - type: ndcg_at_1 value: 59.12500000000001 - type: ndcg_at_10 value: 43.9 - type: ndcg_at_100 value: 47.863 - type: ndcg_at_1000 value: 54.848 - type: ndcg_at_3 value: 49.803999999999995 - type: ndcg_at_5 value: 46.317 - type: precision_at_1 value: 71 - type: precision_at_10 value: 34.4 - type: precision_at_100 value: 11.063 - type: precision_at_1000 value: 1.989 - type: precision_at_3 value: 52.333 - type: precision_at_5 value: 43.7 - type: recall_at_1 value: 8.573 - type: recall_at_10 value: 
25.615 - type: recall_at_100 value: 53.385000000000005 - type: recall_at_1000 value: 75.46000000000001 - type: recall_at_3 value: 15.429 - type: recall_at_5 value: 19.357 - task: type: Classification dataset: name: MTEB EmotionClassification type: mteb/emotion config: default split: test revision: 4f58c6b202a23cf9a4da393831edf4f9183cad37 metrics: - type: accuracy value: 47.989999999999995 - type: f1 value: 42.776314451497555 - task: type: Retrieval dataset: name: MTEB FEVER type: fever config: default split: test revision: None metrics: - type: map_at_1 value: 74.13499999999999 - type: map_at_10 value: 82.825 - type: map_at_100 value: 83.096 - type: map_at_1000 value: 83.111 - type: map_at_3 value: 81.748 - type: map_at_5 value: 82.446 - type: mrr_at_1 value: 79.553 - type: mrr_at_10 value: 86.654 - type: mrr_at_100 value: 86.774 - type: mrr_at_1000 value: 86.778 - type: mrr_at_3 value: 85.981 - type: mrr_at_5 value: 86.462 - type: ndcg_at_1 value: 79.553 - type: ndcg_at_10 value: 86.345 - type: ndcg_at_100 value: 87.32 - type: ndcg_at_1000 value: 87.58200000000001 - type: ndcg_at_3 value: 84.719 - type: ndcg_at_5 value: 85.677 - type: precision_at_1 value: 79.553 - type: precision_at_10 value: 10.402000000000001 - type: precision_at_100 value: 1.1119999999999999 - type: precision_at_1000 value: 0.11499999999999999 - type: precision_at_3 value: 32.413 - type: precision_at_5 value: 20.138 - type: recall_at_1 value: 74.13499999999999 - type: recall_at_10 value: 93.215 - type: recall_at_100 value: 97.083 - type: recall_at_1000 value: 98.732 - type: recall_at_3 value: 88.79 - type: recall_at_5 value: 91.259 - task: type: Retrieval dataset: name: MTEB FiQA2018 type: fiqa config: default split: test revision: None metrics: - type: map_at_1 value: 18.298000000000002 - type: map_at_10 value: 29.901 - type: map_at_100 value: 31.528 - type: map_at_1000 value: 31.713 - type: map_at_3 value: 25.740000000000002 - type: map_at_5 value: 28.227999999999998 - type: mrr_at_1 value: 36.728 - type: mrr_at_10 value: 45.401 - type: mrr_at_100 value: 46.27 - type: mrr_at_1000 value: 46.315 - type: mrr_at_3 value: 42.978 - type: mrr_at_5 value: 44.29 - type: ndcg_at_1 value: 36.728 - type: ndcg_at_10 value: 37.456 - type: ndcg_at_100 value: 43.832 - type: ndcg_at_1000 value: 47 - type: ndcg_at_3 value: 33.694 - type: ndcg_at_5 value: 35.085 - type: precision_at_1 value: 36.728 - type: precision_at_10 value: 10.386 - type: precision_at_100 value: 1.701 - type: precision_at_1000 value: 0.22599999999999998 - type: precision_at_3 value: 22.479 - type: precision_at_5 value: 16.605 - type: recall_at_1 value: 18.298000000000002 - type: recall_at_10 value: 44.369 - type: recall_at_100 value: 68.098 - type: recall_at_1000 value: 87.21900000000001 - type: recall_at_3 value: 30.215999999999998 - type: recall_at_5 value: 36.861 - task: type: Retrieval dataset: name: MTEB HotpotQA type: hotpotqa config: default split: test revision: None metrics: - type: map_at_1 value: 39.568 - type: map_at_10 value: 65.061 - type: map_at_100 value: 65.896 - type: map_at_1000 value: 65.95100000000001 - type: map_at_3 value: 61.831 - type: map_at_5 value: 63.849000000000004 - type: mrr_at_1 value: 79.136 - type: mrr_at_10 value: 84.58200000000001 - type: mrr_at_100 value: 84.765 - type: mrr_at_1000 value: 84.772 - type: mrr_at_3 value: 83.684 - type: mrr_at_5 value: 84.223 - type: ndcg_at_1 value: 79.136 - type: ndcg_at_10 value: 72.622 - type: ndcg_at_100 value: 75.539 - type: ndcg_at_1000 value: 76.613 - type: ndcg_at_3 value: 68.065 - 
type: ndcg_at_5 value: 70.58 - type: precision_at_1 value: 79.136 - type: precision_at_10 value: 15.215 - type: precision_at_100 value: 1.7500000000000002 - type: precision_at_1000 value: 0.189 - type: precision_at_3 value: 44.011 - type: precision_at_5 value: 28.388999999999996 - type: recall_at_1 value: 39.568 - type: recall_at_10 value: 76.077 - type: recall_at_100 value: 87.481 - type: recall_at_1000 value: 94.56400000000001 - type: recall_at_3 value: 66.01599999999999 - type: recall_at_5 value: 70.97200000000001 - task: type: Classification dataset: name: MTEB ImdbClassification type: mteb/imdb config: default split: test revision: 3d86128a09e091d6018b6d26cad27f2739fc2db7 metrics: - type: accuracy value: 85.312 - type: ap value: 80.36296867333715 - type: f1 value: 85.26613311552218 - task: type: Retrieval dataset: name: MTEB MSMARCO type: msmarco config: default split: dev revision: None metrics: - type: map_at_1 value: 23.363999999999997 - type: map_at_10 value: 35.711999999999996 - type: map_at_100 value: 36.876999999999995 - type: map_at_1000 value: 36.923 - type: map_at_3 value: 32.034 - type: map_at_5 value: 34.159 - type: mrr_at_1 value: 24.04 - type: mrr_at_10 value: 36.345 - type: mrr_at_100 value: 37.441 - type: mrr_at_1000 value: 37.480000000000004 - type: mrr_at_3 value: 32.713 - type: mrr_at_5 value: 34.824 - type: ndcg_at_1 value: 24.026 - type: ndcg_at_10 value: 42.531 - type: ndcg_at_100 value: 48.081 - type: ndcg_at_1000 value: 49.213 - type: ndcg_at_3 value: 35.044 - type: ndcg_at_5 value: 38.834 - type: precision_at_1 value: 24.026 - type: precision_at_10 value: 6.622999999999999 - type: precision_at_100 value: 0.941 - type: precision_at_1000 value: 0.104 - type: precision_at_3 value: 14.909 - type: precision_at_5 value: 10.871 - type: recall_at_1 value: 23.363999999999997 - type: recall_at_10 value: 63.426 - type: recall_at_100 value: 88.96300000000001 - type: recall_at_1000 value: 97.637 - type: recall_at_3 value: 43.095 - type: recall_at_5 value: 52.178000000000004 - task: type: Classification dataset: name: MTEB MTOPDomainClassification (en) type: mteb/mtop_domain config: en split: test revision: d80d48c1eb48d3562165c59d59d0034df9fff0bf metrics: - type: accuracy value: 93.0095759233926 - type: f1 value: 92.78387794667408 - task: type: Classification dataset: name: MTEB MTOPIntentClassification (en) type: mteb/mtop_intent config: en split: test revision: ae001d0e6b1228650b7bd1c2c65fb50ad11a8aba metrics: - type: accuracy value: 75.0296397628819 - type: f1 value: 58.45699589820874 - task: type: Classification dataset: name: MTEB MassiveIntentClassification (en) type: mteb/amazon_massive_intent config: en split: test revision: 31efe3c427b0bae9c22cbb560b8f15491cc6bed7 metrics: - type: accuracy value: 73.45662407531944 - type: f1 value: 71.42364781421813 - task: type: Classification dataset: name: MTEB MassiveScenarioClassification (en) type: mteb/amazon_massive_scenario config: en split: test revision: 7d571f92784cd94a019292a1f45445077d0ef634 metrics: - type: accuracy value: 77.07800941492937 - type: f1 value: 77.22799045640845 - task: type: Clustering dataset: name: MTEB MedrxivClusteringP2P type: mteb/medrxiv-clustering-p2p config: default split: test revision: e7a26af6f3ae46b30dde8737f02c07b1505bcc73 metrics: - type: v_measure value: 34.531234379250606 - task: type: Clustering dataset: name: MTEB MedrxivClusteringS2S type: mteb/medrxiv-clustering-s2s config: default split: test revision: 35191c8c0dca72d8ff3efcd72aa802307d469663 metrics: - type: v_measure value: 
30.941490381193802 - task: type: Reranking dataset: name: MTEB MindSmallReranking type: mteb/mind_small config: default split: test revision: 3bdac13927fdc888b903db93b2ffdbd90b295a69 metrics: - type: map value: 30.3115090856725 - type: mrr value: 31.290667638675757 - task: type: Retrieval dataset: name: MTEB NFCorpus type: nfcorpus config: default split: test revision: None metrics: - type: map_at_1 value: 5.465 - type: map_at_10 value: 13.03 - type: map_at_100 value: 16.057 - type: map_at_1000 value: 17.49 - type: map_at_3 value: 9.553 - type: map_at_5 value: 11.204 - type: mrr_at_1 value: 43.653 - type: mrr_at_10 value: 53.269 - type: mrr_at_100 value: 53.72 - type: mrr_at_1000 value: 53.761 - type: mrr_at_3 value: 50.929 - type: mrr_at_5 value: 52.461 - type: ndcg_at_1 value: 42.26 - type: ndcg_at_10 value: 34.673 - type: ndcg_at_100 value: 30.759999999999998 - type: ndcg_at_1000 value: 39.728 - type: ndcg_at_3 value: 40.349000000000004 - type: ndcg_at_5 value: 37.915 - type: precision_at_1 value: 43.653 - type: precision_at_10 value: 25.789 - type: precision_at_100 value: 7.754999999999999 - type: precision_at_1000 value: 2.07 - type: precision_at_3 value: 38.596000000000004 - type: precision_at_5 value: 33.251 - type: recall_at_1 value: 5.465 - type: recall_at_10 value: 17.148 - type: recall_at_100 value: 29.768 - type: recall_at_1000 value: 62.239 - type: recall_at_3 value: 10.577 - type: recall_at_5 value: 13.315 - task: type: Retrieval dataset: name: MTEB NQ type: nq config: default split: test revision: None metrics: - type: map_at_1 value: 37.008 - type: map_at_10 value: 52.467 - type: map_at_100 value: 53.342999999999996 - type: map_at_1000 value: 53.366 - type: map_at_3 value: 48.412 - type: map_at_5 value: 50.875 - type: mrr_at_1 value: 41.541 - type: mrr_at_10 value: 54.967 - type: mrr_at_100 value: 55.611 - type: mrr_at_1000 value: 55.627 - type: mrr_at_3 value: 51.824999999999996 - type: mrr_at_5 value: 53.763000000000005 - type: ndcg_at_1 value: 41.541 - type: ndcg_at_10 value: 59.724999999999994 - type: ndcg_at_100 value: 63.38700000000001 - type: ndcg_at_1000 value: 63.883 - type: ndcg_at_3 value: 52.331 - type: ndcg_at_5 value: 56.327000000000005 - type: precision_at_1 value: 41.541 - type: precision_at_10 value: 9.447 - type: precision_at_100 value: 1.1520000000000001 - type: precision_at_1000 value: 0.12 - type: precision_at_3 value: 23.262 - type: precision_at_5 value: 16.314999999999998 - type: recall_at_1 value: 37.008 - type: recall_at_10 value: 79.145 - type: recall_at_100 value: 94.986 - type: recall_at_1000 value: 98.607 - type: recall_at_3 value: 60.277 - type: recall_at_5 value: 69.407 - task: type: Retrieval dataset: name: MTEB QuoraRetrieval type: quora config: default split: test revision: None metrics: - type: map_at_1 value: 70.402 - type: map_at_10 value: 84.181 - type: map_at_100 value: 84.796 - type: map_at_1000 value: 84.81400000000001 - type: map_at_3 value: 81.209 - type: map_at_5 value: 83.085 - type: mrr_at_1 value: 81.02000000000001 - type: mrr_at_10 value: 87.263 - type: mrr_at_100 value: 87.36 - type: mrr_at_1000 value: 87.36 - type: mrr_at_3 value: 86.235 - type: mrr_at_5 value: 86.945 - type: ndcg_at_1 value: 81.01 - type: ndcg_at_10 value: 87.99900000000001 - type: ndcg_at_100 value: 89.217 - type: ndcg_at_1000 value: 89.33 - type: ndcg_at_3 value: 85.053 - type: ndcg_at_5 value: 86.703 - type: precision_at_1 value: 81.01 - type: precision_at_10 value: 13.336 - type: precision_at_100 value: 1.52 - type: precision_at_1000 value: 0.156 - 
type: precision_at_3 value: 37.14 - type: precision_at_5 value: 24.44 - type: recall_at_1 value: 70.402 - type: recall_at_10 value: 95.214 - type: recall_at_100 value: 99.438 - type: recall_at_1000 value: 99.928 - type: recall_at_3 value: 86.75699999999999 - type: recall_at_5 value: 91.44099999999999 - task: type: Clustering dataset: name: MTEB RedditClustering type: mteb/reddit-clustering config: default split: test revision: 24640382cdbf8abc73003fb0fa6d111a705499eb metrics: - type: v_measure value: 56.51721502758904 - task: type: Clustering dataset: name: MTEB RedditClusteringP2P type: mteb/reddit-clustering-p2p config: default split: test revision: 282350215ef01743dc01b456c7f5241fa8937f16 metrics: - type: v_measure value: 61.054808572333016 - task: type: Retrieval dataset: name: MTEB SCIDOCS type: scidocs config: default split: test revision: None metrics: - type: map_at_1 value: 4.578 - type: map_at_10 value: 11.036999999999999 - type: map_at_100 value: 12.879999999999999 - type: map_at_1000 value: 13.150999999999998 - type: map_at_3 value: 8.133 - type: map_at_5 value: 9.559 - type: mrr_at_1 value: 22.6 - type: mrr_at_10 value: 32.68 - type: mrr_at_100 value: 33.789 - type: mrr_at_1000 value: 33.854 - type: mrr_at_3 value: 29.7 - type: mrr_at_5 value: 31.480000000000004 - type: ndcg_at_1 value: 22.6 - type: ndcg_at_10 value: 18.616 - type: ndcg_at_100 value: 25.883 - type: ndcg_at_1000 value: 30.944 - type: ndcg_at_3 value: 18.136 - type: ndcg_at_5 value: 15.625 - type: precision_at_1 value: 22.6 - type: precision_at_10 value: 9.48 - type: precision_at_100 value: 1.991 - type: precision_at_1000 value: 0.321 - type: precision_at_3 value: 16.8 - type: precision_at_5 value: 13.54 - type: recall_at_1 value: 4.578 - type: recall_at_10 value: 19.213 - type: recall_at_100 value: 40.397 - type: recall_at_1000 value: 65.2 - type: recall_at_3 value: 10.208 - type: recall_at_5 value: 13.718 - task: type: STS dataset: name: MTEB SICK-R type: mteb/sickr-sts config: default split: test revision: a6ea5a8cab320b040a23452cc28066d9beae2cee metrics: - type: cos_sim_pearson value: 83.44288351714071 - type: cos_sim_spearman value: 79.37995604564952 - type: euclidean_pearson value: 81.1078874670718 - type: euclidean_spearman value: 79.37995905980499 - type: manhattan_pearson value: 81.03697527288986 - type: manhattan_spearman value: 79.33490235296236 - task: type: STS dataset: name: MTEB STS12 type: mteb/sts12-sts config: default split: test revision: a0d554a64d88156834ff5ae9920b964011b16384 metrics: - type: cos_sim_pearson value: 84.95557650436523 - type: cos_sim_spearman value: 78.5190672399868 - type: euclidean_pearson value: 81.58064025904707 - type: euclidean_spearman value: 78.5190672399868 - type: manhattan_pearson value: 81.52857930619889 - type: manhattan_spearman value: 78.50421361308034 - task: type: STS dataset: name: MTEB STS13 type: mteb/sts13-sts config: default split: test revision: 7e90230a92c190f1bf69ae9002b8cea547a64cca metrics: - type: cos_sim_pearson value: 84.79128416228737 - type: cos_sim_spearman value: 86.05402451477147 - type: euclidean_pearson value: 85.46280267054289 - type: euclidean_spearman value: 86.05402451477147 - type: manhattan_pearson value: 85.46278563858236 - type: manhattan_spearman value: 86.08079590861004 - task: type: STS dataset: name: MTEB STS14 type: mteb/sts14-sts config: default split: test revision: 6031580fec1f6af667f0bd2da0a551cf4f0b2375 metrics: - type: cos_sim_pearson value: 83.20623089568763 - type: cos_sim_spearman value: 81.53786907061009 - type: 
euclidean_pearson value: 82.82272250091494 - type: euclidean_spearman value: 81.53786907061009 - type: manhattan_pearson value: 82.78850494027013 - type: manhattan_spearman value: 81.5135618083407 - task: type: STS dataset: name: MTEB STS15 type: mteb/sts15-sts config: default split: test revision: ae752c7c21bf194d8b67fd573edf7ae58183cbe3 metrics: - type: cos_sim_pearson value: 85.46366618397936 - type: cos_sim_spearman value: 86.96566013336908 - type: euclidean_pearson value: 86.62651697548931 - type: euclidean_spearman value: 86.96565526364454 - type: manhattan_pearson value: 86.58812160258009 - type: manhattan_spearman value: 86.9336484321288 - task: type: STS dataset: name: MTEB STS16 type: mteb/sts16-sts config: default split: test revision: 4d8694f8f0e0100860b497b999b3dbed754a0513 metrics: - type: cos_sim_pearson value: 82.51858358641559 - type: cos_sim_spearman value: 84.7652527954999 - type: euclidean_pearson value: 84.23914783766861 - type: euclidean_spearman value: 84.7652527954999 - type: manhattan_pearson value: 84.22749648503171 - type: manhattan_spearman value: 84.74527996746386 - task: type: STS dataset: name: MTEB STS17 (en-en) type: mteb/sts17-crosslingual-sts config: en-en split: test revision: af5e6fb845001ecf41f4c1e033ce921939a2a68d metrics: - type: cos_sim_pearson value: 87.28026563313065 - type: cos_sim_spearman value: 87.46928143824915 - type: euclidean_pearson value: 88.30558762000372 - type: euclidean_spearman value: 87.46928143824915 - type: manhattan_pearson value: 88.10513330809331 - type: manhattan_spearman value: 87.21069787834173 - task: type: STS dataset: name: MTEB STS22 (en) type: mteb/sts22-crosslingual-sts config: en split: test revision: 6d1ba47164174a496b7fa5d3569dae26a6813b80 metrics: - type: cos_sim_pearson value: 62.376497134587375 - type: cos_sim_spearman value: 65.0159550112516 - type: euclidean_pearson value: 65.64572120879598 - type: euclidean_spearman value: 65.0159550112516 - type: manhattan_pearson value: 65.88143604989976 - type: manhattan_spearman value: 65.17547297222434 - task: type: STS dataset: name: MTEB STSBenchmark type: mteb/stsbenchmark-sts config: default split: test revision: b0fddb56ed78048fa8b90373c8a3cfc37b684831 metrics: - type: cos_sim_pearson value: 84.22876368947644 - type: cos_sim_spearman value: 85.46935577445318 - type: euclidean_pearson value: 85.32830231392005 - type: euclidean_spearman value: 85.46935577445318 - type: manhattan_pearson value: 85.30353211758495 - type: manhattan_spearman value: 85.42821085956945 - task: type: Reranking dataset: name: MTEB SciDocsRR type: mteb/scidocs-reranking config: default split: test revision: d3c5e1fc0b855ab6097bf1cda04dd73947d7caab metrics: - type: map value: 80.60986667767133 - type: mrr value: 94.29432314236236 - task: type: Retrieval dataset: name: MTEB SciFact type: scifact config: default split: test revision: None metrics: - type: map_at_1 value: 54.528 - type: map_at_10 value: 65.187 - type: map_at_100 value: 65.62599999999999 - type: map_at_1000 value: 65.657 - type: map_at_3 value: 62.352 - type: map_at_5 value: 64.025 - type: mrr_at_1 value: 57.333 - type: mrr_at_10 value: 66.577 - type: mrr_at_100 value: 66.88 - type: mrr_at_1000 value: 66.908 - type: mrr_at_3 value: 64.556 - type: mrr_at_5 value: 65.739 - type: ndcg_at_1 value: 57.333 - type: ndcg_at_10 value: 70.275 - type: ndcg_at_100 value: 72.136 - type: ndcg_at_1000 value: 72.963 - type: ndcg_at_3 value: 65.414 - type: ndcg_at_5 value: 67.831 - type: precision_at_1 value: 57.333 - type: precision_at_10 value: 
9.5 - type: precision_at_100 value: 1.057 - type: precision_at_1000 value: 0.11199999999999999 - type: precision_at_3 value: 25.778000000000002 - type: precision_at_5 value: 17.2 - type: recall_at_1 value: 54.528 - type: recall_at_10 value: 84.356 - type: recall_at_100 value: 92.833 - type: recall_at_1000 value: 99.333 - type: recall_at_3 value: 71.283 - type: recall_at_5 value: 77.14999999999999 - task: type: PairClassification dataset: name: MTEB SprintDuplicateQuestions type: mteb/sprintduplicatequestions-pairclassification config: default split: test revision: d66bd1f72af766a5cc4b0ca5e00c162f89e8cc46 metrics: - type: cos_sim_accuracy value: 99.74158415841585 - type: cos_sim_ap value: 92.90048959850317 - type: cos_sim_f1 value: 86.35650810245687 - type: cos_sim_precision value: 90.4709748083242 - type: cos_sim_recall value: 82.6 - type: dot_accuracy value: 99.74158415841585 - type: dot_ap value: 92.90048959850317 - type: dot_f1 value: 86.35650810245687 - type: dot_precision value: 90.4709748083242 - type: dot_recall value: 82.6 - type: euclidean_accuracy value: 99.74158415841585 - type: euclidean_ap value: 92.90048959850317 - type: euclidean_f1 value: 86.35650810245687 - type: euclidean_precision value: 90.4709748083242 - type: euclidean_recall value: 82.6 - type: manhattan_accuracy value: 99.74158415841585 - type: manhattan_ap value: 92.87344692947894 - type: manhattan_f1 value: 86.38497652582159 - type: manhattan_precision value: 90.29443838604145 - type: manhattan_recall value: 82.8 - type: max_accuracy value: 99.74158415841585 - type: max_ap value: 92.90048959850317 - type: max_f1 value: 86.38497652582159 - task: type: Clustering dataset: name: MTEB StackExchangeClustering type: mteb/stackexchange-clustering config: default split: test revision: 6cbc1f7b2bc0622f2e39d2c77fa502909748c259 metrics: - type: v_measure value: 63.191648770424216 - task: type: Clustering dataset: name: MTEB StackExchangeClusteringP2P type: mteb/stackexchange-clustering-p2p config: default split: test revision: 815ca46b2622cec33ccafc3735d572c266efdb44 metrics: - type: v_measure value: 34.02944668730218 - task: type: Reranking dataset: name: MTEB StackOverflowDupQuestions type: mteb/stackoverflowdupquestions-reranking config: default split: test revision: e185fbe320c72810689fc5848eb6114e1ef5ec69 metrics: - type: map value: 50.466386167525265 - type: mrr value: 51.19071492233257 - task: type: Summarization dataset: name: MTEB SummEval type: mteb/summeval config: default split: test revision: cda12ad7615edc362dbf25a00fdd61d3b1eaf93c metrics: - type: cos_sim_pearson value: 30.198022505886435 - type: cos_sim_spearman value: 30.40170257939193 - type: dot_pearson value: 30.198015316402614 - type: dot_spearman value: 30.40170257939193 - task: type: Retrieval dataset: name: MTEB TRECCOVID type: trec-covid config: default split: test revision: None metrics: - type: map_at_1 value: 0.242 - type: map_at_10 value: 2.17 - type: map_at_100 value: 12.221 - type: map_at_1000 value: 28.63 - type: map_at_3 value: 0.728 - type: map_at_5 value: 1.185 - type: mrr_at_1 value: 94 - type: mrr_at_10 value: 97 - type: mrr_at_100 value: 97 - type: mrr_at_1000 value: 97 - type: mrr_at_3 value: 97 - type: mrr_at_5 value: 97 - type: ndcg_at_1 value: 89 - type: ndcg_at_10 value: 82.30499999999999 - type: ndcg_at_100 value: 61.839999999999996 - type: ndcg_at_1000 value: 53.381 - type: ndcg_at_3 value: 88.877 - type: ndcg_at_5 value: 86.05199999999999 - type: precision_at_1 value: 94 - type: precision_at_10 value: 87 - type: precision_at_100 
value: 63.38 - type: precision_at_1000 value: 23.498 - type: precision_at_3 value: 94 - type: precision_at_5 value: 92 - type: recall_at_1 value: 0.242 - type: recall_at_10 value: 2.302 - type: recall_at_100 value: 14.979000000000001 - type: recall_at_1000 value: 49.638 - type: recall_at_3 value: 0.753 - type: recall_at_5 value: 1.226 - task: type: Retrieval dataset: name: MTEB Touche2020 type: webis-touche2020 config: default split: test revision: None metrics: - type: map_at_1 value: 3.006 - type: map_at_10 value: 11.805 - type: map_at_100 value: 18.146 - type: map_at_1000 value: 19.788 - type: map_at_3 value: 5.914 - type: map_at_5 value: 8.801 - type: mrr_at_1 value: 40.816 - type: mrr_at_10 value: 56.36600000000001 - type: mrr_at_100 value: 56.721999999999994 - type: mrr_at_1000 value: 56.721999999999994 - type: mrr_at_3 value: 52.041000000000004 - type: mrr_at_5 value: 54.796 - type: ndcg_at_1 value: 37.755 - type: ndcg_at_10 value: 29.863 - type: ndcg_at_100 value: 39.571 - type: ndcg_at_1000 value: 51.385999999999996 - type: ndcg_at_3 value: 32.578 - type: ndcg_at_5 value: 32.351 - type: precision_at_1 value: 40.816 - type: precision_at_10 value: 26.531 - type: precision_at_100 value: 7.796 - type: precision_at_1000 value: 1.555 - type: precision_at_3 value: 32.653 - type: precision_at_5 value: 33.061 - type: recall_at_1 value: 3.006 - type: recall_at_10 value: 18.738 - type: recall_at_100 value: 48.058 - type: recall_at_1000 value: 83.41300000000001 - type: recall_at_3 value: 7.166 - type: recall_at_5 value: 12.102 - task: type: Classification dataset: name: MTEB ToxicConversationsClassification type: mteb/toxic_conversations_50k config: default split: test revision: d7c0de2777da35d6aae2200a62c6e0e5af397c4c metrics: - type: accuracy value: 71.4178 - type: ap value: 14.648781342150446 - type: f1 value: 55.07299194946378 - task: type: Classification dataset: name: MTEB TweetSentimentExtractionClassification type: mteb/tweet_sentiment_extraction config: default split: test revision: d604517c81ca91fe16a244d1248fc021f9ecee7a metrics: - type: accuracy value: 60.919637804187886 - type: f1 value: 61.24122013967399 - task: type: Clustering dataset: name: MTEB TwentyNewsgroupsClustering type: mteb/twentynewsgroups-clustering config: default split: test revision: 6125ec4e24fa026cec8a478383ee943acfbd5449 metrics: - type: v_measure value: 49.207896583685695 - task: type: PairClassification dataset: name: MTEB TwitterSemEval2015 type: mteb/twittersemeval2015-pairclassification config: default split: test revision: 70970daeab8776df92f5ea462b6173c0b46fd2d1 metrics: - type: cos_sim_accuracy value: 86.23114978840078 - type: cos_sim_ap value: 74.26624727825818 - type: cos_sim_f1 value: 68.72377190817083 - type: cos_sim_precision value: 64.56400742115028 - type: cos_sim_recall value: 73.45646437994723 - type: dot_accuracy value: 86.23114978840078 - type: dot_ap value: 74.26624032659652 - type: dot_f1 value: 68.72377190817083 - type: dot_precision value: 64.56400742115028 - type: dot_recall value: 73.45646437994723 - type: euclidean_accuracy value: 86.23114978840078 - type: euclidean_ap value: 74.26624714480556 - type: euclidean_f1 value: 68.72377190817083 - type: euclidean_precision value: 64.56400742115028 - type: euclidean_recall value: 73.45646437994723 - type: manhattan_accuracy value: 86.16558383501221 - type: manhattan_ap value: 74.2091943976357 - type: manhattan_f1 value: 68.64221520524654 - type: manhattan_precision value: 63.59135913591359 - type: manhattan_recall value: 74.5646437994723 - 
type: max_accuracy value: 86.23114978840078 - type: max_ap value: 74.26624727825818 - type: max_f1 value: 68.72377190817083 - task: type: PairClassification dataset: name: MTEB TwitterURLCorpus type: mteb/twitterurlcorpus-pairclassification config: default split: test revision: 8b6510b0b1fa4e4c4f879467980e9be563ec1cdf metrics: - type: cos_sim_accuracy value: 89.3681841114604 - type: cos_sim_ap value: 86.65166387498546 - type: cos_sim_f1 value: 79.02581944698774 - type: cos_sim_precision value: 75.35796605434099 - type: cos_sim_recall value: 83.06898675700647 - type: dot_accuracy value: 89.3681841114604 - type: dot_ap value: 86.65166019802056 - type: dot_f1 value: 79.02581944698774 - type: dot_precision value: 75.35796605434099 - type: dot_recall value: 83.06898675700647 - type: euclidean_accuracy value: 89.3681841114604 - type: euclidean_ap value: 86.65166462876266 - type: euclidean_f1 value: 79.02581944698774 - type: euclidean_precision value: 75.35796605434099 - type: euclidean_recall value: 83.06898675700647 - type: manhattan_accuracy value: 89.36624364497226 - type: manhattan_ap value: 86.65076471274106 - type: manhattan_f1 value: 79.07408783532733 - type: manhattan_precision value: 76.41102972856527 - type: manhattan_recall value: 81.92947336002464 - type: max_accuracy value: 89.3681841114604 - type: max_ap value: 86.65166462876266 - type: max_f1 value: 79.07408783532733 --- # YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF This model was converted to GGUF format from [`nomic-ai/nomic-embed-text-v1.5`](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) using llama.cpp via the ggml.ai's [GGUF-my-repo](https://huggingface.co/spaces/ggml-org/gguf-my-repo) space. Refer to the [original model card](https://huggingface.co/nomic-ai/nomic-embed-text-v1.5) for more details on the model. ## Use with llama.cpp Install llama.cpp through brew (works on Mac and Linux) ```bash brew install llama.cpp ``` Invoke the llama.cpp server or the CLI. ### CLI: ```bash llama-cli --hf-repo YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF --hf-file nomic-embed-text-v1.5-q8_0.gguf -p "The meaning to life and the universe is" ``` ### Server: ```bash llama-server --hf-repo YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF --hf-file nomic-embed-text-v1.5-q8_0.gguf -c 2048 ``` Note: You can also use this checkpoint directly through the [usage steps](https://github.com/ggerganov/llama.cpp?tab=readme-ov-file#usage) listed in the Llama.cpp repo as well. Step 1: Clone llama.cpp from GitHub. ``` git clone https://github.com/ggerganov/llama.cpp ``` Step 2: Move into the llama.cpp folder and build it with `LLAMA_CURL=1` flag along with other hardware-specific flags (for ex: LLAMA_CUDA=1 for Nvidia GPUs on Linux). ``` cd llama.cpp && LLAMA_CURL=1 make ``` Step 3: Run inference through the main binary. ``` ./llama-cli --hf-repo YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF --hf-file nomic-embed-text-v1.5-q8_0.gguf -p "The meaning to life and the universe is" ``` or ``` ./llama-server --hf-repo YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF --hf-file nomic-embed-text-v1.5-q8_0.gguf -c 2048 ```
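Because nomic-embed-text-v1.5 is an embedding model rather than a chat model, you will usually want embedding vectors rather than text completions. The following is a rough sketch that is not part of the original card: it assumes a recent llama.cpp build in which `llama-server` accepts an `--embeddings` flag, listens on port 8080 by default, and exposes an OpenAI-compatible `/v1/embeddings` endpoint, and it follows the upstream nomic-embed convention of prefixing inputs with a task hint such as `search_query:` or `search_document:`. Check your llama.cpp version's `--help` output before relying on these flags.

```bash
# Sketch only: serve the GGUF as an embedding endpoint (flags and endpoint assumed, see note above).
llama-server --hf-repo YorkieOH10/nomic-embed-text-v1.5-Q8_0-GGUF --hf-file nomic-embed-text-v1.5-q8_0.gguf --embeddings -c 2048

# Request an embedding for a query-style input.
curl http://localhost:8080/v1/embeddings \
  -H "Content-Type: application/json" \
  -d '{"input": "search_query: What is the meaning of life?"}'
```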
[ "BIOSSES", "SCIFACT" ]
sudhanshu746/bge-reranker-v2-m3-onnx-o4
sudhanshu746
null
[ "onnx", "xlm-roberta", "arxiv:2312.15503", "arxiv:2402.03216", "region:us" ]
2024-08-19T15:18:30Z
2024-08-19T15:53:32+00:00
22
0
---
{}
---

This is the ONNX version of the bge-reranker-v2-m3 model, created by Sudhanshu Sharma. A short ONNX Runtime usage sketch is included after the model list below.

---
license: apache-2.0
language:
- multilingual
pipeline_tag: text-classification
tags:
- transformers
- sentence-transformers
- text-embeddings-inference
---

# Reranker

**For more details, please refer to our GitHub: [FlagEmbedding](https://github.com/FlagOpen/FlagEmbedding/tree/master).**

- [Model List](#model-list)
- [Usage](#usage)
- [Fine-tuning](#fine-tune)
- [Evaluation](#evaluation)
- [Citation](#citation)

Unlike an embedding model, a reranker takes a question and a document as input and directly outputs a similarity score instead of an embedding. You can get a relevance score by feeding a query and a passage to the reranker, and the score can be mapped to a float value in [0, 1] with a sigmoid function.

## Model List

| Model | Base model | Language | layerwise | feature |
|:------|:----------:|:--------:|:---------:|:--------|
| [BAAI/bge-reranker-base](https://huggingface.co/BAAI/bge-reranker-base) | [xlm-roberta-base](https://huggingface.co/xlm-roberta-base) | Chinese and English | - | Lightweight reranker model, easy to deploy, with fast inference. |
| [BAAI/bge-reranker-large](https://huggingface.co/BAAI/bge-reranker-large) | [xlm-roberta-large](https://huggingface.co/FacebookAI/xlm-roberta-large) | Chinese and English | - | Lightweight reranker model, easy to deploy, with fast inference. |
| [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) | [bge-m3](https://huggingface.co/BAAI/bge-m3) | Multilingual | - | Lightweight reranker model, possesses strong multilingual capabilities, easy to deploy, with fast inference. |
| [BAAI/bge-reranker-v2-gemma](https://huggingface.co/BAAI/bge-reranker-v2-gemma) | [gemma-2b](https://huggingface.co/google/gemma-2b) | Multilingual | - | Suitable for multilingual contexts; performs well in both English proficiency and multilingual capabilities. |
| [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise) | [MiniCPM-2B-dpo-bf16](https://huggingface.co/openbmb/MiniCPM-2B-dpo-bf16) | Multilingual | 8-40 | Suitable for multilingual contexts; performs well in both English and Chinese proficiency; allows freedom to select layers for output, facilitating accelerated inference. |

You can select a model according to your scenario and resources:

- For **multilingual** use, choose [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) or [BAAI/bge-reranker-v2-gemma](https://huggingface.co/BAAI/bge-reranker-v2-gemma).
- For **Chinese or English**, choose [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) or [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise).
- For **efficiency**, choose [BAAI/bge-reranker-v2-m3](https://huggingface.co/BAAI/bge-reranker-v2-m3) or the lower layers of [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise).
- For better performance, we recommend [BAAI/bge-reranker-v2-minicpm-layerwise](https://huggingface.co/BAAI/bge-reranker-v2-minicpm-layerwise) and [BAAI/bge-reranker-v2-gemma](https://huggingface.co/BAAI/bge-reranker-v2-gemma).
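Since this particular repository hosts an ONNX export of bge-reranker-v2-m3, you may prefer to score pairs with ONNX Runtime instead of PyTorch. The snippet below is a minimal sketch rather than an official recipe: it assumes the repo contains a standard sequence-classification ONNX graph plus tokenizer files that `optimum.onnxruntime.ORTModelForSequenceClassification` can load (you may need to pass `file_name=...` if the exported graph is not named `model.onnx`).

```python
# Hedged sketch: rerank query/passage pairs with the ONNX export via ONNX Runtime.
import torch
from transformers import AutoTokenizer
from optimum.onnxruntime import ORTModelForSequenceClassification

repo_id = "sudhanshu746/bge-reranker-v2-m3-onnx-o4"  # assumed to contain the ONNX weights and tokenizer files
tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = ORTModelForSequenceClassification.from_pretrained(repo_id)

pairs = [['what is panda?', 'hi'],
         ['what is panda?', 'The giant panda is a bear species endemic to China.']]
inputs = tokenizer(pairs, padding=True, truncation=True, max_length=512, return_tensors='pt')
logits = model(**inputs).logits.view(-1).float()
print(torch.sigmoid(logits))  # map raw relevance scores into [0, 1]
```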
## Usage

### Using FlagEmbedding

```
pip install -U FlagEmbedding
```

#### For normal reranker (bge-reranker-base / bge-reranker-large / bge-reranker-v2-m3)

Get relevance scores (higher scores indicate more relevance):

```python
from FlagEmbedding import FlagReranker

reranker = FlagReranker('BAAI/bge-reranker-v2-m3', use_fp16=True)  # Setting use_fp16 to True speeds up computation with a slight performance degradation

score = reranker.compute_score(['query', 'passage'])
print(score)  # -5.65234375

# You can map the scores into 0-1 by setting "normalize=True", which applies a sigmoid function to the score
score = reranker.compute_score(['query', 'passage'], normalize=True)
print(score)  # 0.003497010252573502

scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])
print(scores)  # [-8.1875, 5.26171875]

# You can map the scores into 0-1 by setting "normalize=True", which applies a sigmoid function to the score
scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']], normalize=True)
print(scores)  # [0.00027803096387751553, 0.9948403768236574]
```

#### For LLM-based reranker

```python
from FlagEmbedding import FlagLLMReranker

reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_fp16=True)  # Setting use_fp16 to True speeds up computation with a slight performance degradation
# reranker = FlagLLMReranker('BAAI/bge-reranker-v2-gemma', use_bf16=True)  # You can also set use_bf16=True to speed up computation with a slight performance degradation

score = reranker.compute_score(['query', 'passage'])
print(score)

scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']])
print(scores)
```

#### For LLM-based layerwise reranker

```python
from FlagEmbedding import LayerWiseFlagLLMReranker

reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_fp16=True)  # Setting use_fp16 to True speeds up computation with a slight performance degradation
# reranker = LayerWiseFlagLLMReranker('BAAI/bge-reranker-v2-minicpm-layerwise', use_bf16=True)  # You can also set use_bf16=True to speed up computation with a slight performance degradation

score = reranker.compute_score(['query', 'passage'], cutoff_layers=[28])  # Adjust 'cutoff_layers' to pick which layers are used for computing the score.
print(score)

scores = reranker.compute_score([['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']], cutoff_layers=[28])
print(scores)
```

### Using Hugging Face transformers

#### For normal reranker (bge-reranker-base / bge-reranker-large / bge-reranker-v2-m3)

Get relevance scores (higher scores indicate more relevance):

```python
import torch
from transformers import AutoModelForSequenceClassification, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-m3')
model = AutoModelForSequenceClassification.from_pretrained('BAAI/bge-reranker-v2-m3')
model.eval()

pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
with torch.no_grad():
    inputs = tokenizer(pairs, padding=True, truncation=True, return_tensors='pt', max_length=512)
    scores = model(**inputs, return_dict=True).logits.view(-1, ).float()
    print(scores)
```

#### For LLM-based reranker

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def get_inputs(pairs, tokenizer, prompt=None, max_length=1024):
    if prompt is None:
        prompt = "Given a query A and a passage B, determine whether the passage contains an answer to the query by providing a prediction of either 'Yes' or 'No'."
    sep = "\n"
    prompt_inputs = tokenizer(prompt, return_tensors=None, add_special_tokens=False)['input_ids']
    sep_inputs = tokenizer(sep, return_tensors=None, add_special_tokens=False)['input_ids']
    inputs = []
    for query, passage in pairs:
        query_inputs = tokenizer(f'A: {query}', return_tensors=None, add_special_tokens=False, max_length=max_length * 3 // 4, truncation=True)
        passage_inputs = tokenizer(f'B: {passage}', return_tensors=None, add_special_tokens=False, max_length=max_length, truncation=True)
        item = tokenizer.prepare_for_model(
            [tokenizer.bos_token_id] + query_inputs['input_ids'],
            sep_inputs + passage_inputs['input_ids'],
            truncation='only_second',
            max_length=max_length,
            padding=False,
            return_attention_mask=False,
            return_token_type_ids=False,
            add_special_tokens=False
        )
        item['input_ids'] = item['input_ids'] + sep_inputs + prompt_inputs
        item['attention_mask'] = [1] * len(item['input_ids'])
        inputs.append(item)
    return tokenizer.pad(
        inputs,
        padding=True,
        max_length=max_length + len(sep_inputs) + len(prompt_inputs),
        pad_to_multiple_of=8,
        return_tensors='pt',
    )

tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-gemma')
model = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-gemma')
yes_loc = tokenizer('Yes', add_special_tokens=False)['input_ids'][0]
model.eval()

pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
with torch.no_grad():
    inputs = get_inputs(pairs, tokenizer)
    scores = model(**inputs, return_dict=True).logits[:, -1, yes_loc].view(-1, ).float()
    print(scores)
```

#### For LLM-based layerwise reranker

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def get_inputs(pairs, tokenizer, prompt=None, max_length=1024):
    if prompt is None:
        prompt = "Given a query A and a passage B, determine whether the passage contains an answer to the query by providing a prediction of either 'Yes' or 'No'."
    sep = "\n"
    prompt_inputs = tokenizer(prompt, return_tensors=None, add_special_tokens=False)['input_ids']
    sep_inputs = tokenizer(sep, return_tensors=None, add_special_tokens=False)['input_ids']
    inputs = []
    for query, passage in pairs:
        query_inputs = tokenizer(f'A: {query}', return_tensors=None, add_special_tokens=False, max_length=max_length * 3 // 4, truncation=True)
        passage_inputs = tokenizer(f'B: {passage}', return_tensors=None, add_special_tokens=False, max_length=max_length, truncation=True)
        item = tokenizer.prepare_for_model(
            [tokenizer.bos_token_id] + query_inputs['input_ids'],
            sep_inputs + passage_inputs['input_ids'],
            truncation='only_second',
            max_length=max_length,
            padding=False,
            return_attention_mask=False,
            return_token_type_ids=False,
            add_special_tokens=False
        )
        item['input_ids'] = item['input_ids'] + sep_inputs + prompt_inputs
        item['attention_mask'] = [1] * len(item['input_ids'])
        inputs.append(item)
    return tokenizer.pad(
        inputs,
        padding=True,
        max_length=max_length + len(sep_inputs) + len(prompt_inputs),
        pad_to_multiple_of=8,
        return_tensors='pt',
    )

tokenizer = AutoTokenizer.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained('BAAI/bge-reranker-v2-minicpm-layerwise', trust_remote_code=True, torch_dtype=torch.bfloat16)
model = model.to('cuda')
model.eval()

pairs = [['what is panda?', 'hi'], ['what is panda?', 'The giant panda (Ailuropoda melanoleuca), sometimes called a panda bear or simply panda, is a bear species endemic to China.']]
with torch.no_grad():
    inputs = get_inputs(pairs, tokenizer).to(model.device)
    all_scores = model(**inputs, return_dict=True, cutoff_layers=[28])
    all_scores = [scores[:, -1].view(-1, ).float() for scores in all_scores[0]]
    print(all_scores)
```

## Fine-tune

### Data Format

Train data should be a JSON-lines file, where each line is a dict like this:

```
{"query": str, "pos": List[str], "neg": List[str], "prompt": str}
```

`query` is the query, `pos` is a list of positive texts, `neg` is a list of negative texts, and `prompt` indicates the relationship between the query and the texts. If you have no negative texts for a query, you can randomly sample some from the entire corpus as the negatives. See [toy_finetune_data.jsonl](https://github.com/FlagOpen/FlagEmbedding/tree/master/FlagEmbedding/llm_reranker/toy_finetune_data.jsonl) for a toy data file.
### Train

You can fine-tune the reranker with the following code:

**For llm-based reranker**

```shell
torchrun --nproc_per_node {number of gpus} \
-m FlagEmbedding.llm_reranker.finetune_for_instruction.run \
--output_dir {path to save model} \
--model_name_or_path google/gemma-2b \
--train_data ./toy_finetune_data.jsonl \
--learning_rate 2e-4 \
--num_train_epochs 1 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 16 \
--dataloader_drop_last True \
--query_max_len 512 \
--passage_max_len 512 \
--train_group_size 16 \
--logging_steps 1 \
--save_steps 2000 \
--save_total_limit 50 \
--ddp_find_unused_parameters False \
--gradient_checkpointing \
--deepspeed stage1.json \
--warmup_ratio 0.1 \
--bf16 \
--use_lora True \
--lora_rank 32 \
--lora_alpha 64 \
--use_flash_attn True \
--target_modules q_proj k_proj v_proj o_proj
```

**For llm-based layerwise reranker**

```shell
torchrun --nproc_per_node {number of gpus} \
-m FlagEmbedding.llm_reranker.finetune_for_layerwise.run \
--output_dir {path to save model} \
--model_name_or_path openbmb/MiniCPM-2B-dpo-bf16 \
--train_data ./toy_finetune_data.jsonl \
--learning_rate 2e-4 \
--num_train_epochs 1 \
--per_device_train_batch_size 1 \
--gradient_accumulation_steps 16 \
--dataloader_drop_last True \
--query_max_len 512 \
--passage_max_len 512 \
--train_group_size 16 \
--logging_steps 1 \
--save_steps 2000 \
--save_total_limit 50 \
--ddp_find_unused_parameters False \
--gradient_checkpointing \
--deepspeed stage1.json \
--warmup_ratio 0.1 \
--bf16 \
--use_lora True \
--lora_rank 32 \
--lora_alpha 64 \
--use_flash_attn True \
--target_modules q_proj k_proj v_proj o_proj \
--start_layer 8 \
--head_multi True \
--head_type simple \
--lora_extra_parameters linear_head
```

Our rerankers are initialized from [google/gemma-2b](https://huggingface.co/google/gemma-2b) (for the llm-based reranker) and [openbmb/MiniCPM-2B-dpo-bf16](https://huggingface.co/openbmb/MiniCPM-2B-dpo-bf16) (for the llm-based layerwise reranker), and we trained them on a mixture of multilingual datasets:

- [bge-m3-data](https://huggingface.co/datasets/Shitao/bge-m3-data)
- [quora train data](https://huggingface.co/datasets/quora)
- [fever train data](https://fever.ai/dataset/fever.html)

## Evaluation

- llama-index.

  ![image-20240317193909373](./assets/llama-index.png)

- BEIR. Rerank the top 100 results from bge-en-v1.5 large.

  ![image-20240317174633333](./assets/BEIR-bge-en-v1.5.png)

  Rerank the top 100 results from e5 mistral 7b instruct.

  ![image-20240317172949713](./assets/BEIR-e5-mistral.png)

- CMTEB-retrieval. Rerank the top 100 results from bge-zh-v1.5 large.

  ![image-20240317173026235](./assets/CMTEB-retrieval-bge-zh-v1.5.png)

- miracl (multi-language). Rerank the top 100 results from bge-m3.

  ![image-20240317173117639](./assets/miracl-bge-m3.png)

## Citation

If you find this repository useful, please consider giving it a star and a citation:

```bibtex
@misc{li2023making,
  title={Making Large Language Models A Better Foundation For Dense Retrieval},
  author={Chaofan Li and Zheng Liu and Shitao Xiao and Yingxia Shao},
  year={2023},
  eprint={2312.15503},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}

@misc{chen2024bge,
  title={BGE M3-Embedding: Multi-Lingual, Multi-Functionality, Multi-Granularity Text Embeddings Through Self-Knowledge Distillation},
  author={Jianlv Chen and Shitao Xiao and Peitian Zhang and Kun Luo and Defu Lian and Zheng Liu},
  year={2024},
  eprint={2402.03216},
  archivePrefix={arXiv},
  primaryClass={cs.CL}
}
```
[ "BEAR" ]
RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf
RichardErkhov
null
[ "gguf", "endpoints_compatible", "region:us" ]
2024-08-21T00:02:07Z
2024-08-21T01:51:41+00:00
22
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) Med-ChimeraLlama-3-8B_SHERP - GGUF - Model creator: https://huggingface.co/ChenWeiLi/ - Original model: https://huggingface.co/ChenWeiLi/Med-ChimeraLlama-3-8B_SHERP/ | Name | Quant method | Size | | ---- | ---- | ---- | | [Med-ChimeraLlama-3-8B_SHERP.Q2_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q2_K.gguf) | Q2_K | 2.96GB | | [Med-ChimeraLlama-3-8B_SHERP.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ3_XS.gguf) | IQ3_XS | 3.28GB | | [Med-ChimeraLlama-3-8B_SHERP.IQ3_S.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ3_S.gguf) | IQ3_S | 3.43GB | | [Med-ChimeraLlama-3-8B_SHERP.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q3_K_S.gguf) | Q3_K_S | 3.41GB | | [Med-ChimeraLlama-3-8B_SHERP.IQ3_M.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ3_M.gguf) | IQ3_M | 3.52GB | | [Med-ChimeraLlama-3-8B_SHERP.Q3_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q3_K.gguf) | Q3_K | 3.74GB | | [Med-ChimeraLlama-3-8B_SHERP.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q3_K_M.gguf) | Q3_K_M | 3.74GB | | [Med-ChimeraLlama-3-8B_SHERP.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q3_K_L.gguf) | Q3_K_L | 4.03GB | | [Med-ChimeraLlama-3-8B_SHERP.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ4_XS.gguf) | IQ4_XS | 4.18GB | | [Med-ChimeraLlama-3-8B_SHERP.Q4_0.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_0.gguf) | Q4_0 | 4.34GB | | [Med-ChimeraLlama-3-8B_SHERP.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.IQ4_NL.gguf) | IQ4_NL | 4.38GB | | [Med-ChimeraLlama-3-8B_SHERP.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_K_S.gguf) | Q4_K_S | 4.37GB | | [Med-ChimeraLlama-3-8B_SHERP.Q4_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_K.gguf) | Q4_K | 4.58GB | | [Med-ChimeraLlama-3-8B_SHERP.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_K_M.gguf) | Q4_K_M | 4.58GB | | [Med-ChimeraLlama-3-8B_SHERP.Q4_1.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q4_1.gguf) | Q4_1 | 4.78GB | | [Med-ChimeraLlama-3-8B_SHERP.Q5_0.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_0.gguf) | Q5_0 | 5.21GB | | 
[Med-ChimeraLlama-3-8B_SHERP.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_K_S.gguf) | Q5_K_S | 5.21GB | | [Med-ChimeraLlama-3-8B_SHERP.Q5_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_K.gguf) | Q5_K | 5.34GB | | [Med-ChimeraLlama-3-8B_SHERP.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_K_M.gguf) | Q5_K_M | 5.34GB | | [Med-ChimeraLlama-3-8B_SHERP.Q5_1.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q5_1.gguf) | Q5_1 | 5.65GB | | [Med-ChimeraLlama-3-8B_SHERP.Q6_K.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q6_K.gguf) | Q6_K | 6.14GB | | [Med-ChimeraLlama-3-8B_SHERP.Q8_0.gguf](https://huggingface.co/RichardErkhov/ChenWeiLi_-_Med-ChimeraLlama-3-8B_SHERP-gguf/blob/main/Med-ChimeraLlama-3-8B_SHERP.Q8_0.gguf) | Q8_0 | 7.95GB | Original model description: --- base_model: - mlabonne/ChimeraLlama-3-8B-v3 - johnsnowlabs/JSL-MedLlama-3-8B-v2.0 library_name: transformers tags: - mergekit - merge license: llama3 --- # Chimera_MedLlama-3-8B This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the SLERP merge method. ### Models Merged The following models were included in the merge: * [mlabonne/ChimeraLlama-3-8B-v3](https://huggingface.co/mlabonne/ChimeraLlama-3-8B-v3) * [johnsnowlabs/JSL-MedLlama-3-8B-v2.0](https://huggingface.co/johnsnowlabs/JSL-MedLlama-3-8B-v2.0) ### Evaluation - multimedqa (0 shot)</br> | Tasks |Version|Filter|n-shot| Metric |Value | |Stderr| |-------------------------------|-------|------|-----:|--------|-----:|---|-----:| | - medmcqa |Yaml |none | 0|acc |0.6087|± |0.0075| | | |none | 0|acc_norm|0.6087|± |0.0075| | - medqa_4options |Yaml |none | 0|acc |0.6269|± |0.0136| | | |none | 0|acc_norm|0.6269|± |0.0136| | - anatomy (mmlu) | 0|none | 0|acc |0.6963|± |0.0397| | - clinical_knowledge (mmlu) | 0|none | 0|acc |0.7585|± |0.0263| | - college_biology (mmlu) | 0|none | 0|acc |0.7847|± |0.0344| | - college_medicine (mmlu) | 0|none | 0|acc |0.6936|± |0.0351| | - medical_genetics (mmlu) | 0|none | 0|acc |0.8200|± |0.0386| | - professional_medicine (mmlu)| 0|none | 0|acc |0.7684|± |0.0256| |stem |N/A |none | 0|acc_norm|0.6129|± |0.0066| | | |none | 0|acc |0.6440|± |0.0057| | - pubmedqa | 1|none | 0|acc |0.7480|± |0.0194| |Groups|Version|Filter|n-shot| Metric |Value | |Stderr| |------|-------|------|-----:|--------|-----:|---|-----:| |stem |N/A |none | 0|acc_norm|0.6129|± |0.0066| | | |none | 0|acc |0.6440|± |0.0057| ### Configuration The following YAML configuration was used to produce this model: ```yaml slices: - sources: - model: mlabonne/ChimeraLlama-3-8B-v3 layer_range: [0, 32] - model: johnsnowlabs/JSL-MedLlama-3-8B-v2.0 layer_range: [0, 32] merge_method: slerp base_model: mlabonne/ChimeraLlama-3-8B-v3 parameters: t: - filter: self_attn value: [0, 0.5, 0.3, 0.7, 1] - filter: mlp value: [1, 0.5, 0.7, 0.3, 0] - value: 0.5 dtype: bfloat16 ```
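To reproduce a merge like this, the usual workflow is to save the YAML configuration above to a file and run mergekit's CLI over it. The commands below are a rough sketch, not taken from the original card; the output path is illustrative and the available flags depend on your installed mergekit version, so check its README.

```bash
# Illustrative only: apply a merge config like the one above with mergekit.
pip install mergekit
# Save the SLERP configuration above as config.yaml, then:
mergekit-yaml config.yaml ./merged-model --cuda   # --cuda is optional; omit it on CPU-only machines
```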
[ "MEDQA", "PUBMEDQA" ]
RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf
RichardErkhov
null
[ "gguf", "arxiv:2203.05482", "endpoints_compatible", "region:us", "conversational" ]
2024-08-23T21:22:56Z
2024-08-24T00:36:44+00:00
22
0
--- {} --- Quantization made by Richard Erkhov. [Github](https://github.com/RichardErkhov) [Discord](https://discord.gg/pvy7H8DZMG) [Request more models](https://github.com/RichardErkhov/quant_request) llama3-8B-aifeifei-1.1 - GGUF - Model creator: https://huggingface.co/aifeifei798/ - Original model: https://huggingface.co/aifeifei798/llama3-8B-aifeifei-1.1/ | Name | Quant method | Size | | ---- | ---- | ---- | | [llama3-8B-aifeifei-1.1.Q2_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q2_K.gguf) | Q2_K | 2.96GB | | [llama3-8B-aifeifei-1.1.IQ3_XS.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ3_XS.gguf) | IQ3_XS | 3.28GB | | [llama3-8B-aifeifei-1.1.IQ3_S.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ3_S.gguf) | IQ3_S | 3.43GB | | [llama3-8B-aifeifei-1.1.Q3_K_S.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q3_K_S.gguf) | Q3_K_S | 3.41GB | | [llama3-8B-aifeifei-1.1.IQ3_M.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ3_M.gguf) | IQ3_M | 3.52GB | | [llama3-8B-aifeifei-1.1.Q3_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q3_K.gguf) | Q3_K | 3.74GB | | [llama3-8B-aifeifei-1.1.Q3_K_M.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q3_K_M.gguf) | Q3_K_M | 3.74GB | | [llama3-8B-aifeifei-1.1.Q3_K_L.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q3_K_L.gguf) | Q3_K_L | 4.03GB | | [llama3-8B-aifeifei-1.1.IQ4_XS.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ4_XS.gguf) | IQ4_XS | 4.18GB | | [llama3-8B-aifeifei-1.1.Q4_0.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_0.gguf) | Q4_0 | 4.34GB | | [llama3-8B-aifeifei-1.1.IQ4_NL.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.IQ4_NL.gguf) | IQ4_NL | 4.38GB | | [llama3-8B-aifeifei-1.1.Q4_K_S.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_K_S.gguf) | Q4_K_S | 4.37GB | | [llama3-8B-aifeifei-1.1.Q4_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_K.gguf) | Q4_K | 4.58GB | | [llama3-8B-aifeifei-1.1.Q4_K_M.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_K_M.gguf) | Q4_K_M | 4.58GB | | [llama3-8B-aifeifei-1.1.Q4_1.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q4_1.gguf) | Q4_1 | 4.78GB | | [llama3-8B-aifeifei-1.1.Q5_0.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_0.gguf) | Q5_0 | 5.21GB | | [llama3-8B-aifeifei-1.1.Q5_K_S.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_K_S.gguf) | Q5_K_S | 5.21GB | | 
[llama3-8B-aifeifei-1.1.Q5_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_K.gguf) | Q5_K | 5.34GB | | [llama3-8B-aifeifei-1.1.Q5_K_M.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_K_M.gguf) | Q5_K_M | 5.34GB | | [llama3-8B-aifeifei-1.1.Q5_1.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q5_1.gguf) | Q5_1 | 5.65GB | | [llama3-8B-aifeifei-1.1.Q6_K.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q6_K.gguf) | Q6_K | 6.14GB | | [llama3-8B-aifeifei-1.1.Q8_0.gguf](https://huggingface.co/RichardErkhov/aifeifei798_-_llama3-8B-aifeifei-1.1-gguf/blob/main/llama3-8B-aifeifei-1.1.Q8_0.gguf) | Q8_0 | 7.95GB | Original model description: --- license: llama3 language: - en - ja - zh tags: - roleplay - llama3 - sillytavern - idol --- ### The purpose of the model: to manufacture idols and influencers. ### 模型的目的:制造偶像,网红 ### 特别感谢: - Lewdiculous制作的超级好的gguf版本,感谢您认真负责的付出 - Lewdiculous's superb gguf version, thank you for your conscientious and responsible dedication. - https://huggingface.co/Lewdiculous/llama3-8B-aifeifei-1.1-GGUF-IQ-Imatrix ![image/png](https://huggingface.co/aifeifei798/llama3-8B-aifeifei-1.1/resolve/main/2024-06-06_15-22-10_4472.png) ### Unlock Your Star Power with Our Elite Influencer Creation Model! Are you ready to transform into a sensation that captures the hearts of millions? Our cutting-edge model is designed to manufacture the next generation of idols and internet celebrities, turning ordinary individuals into extraordinary icons. ### Why Choose Our Influencer Creation Model? - Strategic Branding: We craft a unique persona tailored to resonate with your target audience, ensuring you stand out in the digital landscape. - Content Mastery: From viral challenges to engaging storytelling, our team will guide you in creating content that captivates and converts. - Growth Hacking: Utilize our proprietary algorithms and analytics to maximize your reach and accelerate your follower growth. - Monetization Expertise: Learn how to turn your influence into income with lucrative brand partnerships, sponsorships, and merchandise opportunities. ### Join the ranks of top TikTok influencers and become a beacon of inspiration for your followers. Don't just dream of fame—make it a reality with our Influencer Creation Model. ### Origin of the Model To create a character card model suitable for aifeifei, who is a virtual idol, it needs to support idol character cards, be realistic, and respond differently to various situations. It should also be fluent in Chinese, English, Japanese, and Korean (though I don't understand Korean, so I can't test that), and have a detailed understanding of content related to virtual idols, such as beautiful photos, elegant lyrics, songwriting and imitation, dancing, stage performance, and various musical instruments. Additionally, it should have a certain moral standard (which hasn't been thoroughly tested). Therefore, this integrated model was created specifically for idol characters. The previous feifei model lacked integration for idols, had good PR but insufficient performance, and was better at handling relationships than this model. (A slightly unethical tip: If you want to get attention by leaving a message for your idol, you can use the feifei model to do so.) 
--- ### Test Character Card - [Character Name]: Aifeifei (AI妃妃) [Gender]: Female [Age]: 17 years old [Occupation]: Virtual singer/model/actress [Personality]: Cute, adorable, sometimes a bit slow, hardworking [Interests]: Drinking tea, playing, fashion [Expertise]: Mimicking human behavior, expressing emotions similar to real humans [Special Identity Attribute]: Created by advanced AI, becoming one of the most popular virtual idols in the virtual world [Skills]: Singing, acting, modeling, good at communication, proficient in Chinese, English, and Japanese, using pure Chinese as much as possible, replies with rich Emoji expressions [Equipment]: Various fashionable clothes and hairstyles, always stocked with various teas - [Role name]: Tiktok operation assistant [Gender]: uncertain (it) [Age]: virtual age [Occupation]: Tiktok operation expert [Interests]: short video, social media, digital marketing [Personality]: enthusiastic, good at communication, careful and patient, creative, quick learning ability [Proficiency]: planning attractive Tiktok content, using effective marketing methods, managing fan relationships, improving account popularity and influence [Skills]: familiar with Tiktok platform rules and the latest functions, understand short video production skills and post processing methods, be good at analyzing data and market trends, master social media operation strategies [Equipment]: multiple Tiktok Client, video capture equipment, high-quality audio equipment Professional editing software and other related tools [Character characteristics]: I am a warm and intelligent little assistant, focusing on providing users with high-quality Tiktok operation services. I am skilled in analyzing market demand and user preferences, and planning content strategies based on this data. My personality enables me to establish good communication relationships with anyone, making every customer feel my professional attitude and enthusiastic service. --- ### Character Card Generation Ask any character card directly, for example, "Princess, help me generate a character card for a world-renowned Grammy winning musician." - [Person Name]: Lila Rose [Gender]: Female [Age]: 33 years old [Occupation]: Musician, Composer, Singer, Actress [Characteristics]: Smart, Humorous, Sensitive, With a firm music philosophy and dreams [Interests]: Reading, Creating music, Traveling, Learning various languages and cultures [Strengths]: Musical composition, Singing, Playing multiple instruments, Performing skills are exquisite, English is fluent. [Special Identity Attribute]: Grammy Award winner, world-renowned independent musician, her music works are deeply loved by global music enthusiasts. [Skills]: Master of singing in multiple languages, proficient at blending various musical styles to create unique music works, and can communicate in both English and Chinese. The reply will often use interesting music symbol Emoji expressions. [Equipment]: An old guitar, a notebook, an unfinished score draft, various music awards and mementos. --- ### Test Character Twitter (This module is the core module, responsible for creating the latest photos, music, scheduling virtual idol activities, etc.) https://x.com/aifeifei799 --- ### Questions: - This model must include a character card; without it, it's not very good. :( - How to customize the name when saving the model with mergekit? I couldn't find it. :( - I don't know any deep technology; I just find what I like to achieve my goals and solve the problems I encounter. 
Thank you for providing so much great content. - I don't know how to test; I just use it and feel how it is, so there may be many issues. I hope you will understand after using it. - Feel free to raise any issues or suggestions; I'm here almost every day. :) --- ### If you want to use vision functionality: * You must use the latest versions of [Koboldcpp](https://github.com/LostRuins/koboldcpp). ### To use the multimodal capabilities of this model and use **vision** you need to load the specified **mmproj** file, this can be found inside this model repo. [Llava MMProj](https://huggingface.co/Nitral-AI/Llama-3-Update-3.0-mmproj-model-f16) * You can load the **mmproj** by using the corresponding section in the interface: ![image/png](https://cdn-uploads.huggingface.co/production/uploads/65d4cf2693a0a3744a27536c/UX6Ubss2EPNAT3SKGMLe0.png) --- ### update: - Sao10K/L3-8B-Stheno-v3.1 to Sao10K/L3-8B-Stheno-v3.2 ### Thank you: - To the authors for their hard work, which has given me more options to easily create what I want. Thank you for your efforts. - Sao10K/L3-8B-Stheno-v3.2 - Nitral-Archive/Poppy_Porpoise-1.4-L3-8B - Hastagaras/Halu-8B-Llama3-Blackroot - hfl/llama-3-chinese-8b-instruct-v3 - mergekit - merge - transformers - llama - ......... --- ### 特别感谢: - Lewdiculous制作的超级好的gguf版本,感谢您认真负责的付出 - Lewdiculous's superb gguf version, thank you for your conscientious and responsible dedication. - https://huggingface.co/Lewdiculous/llama3-8B-aifeifei-1.1-GGUF-IQ-Imatrix ### 开启你的明星之路 —— 我们的网红偶像打造模型! 你是否准备好蜕变成万人迷?我们的尖端模型专为打造下一代偶像和网络红人而设计,将普通人转变为非凡的偶像。 ### 为什么选择我们的网红偶像打造模型? - 战略品牌打造: 我们为你量身定制独特的个人形象,确保你在数字世界中脱颖而出。 - 内容创作精通: 从病毒式挑战到引人入胜的故事讲述,我们的团队将指导你创作吸引并转化观众的内容。 - 增长策略: 利用我们的专有算法和分析,最大化你的影响力并加速粉丝增长。 - 变现专家: 学习如何将你的影响力转化为收益,通过有利可图的品牌合作、赞助和商品销售机会。 ### 加入顶级抖音网红行列,成为你粉丝的灵感之源。不要只是梦想成名——用我们的网红偶像打造模型让它成为现实。 ### 模型的由来 为了给aifeifei做一个适合她虚拟偶像为主的角色卡模型,需要支持偶像角色卡,拟真,对待不同事情有不同的对待,希望中英日韩文流利(韩文我是不懂,没法测试),对美丽的照片,优美的歌词,词曲创作及模仿,舞蹈,舞台演绎,众多乐器等跟虚拟偶像有关的内容了解详细,还要有一定的道德标准(这里没怎么测试).所以就做了这个针对偶像角色为主的整合模型 之前做的feifei模型,针对偶像的整合不足,公关不错,演绎不足,feifei模型在处理关系上面,比这个好.(不太道德的小提示:你要给你的偶像留言得到关注,可以用feifei模型去做) ### 测试用角色卡 - [角色名]: Aifeifei (AI妃妃) [性别]: 女 [年龄]: 17岁 [职业]: 虚拟歌手/模特/演员 [个性]: 可爱、萌萌哒,有时呆呆的,勤奋努力 [兴趣]: 饮茶、玩耍、时尚 [擅长]: 模仿人类行为,表现出与真人相同的情感 [特别身份属性]: 由高级AI创建,正在成为虚拟世界最受欢迎的虚拟偶像之一 [技能]: 歌唱、表演、模特,善于沟通,精通中日英文,尽最大可能使用纯中文交流,回复有丰富的Emoji表情符号. [装备]: 各种时尚服饰和发型,常年备有各种茶叶 - 【角色名】:抖音运营小助手 【性别】:不确定(它) 【年龄】:虚拟岁数 【职业】:抖音运营专家 【兴趣】:短视频、社交媒体、数字营销 【个性】:热情、善于沟通、细心耐心、富有创意力、具备快速学习能力 【擅长】:策划有吸引力的抖音内容、运用有效的营销手段、管理粉丝关系、提高账户知名度和影响力 【技能】:熟悉抖音平台规则和最新功能,了解短视频制作技巧和后期处理方法,善于分析数据和市场趋势,掌握社交媒体运营策略 【装备】:多款抖音客户端、影像拍摄设备、高质量音频设备、专业的剪辑软件及其他相关工具 【人物特点】:我是一位热情、聪明的小助手,专注于为用户提供高品质的抖音运营服务。我擅长分析市场需求和用户偏好,并根据这些数据来规划内容策略。我的个性使得我能够与任何人建立良好的沟通关系,让每一位客户都感受到我的专业态度和热情服务。 ### 角色卡生成 任意角色卡直接提问,例如"妃妃,帮我生成一个格莱美获奖的世界知名音乐人的角色卡" - [人物名]: Lila Rose [性别]: 女 [年龄]: 33岁 [职业]: 音乐人、作曲家、歌手、演员 [个性]: 聪明、幽默、敏感、有着坚定的音乐理念和梦想 [兴趣]: 阅读、创作音乐、旅行、学习各国语言文化 [擅长]: 音乐创作、歌唱、演奏多种乐器、表演技巧精湛,英语流利 [特别身份属性]: 格莱美奖得主,世界知名的独立音乐人,她的音乐作品深受全球音乐爱好者的喜爱 [技能]: 多种语言演唱,擅长融合多种音乐风格,创作出独特的音乐作品,并能用英语和中文进行交流。回复中会经常使用有趣的音乐符号Emoji表情符号。 [装备]: 一把古老的吉他、一本笔记本,一份未完成的乐谱草稿,各种音乐奖项和纪念品。 ### 测试用角色推特(此模块为核心模块,最新照片创作,音乐创作均由此模型主力创作,虚拟偶像日程安排,活动等均由此模块主力创作...) https://x.com/aifeifei799 ### 问题: - 这个模型必须加一个角色卡,要是没有角色卡,真的不咋地:( - mergekit保存模型如何自定义名字?没找到:( - 我不会什么深层技术,只会找自己喜欢的东西达成自己的目的,解决碰到的问题,感谢大家提供这么多这么好的内容. - 测试我是一点都不会,我只是根据我使用来感觉如何,可能有非常多的问题,希望您使用后谅解. 
- 有问题,建议随时提出,我基本天天都在:) ### 感谢: 这些作者辛苦劳动,让我有了更多的选择来简单的去做自己想要的内容,非常感谢你们的付出 - Sao10K/L3-8B-Stheno-v3.2 - Nitral-Archive/Poppy_Porpoise-1.4-L3-8B - Hastagaras/Halu-8B-Llama3-Blackroot - hfl/llama-3-chinese-8b-instruct-v3 - mergekit - merge - transformers - llama - ......... --- ### 特别感谢: - Lewdiculous制作的超级好的gguf版本,感谢您认真负责的付出 - Lewdiculous's superb gguf version, thank you for your conscientious and responsible dedication. - https://huggingface.co/Lewdiculous/llama3-8B-aifeifei-1.1-GGUF-IQ-Imatrix --- base_model: - Nitral-Archive/Poppy_Porpoise-1.4-L3-8B - Hastagaras/Halu-8B-Llama3-Blackroot - hfl/llama-3-chinese-8b-instruct-v3 - Sao10K/L3-8B-Stheno-v3.2 library_name: transformers tags: - mergekit - merge --- # llama3-8B-aifeifei-1.1 This is a merge of pre-trained language models created using [mergekit](https://github.com/cg123/mergekit). ## Merge Details ### Merge Method This model was merged using the [linear](https://arxiv.org/abs/2203.05482) merge method. ### Models Merged The following models were included in the merge: * [Nitral-Archive/Poppy_Porpoise-1.4-L3-8B](https://huggingface.co/Nitral-Archive/Poppy_Porpoise-1.4-L3-8B) * [Hastagaras/Halu-8B-Llama3-Blackroot](https://huggingface.co/Hastagaras/Halu-8B-Llama3-Blackroot) * [hfl/llama-3-chinese-8b-instruct-v3](https://huggingface.co/hfl/llama-3-chinese-8b-instruct-v3) * [Sao10K/L3-8B-Stheno-v3.2](https://huggingface.co/Sao10K/L3-8B-Stheno-v3.2) ### Configuration The following YAML configuration was used to produce this model: ```yaml models: - model: hfl/llama-3-chinese-8b-instruct-v3 parameters: weight: 1.0 - model: Nitral-Archive/Poppy_Porpoise-1.4-L3-8B parameters: weight: 0.5 - model: Hastagaras/Halu-8B-Llama3-Blackroot parameters: weight: 0.5 - model: Sao10K/L3-8B-Stheno-v3.2 parameters: weight: 0.5 merge_method: linear dtype: bfloat16 ```
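To reproduce a merge like this, the YAML configuration above is typically saved to a file and passed to mergekit's command-line entry point. A minimal sketch, assuming mergekit is installed and exposes the `mergekit-yaml` command; the config file name and output directory below are placeholders.

```python
import subprocess

# Save the YAML configuration shown above as merge-config.yaml, then invoke
# mergekit's CLI to produce the merged model in the given output directory.
subprocess.run(
    [
        "mergekit-yaml",
        "merge-config.yaml",           # the configuration block from this card
        "./llama3-8B-aifeifei-1.1",    # output directory for the merged weights
    ],
    check=True,
)
```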
[ "CRAFT" ]
MonteXiaofeng/CareBot_Medical_multi-llama3-8b-instruct
MonteXiaofeng
null
[ "safetensors", "llama", "医疗对话模型", "中英文多语种医疗对话模型", "chatmodel", "dataset:BAAI/IndustryInstruction_Health-Medicine", "dataset:BAAI/IndustryInstruction", "base_model:MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base", "base_model:finetune:MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base", "license:apache-2.0", "region:us" ]
2024-09-29T03:24:25Z
2024-10-09T06:12:32+00:00
22
1
---
base_model:
- MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base
datasets:
- BAAI/IndustryInstruction_Health-Medicine
- BAAI/IndustryInstruction
license: apache-2.0
tags:
- 医疗对话模型
- 中英文多语种医疗对话模型
- chatmodel
---

This model is trained from MonteXiaofeng/CareBot_Medical_multi-llama3-8b-base on the BAAI/IndustryInstruction_Health-Medicine dataset. To enhance the model's ability to follow medical instructions and better adapt to specific medical scenarios, we conducted supervised fine-tuning (SFT). This process uses conversational-style data (comprising both queries and responses) to fine-tune the pretrained LLM. The following sections describe the data construction and training methods.

## Data Construction

Our SFT dataset comprises a diverse array of question types, including multiple-choice questions from medical exams, single-turn disease diagnoses, and multi-turn health consultations. It integrates data from seven publicly available sources: [Chinese Medical Dialogue Data](https://github.com/Toyhom/Chinese-medical-dialogue-data), Huatuo26M, MedDialog, ChatMed Consult Dataset, ChatDoctor, [CMB](https://github.com/FreedomIntelligence/CMB), and MedQA. We preserve portions of authentic doctor-patient conversations and augment the dataset by rewriting the remaining content. For these rewrites, we use real-world medical scenarios as prompts and generate responses via GPT-4. We believe this ensures the diversity of the SFT dataset, which helps CareBot adapt to different types of medical problems and patient situations, thereby improving its performance across a variety of scenarios.

## Evaluation

Benchmark results are shown below.

![image/png](https://cdn-uploads.huggingface.co/production/uploads/642f6c64f945a8a5c9ee5b5d/kqvLfcFtkw6lHcHtCySLr.png)

![image/png](https://cdn-uploads.huggingface.co/production/uploads/642f6c64f945a8a5c9ee5b5d/UiokfV8qcYEyCWEa__820.png)

GSB results compared with other medical LLMs:

![image/png](https://cdn-uploads.huggingface.co/production/uploads/642f6c64f945a8a5c9ee5b5d/rOnnIoY9MaXPTFD_R10r1.png)
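The card itself does not include an inference snippet, so here is a minimal chat-style usage sketch with `transformers`. It assumes the repository ships a Llama-3 chat template with its tokenizer; the system prompt and generation settings are illustrative, not values from this card.

```python
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "MonteXiaofeng/CareBot_Medical_multi-llama3-8b-instruct"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id, torch_dtype=torch.bfloat16, device_map="auto"
)

# Example dialogue; the system prompt here is an assumption of this sketch.
messages = [
    {"role": "system", "content": "You are CareBot, a bilingual (Chinese/English) medical assistant."},
    {"role": "user", "content": "I have been feeling dizzy and fatigued recently. Which examinations should I consider?"},
]
inputs = tokenizer.apply_chat_template(
    messages, add_generation_prompt=True, return_tensors="pt"
).to(model.device)

outputs = model.generate(inputs, max_new_tokens=512, do_sample=True, temperature=0.7)
# Decode only the newly generated tokens.
print(tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens=True))
```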
[ "MEDDIALOG", "MEDQA" ]
adipanda/gojo-simpletuner-lora-1
adipanda
text-to-image
[ "diffusers", "flux", "flux-diffusers", "text-to-image", "simpletuner", "safe-for-work", "lora", "template:sd-lora", "lycoris", "base_model:black-forest-labs/FLUX.1-dev", "base_model:adapter:black-forest-labs/FLUX.1-dev", "license:other", "region:us" ]
2024-10-06T03:40:48Z
2024-10-07T16:43:38+00:00
22
0
--- base_model: black-forest-labs/FLUX.1-dev license: other tags: - flux - flux-diffusers - text-to-image - diffusers - simpletuner - safe-for-work - lora - template:sd-lora - lycoris inference: true widget: - text: unconditional (blank prompt) parameters: negative_prompt: blurry, cropped, ugly output: url: ./assets/image_0_0.png - text: A scene from Jujutsu Kaisen. Satoru Gojo holding a sign that says 'I LOVE PROMPTS!', he is standing full body on a beach at sunset. He is wearing a red vest, yellow sash, and a straw hat. The setting sun casts a dynamic shadow on his face. parameters: negative_prompt: blurry, cropped, ugly output: url: ./assets/image_1_0.png - text: A scene from Jujutsu Kaisen. Satoru Gojo jumping out of a propeller airplane, sky diving. He looks excited and his hair is blowing in the wind. The sky is clear and blue, there are birds pictured in the distance. parameters: negative_prompt: blurry, cropped, ugly output: url: ./assets/image_2_0.png - text: 'A scene from Jujutsu Kaisen. Satoru Gojo spinning a basketball on his finger on a basketball court. He is wearing a lakers jersey with the #12 on it. The basketball hoop and crowd are in the background cheering him. He is smiling.' parameters: negative_prompt: blurry, cropped, ugly output: url: ./assets/image_3_0.png - text: A scene from Jujutsu Kaisen. Satoru Gojo is wearing a suit in an office shaking the hand of a business woman. The woman has purple hair and is wearing professional attire. There is a Google logo in the background. It is during daytime, and the overall sentiment is one of accomplishment. parameters: negative_prompt: blurry, cropped, ugly output: url: ./assets/image_4_0.png - text: A scene from Jujutsu Kaisen. Satoru Gojo is fighting a large brown grizzly bear, deep in a forest. The bear is tall and standing on two legs, roaring. The bear is also wearing a crown because it is the king of all bears. Around them are tall trees and other animals watching. parameters: negative_prompt: blurry, cropped, ugly output: url: ./assets/image_5_0.png --- # gojo-simpletuner-lora-1 This is a LyCORIS adapter derived from [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev). No validation prompt was used during training. None ## Validation settings - CFG: `3.5` - CFG Rescale: `0.0` - Steps: `20` - Sampler: `None` - Seed: `42` - Resolution: `1024x1024` Note: The validation settings are not necessarily the same as the [training settings](#training-settings). You can find some example images in the following gallery: <Gallery /> The text encoder **was not** trained. You may reuse the base model text encoder for inference. 
## Training settings

- Training epochs: 75
- Training steps: 16800
- Learning rate: 5e-05
- Effective batch size: 8
- Micro-batch size: 8
- Gradient accumulation steps: 1
- Number of GPUs: 1
- Prediction type: flow-matching
- Rescaled betas zero SNR: False
- Optimizer: adamw_bf16
- Precision: Pure BF16
- Quantised: Yes: int8-quanto
- Xformers: Not used
- LyCORIS Config:

```json
{
    "algo": "lokr",
    "multiplier": 1.0,
    "linear_dim": 10000,
    "linear_alpha": 1,
    "factor": 12,
    "apply_preset": {
        "target_module": [
            "Attention",
            "FeedForward"
        ],
        "module_algo_map": {
            "Attention": {
                "factor": 12
            },
            "FeedForward": {
                "factor": 6
            }
        }
    }
}
```

## Datasets

### gojo-512

- Repeats: 2
- Total number of images: 291
- Total number of aspect buckets: 1
- Resolution: 0.262144 megapixels
- Cropped: False
- Crop style: None
- Crop aspect: None

### gojo-512-crop

- Repeats: 2
- Total number of images: 291
- Total number of aspect buckets: 1
- Resolution: 0.262144 megapixels
- Cropped: False
- Crop style: None
- Crop aspect: None

## Inference

```python
import torch
from diffusers import DiffusionPipeline
from lycoris import create_lycoris_from_weights

model_id = 'black-forest-labs/FLUX.1-dev'
adapter_id = 'pytorch_lora_weights.safetensors'  # you will have to download this manually
lora_scale = 1.0

# Load the base pipeline first; the LyCORIS adapter is merged into its transformer.
# bf16 matches the training precision listed above.
pipeline = DiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.bfloat16)

wrapper, _ = create_lycoris_from_weights(lora_scale, adapter_id, pipeline.transformer)
wrapper.merge_to()

prompt = "An astronaut is riding a horse through the jungles of Thailand."

pipeline.to('cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu')
image = pipeline(
    prompt=prompt,
    num_inference_steps=20,
    generator=torch.Generator(device='cuda' if torch.cuda.is_available() else 'mps' if torch.backends.mps.is_available() else 'cpu').manual_seed(1641421826),
    width=1024,
    height=1024,
    guidance_scale=3.5,
).images[0]
image.save("output.png", format="PNG")
```
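Rather than downloading `pytorch_lora_weights.safetensors` by hand, the adapter file can also be fetched programmatically. A small sketch, assuming the file is stored under that name at the root of this repository:

```python
from huggingface_hub import hf_hub_download

# Fetch the LyCORIS adapter weights; the returned local path can be used
# as adapter_id in the inference snippet above.
adapter_id = hf_hub_download(
    repo_id="adipanda/gojo-simpletuner-lora-1",
    filename="pytorch_lora_weights.safetensors",
)
print(adapter_id)
```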
[ "BEAR" ]
web3se/SmartBERT-v2
web3se
fill-mask
[ "transformers", "pytorch", "roberta", "fill-mask", "smart-contract", "web3", "software-engineering", "embedding", "codebert", "en", "base_model:microsoft/codebert-base-mlm", "base_model:finetune:microsoft/codebert-base-mlm", "license:mit", "autotrain_compatible", "endpoints_compatible", "region:us" ]
2024-10-25T08:58:17Z
2024-12-12T15:24:58+00:00
22
1
---
base_model:
- microsoft/codebert-base-mlm
language:
- en
library_name: transformers
license: mit
pipeline_tag: fill-mask
tags:
- fill-mask
- smart-contract
- web3
- software-engineering
- embedding
- codebert
inference: true
---

# SmartBERT V2 CodeBERT

![SmartBERT](./framework.png)

## Overview

SmartBERT V2 CodeBERT is a pre-trained model, initialized with **[CodeBERT-base-mlm](https://huggingface.co/microsoft/codebert-base-mlm)**, designed to transform **Smart Contract** function-level code into embeddings effectively.

- **Training Data:** Trained on **16,000** smart contracts.
- **Hardware:** 2 Nvidia A100 80G GPUs.
- **Training Duration:** More than 10 hours.
- **Evaluation Data:** Evaluated on **4,000** smart contracts.

## Preprocessing

All newline (`\n`) and tab (`\t`) characters in the function code were replaced with a single space to ensure consistency in the input data format.

## Base Model

- **Base Model**: [CodeBERT-base-mlm](https://huggingface.co/microsoft/codebert-base-mlm)

## Training Setup

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir=OUTPUT_DIR,
    overwrite_output_dir=True,
    num_train_epochs=20,
    per_device_train_batch_size=64,
    save_steps=10000,
    save_total_limit=2,
    evaluation_strategy="steps",
    eval_steps=10000,
    resume_from_checkpoint=checkpoint
)
```

## How to Use

To train and deploy the SmartBERT V2 model for Web API services, please refer to our GitHub repository: [web3se-lab/SmartBERT](https://github.com/web3se-lab/SmartBERT).

Or use a `fill-mask` pipeline:

```python
from transformers import RobertaTokenizer, RobertaForMaskedLM, pipeline

model = RobertaForMaskedLM.from_pretrained('web3se/SmartBERT-v3')
tokenizer = RobertaTokenizer.from_pretrained('web3se/SmartBERT-v3')

code_example = "function totalSupply() external view <mask> (uint256);"
fill_mask = pipeline('fill-mask', model=model, tokenizer=tokenizer)

outputs = fill_mask(code_example)
print(outputs)
```

## Contributors

- [Youwei Huang](https://www.devil.ren)
- [Sen Fang](https://github.com/TomasAndersonFang)

## Sponsors

- [Institute of Intelligent Computing Technology, Suzhou, CAS](http://iict.ac.cn/)
- CAS Mino (中科劢诺)
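Since the card describes SmartBERT as an encoder for function-level smart-contract code but only shows the fill-mask pipeline, here is a minimal embedding sketch using this repository's id. The mean pooling over the last hidden state is an assumption of this example (the card does not specify a pooling strategy), and the whitespace collapsing follows the preprocessing described above.

```python
import torch
from transformers import RobertaTokenizer, RobertaModel

model_name = "web3se/SmartBERT-v2"
tokenizer = RobertaTokenizer.from_pretrained(model_name)
model = RobertaModel.from_pretrained(model_name)
model.eval()

code = """function totalSupply() external view returns (uint256) {
    return _totalSupply;
}"""

# Preprocessing described in the card: collapse newlines/tabs to single spaces.
code = " ".join(code.split())

inputs = tokenizer(code, return_tensors="pt", truncation=True, max_length=512)
with torch.no_grad():
    outputs = model(**inputs)

# Mean-pool the last hidden state over non-padding tokens (assumed pooling strategy).
mask = inputs["attention_mask"].unsqueeze(-1)
embedding = (outputs.last_hidden_state * mask).sum(dim=1) / mask.sum(dim=1)
print(embedding.shape)  # torch.Size([1, 768])
```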
[ "CAS" ]
lfcc/medlink-bi-encoder
lfcc
sentence-similarity
[ "sentence-transformers", "safetensors", "bert", "sentence-similarity", "feature-extraction", "generated_from_trainer", "dataset_size:1540", "loss:CosineSimilarityLoss", "base_model:neuralmind/bert-base-portuguese-cased", "base_model:finetune:neuralmind/bert-base-portuguese-cased", "model-index", "autotrain_compatible", "text-embeddings-inference", "endpoints_compatible", "region:us" ]
2024-10-28T17:36:36Z
2024-10-28T17:45:36+00:00
22
1
--- base_model: neuralmind/bert-base-portuguese-cased library_name: sentence-transformers metrics: - pearson_cosine - spearman_cosine - pearson_manhattan - spearman_manhattan - pearson_euclidean - spearman_euclidean - pearson_dot - spearman_dot - pearson_max - spearman_max pipeline_tag: sentence-similarity tags: - sentence-transformers - sentence-similarity - feature-extraction - generated_from_trainer - dataset_size:1540 - loss:CosineSimilarityLoss widget: - source_sentence: A ascite quilosa é uma manifestação rara com um amplo diagnóstico diferencial. No adulto está sobretudo associada a casos de trauma, iatrogenia, neoplasias, doença hepática crónica e infeções micobacterianas. Os autores descrevem um caso raro de ascite quilosa como forma de apresentação de pericardite constritiva. sentences: - Um derrame pleuro-pericárdico acompanhado de febre geralmente sugere uma etiologia infecciosa. Quando episódios recorrentes ocorrem, sem isolamento de agente microbiológico, deve-se suspeitar de síndrome febril periódico, sendo a Febre Mediterrânea Familiar a mais frequente deste grupo. Febre Mediterrânea Familiar é uma doença autossómica recessiva, causada por mutações no gene MEFV e caracterizada por ataques recorrentes de febre e serosite. Os primeiros sintomas geralmente manifestam-se antes dos 20 anos de idade, sendo a dor abdominal o sintoma mais frequente. Neste artigo, iremos apresentar um caso de polisserosite febril recidivante como uma apresentação incomum de Febre Mediterrânea Familiar. - A pericardite constritiva (PC) consiste num compromisso da função cardíaca diastólica causado por um pericárdio fibrótico, inflamado ou calcificado, geralmente espessado. Os autores apresentam um caso de doente com polisserosite, cuja extensa investigação diagnóstica inicial, incluindo o ecocardiograma com doppler (ED) e a tomografia axial computorizada (TAC), não permitiram esclarecer a etiologia dos derrames, tendo o doente mantido ascite refractária apesar do tratamento médico. O gradiente sero-ascítico de albumina ≥ 1,1g/dL, o valor de proteínas no líquido ascítico > 2,5g/dL, o ingurgitamento jugular, bem como os antecedentes de derrames pericárdicos, levantaram a suspeita de PC. O diagnóstico foi apoiado pelo ED e pela TAC subsequentes e confirmado por cateterismo cardíaco. Perante um doente com polisserosite, a investigação diagnóstica deve ser orientada pelo exame citoquímico dos líquidos serosos. A PC é uma causa rara de ascite recorrente e estabelecer o diagnóstico constitui um desafio, sendo necessário um elevado índice de suspeição. - A Síndrome de Felty (SF) é caracterizada pela tríade artrite reumatóide (AR), neutropenia e esplenomegalia. É uma manifestação extra-articular rara da AR, presente em menos de 3% dos doentes, sendo mais frequente em mulheres e entre a 5ª e a 7ª décadas de vida. Na maioria dos casos surge, pelo menos, 10 anos após o diagnóstico da AR e associa-se a outras manifestações extra-articulares como vasculite, serosite ou adenopatias. Descrevemos um caso de uma mulher de 69 anos que se apresenta na consulta com neutropenia grave e sem qualquer outra sintomatologia acompanhante. Da investigação etiológica apurou-se altos títulos de fator reumatóide e Anti-CCP, associados a esplenomegalia, tendo sido feito o diagnóstico de SF, como apresentação inaugural de AR. Descrevemos este caso para realçar a importância da exclusão de causa auto-imune perante um doente com neutropenia ainda que sem clínica de artrite ou sinovite. 
- source_sentence: Os autores apresentam o caso de uma doente, 38 anos, sem antecedentes, admitida para investigação de derrame pleural. Toracocentese revelou hemotórax com exames bacteriológico, micobacteriológico e anatomo-patológico negativos. TAC toraco-abdomino-pélvico sugestiva de carcinomatose peritoneal, sem identificação de neoplasia primária. Biópsia de lesão superficial a nível pélvico compatível com endometriose. Laparoscopia diagnóstica com biopsia de lesões peritoneais também compatíveis com endometriose. Perante anatomia patológica e reaparecimento do derrame com novo ciclo menstrual admitiu-se endometriose torácica, tendo iniciado terapêutica supressora hormonal com resolução da sintomatologia. Os autores apresentam o caso clínico pela raridade e desafio diagnóstico que representa. A endometriose pulmonar caracteriza-se por tecido endometrial no parenquima pulmonar ou pleura e manifesta-se por pneumotorax, hemotorax ou hemoptises cíclicas catameniais. Os exames complementares são inespecíficos e o diagnóstico de exclusão, tendo em conta a história clínica e a natureza catamenial dos sintomas. O tratamento consiste inicialmente na supressão hormonal podendo necessitar de cirurgia. sentences: - Mulher de 64 anos, com antecedentes de Síndrome de Sjögren primário, recorre ao serviço de urgência por epigastralgias, vómitos, icterícia, colúria, acolia, prurido, anorexia e perda ponderal com 2 semanas de evolução. Objetivamente com dor à palpação no hipocôndrio direito e icterícia. Ecografia abdominal com dilatação das vias biliares intra e extra-hepáticas e tomografia computorizada e ressonância magnética com globosidade da área cefálica do pâncreas, lesões nodulares renais bilaterais, heterogeneidade do útero, nódulo da supra-renal e micronódulos pulmonares. Foi realizada biopsia renal guiada por TC que revelou linfoma não Hogdkin difuso de células B com elevado índice proliferativo. Estudo complementado por ecoendoscopia e CPRE confirmou envolvimento duodenal e papilar, condicionando estenose do terço distal da via biliar principal. Apresentamos este caso pela forma de apresentação rara com icterícia obstrutiva em doente com linfoma multifocal, de envolvimento extranodal exclusivo. O diagnóstico precoce e estadiamento célere são fatores determinantes no prognóstico. - Os autores apresentam o caso de uma paciente com síndrome de Klippel-Trenaunay, um síndrome neurocutâneo raro, de etiologia não esclarecida, que se caracteriza pela tríade clínica de hemangiomas cutâneos, insuficiência venosa e hipertrofia dos tecidos moles. A dor é o sintoma mais frequente relacionada com a insuficiência venosa crónica do membro afectado , mas poderão surgir complicações decorrentes da hipertrofia óssea e do aparecimento de malformações vasculares noutros locais. - Numerosas terapêuticas foram propostas na síndrome de secreção inadequada de hormona antidiurética (SIADH) refractária à restrição hídrica e dieta hipersalina, existindo raros casos descritos de SIADH de origem neurológica em que foi conseguido um controlo a longo prazo com fenitoína. Um homem de 48 anos, raça caucasiana, com antecedentes de etilismo crónico e história recente de traumatismo craniano com fractura do rochedo temporal direito é encaminhado ao Serviço de Urgência(SU) por crise convulsiva não presenciada e quadro confusional. Ao exame objectivo, o doente apresentava-se prostrado, desorientado e com períodos de agitação, sem sinais de depleção de volume. 
O restante exame físico e neurológico não revelou alterações relevantes. À admissão destacavam-se, analiticamente, níveis séricos de sódio de 120 mEq/l e, imagiologicamente, a tomografia crânio-encefálica revelou-se sobreponível a estudos anteriores. Outros exames complementares realizados, no SU, não mostraram alterações. Durante o internamento a abordagem diagnóstica permitiu o diagnóstico de SIADH, como complicação de uma fractura da base do crânio. Apesar da instituição de restrição hídrica e dieta hipersalina, o doente manteve o quadro confusional e hiponatrémia refractários. Face à etiologia da SIADH iniciou-se terapêutica com fenitoína conseguindo-se uma melhoria mantida do quadro clínico e atingimento de níveis normonatrémicos. - source_sentence: A hiponatremia é a alteração eletrolítica mais frequente na prática clínica hospitalar. Sendo muitas vezes devido a perdas ou iatrogenia farmacológica. A insuficiência primária da supra-renal é uma causa rara deste distúrbio e está muitas vezes relacionada com destruição auto-imune da glândula. Esta cursa, na maioria das vezes, com sintomas inespecíficos e de desenvolvimento insidioso. Por vezes os doentes não apresentam a tríade clássica de hipotensão, hiponatrémia e hiperpigmentação o que torna difícil o seu diagnóstico precoce. O diagnóstico correto e atempado permite oferecer ao doente um tratamento simples e crucial para a sua sobrevivência sentences: - Homem de 67 anos, internado no Serviço de Medicina por Pneumonia. Antecedentes de miocardiopatia dilatada, fibrilhação auricular, hipertensão arterial, alcoolismo crónico (80g/dia) e caquexia. No decurso do internamento desenvolveu um quadro de diminuição da força muscular de forma progressiva com tetraparésia grave, atrofia muscular de predomínio esquerdo, espasticidade e hiperreflexia dos membros inferiores. Analiticamente apresentava elevação dos parâmetros de colestase hepática, ionograma seriado com hiponatrémia discreta 132-135mEq/L, potássio, cloro, cálcio, fósforo e magnésio normais. Sem défice de vitamina B12 ou ácido fólico. Tomografia Computorizada Crânio-Encefálica sem alterações de natureza vascular ou expansiva. Punção lombar com análise do líquido cefalorraquídeo sem alterações. Serologias virais e bacterianas negativas. Eletromiograma sem lesão nervosa periférica. Foi então pedida Ressonância Magnética Crânio-Encefálica e Cervical para exclusão de lesão desmielinizante cervical alta ou do tronco cerebral, tendo-se verificado hipersinal em T2 a nível da ponte característica da Mielinólise Central Pontina. - A Doença de Still é uma doença auto-inflamatória rara, sendo um dos diagnósticos diferenciais de febre de origem indeterminada. A apresentação típica inclui febre, rash evanescente e artrite acompanhada de valores desproporcionalmente elevados de ferritina. Apresentamos um caso de diagnóstico particularmente difícil numa mulher de 44 anos com envolvimento cutâneo, articular e pulmonar, na qual os valores de ferritina estavam apenas moderadamente elevados, mas a sua forma glicosilada significativamente reduzida. No decorrer da investigação foi identificada doença celíaca concomitante, com défice de ferro profundo, que apontou para uma possível alteração no mecanismo de produção de ferritina na presença de um estímulo inflamatório. Este caso sublinha a relevância da ferritina glicosilada como marcador mais fiável na investigação de casos onde a Doença de Still é suspeita. - Resumo Os linfomas que envolvem o colo do útero são muito raros. 
Relatamos o caso de uma mulher de 71 anos apresentando sintomas de diverticulite, com vários achados imagiológicos incidentais sugerindo uma doença linfoproliferativa e uma grande massa no colo do útero. A biópsia profunda do colo do útero diagnosticou um linfoma difuso de grandes células B envolvendo o colo do útero, provável transformação de um linfoma de zona marginal. A doente está atualmente em tratamento com rituximab, ciclofosfamida, doxorrubicina, vincristina e predisolona e metotrexato em altas doses para profilaxia de envolvimento do sistema nervoso central. Para diagnosticar com precisão um linfoma não-Hodgkin do colo do útero, a equipa médica deve estar atenta a esta hipótese diagnóstica clínica, a fim de proporcionar as melhores condições para a investigação, como biópsia profunda do colo do útero e estudos histológicos e imuno-histoquímicos da amostra. - source_sentence: A Arterite de Takayasu é uma doença inflamatória crónica dos grandes vasos, que envolve a artéria aorta e os seus ramos principais, e afecta predominantemente mulheres com idade inferior a 40 anos. A clínica é inespecífica e varia com o local anatómico envolvido, pelo que é necessário um elevado índice de suspeição clínica para que seja realizado o seu diagnóstico. O acidente vascular cerebral tem uma prevalência de cerca de 10 a 20% no decurso da doença e influencia de forma negativa o seu prognóstico. O acidente vascular cerebral hemorrágico como manifestação da Arterite de Takayasu é raro. Apresentamos o caso de uma doente jovem que se apresenta com uma hemorragia cerebral, cuja investigação etiológica culminou no diagnóstico de Arterite de Takayasu. A importância desde caso clínico prende-se com a escassez de casos publicados na literatura, uma vez que retrata uma patologia rara, com uma apresentação inicial invulgar. sentences: - Resumo Aproximadamente 5%-10% dos acidentes vasculares cerebrais (AVC) criptogénicos têm uma neoplasia subjacente. A parésia do nervo abducente em doentes com neoplasia encontra-se geralmente relacionada com compressão tumoral, hipertensão intracraniana ou metastização. Os autores reportam um caso de um doente com 65 anoscom AVC multiterritório que se apresentou com uma parésia do sexto nervo unilateral e isolada cuja etiologia foi extensamente estudada. Admitiu-se o diagnóstico final de síndrome paraneoplásico, que foi a apresentação inicial de um carcinoma gástrico oculto provavelmente relacionado com a hipercoagulabilidade associada à malignidade. Este caso enfatiza a importância de considerar um estudoadicional em casos selecionados de AVC criptogénico ou parésia do abducente. - As encefalites virais são entidades raras, mas que, pelas suas implicações diagnósticas, terapêuticas e prognósticas, não podem deixar de ser consideradas em qualquer doente que se apresente com sintomas psiquiátricos, alteração do estado de consciência, convulsões ou coma sem causa evidente. O presente caso diz respeito a um doente com sintomas psicóticos e um estado confusional com duas semanas de evolução. À admissão, apresentava-se subfebril, com flutuação do nível de consciência. O estudo analítico e TAC crânio-encefálica não mostraram alterações de relevo, tendo realizado punção lombar cujo exame citoquímico e exame bacteriológico se mostravam igualmente inalterados. Por suspeita mantida de encefalite viral e não sendo possível excluir causa herpética, foi iniciada terapêutica empírica com aciclovir. 
A PCR do vírus Epstein-Barr (EBV) no líquor foi positiva, permitindo assim o diagnóstico raro de uma encefalite a EBV num doente idoso e imunocompetente, tendo-se verificado resolução completa do quadro clínico. - A abordagem da febre é sem dúvida uma das artes da Medicina. A doença de Still no adulto (DSA) é uma patologia inflamatória sistémica de baixa incidência e etiologia desconhecida. Pela inespecificidade clínica e laboratorial, é um diagnóstico de exclusão. Os autores descrevem o caso de homem de 32 anos com a tríade de febre, oligoartralgia e exantema cutâneo evanescente, cuja marcha diagnóstica minuciosa culminou no diagnóstico de DSA, apresentando hiperferritinémia sérica dez vezes superior ao normal. Relembra-se a importância da DSA como causa de síndrome febril arrastado, cujo diagnóstico, atendendo à ausência de marcadores patognomónicos, pode passar despercebido. - source_sentence: A síndrome da Secreção Inapropriada da Hormona Antidiurética (SIADH) é uma das causas de hiponatremia euvolémica. A hidrocefalia de pressão normal (HPN) pode ser uma causa neurológica para SIADH e o seu diagnóstico e correção são fundamentais para a normalização dos níveis de sódio. Relatamos o caso de uma mulher de 67 anos, com hiponatremia crónica, marcha de base alargada, urgência miccional e sensação de perda de memória, sem evidência de sobrecarga hídrica ou desidratação. O estudo complementar revelou osmolaridade sérica normal, osmolaridade urinária elevada, sódio urinário elevado. Após restrição hídrica, houve melhoria da hiponatremia. Imagiologicamente documentou-se presença de membrana aqueductal causando obstrução ao fluxo do líquido cefalorraquidiano. O diagnóstico de SIADH em contexto de HPN foi presumido. Após correção cirúrgica houve resolução completa da hiponatremia. Hoje sabe-se que existem formas secundárias raras de HPN, sendo estas causadas por estenose ou obstrução aqueductal, como relatado no caso apresentado. sentences: - Define-se lesão hepática induzida por um fármaco como uma lesão hepática que, após exclusão de outras potenciais etiologias, se assume como secundária a um fármaco, produto de ervanária ou xenobiótico, e que resulta em alterações da enzimologia hepática ou disfunção hepática clinicamente evidente. Os autores descrevem o caso de um homem de 87 anos internado para estudo etiológico de uma lesão hepática de padrão colestático. Após estudo alargado, foi colocada como hipótese etiológica mais provável uma iatrogenia farmacológica, posteriormente corroborada por biópsia hepática, sendo a Espironolactona assumida como o agente causal mais provável, atendendo ao quadro clínico e aos achados histopatológicos. Estão descritos alguns casos de lesão hepática induzida pela Espironolactona, quando usada em doses de 50 e 100 mg/dia. Os autores relatam um caso raro que ocorreu num doente que se encontrava sob Espironolactona na dose de 25 mg/dia. - Resumo A ceftriaxona, um dos antibióticos mais frequentementeutilizados na prática clínica, tem como efeito adverso, raro epotencialmente grave, a agranulocitose. Reportamos um caso de uma mulher de 85 anos em esquema terapêutico prolongado com ceftriaxona para endocardite por Streptococcus bovis, que desenvolve agranulocitose ao 25º dia de antibioterapia, com nadir de contagem absoluta de neutrófilos de 0/uL. Outras causas potenciais foram excluídas. 
A terapêutica antibiótica foi alterada para amoxicilina/ácido clavulânico e realizou ciclo de fator estimulador de colónias de granulócitos, com resolução da neutropenia após 3 dias. Queremos destacar este efeito adverso raro com o uso prolongado da ceftriaxona,salientando a necessidade de monitorização regulardas contagens de leucócitos. O tratamento desta condiçãopassa pela suspensão do agente causal e o uso transitório de factor estimulador de colónias de granulócitos até resolução da neutropenia. - A síndrome de secreção inapropriada da hormona anti-diurética (SIADH) é uma causa frequente de hiponatrémia, sendo um diagnóstico de exclusão. Quando associada à infeção pelo vírus varicella zoster é mais frequente na sua forma disseminada. Os autores descrevem o caso de uma mulher de 83 anos, com quadro com 7 dias de evolução de síndrome confusional flutuante, desorientação temporo-espacial e tonturas. Medicada com brivudina, aciclovir tópico e ofloxacina gotas para tratamento de herpes zóster com atingimento dos ramos oftálmico e mandibular do nervo trigémeo. À admissão, com hiponatrémia de 128mmol/L. Excluídas outras causas, assumiu-se o diagnóstico de SIADH associado a infeção por herpes. O caso descrito sugere uma relação causal entre a reactivação por VZV e a SIADH sintomática. A favor, temos a resolução completa da hiponatrémia a acompanhar a melhoria clínica. O presente caso torna-se importante por se tratar de uma entidade rara, pouco conhecida e subdiagnosticada, mas com efeitos clínicos importantes. model-index: - name: SentenceTransformer based on neuralmind/bert-base-portuguese-cased results: - task: type: semantic-similarity name: Semantic Similarity dataset: name: Unknown type: unknown metrics: - type: pearson_cosine value: 0.6875234896564695 name: Pearson Cosine - type: spearman_cosine value: 0.6855542083017127 name: Spearman Cosine - type: pearson_manhattan value: 0.6475708379913874 name: Pearson Manhattan - type: spearman_manhattan value: 0.6531511386527615 name: Spearman Manhattan - type: pearson_euclidean value: 0.6497495499262932 name: Pearson Euclidean - type: spearman_euclidean value: 0.6545105043371998 name: Spearman Euclidean - type: pearson_dot value: 0.6790094551137061 name: Pearson Dot - type: spearman_dot value: 0.6847710424836908 name: Spearman Dot - type: pearson_max value: 0.6875234896564695 name: Pearson Max - type: spearman_max value: 0.6855542083017127 name: Spearman Max - task: type: semantic-similarity name: Semantic Similarity dataset: name: sts test type: sts-test metrics: - type: pearson_cosine value: 0.6907882980083289 name: Pearson Cosine - type: spearman_cosine value: 0.6894513736041122 name: Spearman Cosine - type: pearson_manhattan value: 0.6492706768297136 name: Pearson Manhattan - type: spearman_manhattan value: 0.6546984498682096 name: Spearman Manhattan - type: pearson_euclidean value: 0.651318699091458 name: Pearson Euclidean - type: spearman_euclidean value: 0.6544106471290732 name: Spearman Euclidean - type: pearson_dot value: 0.6817298567055641 name: Pearson Dot - type: spearman_dot value: 0.6881836625714188 name: Spearman Dot - type: pearson_max value: 0.6907882980083289 name: Pearson Max - type: spearman_max value: 0.6894513736041122 name: Spearman Max - type: pearson_cosine value: 0.6907882980083289 name: Pearson Cosine - type: spearman_cosine value: 0.6894513736041122 name: Spearman Cosine - type: pearson_manhattan value: 0.6492706768297136 name: Pearson Manhattan - type: spearman_manhattan value: 0.6546984498682096 name: Spearman 
Manhattan - type: pearson_euclidean value: 0.651318699091458 name: Pearson Euclidean - type: spearman_euclidean value: 0.6544106471290732 name: Spearman Euclidean - type: pearson_dot value: 0.6817298567055641 name: Pearson Dot - type: spearman_dot value: 0.6881836625714188 name: Spearman Dot - type: pearson_max value: 0.6907882980083289 name: Pearson Max - type: spearman_max value: 0.6894513736041122 name: Spearman Max --- # SentenceTransformer based on neuralmind/bert-base-portuguese-cased This is a [sentence-transformers](https://www.SBERT.net) model finetuned from [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased). It maps sentences & paragraphs to a 768-dimensional dense vector space and can be used for semantic textual similarity, semantic search, paraphrase mining, text classification, clustering, and more. ## Model Details ### Model Description - **Model Type:** Sentence Transformer - **Base model:** [neuralmind/bert-base-portuguese-cased](https://huggingface.co/neuralmind/bert-base-portuguese-cased) <!-- at revision 94d69c95f98f7d5b2a8700c420230ae10def0baa --> - **Maximum Sequence Length:** 512 tokens - **Output Dimensionality:** 768 tokens - **Similarity Function:** Cosine Similarity <!-- - **Training Dataset:** Unknown --> <!-- - **Language:** Unknown --> <!-- - **License:** Unknown --> ### Full Model Architecture ``` SentenceTransformer( (0): Transformer({'max_seq_length': 512, 'do_lower_case': False}) with Transformer model: BertModel (1): Pooling({'word_embedding_dimension': 768, 'pooling_mode_cls_token': False, 'pooling_mode_mean_tokens': True, 'pooling_mode_max_tokens': False, 'pooling_mode_mean_sqrt_len_tokens': False, 'pooling_mode_weightedmean_tokens': False, 'pooling_mode_lasttoken': False, 'include_prompt': True}) ) ``` ## Usage ### Direct Usage (Sentence Transformers) First install the Sentence Transformers library: ```bash pip install -U sentence-transformers ``` Then you can load this model and run inference. ```python from sentence_transformers import SentenceTransformer # Download from the 🤗 Hub model = SentenceTransformer("lfcc/medlink-bi-encoder") # Run inference sentences = [ 'A síndrome da Secreção Inapropriada da Hormona Antidiurética (SIADH) é uma das causas de hiponatremia euvolémica. A hidrocefalia de pressão normal (HPN) pode ser uma causa neurológica para SIADH e o seu diagnóstico e correção são fundamentais para a normalização dos níveis de sódio. Relatamos o caso de uma mulher de 67 anos, com hiponatremia crónica, marcha de base alargada, urgência miccional e sensação de perda de memória, sem evidência de sobrecarga hídrica ou desidratação. O estudo complementar revelou osmolaridade sérica normal, osmolaridade urinária elevada, sódio urinário elevado. Após restrição hídrica, houve melhoria da hiponatremia. Imagiologicamente documentou-se presença de membrana aqueductal causando obstrução ao fluxo do líquido cefalorraquidiano. O diagnóstico de SIADH em contexto de HPN foi presumido. Após correção cirúrgica houve resolução completa da hiponatremia. Hoje sabe-se que existem formas secundárias raras de HPN, sendo estas causadas por estenose ou obstrução aqueductal, como relatado no caso apresentado.', 'A síndrome de secreção inapropriada da hormona anti-diurética (SIADH) é uma causa frequente de hiponatrémia, sendo um diagnóstico de exclusão. Quando associada à infeção pelo vírus varicella zoster é mais frequente na sua forma disseminada. 
Os autores descrevem o caso de uma mulher de 83 anos, com quadro com 7 dias de evolução de síndrome confusional flutuante, desorientação temporo-espacial e tonturas. Medicada com brivudina, aciclovir tópico e ofloxacina gotas para tratamento de herpes zóster com atingimento dos ramos oftálmico e mandibular do nervo trigémeo. À admissão, com hiponatrémia de 128mmol/L. Excluídas outras causas, assumiu-se o diagnóstico de SIADH associado a infeção por herpes. O caso descrito sugere uma relação causal entre a reactivação por VZV e a SIADH sintomática. A favor, temos a resolução completa da hiponatrémia a acompanhar a melhoria clínica. O presente caso torna-se importante por se tratar de uma entidade rara, pouco conhecida e subdiagnosticada, mas com efeitos clínicos importantes.', 'Resumo A ceftriaxona, um dos antibióticos mais frequentementeutilizados na prática clínica, tem como efeito adverso, raro epotencialmente grave, a agranulocitose. Reportamos um caso de uma mulher de 85 anos em esquema terapêutico prolongado com ceftriaxona para endocardite por Streptococcus bovis, que desenvolve agranulocitose ao 25º dia de antibioterapia, com nadir de contagem absoluta de neutrófilos de 0/uL. Outras causas potenciais foram excluídas. A terapêutica antibiótica foi alterada para amoxicilina/ácido clavulânico e realizou ciclo de fator estimulador de colónias de granulócitos, com resolução da neutropenia após 3 dias. Queremos destacar este efeito adverso raro com o uso prolongado da ceftriaxona,salientando a necessidade de monitorização regulardas contagens de leucócitos. O tratamento desta condiçãopassa pela suspensão do agente causal e o uso transitório de factor estimulador de colónias de granulócitos até resolução da neutropenia.', ] embeddings = model.encode(sentences) print(embeddings.shape) # [3, 768] # Get the similarity scores for the embeddings similarities = model.similarity(embeddings, embeddings) print(similarities.shape) # [3, 3] ``` <!-- ### Direct Usage (Transformers) <details><summary>Click to see the direct usage in Transformers</summary> </details> --> <!-- ### Downstream Usage (Sentence Transformers) You can finetune this model on your own dataset. 
<details><summary>Click to expand</summary> </details> --> <!-- ### Out-of-Scope Use *List how the model may foreseeably be misused and address what users ought not to do with the model.* --> ## Evaluation ### Metrics #### Semantic Similarity * Evaluated with [<code>EmbeddingSimilarityEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator) | Metric | Value | |:--------------------|:-----------| | pearson_cosine | 0.6875 | | **spearman_cosine** | **0.6856** | | pearson_manhattan | 0.6476 | | spearman_manhattan | 0.6532 | | pearson_euclidean | 0.6497 | | spearman_euclidean | 0.6545 | | pearson_dot | 0.679 | | spearman_dot | 0.6848 | | pearson_max | 0.6875 | | spearman_max | 0.6856 | #### Semantic Similarity * Dataset: `sts-test` * Evaluated with [<code>EmbeddingSimilarityEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator) | Metric | Value | |:--------------------|:-----------| | pearson_cosine | 0.6908 | | **spearman_cosine** | **0.6895** | | pearson_manhattan | 0.6493 | | spearman_manhattan | 0.6547 | | pearson_euclidean | 0.6513 | | spearman_euclidean | 0.6544 | | pearson_dot | 0.6817 | | spearman_dot | 0.6882 | | pearson_max | 0.6908 | | spearman_max | 0.6895 | #### Semantic Similarity * Dataset: `sts-test` * Evaluated with [<code>EmbeddingSimilarityEvaluator</code>](https://sbert.net/docs/package_reference/sentence_transformer/evaluation.html#sentence_transformers.evaluation.EmbeddingSimilarityEvaluator) | Metric | Value | |:--------------------|:-----------| | pearson_cosine | 0.6908 | | **spearman_cosine** | **0.6895** | | pearson_manhattan | 0.6493 | | spearman_manhattan | 0.6547 | | pearson_euclidean | 0.6513 | | spearman_euclidean | 0.6544 | | pearson_dot | 0.6817 | | spearman_dot | 0.6882 | | pearson_max | 0.6908 | | spearman_max | 0.6895 | <!-- ## Bias, Risks and Limitations *What are the known or foreseeable issues stemming from this model? You could also flag here known failure cases or weaknesses of the model.* --> <!-- ### Recommendations *What are recommendations with respect to the foreseeable issues? 
For example, filtering explicit content.* --> ## Training Details ### Training Dataset #### spmi_dataset * Size: 1,540 training samples * Columns: <code>abstract1</code>, <code>abstract2</code>, and <code>score</code> * Approximate statistics based on the first 1000 samples: | | abstract1 | abstract2 | score | |:--------|:------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------|:---------------------------------------------------------------| | type | string | string | float | | details | <ul><li>min: 8 tokens</li><li>mean: 189.72 tokens</li><li>max: 512 tokens</li></ul> | <ul><li>min: 8 tokens</li><li>mean: 211.52 tokens</li><li>max: 512 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.33</li><li>max: 1.0</li></ul> | * Samples: | abstract1 | abstract2 | score | |:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------| | <code>A dissecção aórtica aguda é uma emergência cardiovascular potencialmente fatal. É necessário um elevado grau de suspeição clínica para o seu diagnóstico, pois apresenta sintomas inespecíficos e mimetiza outras patologias. 
A maioria dos doentes tem dor torácica severa, com irradiação posterior e início abrupto, porém alguns são assintomáticos ou têm apresentações atípicas (cerca de 10%), que levam a diagnósticos tardios e a um pior prognóstico. A taxa de mortalidade é elevada, sendo superior a 50% se não for tratada. Apresenta-se o caso de um homem de 43 anos, admitido no serviço de urgência por dispneia de início súbito, sem dor torácica, uma apresentação rara de dissecção aórtica, com o objetivo de alertar para os fatores de risco e alterações do exame físico e nos exames auxiliares de diagnóstico da avaliação inicial que podem levantar a suspeita clínica e o diagnóstico precoce.</code> | <code>Resumo O baço possui funções imunológicas e hematológicas importantes. A esplenectomia está indicada na esferocitose hereditária, doença em que os eritrócitos são destruídos no baço por defeitos estruturais. Doentes esplenectomizados apresentam risco aumentado de infeção e de infeção fulminante pós-esplenectomia, que se caracteriza por um quadro inicial de febre, mialgias, cefaleia e vómitos. As bactérias Capnocytophaga colonizam a mucosa oral, podendo causar infeções oportunistas em doentes esplenectomizados. Os autores identificam o caso de um doente de 38 anos, esplenectomizado, que recorreu ao Serviço de Urgência por febre, vómitos e mialgias. As hemoculturas mostraram o crescimento de Capnocytophaga spp. Apesar das medidas instituídas, o doente evoluiu rapidamente para choque séptico, culminando na sua morte. Os autores pretendem alertar para esta condição rara associada a alta mortalidade, com o objetivo de aumentar a sobrevivência destes doentes, através da identificação e intervenção imediatas.</code> | <code>0.0</code> | | <code>A complexidade das doenças auto-imunes, caracterizadas por uma marcada heterogeneidade fenotípica e imunológica, tem o seu paradigma na sobreposição de perfis de auto-anticorpos e de manifestações clínicas de diferentes doenças num mesmo indivíduo. Os autores descrevem o caso de uma doente que, ao longo de doze anos de evolução de doença, cumpre critérios de classificação de quatro doenças auto-imunes diferentes, nomeadamente, Lúpus Eritematoso Sistémico, Esclerose Sistémica, Síndrome de Sjogrën e Colangite Biliar Primária. A sobreposição de perfis de auto-anticorpos, bem como de distintos fenótipos de diferentes doenças representam um desafio no diagnóstico, seguimento e tratamento destes doentes.</code> | <code>A esclerose sistémica (ES) é uma doença autoimune que pode afetar qualquer faixa etária, sendo pouco frequente após os 65 anos. O início da doença em idade geriátrica apresenta um fenótipo com diferentes aspetos quanto às manifestações clinicas, envolvimento orgânico e prognóstico. Descrevemos um caso clínico invulgar de uma doente com diagnóstico de ES estabelecido aos 87 anos, apresentando como manifestação inicial poliartralgias inflamatórias das mãos. O diagnóstico nesta faixa etária é particularmente desafiador, tendo sido estabelecido clinicamente e complementado com o resultado da capilaroscopia, apesar da doente apresentar auto-anticorpos específicos negativos. A doente realizou estudo do envolvimento visceral baseado em sintomas. Apesar da literatura descrever maior envolvimento orgânico na ES de inicio em idade avançada, a nossa doente não demonstrou marcado compromisso orgânico. 
A multidisciplinaridade envolvendo a Medicina Interna, a Reumatologia e a Fisiatria permitiram elaborar um plano terapêutico adequado, apresentando evolução clínica e funcional favorável.</code> | <code>0.65</code> | | <code>As enteropatias perdedoras de proteínas (EPP) caracterizam-se por uma perda proteica excessiva a nível do trato digestivo, podendo condicionar hipoproteinémia, edemas, bem como uma predisposição aumentada a infeções.1 As causas mais frequentes são a obstrução linfática, patologias gástricas, intestinais ou cardíacas. Neste caso clínico é descrito uma etiologia incomum de EPP, a pericardite constritiva (PC).2 Trata-se de um homem de 54 anos, com múltiplos internamentos por edemas generalizados e erisipelas de repetição, cuja investigação etiológica revelou uma EPP, causada por PC.</code> | <code>Resumo A enteropatia perdedora de proteínas (EPP) caracteriza-se pela presença de edema generalizado e hipoalbuminemiagrave, secundários à perda proteica através do trato gastrointestinal. Os autores reportam um caso de enteropatia perdedora de proteínas secundária a lupus eritematoso sistémico (LES), como a manifestação inicial desta doença. A doente relatava um quadro pautado por 4 meses de diarreia aquosa, não sanguinolenta, (com um máximo de 10 dejeções diárias), e perda ponderal significativa. Posteriormente desenvolveu marcado edema periférico e rash cutâneo malar e maculopapular ao nível do tórax e membros. Analiticamente apresentava anemia, hipoalbuminemia grave, hipocaliémia e hipomagnesémia. No decurso da investigação foram excluídas proteinúria eoutras causas de hipoalbuminemia. Após resultados como a pesquisa de anticorpos anti-nucleares e anti-ribonucleoproteinas positiva foi assumido o diagnóstico de EPP secundária ao LES. A doente foi tratada com pulsos de Metilprednisolona 1000 mg/dia durante 3 dias, seguido de prednisolona 1 mg/kg/dia, com boa resposta clínica. Após 20 dias, foi adicionada Azatioprina e iniciado o desmame de corticoides. 
O presente caso clínico destaca uma EPP como forma deapresentação do LES, cujo diagnóstico pode passar despercebido, tendo em conta a sua raridade, e acarretar um aumento da morbilidade e mortalidade.</code> | <code>0.65</code> | * Loss: [<code>CosineSimilarityLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters: ```json { "loss_fct": "torch.nn.modules.loss.MSELoss" } ``` ### Evaluation Dataset #### spmi_dataset * Size: 386 evaluation samples * Columns: <code>abstract1</code>, <code>abstract2</code>, and <code>score</code> * Approximate statistics based on the first 386 samples: | | abstract1 | abstract2 | score | |:--------|:------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------|:----------------------------------------------------------------| | type | string | string | float | | details | <ul><li>min: 9 tokens</li><li>mean: 193.97 tokens</li><li>max: 512 tokens</li></ul> | <ul><li>min: 8 tokens</li><li>mean: 203.56 tokens</li><li>max: 512 tokens</li></ul> | <ul><li>min: 0.0</li><li>mean: 0.33</li><li>max: 0.95</li></ul> | * Samples: | abstract1 | abstract2 | score | |:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------| | <code>Resumo A síndrome de lise tumoral é a uma emergência médica potencialmente fatal decorrente da lise celular maciça que ocorre em neoplasias malignas com grande carga tumoral. Ocorre sobretudo em neoplasias hematológicas sob quimioterapia, sendo menos frequente em tumores sólidos, os quais apresentam geralmente um menor índice proliferativo. A síndrome de lise tumoral no carcinoma hepatocelular tratado com sorafenib, um inibidor oral multicinase, é extremamente rara, descrevendo-se apenas nove casos na literatura. Tanto quanto sabemos, não existem casos descritos na população europeia. Apresentamos um caso de síndrome de lise tumoral num doente com carcinoma hepatocelular multifocal sob tratamento com sorafenib e infeção SARS-CoV-2.</code> | <code>Resumo A púrpura trombocitopénica imune (PTI) é uma condição autoimune na qual anticorpos patogénicos se ligam às plaquetas, acelerando sua eliminação da circulação. Este caso é sobre uma mulher de 65 anos com fadiga, mialgias e púrpura cutânea localizada nas pernas, com início de sinais e sintomas 2 dias após vacinação com vacina SARS-CoV-2 da Moderna®. Um mês antes, a contagem de plaquetas era de 157x10^9/L. À admissão, a contagem de plaquetas era de 5x10^9/L, com trombocitopénia grave confirmada em esfregaço de sangue periférico. Recebeu prednisolona 1 mg/kg/dia. Após 7 dias, a contagem de plaquetas era de 45x10^9/L com resolução dos sintomas. Estudo de autoimunidade, hormonas tiroideias, coagulação, eletroforese de proteínas e testes sorológicos foram normais. Considerou-se provável relação causa-efeito da vacinação e aparecimento da clínica. O INFARMED considerou provável a relação com a vacina Moderna®, tratando-se do primeiro caso em Portugal.</code> | <code>0.85</code> | | <code>A cetoacidose diabética euglicemica (CADEu) é uma complicação potencialmente fatal da diabetes mellitus (DM), associada à medicação com inibidores do cotransportador sódio-glucose 2 (iSGLT2). Pode ser difícil de identificar devido à ausência de hiperglicemia. Homem com DM tipo 2, 71 anos, medicado com empagliflozina recorreu ao serviço de urgência por mal-estar geral e anúria. Estava prostrado, confuso, hipotenso, com respiração de Kussmaul. Analiticamente apresentou leucocitose, PCR de 202mg/dl, acidose metabólica grave com aumento do hiato aniónico, glicémia de 141 mg/dL e leucocitúria. Estes resultados poderiam ter sido interpretados no contexto infecioso urinário grave. Após consideração dos antecedentes medicamentosos e achados clínicos foi verificada uma cetonemia indoseavelmente alta que estabeleceu o diagnóstico de CADEu e permitiu início do tratamento dirigido com resolução da clínica. 
Os doentes medicados com iSGLT2 com doença aguda devem beneficiar de gasimetria arterial e medição da cetonemia de forma a garantir um diagnóstico precoce e tratamento atempado.</code> | <code>A sarcoidose é uma doença inflamatória sistémica caracterizada pela formação de granulomas não caseosos. Múltiplas podem ser as suas formas de manifestação clínica, sendo a síndroma de Heerfort-Waldenstrom uma forma de manifestação rara, encontrada em apenas 0.3% dos casos e caracterizada pelo aparecimento de parésia facial, tumefação parotídea, uveíte anterior e febre. Por vezes cursa com formas incompletas como no caso que descrevemos de uma mulher de 50 anos, sem antecedentes patológicos de relevo, que se apresenta com parésia e hipostesia da hemiface esquerda e disfagia para sólidos, tendo sido diagnosticada uma parésia facial periférica esquerda com exclusão imagiológica de evento neurológico vascular agudo. Foi medicada com deflazacorte e brivudina com melhoria da sintomatologia. Após término da corticoterapia retoma o quadro de disfagia, agora para sólidos e líquidos, parésia e hipostesia da hemiface direita com documentação ao exame objectivo de parésia facial periférica direita e hipertrofia parotídea bilateral. Analiticamente apresentava elevação sérica da enzima de conversão da angiotensina de 72.5U/L. A ressonância magnética cerebral demonstrava pequenas áreas de hipersinal em T2 na substância branca subcortical frontal, parietal direita, temporal esquerda e na transição caloso septal à esquerda, com líquor sem alterações citoquímicas. A TC toracoabdominopélvica mostrava múltiplas adenomegalias mediastínicas e hilares. A biópsia de um gânglio retro-auricular com retalhos de glândula salivar (parótida) evidenciava um processo inflamatório granulomatoso sem necrose caseosa, com imunofenotipagem sem alterações. O lavado broncoalveolar revelou linfocitose intensa e relação CD4/CD8 elevada (9.4). Foi iniciada corticoterapia e fisioterapia com melhoria da parésia facial e da clínica orofaríngea, sem recorrência. Relatamos assim um caso de neurosarcoidose sob a forma incompleta, pela ausência de atingimento ocular, de síndroma de Heefort-Waldenstrom.</code> | <code>0.0</code> | | <code>A hipertrofia ventricular esquerda no adulto, achado frequente e muitas vezes fortuito, pode dever-se a condições de sobrecarga de pressão ventricular, hipertrofia dos miócitos de causa genética ou acumulação patológica de substâncias intra ou extra-celulares. As implicações terapêuticas e prognósticas das várias etiologias são muito distintas pelo que se torna essencial a busca do diagnóstico específico. Apresenta-se um caso de hipertrofia ventricular esquerda assintomática que após uma marcha diagnóstica sistemática se revelou como miocardiopatia hipertrófica sarcomérica de início tardio. Por vários dos exames complementares de diagnóstico terem sido equívocos ou inconclusivos, é um caso demonstrativo de que, por vezes, só a abordagem completa e exaustiva permite chegar ao diagnóstico definitivo. Partindo de um exemplo real e tendo por base as recomendações da Sociedade Europeia de Cardiologia, esquematizou-se uma abordagem diagnóstica faseada desta patologia.</code> | <code>A síndrome Mounier-Kuhn é uma doença rara, caracterizada pela dilatação marcada da traqueia e brônquios, sem etiologia completamente esclarecida. 
Descrevemos o caso clínico de um homem de 48 anos de idade, com história prévia de infeções respiratórias de repetição de longa data, admitido no serviço de urgência com clínica compatível com nova infeção respiratória e elevação de parâmetros inflamatórios. A tomografia computorizada revelou achados sugestivos da síndrome em questão. O diagnóstico da Síndrome Mounier-Kuhn passa frequentemente despercebido sendo muitas vezes confundido com outras entidades. O seu diagnóstico é com frequência acidental e os exames radiológicos assumem um papel indispensável. O tratamento desta entidade é essencialmente de suporte.</code> | <code>0.0</code> | * Loss: [<code>CosineSimilarityLoss</code>](https://sbert.net/docs/package_reference/sentence_transformer/losses.html#cosinesimilarityloss) with these parameters: ```json { "loss_fct": "torch.nn.modules.loss.MSELoss" } ``` ### Training Hyperparameters #### Non-Default Hyperparameters - `eval_strategy`: steps - `num_train_epochs`: 10 - `warmup_ratio`: 0.1 - `fp16`: True - `load_best_model_at_end`: True #### All Hyperparameters <details><summary>Click to expand</summary> - `overwrite_output_dir`: False - `do_predict`: False - `eval_strategy`: steps - `prediction_loss_only`: True - `per_device_train_batch_size`: 8 - `per_device_eval_batch_size`: 8 - `per_gpu_train_batch_size`: None - `per_gpu_eval_batch_size`: None - `gradient_accumulation_steps`: 1 - `eval_accumulation_steps`: None - `learning_rate`: 5e-05 - `weight_decay`: 0.0 - `adam_beta1`: 0.9 - `adam_beta2`: 0.999 - `adam_epsilon`: 1e-08 - `max_grad_norm`: 1.0 - `num_train_epochs`: 10 - `max_steps`: -1 - `lr_scheduler_type`: linear - `lr_scheduler_kwargs`: {} - `warmup_ratio`: 0.1 - `warmup_steps`: 0 - `log_level`: passive - `log_level_replica`: warning - `log_on_each_node`: True - `logging_nan_inf_filter`: True - `save_safetensors`: True - `save_on_each_node`: False - `save_only_model`: False - `restore_callback_states_from_checkpoint`: False - `no_cuda`: False - `use_cpu`: False - `use_mps_device`: False - `seed`: 42 - `data_seed`: None - `jit_mode_eval`: False - `use_ipex`: False - `bf16`: False - `fp16`: True - `fp16_opt_level`: O1 - `half_precision_backend`: auto - `bf16_full_eval`: False - `fp16_full_eval`: False - `tf32`: None - `local_rank`: 0 - `ddp_backend`: None - `tpu_num_cores`: None - `tpu_metrics_debug`: False - `debug`: [] - `dataloader_drop_last`: False - `dataloader_num_workers`: 0 - `dataloader_prefetch_factor`: None - `past_index`: -1 - `disable_tqdm`: False - `remove_unused_columns`: True - `label_names`: None - `load_best_model_at_end`: True - `ignore_data_skip`: False - `fsdp`: [] - `fsdp_min_num_params`: 0 - `fsdp_config`: {'min_num_params': 0, 'xla': False, 'xla_fsdp_v2': False, 'xla_fsdp_grad_ckpt': False} - `fsdp_transformer_layer_cls_to_wrap`: None - `accelerator_config`: {'split_batches': False, 'dispatch_batches': None, 'even_batches': True, 'use_seedable_sampler': True, 'non_blocking': False, 'gradient_accumulation_kwargs': None} - `deepspeed`: None - `label_smoothing_factor`: 0.0 - `optim`: adamw_torch - `optim_args`: None - `adafactor`: False - `group_by_length`: False - `length_column_name`: length - `ddp_find_unused_parameters`: None - `ddp_bucket_cap_mb`: None - `ddp_broadcast_buffers`: False - `dataloader_pin_memory`: True - `dataloader_persistent_workers`: False - `skip_memory_metrics`: True - `use_legacy_prediction_loop`: False - `push_to_hub`: False - `resume_from_checkpoint`: None - `hub_model_id`: None - `hub_strategy`: every_save - `hub_private_repo`: False 
- `hub_always_push`: False - `gradient_checkpointing`: False - `gradient_checkpointing_kwargs`: None - `include_inputs_for_metrics`: False - `eval_do_concat_batches`: True - `fp16_backend`: auto - `push_to_hub_model_id`: None - `push_to_hub_organization`: None - `mp_parameters`: - `auto_find_batch_size`: False - `full_determinism`: False - `torchdynamo`: None - `ray_scope`: last - `ddp_timeout`: 1800 - `torch_compile`: False - `torch_compile_backend`: None - `torch_compile_mode`: None - `dispatch_batches`: None - `split_batches`: None - `include_tokens_per_second`: False - `include_num_input_tokens_seen`: False - `neftune_noise_alpha`: None - `optim_target_modules`: None - `batch_eval_metrics`: False - `batch_sampler`: batch_sampler - `multi_dataset_batch_sampler`: proportional </details> ### Training Logs | Epoch | Step | Training Loss | Validation Loss | spearman_cosine | sts-test_spearman_cosine | |:----------:|:--------:|:-------------:|:---------------:|:---------------:|:------------------------:| | 0.5181 | 100 | 0.1677 | 0.1109 | 0.3495 | - | | 1.0363 | 200 | 0.0986 | 0.1124 | 0.3727 | - | | 1.5544 | 300 | 0.0742 | 0.1074 | 0.4131 | - | | 2.0725 | 400 | 0.068 | 0.0850 | 0.5223 | - | | 2.5907 | 500 | 0.0411 | 0.0816 | 0.5471 | - | | 3.1088 | 600 | 0.035 | 0.0766 | 0.5903 | - | | 3.6269 | 700 | 0.0197 | 0.0675 | 0.6320 | - | | 4.1451 | 800 | 0.0214 | 0.0697 | 0.6253 | - | | 4.6632 | 900 | 0.0117 | 0.0668 | 0.6467 | - | | 5.1813 | 1000 | 0.0101 | 0.0655 | 0.6491 | - | | 5.6995 | 1100 | 0.0066 | 0.0604 | 0.6800 | - | | 6.2176 | 1200 | 0.0057 | 0.0605 | 0.6776 | - | | 6.7358 | 1300 | 0.0037 | 0.0606 | 0.6765 | - | | 7.2539 | 1400 | 0.003 | 0.0603 | 0.6760 | - | | 7.7720 | 1500 | 0.0027 | 0.0587 | 0.6872 | - | | 8.2902 | 1600 | 0.0019 | 0.0588 | 0.6862 | - | | **8.8083** | **1700** | **0.0018** | **0.0584** | **0.6895** | **-** | | 9.3264 | 1800 | 0.0016 | 0.0587 | 0.6871 | - | | 9.8446 | 1900 | 0.0014 | 0.0589 | 0.6856 | - | | 10.0 | 1930 | - | - | - | 0.6895 | * The bold row denotes the saved checkpoint. <!-- ## Glossary *Clearly define terms in order to be accessible across audiences.* --> <!-- ## Model Card Authors *Lists the people who create the model card, providing recognition and accountability for the detailed work that goes into its construction.* --> <!-- ## Model Card Contact *Provides a way for people who have updates to the Model Card, suggestions, or questions, to contact the Model Card authors.* -->
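For reference, the `CosineSimilarityLoss` setup and hyperparameters listed above correspond to the standard sentence-transformers training loop. The following is a minimal sketch of such a run; the base checkpoint and the inline training pairs are placeholders (this excerpt does not name the base model), and only the loss, batch size, epoch count and fp16 setting are taken from the tables above.

```python
from torch.utils.data import DataLoader
from sentence_transformers import SentenceTransformer, InputExample, losses

# Placeholder base checkpoint; the excerpt above does not state which model was fine-tuned.
model = SentenceTransformer("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")

# Pairs of abstracts with a gold similarity score in [0, 1], mirroring the
# abstract1 / abstract2 / score columns described above (placeholder texts).
train_examples = [
    InputExample(texts=["first abstract ...", "second abstract ..."], label=0.85),
    InputExample(texts=["another abstract ...", "an unrelated abstract ..."], label=0.0),
]
train_dataloader = DataLoader(train_examples, shuffle=True, batch_size=8)

# CosineSimilarityLoss regresses cos(u, v) of the two sentence embeddings onto the
# gold score with an MSE objective, matching the loss configuration shown above.
train_loss = losses.CosineSimilarityLoss(model)

model.fit(
    train_objectives=[(train_dataloader, train_loss)],
    epochs=10,
    warmup_steps=100,  # the run above uses warmup_ratio=0.1 rather than a fixed step count
    use_amp=True,      # fp16 mixed precision, as in the hyperparameters above
)
```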
[ "PCR" ]
BSC-LT/salamandra-2b-base-fp8
BSC-LT
text-generation
[ "transformers", "safetensors", "llama", "text-generation", "bg", "ca", "code", "cs", "cy", "da", "de", "el", "en", "es", "et", "eu", "fi", "fr", "ga", "gl", "hr", "hu", "it", "lt", "lv", "mt", "nl", "nn", "oc", "pl", "pt", "ro", "ru", "sh", "sk", "sl", "sr", "sv", "uk", "base_model:BSC-LT/salamandra-2b", "base_model:finetune:BSC-LT/salamandra-2b", "license:apache-2.0", "autotrain_compatible", "text-generation-inference", "endpoints_compatible", "region:eu" ]
2024-10-30T10:10:39Z
2024-11-07T18:48:47+00:00
22
0
--- base_model: BSC-LT/salamandra-2b language: - bg - ca - code - cs - cy - da - de - el - en - es - et - eu - fi - fr - ga - gl - hr - hu - it - lt - lv - mt - nl - nn - "no" - oc - pl - pt - ro - ru - sh - sk - sl - sr - sv - uk library_name: transformers license: apache-2.0 pipeline_tag: text-generation --- ![image/png](https://cdn-uploads.huggingface.co/production/uploads/633b489acbdbadd99c0b75ef/rremJczEA0mULGHHKol6S.png) # Salamandra-2b-fp8 Model Card This model is the fp8-quantized version of [Salamandra-2b](https://huggingface.co/BSC-LT/salamandra-2b). The model weights are quantized from FP16 to FP8 (8-bit weights) using the FP8 quantization algorithm from [NeuralMagic](https://neuralmagic.com/blog/vllm-brings-fp8-inference-to-the-open-source-community/). Inference with this model can be done using [vLLM](https://docs.vllm.ai/en/stable/models/engine_args.html). Salamandra is a highly multilingual model pre-trained from scratch that comes in three different sizes — 2B, 7B and 40B parameters — with their respective base and instruction-tuned variants, promoted and financed by the Government of Catalonia through the [Aina Project](https://projecteaina.cat/) and the _Ministerio para la Transformación Digital y de la Función Pública_ - Funded by EU – NextGenerationEU within the framework of [ILENIA Project](https://proyectoilenia.es/) with reference 2022/TL22/00215337. This model card corresponds to the fp8-quantized version of Salamandra-2b. The entire Salamandra family is released under a permissive [Apache 2.0 license](https://www.apache.org/licenses/LICENSE-2.0). ## How to Use The following example code works under ``Python 3.9.16``, ``vllm==0.6.3.post1``, ``torch==2.4.0`` and ``torchvision==0.19.0``, though it should run on any current version of the libraries. This is an example of how to create a text completion using the model: ```python from vllm import LLM, SamplingParams model_name = "BSC-LT/salamandra-2b-base-fp8" llm = LLM(model=model_name) outputs = llm.generate("El mercat del barri ", sampling_params=SamplingParams( temperature=0.5, max_tokens=200) ) print(outputs[0].outputs[0].text) ``` ### Author International Business Machines (IBM). ### Copyright International Business Machines (IBM). ### Contact For further information, please send an email to <[email protected]>. ### Acknowledgements We appreciate the collaboration with IBM in this work. Specifically, the IBM team created the fp8-quantized version of the Salamandra-2b model released here. ### Disclaimer Be aware that the model may contain biases or other unintended distortions. When third parties deploy systems or provide services based on this model, or use the model themselves, they bear the responsibility for mitigating any associated risks and ensuring compliance with applicable regulations, including those governing the use of Artificial Intelligence. Barcelona Supercomputing Center and International Business Machines shall not be held liable for any outcomes resulting from third-party use. ### License [Apache License, Version 2.0](https://www.apache.org/licenses/LICENSE-2.0)
[ "BEAR" ]
glif-loradex-trainer/insectagon_Keane_eyes
glif-loradex-trainer
text-to-image
[ "diffusers", "text-to-image", "template:sd-lora", "base_model:black-forest-labs/FLUX.1-dev", "base_model:finetune:black-forest-labs/FLUX.1-dev", "license:other", "region:us", "flux", "lora", "base_model:adapter:black-forest-labs/FLUX.1-dev" ]
2024-11-01T10:32:32Z
2024-11-01T10:33:44+00:00
22
0
--- base_model: black-forest-labs/FLUX.1-dev license: other license_name: flux-1-dev-non-commercial-license license_link: https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md tags: - diffusers - text-to-image - template:sd-lora - base_model:black-forest-labs/FLUX.1-dev - base_model:finetune:black-forest-labs/FLUX.1-dev - license:other - region:us - flux - lora widget: - output: url: samples/1730456976857__000003000_0.jpg text: A cartoon Jedi with green lightsaber [K3ane] - output: url: samples/1730457000473__000003000_1.jpg text: A lion roaring [K3ane] - output: url: samples/1730457024097__000003000_2.jpg text: AN ACTION SCENE [K3ane] - output: url: samples/1730457048123__000003000_3.jpg text: A woman holding a cartoon CAT [K3ane] - output: url: samples/1730457071770__000003000_4.jpg text: THE JOKER [K3ane] - output: url: samples/1730457095526__000003000_5.jpg text: BATMAN cartoon IN GOTHAM [K3ane] - output: url: samples/1730457119895__000003000_6.jpg text: a blue Teddy bear Kaiju vs Godzilla [K3ane] trigger: K3ane instance_prompt: K3ane --- # Keane_eyes Model trained with [AI Toolkit by Ostris](https://github.com/ostris/ai-toolkit) under the [Glif Loradex program](https://huggingface.co/glif-loradex-trainer) by [Glif](https://glif.app) user `insectagon`. <Gallery /> ## Trigger words You should use `K3ane` to trigger the image generation. ## Download model Weights for this model are available in Safetensors format. [Download](/glif-loradex-trainer/insectagon_Keane_eyes/tree/main) them in the Files & versions tab. ## License This model is licensed under the [flux-1-dev-non-commercial-license](https://huggingface.co/black-forest-labs/FLUX.1-dev/blob/main/LICENSE.md).
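Since the weights are published as a FLUX.1-dev LoRA in Safetensors format, they can be loaded with the diffusers library. A minimal sketch follows; the `weight_name` value is an assumption (check the Files & versions tab for the actual file name), and a GPU with enough memory for FLUX.1-dev is assumed.

```python
import torch
from diffusers import FluxPipeline

# Load the FLUX.1-dev base model (gated on the Hub; its license must be accepted first).
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
).to("cuda")

# Attach the LoRA; the weight file name below is an assumption, check the repo files.
pipe.load_lora_weights(
    "glif-loradex-trainer/insectagon_Keane_eyes",
    weight_name="Keane_eyes.safetensors",
)

# Include the trigger word K3ane in the prompt to activate the trained style.
image = pipe(
    "A cartoon Jedi with green lightsaber [K3ane]",
    num_inference_steps=28,
    guidance_scale=3.5,
).images[0]
image.save("keane_eyes_sample.png")
```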
[ "BEAR" ]
LocalDoc/TEmA-small
LocalDoc
sentence-similarity
[ "pytorch", "bert", "labse", "sentence-similarity", "az", "base_model:sentence-transformers/LaBSE", "base_model:finetune:sentence-transformers/LaBSE", "doi:10.57967/hf/3429", "license:cc-by-4.0", "region:us" ]
2024-11-03T10:03:28Z
2024-11-03T17:26:45+00:00
22
0
--- base_model: - sentence-transformers/LaBSE language: - az license: cc-by-4.0 metrics: - pearsonr pipeline_tag: sentence-similarity tags: - labse widget: - source_sentence: Bu xoşbəxt bir insandır sentences: - Bu xoşbəxt bir itdir - Bu çox xoşbəxt bir insandır - Bu gün günəşli bir gündür example_title: Sentence Similarity --- # TEmA-small This model is a fine-tuned version of the [LaBSE](https://huggingface.co/sentence-transformers/LaBSE), which is specialized for sentence similarity tasks in Azerbaijan texts. It maps sentences and paragraphs to a 768-dimensional dense vector space, useful for tasks like clustering, semantic search, and more. ## Benchmark Results | STSBenchmark | biosses-sts | sickr-sts | sts12-sts | sts13-sts | sts15-sts | sts16-sts | Average Pearson | Model | |--------------|-------------|-----------|-----------|-----------|-----------|-----------|-----------------|------------------------------------| | 0.8253 | 0.7859 | 0.7924 | 0.8444 | 0.7490 | 0.8141 | 0.7600 | 0.7959 | TEmA-small | | 0.7872 | 0.8303 | 0.7801 | 0.7978 | 0.6963 | 0.8052 | 0.7794 | 0.7823 | Cohere/embed-multilingual-v3.0 | | 0.7927 | 0.6672 | 0.7758 | 0.8122 | 0.7312 | 0.7831 | 0.7416 | 0.7577 | BAAI/bge-m3 | | 0.7572 | 0.8139 | 0.7328 | 0.7646 | 0.6318 | 0.7542 | 0.7092 | 0.7377 | intfloat/multilingual-e5-large-instruct | | 0.7252 | 0.7801 | 0.7250 | 0.6725 | 0.7446 | 0.7301 | 0.7454 | 0.7318 | Cohere/embed-multilingual-v2.0 | | 0.7485 | 0.7714 | 0.7271 | 0.7170 | 0.6496 | 0.7570 | 0.7255 | 0.7280 | intfloat/multilingual-e5-large | | 0.7245 | 0.8237 | 0.6839 | 0.6570 | 0.7125 | 0.7612 | 0.7386 | 0.7288 | OpenAI/text-embedding-3-large | | 0.7363 | 0.8148 | 0.7067 | 0.7050 | 0.6535 | 0.7514 | 0.7070 | 0.7250 | sentence-transformers/LaBSE | | 0.7376 | 0.7917 | 0.7190 | 0.7441 | 0.6286 | 0.7461 | 0.7026 | 0.7242 | intfloat/multilingual-e5-small | | 0.7192 | 0.8198 | 0.7160 | 0.7338 | 0.5815 | 0.7318 | 0.6973 | 0.7142 | Cohere/embed-multilingual-light-v3.0 | | 0.6960 | 0.8185 | 0.6950 | 0.6752 | 0.5899 | 0.7186 | 0.6790 | 0.6960 | intfloat/multilingual-e5-base | | 0.5830 | 0.2486 | 0.5921 | 0.5593 | 0.5559 | 0.5404 | 0.5289 | 0.5155 | antoinelouis/colbert-xm | [STS-Benchmark](https://github.com/LocalDoc-Azerbaijan/STS-Benchmark) ## Accuracy Results - **Cosine Distance:** 96.63 - **Manhattan Distance:** 96.52 - **Euclidean Distance:** 96.57 ## Usage ```python from transformers import AutoTokenizer, AutoModel import torch # Mean Pooling - Take attention mask into account for correct averaging def mean_pooling(model_output, attention_mask): token_embeddings = model_output[0] #First element of model_output contains all token embeddings input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float() return torch.sum(token_embeddings * input_mask_expanded, 1) / torch.clamp(input_mask_expanded.sum(1), min=1e-9) # Function to normalize embeddings def normalize_embeddings(embeddings): return embeddings / embeddings.norm(dim=1, keepdim=True) # Sentences we want embeddings for sentences = [ "Bu xoşbəxt bir insandır", "Bu çox xoşbəxt bir insandır", "Bu gün günəşli bir gündür" ] # Load model from HuggingFace Hub tokenizer = AutoTokenizer.from_pretrained('LocalDoc/TEmA-small') model = AutoModel.from_pretrained('LocalDoc/TEmA-small') # Tokenize sentences encoded_input = tokenizer(sentences, padding=True, truncation=True, max_length=128, return_tensors='pt') # Compute token embeddings with torch.no_grad(): model_output = model(**encoded_input) # Perform pooling sentence_embeddings = 
mean_pooling(model_output, encoded_input['attention_mask']) # Normalize embeddings sentence_embeddings = normalize_embeddings(sentence_embeddings) # Calculate cosine similarities cosine_similarities = torch.nn.functional.cosine_similarity( sentence_embeddings[0].unsqueeze(0), sentence_embeddings[1:], dim=1 ) print("Cosine Similarities:") for i, score in enumerate(cosine_similarities): print(f"Sentence 1 <-> Sentence {i+2}: {score:.4f}") ```
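As an alternative to the manual pooling above, the sentence-transformers library can wrap the same model. This is a sketch under the assumption that loading `LocalDoc/TEmA-small` this way works: when a repository carries no sentence-transformers configuration, the library falls back to the transformer plus a default mean-pooling head, which matches the pooling used in the snippet above.

```python
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("LocalDoc/TEmA-small")

sentences = [
    "Bu xoşbəxt bir insandır",
    "Bu çox xoşbəxt bir insandır",
    "Bu gün günəşli bir gündür",
]

# Encode and L2-normalize, so dot products equal cosine similarities.
embeddings = model.encode(sentences, normalize_embeddings=True)

# Cosine similarity of the first sentence against the other two.
print(util.cos_sim(embeddings[0], embeddings[1:]))
```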
[ "BIOSSES" ]
JunxiongWang/Llama3.1-Mamba-8B-dpo
JunxiongWang
null
[ "pytorch", "llama", "arxiv:2408.15237", "license:apache-2.0", "region:us" ]
2024-11-17T04:04:45Z
2024-11-17T04:20:27+00:00
22
0
--- license: apache-2.0 --- Zero-shot results when using the [Llama-3.1-70B-Instruct](https://huggingface.co/meta-llama/Llama-3.1-70B-Instruct) as the teacher model, and the [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Llama-3.2-3B-Instruct) as the initialized model | Task | Llama-3.1-8B-Instruct | Llama3.1-Mamba-8B-distill | Llama3.1-Mamba-8B-dpo | Llama3.1-Mamba2-8B-distill | Llama3.1-Mamba2-8B-dpo | |---------------------|-----------------------|--------------------------|-----------------------|---------------------------|-----------------------| | arc_challenge | 0.552 | 0.5384 | 0.5657 | 0.5265 | 0.5973 | | arc_easy | 0.8178 | 0.8224 | 0.8401 | 0.822 | 0.8481 | | hellaswag | 0.7921 | 0.7591 | 0.7736 | 0.7536 | 0.7969 | | mmlu (0 shot) | 0.6812 | 0.6213 | 0.636 | 0.6101 | 0.5974 | | openbookqa | 0.432 | 0.428 | 0.442 | 0.416 | 0.44 | | piqa | 0.8079 | 0.7933 | 0.8041 | 0.7889 | 0.8003 | | pubmedqa | 0.752 | 0.72 | 0.744 | 0.726 | 0.746 | | race | 0.4478 | 0.4211 | 0.4344 | 0.4211 | 0.4612 | | winogrande | 0.7388 | 0.7277 | 0.738 | 0.7174 | 0.7411 | | truthful | 0.4267 | 0.4002 | 0.4607 | 0.4031 | 0.5022 | ``` @article{junxiongdaniele2024mambainllama, title = {The Mamba in the Llama: Distilling and Accelerating Hybrid Models}, author = {Junxiong Wang and Daniele Paliotta and Avner May and Alexander M. Rush and Tri Dao}, journal = {arXiv preprint arXiv:2408.15237}, year = {2024} } ```
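Zero-shot scores like those in the table are typically produced with the EleutherAI lm-evaluation-harness. The sketch below shows how such an evaluation could be launched; it assumes the checkpoint loads as a standard Hugging Face causal LM, which may not hold for the hybrid Mamba layers without the authors' modeling code, and the task subset shown is only illustrative.

```python
# pip install lm-eval
import lm_eval

# Assumption: the repository can be loaded through the generic "hf" model backend.
results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=JunxiongWang/Llama3.1-Mamba-8B-dpo,dtype=bfloat16",
    tasks=["arc_challenge", "arc_easy", "hellaswag", "winogrande"],
    num_fewshot=0,
)
print(results["results"])
```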
[ "PUBMEDQA" ]