Muennighoff committed · Commit ed930a0 · verified · 1 Parent(s): f5e9204

Scheduled Commit
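Each changed file below is a JSON Lines log: one record per line, appended by the scheduled commit. A minimal sketch of reading one of these files back (plain Python, assuming the repository files have been downloaded locally; the fields accessed at the end are taken from the clustering records shown in the diff):

import json

records = []
with open("data/clustering_individual-2cea24b1-39e9-480a-ba22-c617ea05c1fe.jsonl") as f:
    for line in f:
        line = line.strip()
        if line:  # skip any blank separator lines
            records.append(json.loads(line))

# Each clustering record stores the embedding model, the prompts that were
# clustered, and the UI settings used for the visualization.
for r in records:
    print(r["model_name"], r["ncluster"], r["clustering_method"], len(r["prompt"]))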
data/clustering_individual-2cea24b1-39e9-480a-ba22-c617ea05c1fe.jsonl CHANGED
@@ -8,3 +8,5 @@
8
  {"tstamp": 1724098030.1061, "task_type": "clustering", "type": "chat", "model": "Salesforce/SFR-Embedding-2_R", "gen_params": {}, "start": 1724098030.0615, "finish": 1724098030.1061, "ip": "", "conv_id": "d4b577432dcc49fab9c32b4ec95b98ca", "model_name": "Salesforce/SFR-Embedding-2_R", "prompt": ["Quiero renovar mi celular"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
9
  {"tstamp": 1724098200.2929, "task_type": "clustering", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1724098200.2038, "finish": 1724098200.2929, "ip": "", "conv_id": "09d67d7034814900a1539f51561a0422", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": ["Quiero renovar mi celular", "Quiero cambiarme de plan", "Haré portabilidad", "Estoy pensando en cambiarme de operador", "¿Quiero cambiarme de celular", "Mañana me cambiaré de plan a uno más bajo"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
10
  {"tstamp": 1724098200.2929, "task_type": "clustering", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1724098200.2038, "finish": 1724098200.2929, "ip": "", "conv_id": "0feb6b08f3a4471fad07e16412c19b7c", "model_name": "GritLM/GritLM-7B", "prompt": ["Quiero renovar mi celular", "Quiero cambiarme de plan", "Haré portabilidad", "Estoy pensando en cambiarme de operador", "¿Quiero cambiarme de celular", "Mañana me cambiaré de plan a uno más bajo"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
11
+ {"tstamp": 1724157770.1539, "task_type": "clustering", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1724157770.0657, "finish": 1724157770.1539, "ip": "", "conv_id": "91234af9f55b4c8293e496e5956a90fa", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": ["IBM Cloud", "Google Cloud", "Azure", "lasagna", "fettuccine", "spaghetti", "birch", "oak", "cedar", "elm"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
12
+ {"tstamp": 1724157770.1539, "task_type": "clustering", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1724157770.0657, "finish": 1724157770.1539, "ip": "", "conv_id": "0ac55229d9f44717b88c9a4a09329fb4", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "prompt": ["IBM Cloud", "Google Cloud", "Azure", "lasagna", "fettuccine", "spaghetti", "birch", "oak", "cedar", "elm"], "ncluster": 3, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
data/retrieval_battle-2cea24b1-39e9-480a-ba22-c617ea05c1fe.jsonl CHANGED
@@ -55,3 +55,4 @@
55
  {"tstamp": 1724147323.7292, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "b1622a49d84d41de81be59458d957680", "0_model_name": "BM25", "0_prompt": "How to combine GPT-2 and BERT for better question generation and answering?", "0_output": [["How to combine GPT-2 and BERT for better question generation and answering?", "Title: Learning to Answer by Learning to Ask: Getting the Best of GPT-2 and BERT Worlds\n\nAbstract: Automatic question generation aims at the generation of questions from a context, with the corresponding answers being sub-spans of the given passage. Whereas, most of the methods mostly rely on heuristic rules to generate questions, more recently also neural network approaches have been proposed. In this work, we propose a variant of the self-attention Transformer network architectures model to generate meaningful and diverse questions. To this end, we propose an easy to use model consisting of the conjunction of the Transformer decoder GPT-2 model with Transformer encoder BERT for the downstream task for question answering. The model is trained in an end-to-end fashion, where the language model is trained to produce a question-answer-aware input representation that facilitates to generate an answer focused question. Our result of neural question generation from text on the SQuAD 1.1 dataset suggests that our method can produce semantically correct and diverse questions. Additionally, we assessed the performance of our proposed method for the downstream task of question answering. The analysis shows that our proposed generation & answering collaboration framework relatively improves both tasks and is particularly powerful in the semi-supervised setup. The results further suggest a robust and comparably lean pipeline facilitating question generation in the small-data regime."]], "0_corpus": "arxiv", "1_conv_id": "a1b61891a5fc41888a6df192888fa1c6", "1_model_name": "text-embedding-3-large", "1_prompt": "How to combine GPT-2 and BERT for better question generation and answering?", "1_output": [["How to combine GPT-2 and BERT for better question generation and answering?", "Title: Learning to Answer by Learning to Ask: Getting the Best of GPT-2 and BERT Worlds\n\nAbstract: Automatic question generation aims at the generation of questions from a context, with the corresponding answers being sub-spans of the given passage. Whereas, most of the methods mostly rely on heuristic rules to generate questions, more recently also neural network approaches have been proposed. In this work, we propose a variant of the self-attention Transformer network architectures model to generate meaningful and diverse questions. To this end, we propose an easy to use model consisting of the conjunction of the Transformer decoder GPT-2 model with Transformer encoder BERT for the downstream task for question answering. The model is trained in an end-to-end fashion, where the language model is trained to produce a question-answer-aware input representation that facilitates to generate an answer focused question. Our result of neural question generation from text on the SQuAD 1.1 dataset suggests that our method can produce semantically correct and diverse questions. Additionally, we assessed the performance of our proposed method for the downstream task of question answering. The analysis shows that our proposed generation & answering collaboration framework relatively improves both tasks and is particularly powerful in the semi-supervised setup. 
The results further suggest a robust and comparably lean pipeline facilitating question generation in the small-data regime."]], "1_corpus": "arxiv"}
56
  {"tstamp": 1724147373.7737, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "ee7d7009cb09443086721d839839d30b", "0_model_name": "intfloat/e5-mistral-7b-instruct", "0_prompt": "Why did the machines in The Matrix keep humans around?", "0_output": [["Why did the machines in The Matrix keep humans around?", "Scifi Stackexchange\n\nQ: Why use full brained humans as batteries? \nPossible Duplicate:\nIs the basic premise of humans as a power source in The Matrix reasonable?\nWhy Did the Machines Even Bother With a Matrix? \n\nThe machines in The Matrix series use humans as batteries. That's OK. But why did the machines not alter their livestock such that the matrix itself would be unnecessary? \nConsider the recently proposed headless chicken farm. Something similar for the human batteries would seem to be an ideal solution for logical machines. No longer would they need to spend resources maintaining a virtual reality for their batteries. The energy, physical hardware and programs running the matrix could then be put to who knows what other uses.\n\nA: The explanation for the use of human brains would seem to be the fact that our complex thoughts lead to higher level of brain activity, thus resulting in exponentially more synapses firing (i.e. more electrical activity to harvest).\nThat makes human brains far more efficient than the brains of animals, even with the overhead of maintaining the matrix. Plus, that movie would have really sucked... dumb cows and chickens revolt against the machines! ;)\n"]], "0_corpus": "stackexchange", "1_conv_id": "b1e3a8689e72427ea133d86987f91a26", "1_model_name": "BAAI/bge-large-en-v1.5", "1_prompt": "Why did the machines in The Matrix keep humans around?", "1_output": [["Why did the machines in The Matrix keep humans around?", "Scifi Stackexchange\n\nQ: Do the Machines in \"The Matrix\" Adhere to the Three Laws of Robotics? In the \"real world\" of the Matrix, do the machines follow Asimov's Three Rules? Were they even programmed with them?\nEDIT: I realize that killing people goes against the 1st law, but by keeping people in the Matrix (which the machines view as a symbiosis) rather than simply killing off these \"annoying\" creatures, I wonder if the killings can be seen as \"preserving the greater good\" or something along those lines; and thus justify violating the laws.\n\nA: \nA robot may not injure a human being or, through inaction, allow a\n human being to come to harm.\nA robot must obey the orders given to it by human beings, except where\n such orders would conflict with the First Law.\nA robot must protect its own existence as long as such protection does\n not conflict with the First or Second Laws.\n\nNo, no, and no.\n"]], "1_corpus": "stackexchange"}
57
  {"tstamp": 1724150191.1245, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "14abff6b4bf4468abd5725f738f67892", "0_model_name": "GritLM/GritLM-7B", "0_prompt": "Research on structured dropout as a way to improve transformer models during both training and inference.", "0_output": [["Research on structured dropout as a way to improve transformer models during both training and inference.", "Title: Reducing Transformer Depth on Demand with Structured Dropout\n\nAbstract: Overparameterized transformer networks have obtained state of the art results in various natural language processing tasks, such as machine translation, language modeling, and question answering. These models contain hundreds of millions of parameters, necessitating a large amount of computation and making them prone to overfitting. In this work, we explore LayerDrop, a form of structured dropout, which has a regularization effect during training and allows for efficient pruning at inference time. In particular, we show that it is possible to select sub-networks of any depth from one large network without having to finetune them and with limited impact on performance. We demonstrate the effectiveness of our approach by improving the state of the art on machine translation, language modeling, summarization, question answering, and language understanding benchmarks. Moreover, we show that our approach leads to small BERT-like models of higher quality compared to training from scratch or using distillation."]], "0_corpus": "arxiv", "1_conv_id": "d0bfba6aabaf459ab1bfdc30a214e502", "1_model_name": "mixedbread-ai/mxbai-embed-large-v1", "1_prompt": "Research on structured dropout as a way to improve transformer models during both training and inference.", "1_output": [["Research on structured dropout as a way to improve transformer models during both training and inference.", "Title: Reducing Transformer Depth on Demand with Structured Dropout\n\nAbstract: Overparameterized transformer networks have obtained state of the art results in various natural language processing tasks, such as machine translation, language modeling, and question answering. These models contain hundreds of millions of parameters, necessitating a large amount of computation and making them prone to overfitting. In this work, we explore LayerDrop, a form of structured dropout, which has a regularization effect during training and allows for efficient pruning at inference time. In particular, we show that it is possible to select sub-networks of any depth from one large network without having to finetune them and with limited impact on performance. We demonstrate the effectiveness of our approach by improving the state of the art on machine translation, language modeling, summarization, question answering, and language understanding benchmarks. Moreover, we show that our approach leads to small BERT-like models of higher quality compared to training from scratch or using distillation."]], "1_corpus": "arxiv"}
58
+ {"tstamp": 1724157669.0947, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "36d51d16aabc4cdd8003937dfd753431", "0_model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "0_prompt": "when did the new day win the tag team titles", "0_output": [["when did the new day win the tag team titles", "The New Day (professional wrestling)\n\nOne of the most popular and decorated teams in WWE history, The New Day holds several records, including the most reigns as SmackDown Tag Team Champions at seven, and the longest-reigning Raw Tag Team Champions at 483 days. As a team, they also hold the record for most WWE-branded tag team championship reigns at 12 (seven SmackDown, four Raw, and one NXT)—the only team WWE recognizes with more is The Dudley Boyz at 18, but this includes their eight Extreme Championship Wrestling and one World Championship Wrestling tag team title reigns. While performing as a trio, The New Day defend their tag team titles under the Freebird rule, with all three members being recognized as champions.\nThe stable was prominently formed by Xavier Woods with Big E and Kofi Kingston on the July 21, 2014, episode of Raw, but made their debut as The New Day on the November 28, 2014, episode of SmackDown. In April 2015 at Extreme Rules, they won their first WWE Tag Team Championship. Their second reign, which began at that year's SummerSlam in August, became the longest in the title's history, the first to last more than a year, and the last with the title being the sole tag team championship on WWE's main roster, as it was renamed Raw Tag Team Championship in September 2016 following the creation of the SmackDown Tag Team Championship. In December 2016, they set the record for the longest male tag team championship reign in WWE history at 483 days (surpassing Demolition's 28-year-old record of 478 days with WWE's original World Tag Team Championship) and at that time, the longest reigning champions for any WWE championship over the prior two decades; since then, both accomplishments have been surpassed."]], "0_corpus": "wikipedia", "1_conv_id": "512fc1c8c9184c08a90c8d9a0e62a90c", "1_model_name": "intfloat/multilingual-e5-large-instruct", "1_prompt": "when did the new day win the tag team titles", "1_output": [["when did the new day win the tag team titles", "The New Day (professional wrestling)\n\nOne of the most popular and decorated teams in WWE history, The New Day holds several records, including the most reigns as SmackDown Tag Team Champions at seven, and the longest-reigning Raw Tag Team Champions at 483 days. As a team, they also hold the record for most WWE-branded tag team championship reigns at 12 (seven SmackDown, four Raw, and one NXT)—the only team WWE recognizes with more is The Dudley Boyz at 18, but this includes their eight Extreme Championship Wrestling and one World Championship Wrestling tag team title reigns. While performing as a trio, The New Day defend their tag team titles under the Freebird rule, with all three members being recognized as champions.\nThe stable was prominently formed by Xavier Woods with Big E and Kofi Kingston on the July 21, 2014, episode of Raw, but made their debut as The New Day on the November 28, 2014, episode of SmackDown. In April 2015 at Extreme Rules, they won their first WWE Tag Team Championship. 
Their second reign, which began at that year's SummerSlam in August, became the longest in the title's history, the first to last more than a year, and the last with the title being the sole tag team championship on WWE's main roster, as it was renamed Raw Tag Team Championship in September 2016 following the creation of the SmackDown Tag Team Championship. In December 2016, they set the record for the longest male tag team championship reign in WWE history at 483 days (surpassing Demolition's 28-year-old record of 478 days with WWE's original World Tag Team Championship) and at that time, the longest reigning champions for any WWE championship over the prior two decades; since then, both accomplishments have been surpassed."]], "1_corpus": "wikipedia"}
data/retrieval_individual-2cea24b1-39e9-480a-ba22-c617ea05c1fe.jsonl CHANGED
@@ -184,3 +184,7 @@
184
  {"tstamp": 1724150176.8905, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1724150174.6886, "finish": 1724150176.8905, "ip": "", "conv_id": "14abff6b4bf4468abd5725f738f67892", "model_name": "GritLM/GritLM-7B", "prompt": "Research on structured dropout as a way to improve transformer models during both training and inference.", "output": [["Research on structured dropout as a way to improve transformer models during both training and inference.", "Title: Reducing Transformer Depth on Demand with Structured Dropout\n\nAbstract: Overparameterized transformer networks have obtained state of the art results in various natural language processing tasks, such as machine translation, language modeling, and question answering. These models contain hundreds of millions of parameters, necessitating a large amount of computation and making them prone to overfitting. In this work, we explore LayerDrop, a form of structured dropout, which has a regularization effect during training and allows for efficient pruning at inference time. In particular, we show that it is possible to select sub-networks of any depth from one large network without having to finetune them and with limited impact on performance. We demonstrate the effectiveness of our approach by improving the state of the art on machine translation, language modeling, summarization, question answering, and language understanding benchmarks. Moreover, we show that our approach leads to small BERT-like models of higher quality compared to training from scratch or using distillation."]], "corpus": "arxiv"}
185
  {"tstamp": 1724150176.8905, "task_type": "retrieval", "type": "chat", "model": "mixedbread-ai/mxbai-embed-large-v1", "gen_params": {}, "start": 1724150174.6886, "finish": 1724150176.8905, "ip": "", "conv_id": "d0bfba6aabaf459ab1bfdc30a214e502", "model_name": "mixedbread-ai/mxbai-embed-large-v1", "prompt": "Research on structured dropout as a way to improve transformer models during both training and inference.", "output": [["Research on structured dropout as a way to improve transformer models during both training and inference.", "Title: Reducing Transformer Depth on Demand with Structured Dropout\n\nAbstract: Overparameterized transformer networks have obtained state of the art results in various natural language processing tasks, such as machine translation, language modeling, and question answering. These models contain hundreds of millions of parameters, necessitating a large amount of computation and making them prone to overfitting. In this work, we explore LayerDrop, a form of structured dropout, which has a regularization effect during training and allows for efficient pruning at inference time. In particular, we show that it is possible to select sub-networks of any depth from one large network without having to finetune them and with limited impact on performance. We demonstrate the effectiveness of our approach by improving the state of the art on machine translation, language modeling, summarization, question answering, and language understanding benchmarks. Moreover, we show that our approach leads to small BERT-like models of higher quality compared to training from scratch or using distillation."]], "corpus": "arxiv"}
186
  {"tstamp": 1724153001.4495, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1724153001.1784, "finish": 1724153001.4495, "ip": "", "conv_id": "ca0445fa533a465ba9069be9150646a2", "model_name": "GritLM/GritLM-7B", "prompt": "when did the olympics start", "output": [["when did the olympics start", "Ancient Olympic Games\n\nThe ancient Olympic Games (, ta Olympia) were a series of athletic competitions among representatives of city-states and were one of the Panhellenic Games of Ancient Greece. They were held at the Panhellenic religious sanctuary of Olympia, in honor of Zeus, and the Greeks gave them a mythological origin. The originating Olympic Games are traditionally dated to 776 BC. The games were held every four years, or Olympiad, which became a unit of time in historical chronologies. These Olympiads were referred to based on the winner of their stadion sprint, e.g., \"the third year of the eighteenth Olympiad, when Ladas of Argos won the stadion\". They continued to be celebrated when Greece came under Roman rule in the 2nd century BC. Their last recorded celebration was in AD 393, under the emperor Theodosius I, but archaeological evidence indicates that some games were still held after this date. The games likely came to an end under Theodosius II, possibly in connection with a fire that burned down the temple of the Olympian Zeus during his reign.\nDuring the celebration of the games, the Olympic truce (ekecheiría) was announced so that athletes and religious pilgrims could travel from their cities to the games in safety. The prizes for the victors were olive leaf wreaths or crowns. The games became a political tool used by city-states to assert dominance over their rival city states. Politicians would announce political alliances at the games, and in times of war, priests would offer sacrifices to the gods for victory. The games were also used to help spread Hellenistic culture throughout the Mediterranean. The Olympics also featured religious celebrations. The statue of Zeus at Olympia was counted as one of the seven wonders of the ancient world. Sculptors and poets would congregate each Olympiad to display their works of art to would-be patrons."]], "corpus": "wikipedia"}
187
+ {"tstamp": 1724157634.8274, "task_type": "retrieval", "type": "chat", "model": "Alibaba-NLP/gte-Qwen2-7B-instruct", "gen_params": {}, "start": 1724157634.5325, "finish": 1724157634.8274, "ip": "", "conv_id": "36d51d16aabc4cdd8003937dfd753431", "model_name": "Alibaba-NLP/gte-Qwen2-7B-instruct", "prompt": "when did the new day win the tag team titles", "output": [["when did the new day win the tag team titles", "The New Day (professional wrestling)\n\nOne of the most popular and decorated teams in WWE history, The New Day holds several records, including the most reigns as SmackDown Tag Team Champions at seven, and the longest-reigning Raw Tag Team Champions at 483 days. As a team, they also hold the record for most WWE-branded tag team championship reigns at 12 (seven SmackDown, four Raw, and one NXT)—the only team WWE recognizes with more is The Dudley Boyz at 18, but this includes their eight Extreme Championship Wrestling and one World Championship Wrestling tag team title reigns. While performing as a trio, The New Day defend their tag team titles under the Freebird rule, with all three members being recognized as champions.\nThe stable was prominently formed by Xavier Woods with Big E and Kofi Kingston on the July 21, 2014, episode of Raw, but made their debut as The New Day on the November 28, 2014, episode of SmackDown. In April 2015 at Extreme Rules, they won their first WWE Tag Team Championship. Their second reign, which began at that year's SummerSlam in August, became the longest in the title's history, the first to last more than a year, and the last with the title being the sole tag team championship on WWE's main roster, as it was renamed Raw Tag Team Championship in September 2016 following the creation of the SmackDown Tag Team Championship. In December 2016, they set the record for the longest male tag team championship reign in WWE history at 483 days (surpassing Demolition's 28-year-old record of 478 days with WWE's original World Tag Team Championship) and at that time, the longest reigning champions for any WWE championship over the prior two decades; since then, both accomplishments have been surpassed."]], "corpus": "wikipedia"}
188
+ {"tstamp": 1724157634.8274, "task_type": "retrieval", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1724157634.5325, "finish": 1724157634.8274, "ip": "", "conv_id": "512fc1c8c9184c08a90c8d9a0e62a90c", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": "when did the new day win the tag team titles", "output": [["when did the new day win the tag team titles", "The New Day (professional wrestling)\n\nOne of the most popular and decorated teams in WWE history, The New Day holds several records, including the most reigns as SmackDown Tag Team Champions at seven, and the longest-reigning Raw Tag Team Champions at 483 days. As a team, they also hold the record for most WWE-branded tag team championship reigns at 12 (seven SmackDown, four Raw, and one NXT)—the only team WWE recognizes with more is The Dudley Boyz at 18, but this includes their eight Extreme Championship Wrestling and one World Championship Wrestling tag team title reigns. While performing as a trio, The New Day defend their tag team titles under the Freebird rule, with all three members being recognized as champions.\nThe stable was prominently formed by Xavier Woods with Big E and Kofi Kingston on the July 21, 2014, episode of Raw, but made their debut as The New Day on the November 28, 2014, episode of SmackDown. In April 2015 at Extreme Rules, they won their first WWE Tag Team Championship. Their second reign, which began at that year's SummerSlam in August, became the longest in the title's history, the first to last more than a year, and the last with the title being the sole tag team championship on WWE's main roster, as it was renamed Raw Tag Team Championship in September 2016 following the creation of the SmackDown Tag Team Championship. In December 2016, they set the record for the longest male tag team championship reign in WWE history at 483 days (surpassing Demolition's 28-year-old record of 478 days with WWE's original World Tag Team Championship) and at that time, the longest reigning champions for any WWE championship over the prior two decades; since then, both accomplishments have been surpassed."]], "corpus": "wikipedia"}
189
+ {"tstamp": 1724157684.2243, "task_type": "retrieval", "type": "chat", "model": "voyage-multilingual-2", "gen_params": {}, "start": 1724157682.0705, "finish": 1724157684.2243, "ip": "", "conv_id": "21730b4a45944f54a8cbe2707a46801a", "model_name": "voyage-multilingual-2", "prompt": "What's the purpose of `if __name__ == '__main__'`?", "output": [["What's the purpose of `if __name__ == '__main__'`?", "Stackoverflow Stackexchange\n\nQ: What does `if name == \"__main__\"` mean in Python? \nPossible Duplicate:\nWhat does <if name==“main”:> do? \n\nI have wrote scripts in Python for quite a while now and I study more of Python as I need it. When reading other people's code I meet if name == \"__main__\": construct quite often. \nWhat is it good for?\n\nA: Basically, \nThere's a distinction between the \"main\" script file and external files which were imported or referenced in another way. If the script is the \"main\" script then the special variable __name__ will equal \"__main__\".\nYou can use this to protect the \"execution\" code from the classes and variables the script has. This enables you to import a script and use classes and variables without actually running that script's main code if it has any.\nSee also: What does if name == “main”: do?\n"]], "corpus": "stackexchange"}
190
+ {"tstamp": 1724157684.2243, "task_type": "retrieval", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1724157682.0705, "finish": 1724157684.2243, "ip": "", "conv_id": "f7feebccce47462992f648f7ec37b04c", "model_name": "BAAI/bge-large-en-v1.5", "prompt": "What's the purpose of `if __name__ == '__main__'`?", "output": [["What's the purpose of `if __name__ == '__main__'`?", "Stackoverflow Stackexchange\n\nQ: Why does it do this ? if - __name__ == '__main__' \nDuplicate of:\nWhat does if __name__== \"__main__\" do? \n\nConsider this code:\nif __name__ == '__main__':\n import pdb\n pdb.run(\"interact()\\n\")\n\nWhat does the following line mean?\nif(__name__=='__main__')\n\nI fainted.\n\nA: That is a check to see if you are directly running the script or if it is included in a library.\nWhen you run a python script like this:\npython myScript.py\n\nIt sends a parameter, telling you to run the programs first method, which is widely called \"main\", so when __name__ is __main__ you know that the program was executed from a command line or double clicked.\n"]], "corpus": "stackexchange"}
data/sts_individual-2cea24b1-39e9-480a-ba22-c617ea05c1fe.jsonl CHANGED
@@ -19,3 +19,5 @@
19
  {"tstamp": 1724156032.4261, "task_type": "sts", "type": "chat", "model": "text-embedding-004", "gen_params": {}, "start": 1724156032.2275, "finish": 1724156032.4261, "ip": "", "conv_id": "ab2609d1a29942afbaee22ce5f2b19cf", "model_name": "text-embedding-004", "txt0": "US", "txt1": "United States", "txt2": "United State of America", "output": ""}
20
  {"tstamp": 1724156160.08, "task_type": "sts", "type": "chat", "model": "embed-english-v3.0", "gen_params": {}, "start": 1724156159.8003, "finish": 1724156160.08, "ip": "", "conv_id": "79aaafd4fe804451bc11f30a3837430d", "model_name": "embed-english-v3.0", "txt0": "US", "txt1": "United States", "txt2": "The United States of America", "output": ""}
21
  {"tstamp": 1724156160.08, "task_type": "sts", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1724156159.8003, "finish": 1724156160.08, "ip": "", "conv_id": "fcfa01ecc6e5441387aa409ccb070097", "model_name": "nomic-ai/nomic-embed-text-v1.5", "txt0": "US", "txt1": "United States", "txt2": "The United States of America", "output": ""}
22
+ {"tstamp": 1724157849.0656, "task_type": "sts", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1724157849.022, "finish": 1724157849.0656, "ip": "", "conv_id": "d86c3cfa6e9b4355a2a8418b8f0ad5f3", "model_name": "GritLM/GritLM-7B", "txt0": "People are shopping.", "txt1": "Numerous customers browsing for produce in a market", "txt2": "People are showering.", "output": ""}
23
+ {"tstamp": 1724157849.0656, "task_type": "sts", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1724157849.022, "finish": 1724157849.0656, "ip": "", "conv_id": "fa8ee13f697f42d89909fdc8fdc82743", "model_name": "nomic-ai/nomic-embed-text-v1.5", "txt0": "People are shopping.", "txt1": "Numerous customers browsing for produce in a market", "txt2": "People are showering.", "output": ""}