Scheduled Commit
data/clustering_individual-928d45d9-19ab-4d05-9aae-4b6574886d0d.jsonl
CHANGED
@@ -12,3 +12,5 @@
{"tstamp": 1722544921.2496, "task_type": "clustering", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1722544921.1715, "finish": 1722544921.2496, "ip": "", "conv_id": "a4bc72a1095345c9b524c8072c2524e3", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": ["crop", "livestock", "vegetable", "dairy", "rye", "sourdough", "pumpernickel", "coastal", "cold", "semi-arid", "Oracle Cloud", "Azure", "AWS", "rock", "reggae", "electronic", "country", "hip-hop"], "ncluster": 5, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722545046.079, "task_type": "clustering", "type": "chat", "model": "mixedbread-ai/mxbai-embed-large-v1", "gen_params": {}, "start": 1722545045.9992, "finish": 1722545046.079, "ip": "", "conv_id": "56a500ec287645638bdeb79d91a19c56", "model_name": "mixedbread-ai/mxbai-embed-large-v1", "prompt": ["bifocal", "convex", "toric", "prismatic", "star", "planet", "comet", "asteroid", "nebula", "galaxy", "republic", "theocracy", "monarchy", "dictatorship", "yellow", "green", "blue", "orange"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
{"tstamp": 1722545046.079, "task_type": "clustering", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1722545045.9992, "finish": 1722545046.079, "ip": "", "conv_id": "a7604d2a2df84e6d82bb2aef31fb739e", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": ["bifocal", "convex", "toric", "prismatic", "star", "planet", "comet", "asteroid", "nebula", "galaxy", "republic", "theocracy", "monarchy", "dictatorship", "yellow", "green", "blue", "orange"], "ncluster": 4, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+
{"tstamp": 1722545572.1229, "task_type": "clustering", "type": "chat", "model": "text-embedding-004", "gen_params": {}, "start": 1722545571.8556, "finish": 1722545572.1229, "ip": "", "conv_id": "25106531000143478edcb840cdc095de", "model_name": "text-embedding-004", "prompt": ["democracy", "dictatorship", "monarchy", "republic", "theocracy", "oligarchy", "pencil", "marker", "crayon"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
+
{"tstamp": 1722545572.1229, "task_type": "clustering", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1722545571.8556, "finish": 1722545572.1229, "ip": "", "conv_id": "91d21cc6d69341cca43d002613028371", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": ["democracy", "dictatorship", "monarchy", "republic", "theocracy", "oligarchy", "pencil", "marker", "crayon"], "ncluster": 2, "output": "", "ndim": "3D (press for 2D)", "dim_method": "PCA", "clustering_method": "KMeans"}
data/retrieval_battle-928d45d9-19ab-4d05-9aae-4b6574886d0d.jsonl
CHANGED
@@ -18,3 +18,7 @@
{"tstamp": 1722545337.2965, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "47edbe2046874c01b87fa9d07ba0eab9", "0_model_name": "Salesforce/SFR-Embedding-2_R", "0_prompt": "Which test was devised to determine whether robots can think?", "0_output": [["Which test was devised to determine whether robots can think?", "Turing test\n\nThe Turing test, originally called the imitation game by Alan Turing in 1950, is a test of a machine's ability to exhibit intelligent behaviour equivalent to, or indistinguishable from, that of a human. Turing proposed that a human evaluator would judge natural language conversations between a human and a machine designed to generate human-like responses. The evaluator would be aware that one of the two partners in conversation was a machine, and all participants would be separated from one another. The conversation would be limited to a text-only channel, such as a computer keyboard and screen, so the result would not depend on the machine's ability to render words as speech. If the evaluator could not reliably tell the machine from the human, the machine would be said to have passed the test. The test results would not depend on the machine's ability to give correct answers to questions, only on how closely its answers resembled those a human would give. Since the Turing test is a test of indistinguishability in performance capacity, the verbal version generalizes naturally to all of human performance capacity, verbal as well as nonverbal (robotic)."]], "0_corpus": "wikipedia", "1_conv_id": "194fc9b4aa6e48f89658f98780eb2551", "1_model_name": "BM25", "1_prompt": "Which test was devised to determine whether robots can think?", "1_output": [["Which test was devised to determine whether robots can think?", "Artificial intelligence\n\nIn the late teens and early 2020s, AGI companies began to deliver programs that created enormous interest. In 2015, AlphaGo, developed by DeepMind, beat the world champion Go player. The program was taught only the rules of the game and developed strategy by itself. GPT-3 is a large language model that was released in 2020 by OpenAI and is capable of generating high-quality human-like text. These programs, and others, inspired an aggressive AI boom, where large companies began investing billions in AI research. According to AI Impacts, about $50 billion annually was invested in \"AI\" around 2022 in the U.S. alone and about 20% of the new U.S. Computer Science PhD graduates have specialized in \"AI\".\nAbout 800,000 \"AI\"-related U.S. job openings existed in 2022.\nPhilosophy\nDefining artificial intelligence\nAlan Turing wrote in 1950 \"I propose to consider the question 'can machines think'?\" He advised changing the question from whether a machine \"thinks\", to \"whether or not it is possible for machinery to show intelligent behaviour\". He devised the Turing test, which measures the ability of a machine to simulate human conversation. Since we can only observe the behavior of the machine, it does not matter if it is \"actually\" thinking or literally has a \"mind\". Turing notes that we can not determine these things about other people but \"it is usual to have a polite convention that everyone thinks.\""]], "1_corpus": "wikipedia"}
{"tstamp": 1722545338.45, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "621c1326eeab4875aa238e67dfb13464", "0_model_name": "intfloat/e5-mistral-7b-instruct", "0_prompt": "when does it's a wonderful life comes on", "0_output": [["when does it's a wonderful life comes on", "It's a Wonderful Life\n\nOn May 26, 1947, the Federal Bureau of Investigation issued a memo stating, \"With regard to the picture It's a Wonderful Life, [redacted] stated in substance that the film represented rather obvious attempts to discredit bankers by casting Lionel Barrymore as a 'scrooge-type' so that he would be the most hated man in the picture. This, according to these sources, is a common trick used by Communists. [In] addition, [redacted] stated that, in his opinion, this picture deliberately maligned the upper class, attempting to show the people who had money were mean and despicable characters.\" Film historian Andrew Sarris observed as \"curious\" that \"the censors never noticed that the villainous Mr. Potter gets away with robbery without being caught or punished in any way\".\nIn 1990, It's a Wonderful Life was deemed \"culturally, historically, or aesthetically significant\" by the United States Library of Congress and selected for preservation in their National Film Registry.\nIn 2002, Channel 4 in the United Kingdom ranked It's a Wonderful Life as the seventh-greatest film ever made in its poll \"The 100 Greatest Films\". The channel airs the film to British viewers annually on Christmas Eve.\nIn June 2008, AFI revealed its 10 Top 10, the best 10 films in 10 \"classic\" American film genres, after polling over 1,500 people from the creative community. It's a Wonderful Life was acknowledged as the third-best film in the fantasy genre."]], "0_corpus": "wikipedia", "1_conv_id": "89ef42365c4142d08da2405ab7e90d39", "1_model_name": "nomic-ai/nomic-embed-text-v1.5", "1_prompt": "when does it's a wonderful life comes on", "1_output": [["when does it's a wonderful life comes on", "It's a Wonderful Life\n\nAnother musical stage adaptation of the film, titled It's a Wonderful Life – The Musical, was written by Bruce Greer and Keith Ferguson. This version premiered at the Majestic Theatre, Dallas, Texas, in 1998. It was an annual Christmas show at the theater for five years. It has since been performed at venues all around the United States.\nIn July 2019, it was revealed that another stage musical adaptation was in development with music and lyrics by Paul McCartney (making his musical theatre debut), lyrics and book by Lee Hall and produced by Bill Kenwright. The musical was set to debut late 2020, but is still yet to be announced.\nA 1986 skit on Saturday Night Live features William Shatner introducing the \"lost ending\" of It's a Wonderful Life in which George Bailey (Dana Carvey) and the citizens of Bedford Falls discover that Mr. Potter (Jon Lovitz) has stolen George's money and take turns beating him up in his office.\nIn 1992, the final episode of Tiny Toon Adventures parodied It's A Wonderful Life entitled \"It's A Wonderful Tiny Toon Christmas\". In it, Buster Bunny feels sad after the failure of his play and wishes he had never become a Tiny Toon, so a guardian angel shows Buster what life would have been like without him."]], "1_corpus": "wikipedia"}
{"tstamp": 1722545359.7608, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "a2448e21fa044f958450cb670f28eaa4", "0_model_name": "BM25", "0_prompt": "Paper proposing end-to-end dialogue state tracking using BERT for scalable and efficient slot value identification.", "0_output": [["Paper proposing end-to-end dialogue state tracking using BERT for scalable and efficient slot value identification.", "Title: BERT-DST: Scalable End-to-End Dialogue State Tracking with Bidirectional Encoder Representations from Transformer\n\nAbstract: An important yet rarely tackled problem in dialogue state tracking (DST) is scalability for dynamic ontology (e.g., movie, restaurant) and unseen slot values. We focus on a specific condition, where the ontology is unknown to the state tracker, but the target slot value (except for none and dontcare), possibly unseen during training, can be found as word segment in the dialogue context. Prior approaches often rely on candidate generation from n-gram enumeration or slot tagger outputs, which can be inefficient or suffer from error propagation. We propose BERT-DST, an end-to-end dialogue state tracker which directly extracts slot values from the dialogue context. We use BERT as dialogue context encoder whose contextualized language representations are suitable for scalable DST to identify slot values from their semantic context. Furthermore, we employ encoder parameter sharing across all slots with two advantages: (1) Number of parameters does not grow linearly with the ontology. (2) Language representation knowledge can be transferred among slots. Empirical evaluation shows BERT-DST with cross-slot parameter sharing outperforms prior work on the benchmark scalable DST datasets Sim-M and Sim-R, and achieves competitive performance on the standard DSTC2 and WOZ 2.0 datasets."]], "0_corpus": "arxiv", "1_conv_id": "73631749430c455091032c3a73321718", "1_model_name": "GritLM/GritLM-7B", "1_prompt": "Paper proposing end-to-end dialogue state tracking using BERT for scalable and efficient slot value identification.", "1_output": [["Paper proposing end-to-end dialogue state tracking using BERT for scalable and efficient slot value identification.", "Title: BERT-DST: Scalable End-to-End Dialogue State Tracking with Bidirectional Encoder Representations from Transformer\n\nAbstract: An important yet rarely tackled problem in dialogue state tracking (DST) is scalability for dynamic ontology (e.g., movie, restaurant) and unseen slot values. We focus on a specific condition, where the ontology is unknown to the state tracker, but the target slot value (except for none and dontcare), possibly unseen during training, can be found as word segment in the dialogue context. Prior approaches often rely on candidate generation from n-gram enumeration or slot tagger outputs, which can be inefficient or suffer from error propagation. We propose BERT-DST, an end-to-end dialogue state tracker which directly extracts slot values from the dialogue context. We use BERT as dialogue context encoder whose contextualized language representations are suitable for scalable DST to identify slot values from their semantic context. Furthermore, we employ encoder parameter sharing across all slots with two advantages: (1) Number of parameters does not grow linearly with the ontology. (2) Language representation knowledge can be transferred among slots. 
Empirical evaluation shows BERT-DST with cross-slot parameter sharing outperforms prior work on the benchmark scalable DST datasets Sim-M and Sim-R, and achieves competitive performance on the standard DSTC2 and WOZ 2.0 datasets."]], "1_corpus": "arxiv"}
+
{"tstamp": 1722545468.0056, "task_type": "retrieval", "type": "rightvote", "models": ["", ""], "ip": "", "0_conv_id": "80a257794852495a9df012347dcf555c", "0_model_name": "sentence-transformers/all-MiniLM-L6-v2", "0_prompt": "Paper analyzing modeling decisions for large language models with a limited GPU budget", "0_output": [["Paper analyzing modeling decisions for large language models with a limited GPU budget", "Title: Slow Manifolds for Infinite-Dimensional Evolution Equations\n\nAbstract: We extend classical finite-dimensional Fenichel theory in two directions to infinite dimensions. Under comparably weak assumptions we show that the solution of an infinite-dimensional fast-slow system is approximated well by the corresponding slow flow. After that we construct a two-parameter family of slow manifolds $S_{\\epsilon,\\zeta}$ under more restrictive assumptions on the linear part of the slow equation. The second parameter $\\zeta$ does not appear in the finite-dimensional setting and describes a certain splitting of the slow variable space in a fast decaying part and its complement. The finite-dimensional setting is contained as a special case in which $S_{\\epsilon,\\zeta}$ does not depend on $\\zeta$. Finally, we apply our new techniques to three examples of fast-slow systems of partial differential equations."]], "0_corpus": "arxiv", "1_conv_id": "15b5b5b444ae49a293873393cde587ec", "1_model_name": "GritLM/GritLM-7B", "1_prompt": "Paper analyzing modeling decisions for large language models with a limited GPU budget", "1_output": [["Paper analyzing modeling decisions for large language models with a limited GPU budget", "Title: What Language Model to Train if You Have One Million GPU Hours?\n\nAbstract: The crystallization of modeling methods around the Transformer architecture has been a boon for practitioners. Simple, well-motivated architectural variations can transfer across tasks and scale, increasing the impact of modeling research. However, with the emergence of state-of-the-art 100B+ parameters models, large language models are increasingly expensive to accurately design and train. Notably, it can be difficult to evaluate how modeling decisions may impact emergent capabilities, given that these capabilities arise mainly from sheer scale alone. In the process of building BLOOM--the Big Science Large Open-science Open-access Multilingual language model--our goal is to identify an architecture and training setup that makes the best use of our 1,000,000 A100-GPU-hours budget. Specifically, we perform an ablation study at the billion-parameter scale comparing different modeling practices and their impact on zero-shot generalization. In addition, we study the impact of various popular pre-training corpora on zero-shot generalization. We also study the performance of a multilingual model and how it compares to the English-only one. Finally, we consider the scaling behaviour of Transformers to choose the target model size, shape, and training setup. All our models and code are open-sourced at https://huggingface.co/bigscience ."]], "1_corpus": "arxiv"}
+
{"tstamp": 1722545516.4462, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "de9e910ccf8d428c9a61b62a0d4c3e55", "0_model_name": "nomic-ai/nomic-embed-text-v1.5", "0_prompt": "do uk xbox games work in the us?", "0_output": [["do uk xbox games work in the us?", "Gaming Stackexchange\n\nQ: Are Xbox One games region locked? If I buy an Xbox One from the United States, then go to Thailand will games that I buy in Thailand work for my US Console?\n\nA: Yes! I have been doing research on this (regioned games) myself for a while as I am from the US but I live part time in Germany. Here are my findings:\n\n\n*\n\n*XBox 360 games are regioned (NTSC, etc.) so games don't work everywhere\n\n*XBox One games are no longer regioned so games will work everywhere\n\n*However Blu-ray DVDs are still regioned, so if you try to watch Blu-ray movies from Thailand on your XBox it will not work\n\n"]], "0_corpus": "stackexchange", "1_conv_id": "01e3e11a5bf84293888e307edc3d18cf", "1_model_name": "BAAI/bge-large-en-v1.5", "1_prompt": "do uk xbox games work in the us?", "1_output": [["do uk xbox games work in the us?", "Gaming Stackexchange\n\nQ: Are Xbox One games region locked? If I buy an Xbox One from the United States, then go to Thailand will games that I buy in Thailand work for my US Console?\n\nA: Yes! I have been doing research on this (regioned games) myself for a while as I am from the US but I live part time in Germany. Here are my findings:\n\n\n*\n\n*XBox 360 games are regioned (NTSC, etc.) so games don't work everywhere\n\n*XBox One games are no longer regioned so games will work everywhere\n\n*However Blu-ray DVDs are still regioned, so if you try to watch Blu-ray movies from Thailand on your XBox it will not work\n\n"]], "1_corpus": "stackexchange"}
+
{"tstamp": 1722545539.6905, "task_type": "retrieval", "type": "leftvote", "models": ["", ""], "ip": "", "0_conv_id": "31872e7565fd4e3e9f01cc7c4eaac056", "0_model_name": "jinaai/jina-embeddings-v2-base-en", "0_prompt": "Ordering tasks for best efficiency", "0_output": [["Ordering tasks for best efficiency", "Task management\n\nTask management is the process of overseeing a task through its lifecycle. It involves planning, testing, tracking, and reporting. Task management can help individuals achieve goals or enable groups of individuals to collaborate and share knowledge for the accomplishment of collective goals. Tasks are also differentiated by complexity, from low to high.\nEffective task management requires overseeing all aspects of a task, including its status, priority, time, human and financial resource assignments, recurrence, dependencies, notifications, etc. These can be lumped together broadly as the fundamental activities of task management.\nManaging tasks for multiple individuals or teams can be facilitated by specialized software, such as workflow or project-management software. This type of software is sometimes referred to as a productivity system."]], "0_corpus": "wikipedia", "1_conv_id": "7d669d6cd731433294c7370c1fe3f5f9", "1_model_name": "voyage-multilingual-2", "1_prompt": "Ordering tasks for best efficiency", "1_output": [["Ordering tasks for best efficiency", "Load balancing (computing)\n\nFor this reason, there are several techniques to get an idea of the different execution times. First of all, in the fortunate scenario of having tasks of relatively homogeneous size, it is possible to consider that each of them will require approximately the average execution time. If, on the other hand, the execution time is very irregular, more sophisticated techniques must be used. One technique is to add some metadata to each task. Depending on the previous execution time for similar metadata, it is possible to make inferences for a future task based on statistics.\nDependencies\nIn some cases, tasks depend on each other. These interdependencies can be illustrated by a directed acyclic graph. Intuitively, some tasks cannot begin until others are completed.\nAssuming that the required time for each of the tasks is known in advance, an optimal execution order must lead to the minimization of the total execution time. Although this is an NP-hard problem and therefore can be difficult to be solved exactly. There are algorithms, like job scheduler, that calculate optimal task distributions using metaheuristic methods.\nSegregation of tasks\nAnother feature of the tasks critical for the design of a load balancing algorithm is their ability to be broken down into subtasks during execution. The \"Tree-Shaped Computation\" algorithm presented later takes great advantage of this specificity.\nStatic and dynamic algorithms"]], "1_corpus": "wikipedia"}
+
{"tstamp": 1722545558.2316, "task_type": "retrieval", "type": "tievote", "models": ["", ""], "ip": "", "0_conv_id": "52df97545305452a871e07dbd84e9625", "0_model_name": "BM25", "0_prompt": "who sings god gave rock and roll to you", "0_output": [["who sings god gave rock and roll to you", "God Gave Rock and Roll to You\n\n\"God Gave Rock and Roll to You\" is a 1973 song by the British band Argent and covered by Kiss as \"God Gave Rock 'N' Roll to You II\" in 1991, with modified lyrics."]], "0_corpus": "wikipedia", "1_conv_id": "de60f3ee6ba040ed9f658133177ed5c2", "1_model_name": "GritLM/GritLM-7B", "1_prompt": "who sings god gave rock and roll to you", "1_output": [["who sings god gave rock and roll to you", "God Gave Rock and Roll to You\n\n\"God Gave Rock and Roll to You\" is a 1973 song by the British band Argent and covered by Kiss as \"God Gave Rock 'N' Roll to You II\" in 1991, with modified lyrics."]], "1_corpus": "wikipedia"}
data/retrieval_individual-928d45d9-19ab-4d05-9aae-4b6574886d0d.jsonl
CHANGED
@@ -67,3 +67,11 @@
{"tstamp": 1722545349.0792, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1722545348.5222, "finish": 1722545349.0792, "ip": "", "conv_id": "73631749430c455091032c3a73321718", "model_name": "GritLM/GritLM-7B", "prompt": "Paper proposing end-to-end dialogue state tracking using BERT for scalable and efficient slot value identification.", "output": [["Paper proposing end-to-end dialogue state tracking using BERT for scalable and efficient slot value identification.", "Title: BERT-DST: Scalable End-to-End Dialogue State Tracking with Bidirectional Encoder Representations from Transformer\n\nAbstract: An important yet rarely tackled problem in dialogue state tracking (DST) is scalability for dynamic ontology (e.g., movie, restaurant) and unseen slot values. We focus on a specific condition, where the ontology is unknown to the state tracker, but the target slot value (except for none and dontcare), possibly unseen during training, can be found as word segment in the dialogue context. Prior approaches often rely on candidate generation from n-gram enumeration or slot tagger outputs, which can be inefficient or suffer from error propagation. We propose BERT-DST, an end-to-end dialogue state tracker which directly extracts slot values from the dialogue context. We use BERT as dialogue context encoder whose contextualized language representations are suitable for scalable DST to identify slot values from their semantic context. Furthermore, we employ encoder parameter sharing across all slots with two advantages: (1) Number of parameters does not grow linearly with the ontology. (2) Language representation knowledge can be transferred among slots. Empirical evaluation shows BERT-DST with cross-slot parameter sharing outperforms prior work on the benchmark scalable DST datasets Sim-M and Sim-R, and achieves competitive performance on the standard DSTC2 and WOZ 2.0 datasets."]], "corpus": "arxiv"}
{"tstamp": 1722545369.2006, "task_type": "retrieval", "type": "chat", "model": "sentence-transformers/all-MiniLM-L6-v2", "gen_params": {}, "start": 1722545369.0737, "finish": 1722545369.2006, "ip": "", "conv_id": "80a257794852495a9df012347dcf555c", "model_name": "sentence-transformers/all-MiniLM-L6-v2", "prompt": "Paper analyzing modeling decisions for large language models with a limited GPU budget", "output": [["Paper analyzing modeling decisions for large language models with a limited GPU budget", "Title: Slow Manifolds for Infinite-Dimensional Evolution Equations\n\nAbstract: We extend classical finite-dimensional Fenichel theory in two directions to infinite dimensions. Under comparably weak assumptions we show that the solution of an infinite-dimensional fast-slow system is approximated well by the corresponding slow flow. After that we construct a two-parameter family of slow manifolds $S_{\\epsilon,\\zeta}$ under more restrictive assumptions on the linear part of the slow equation. The second parameter $\\zeta$ does not appear in the finite-dimensional setting and describes a certain splitting of the slow variable space in a fast decaying part and its complement. The finite-dimensional setting is contained as a special case in which $S_{\\epsilon,\\zeta}$ does not depend on $\\zeta$. Finally, we apply our new techniques to three examples of fast-slow systems of partial differential equations."]], "corpus": "arxiv"}
{"tstamp": 1722545369.2006, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1722545369.0737, "finish": 1722545369.2006, "ip": "", "conv_id": "15b5b5b444ae49a293873393cde587ec", "model_name": "GritLM/GritLM-7B", "prompt": "Paper analyzing modeling decisions for large language models with a limited GPU budget", "output": [["Paper analyzing modeling decisions for large language models with a limited GPU budget", "Title: What Language Model to Train if You Have One Million GPU Hours?\n\nAbstract: The crystallization of modeling methods around the Transformer architecture has been a boon for practitioners. Simple, well-motivated architectural variations can transfer across tasks and scale, increasing the impact of modeling research. However, with the emergence of state-of-the-art 100B+ parameters models, large language models are increasingly expensive to accurately design and train. Notably, it can be difficult to evaluate how modeling decisions may impact emergent capabilities, given that these capabilities arise mainly from sheer scale alone. In the process of building BLOOM--the Big Science Large Open-science Open-access Multilingual language model--our goal is to identify an architecture and training setup that makes the best use of our 1,000,000 A100-GPU-hours budget. Specifically, we perform an ablation study at the billion-parameter scale comparing different modeling practices and their impact on zero-shot generalization. In addition, we study the impact of various popular pre-training corpora on zero-shot generalization. We also study the performance of a multilingual model and how it compares to the English-only one. Finally, we consider the scaling behaviour of Transformers to choose the target model size, shape, and training setup. All our models and code are open-sourced at https://huggingface.co/bigscience ."]], "corpus": "arxiv"}
+
{"tstamp": 1722545508.7893, "task_type": "retrieval", "type": "chat", "model": "nomic-ai/nomic-embed-text-v1.5", "gen_params": {}, "start": 1722545508.5837, "finish": 1722545508.7893, "ip": "", "conv_id": "de9e910ccf8d428c9a61b62a0d4c3e55", "model_name": "nomic-ai/nomic-embed-text-v1.5", "prompt": "do uk xbox games work in the us?", "output": [["do uk xbox games work in the us?", "Gaming Stackexchange\n\nQ: Are Xbox One games region locked? If I buy an Xbox One from the United States, then go to Thailand will games that I buy in Thailand work for my US Console?\n\nA: Yes! I have been doing research on this (regioned games) myself for a while as I am from the US but I live part time in Germany. Here are my findings:\n\n\n*\n\n*XBox 360 games are regioned (NTSC, etc.) so games don't work everywhere\n\n*XBox One games are no longer regioned so games will work everywhere\n\n*However Blu-ray DVDs are still regioned, so if you try to watch Blu-ray movies from Thailand on your XBox it will not work\n\n"]], "corpus": "stackexchange"}
+
{"tstamp": 1722545508.7893, "task_type": "retrieval", "type": "chat", "model": "BAAI/bge-large-en-v1.5", "gen_params": {}, "start": 1722545508.5837, "finish": 1722545508.7893, "ip": "", "conv_id": "01e3e11a5bf84293888e307edc3d18cf", "model_name": "BAAI/bge-large-en-v1.5", "prompt": "do uk xbox games work in the us?", "output": [["do uk xbox games work in the us?", "Gaming Stackexchange\n\nQ: Are Xbox One games region locked? If I buy an Xbox One from the United States, then go to Thailand will games that I buy in Thailand work for my US Console?\n\nA: Yes! I have been doing research on this (regioned games) myself for a while as I am from the US but I live part time in Germany. Here are my findings:\n\n\n*\n\n*XBox 360 games are regioned (NTSC, etc.) so games don't work everywhere\n\n*XBox One games are no longer regioned so games will work everywhere\n\n*However Blu-ray DVDs are still regioned, so if you try to watch Blu-ray movies from Thailand on your XBox it will not work\n\n"]], "corpus": "stackexchange"}
+
{"tstamp": 1722545522.0382, "task_type": "retrieval", "type": "chat", "model": "jinaai/jina-embeddings-v2-base-en", "gen_params": {}, "start": 1722545521.5577, "finish": 1722545522.0382, "ip": "", "conv_id": "31872e7565fd4e3e9f01cc7c4eaac056", "model_name": "jinaai/jina-embeddings-v2-base-en", "prompt": "Ordering tasks for best efficiency", "output": [["Ordering tasks for best efficiency", "Task management\n\nTask management is the process of overseeing a task through its lifecycle. It involves planning, testing, tracking, and reporting. Task management can help individuals achieve goals or enable groups of individuals to collaborate and share knowledge for the accomplishment of collective goals. Tasks are also differentiated by complexity, from low to high.\nEffective task management requires overseeing all aspects of a task, including its status, priority, time, human and financial resource assignments, recurrence, dependencies, notifications, etc. These can be lumped together broadly as the fundamental activities of task management.\nManaging tasks for multiple individuals or teams can be facilitated by specialized software, such as workflow or project-management software. This type of software is sometimes referred to as a productivity system."]], "corpus": "wikipedia"}
+
{"tstamp": 1722545522.0382, "task_type": "retrieval", "type": "chat", "model": "voyage-multilingual-2", "gen_params": {}, "start": 1722545521.5577, "finish": 1722545522.0382, "ip": "", "conv_id": "7d669d6cd731433294c7370c1fe3f5f9", "model_name": "voyage-multilingual-2", "prompt": "Ordering tasks for best efficiency", "output": [["Ordering tasks for best efficiency", "Load balancing (computing)\n\nFor this reason, there are several techniques to get an idea of the different execution times. First of all, in the fortunate scenario of having tasks of relatively homogeneous size, it is possible to consider that each of them will require approximately the average execution time. If, on the other hand, the execution time is very irregular, more sophisticated techniques must be used. One technique is to add some metadata to each task. Depending on the previous execution time for similar metadata, it is possible to make inferences for a future task based on statistics.\nDependencies\nIn some cases, tasks depend on each other. These interdependencies can be illustrated by a directed acyclic graph. Intuitively, some tasks cannot begin until others are completed.\nAssuming that the required time for each of the tasks is known in advance, an optimal execution order must lead to the minimization of the total execution time. Although this is an NP-hard problem and therefore can be difficult to be solved exactly. There are algorithms, like job scheduler, that calculate optimal task distributions using metaheuristic methods.\nSegregation of tasks\nAnother feature of the tasks critical for the design of a load balancing algorithm is their ability to be broken down into subtasks during execution. The \"Tree-Shaped Computation\" algorithm presented later takes great advantage of this specificity.\nStatic and dynamic algorithms"]], "corpus": "wikipedia"}
+
{"tstamp": 1722545553.8769, "task_type": "retrieval", "type": "chat", "model": "BM25", "gen_params": {}, "start": 1722545551.6163, "finish": 1722545553.8769, "ip": "", "conv_id": "52df97545305452a871e07dbd84e9625", "model_name": "BM25", "prompt": "who sings god gave rock and roll to you", "output": [["who sings god gave rock and roll to you", "God Gave Rock and Roll to You\n\n\"God Gave Rock and Roll to You\" is a 1973 song by the British band Argent and covered by Kiss as \"God Gave Rock 'N' Roll to You II\" in 1991, with modified lyrics."]], "corpus": "wikipedia"}
+
{"tstamp": 1722545553.8769, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1722545551.6163, "finish": 1722545553.8769, "ip": "", "conv_id": "de60f3ee6ba040ed9f658133177ed5c2", "model_name": "GritLM/GritLM-7B", "prompt": "who sings god gave rock and roll to you", "output": [["who sings god gave rock and roll to you", "God Gave Rock and Roll to You\n\n\"God Gave Rock and Roll to You\" is a 1973 song by the British band Argent and covered by Kiss as \"God Gave Rock 'N' Roll to You II\" in 1991, with modified lyrics."]], "corpus": "wikipedia"}
+
{"tstamp": 1722545573.247, "task_type": "retrieval", "type": "chat", "model": "intfloat/multilingual-e5-large-instruct", "gen_params": {}, "start": 1722545573.019, "finish": 1722545573.247, "ip": "", "conv_id": "52eacdc036d2433797dede2f649a7727", "model_name": "intfloat/multilingual-e5-large-instruct", "prompt": "how many skill points in dragon age origins?", "output": [["how many skill points in dragon age origins?", "Gaming Stackexchange\n\nQ: How many total skill points does a character get in Dead Island? Considering there is no respec mechanism in Dead Island, I'm trying to plan out my character build a little in advance. \nIn examining the skill tree I see there are: \n\n\n*\n\n*21 possible points in Fury\n\n*39 possible points in Combat\n\n*39 possible points in Survival\n\n\nI am assuming that all characters have the same number of points to put in their tree (I'm currently playing Xian). Judging from the achievements, the current level cap in-game is 60, but I don't know if there may be instances where you level up and get 2 upgrades available or something.\nSo how many total skill points can a level 60 character have? \n\nA: There's no other way of gaining skill points other than leveling up, which only grants one skill point.\nSo the amount of skill points you would have at level 60 would be 59.\n"]], "corpus": "stackexchange"}
+
{"tstamp": 1722545573.247, "task_type": "retrieval", "type": "chat", "model": "GritLM/GritLM-7B", "gen_params": {}, "start": 1722545573.019, "finish": 1722545573.247, "ip": "", "conv_id": "a66e74da1d2b452192df906da85ca38a", "model_name": "GritLM/GritLM-7B", "prompt": "how many skill points in dragon age origins?", "output": [["how many skill points in dragon age origins?", "Gaming Stackexchange\n\nQ: Do I earn enough skill points to unlock everything? \nPossible Duplicate:\nHow many skill points will I have at max level? \n\nI'd like to know if I should expect enough skill points to unlock most of the skills, or if I;m only going to get enough to specialise to the end of a few skill paths.\nSo, how many skill points are available in total at max level?\nAnd, how many would I need to unlock everything available to Commander Shepard?\n\nA: As Johnjon notes, there are 181 skillpoints in the game.\nMaxing a single skill requires 21 points.\nThere are enough points in the game to max out a total of 8 skills, with a few left over.\nShepard, conveniently enough, has 8 skills, plus one slot for a Bonus Power.\nSo, you can come very close, but you can't quite make it to maxing out all 9. You can max out 8 of them, and have 13 points remaining for the 9th, which lets you get to rank 4. You'll have 2 points left over. Alternately, you could max out 7 skills, get two to rank 5, and have 3 points left over.\n"]], "corpus": "stackexchange"}