Add 'mnli' config data files
README.md
CHANGED
@@ -48,6 +48,39 @@ dataset_info:
     num_examples: 1063
   download_size: 322394
   dataset_size: 605704
+- config_name: mnli
+  features:
+  - name: premise
+    dtype: string
+  - name: hypothesis
+    dtype: string
+  - name: label
+    dtype:
+      class_label:
+        names:
+          '0': entailment
+          '1': neutral
+          '2': contradiction
+  - name: idx
+    dtype: int32
+  splits:
+  - name: train
+    num_bytes: 74619646
+    num_examples: 392702
+  - name: validation_matched
+    num_bytes: 1833783
+    num_examples: 9815
+  - name: validation_mismatched
+    num_bytes: 1949231
+    num_examples: 9832
+  - name: test_matched
+    num_bytes: 1848654
+    num_examples: 9796
+  - name: test_mismatched
+    num_bytes: 1950703
+    num_examples: 9847
+  download_size: 56899587
+  dataset_size: 82202017
 - config_name: mrpc
   features:
   - name: sentence1
@@ -155,6 +188,18 @@ configs:
     path: cola/validation-*
   - split: test
     path: cola/test-*
+- config_name: mnli
+  data_files:
+  - split: train
+    path: mnli/train-*
+  - split: validation_matched
+    path: mnli/validation_matched-*
+  - split: validation_mismatched
+    path: mnli/validation_mismatched-*
+  - split: test_matched
+    path: mnli/test_matched-*
+  - split: test_mismatched
+    path: mnli/test_mismatched-*
 - config_name: mrpc
   data_files:
   - split: train
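The YAML added above is what lets the Hub resolve the new config without a loading script: the dataset_info block declares the mnli features and split sizes, and the configs block maps each split to the mnli/*.parquet shards. A minimal sketch of consuming it with the datasets library, assuming a placeholder repo id user/glue-ci (the actual repo id is not part of this diff):

from datasets import load_dataset

# "mnli" is the config name declared above; its five splits are resolved
# from the mnli/*.parquet paths listed under `configs`.
ds = load_dataset("user/glue-ci", "mnli")  # placeholder repo id

print(ds)                           # DatasetDict with train, validation_*, test_* splits
print(ds["train"].features)         # premise, hypothesis, label (ClassLabel), idx
print(ds["validation_matched"][0])  # a single example as a dict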
dataset_infos.json
CHANGED
@@ -290,40 +290,33 @@
     },
     "mnli": {
         "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
-        "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n
+        "citation": "@InProceedings{N18-1101,\n author = \"Williams, Adina\n and Nangia, Nikita\n and Bowman, Samuel\",\n title = \"A Broad-Coverage Challenge Corpus for\n Sentence Understanding through Inference\",\n booktitle = \"Proceedings of the 2018 Conference of\n the North American Chapter of the\n Association for Computational Linguistics:\n Human Language Technologies, Volume 1 (Long\n Papers)\",\n year = \"2018\",\n publisher = \"Association for Computational Linguistics\",\n pages = \"1112--1122\",\n location = \"New Orleans, Louisiana\",\n url = \"http://aclweb.org/anthology/N18-1101\"\n}\n@article{bowman2015large,\n title={A large annotated corpus for learning natural language inference},\n author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},\n journal={arXiv preprint arXiv:1508.05326},\n year={2015}\n}\n@inproceedings{wang2019glue,\n title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},\n author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},\n note={In the Proceedings of ICLR.},\n year={2019}\n}\n",
         "homepage": "http://www.nyu.edu/projects/bowman/multinli/",
         "license": "",
         "features": {
             "premise": {
                 "dtype": "string",
-                "id": null,
                 "_type": "Value"
             },
             "hypothesis": {
                 "dtype": "string",
-                "id": null,
                 "_type": "Value"
             },
             "label": {
-                "num_classes": 3,
                 "names": [
                     "entailment",
                     "neutral",
                     "contradiction"
                 ],
-                "names_file": null,
-                "id": null,
                 "_type": "ClassLabel"
             },
             "idx": {
                 "dtype": "int32",
-                "id": null,
                 "_type": "Value"
             }
         },
-        "
-        "
-        "builder_name": "glue",
+        "builder_name": "glue-ci",
+        "dataset_name": "glue-ci",
         "config_name": "mnli",
         "version": {
             "version_str": "1.0.0",
@@ -333,47 +326,40 @@
             "patch": 0
         },
         "splits": {
-            "test_matched": {
-                "name": "test_matched",
-                "num_bytes": 1854787,
-                "num_examples": 9796,
-                "dataset_name": "glue"
-            },
-            "test_mismatched": {
-                "name": "test_mismatched",
-                "num_bytes": 1956866,
-                "num_examples": 9847,
-                "dataset_name": "glue"
-            },
             "train": {
                 "name": "train",
-                "num_bytes":
+                "num_bytes": 74619646,
                 "num_examples": 392702,
-                "dataset_name":
+                "dataset_name": null
             },
             "validation_matched": {
                 "name": "validation_matched",
-                "num_bytes":
+                "num_bytes": 1833783,
                 "num_examples": 9815,
-                "dataset_name":
+                "dataset_name": null
             },
             "validation_mismatched": {
                 "name": "validation_mismatched",
-                "num_bytes":
+                "num_bytes": 1949231,
                 "num_examples": 9832,
-                "dataset_name":
-            }
-
-
-
-            "
-            "
+                "dataset_name": null
+            },
+            "test_matched": {
+                "name": "test_matched",
+                "num_bytes": 1848654,
+                "num_examples": 9796,
+                "dataset_name": null
+            },
+            "test_mismatched": {
+                "name": "test_mismatched",
+                "num_bytes": 1950703,
+                "num_examples": 9847,
+                "dataset_name": null
             }
         },
-        "download_size":
-        "
-        "
-        "size_in_bytes": 395255588
+        "download_size": 56899587,
+        "dataset_size": 82202017,
+        "size_in_bytes": 139101604
     },
     "mnli_mismatched": {
         "description": "GLUE, the General Language Understanding Evaluation benchmark\n(https://gluebenchmark.com/) is a collection of resources for training,\nevaluating, and analyzing natural language understanding systems.\n\n",
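Besides swapping in the Parquet-backed split metadata, the regenerated info drops the legacy null-valued fields (id, names_file, num_classes) while keeping the same label mapping. A small sketch reproducing that mapping with the datasets feature types, independent of any particular repo:

from datasets import ClassLabel

# Same 3-way mapping as the "label" feature recorded above.
mnli_label = ClassLabel(names=["entailment", "neutral", "contradiction"])

assert mnli_label.num_classes == 3
assert mnli_label.str2int("neutral") == 1        # '1': neutral in the README YAML
assert mnli_label.int2str(2) == "contradiction"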
mnli/test_matched-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:260fbb26acee4e781e8bbe41d41356b129d8a4fd3f485ac764c928725c49c454
+size 1212996
mnli/test_mismatched-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dfac0b6d17a6ac08d07a86a8ac8d95e92e5dbe28f4512801b685f543a6ec6e09
+size 1251702
mnli/train-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ad52fb2458a4e192c97bcab4b3574a8b7a0a59dc92d6d757ff64f7fd9fc88fbf
+size 51981599
mnli/validation_matched-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a665d08615288ea2e6df41320f6e7dd70083c42d13bc0597c97c818b5c9c1aa5
+size 1208425
mnli/validation_mismatched-00000-of-00001.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ae79dd1ea4a4eefa96309034a94071bdac58340fd98581fbb303afa3b157b87
+size 1244865
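The five files above are Git LFS pointer stubs (version, oid, size), not the Parquet data itself; the actual shards live in LFS storage. A hedged sketch of fetching one shard and checking it against the split metadata, again assuming the placeholder repo id user/glue-ci:

import pandas as pd
from huggingface_hub import hf_hub_download

# Download a single shard from the dataset repo (placeholder repo id).
path = hf_hub_download(
    repo_id="user/glue-ci",
    repo_type="dataset",
    filename="mnli/validation_matched-00000-of-00001.parquet",
)

df = pd.read_parquet(path)
print(len(df))           # expected to match num_examples: 9815 from the metadata
print(list(df.columns))  # premise, hypothesis, label, idx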